Index: src/java/org/apache/lucene/index/MultiReader.java =================================================================== --- src/java/org/apache/lucene/index/MultiReader.java (revision 673635) +++ src/java/org/apache/lucene/index/MultiReader.java (working copy) @@ -88,7 +88,7 @@ } /** - * Tries to reopen the subreaders. + * Tries to reopen or clone the subreaders. *
* If one or more subreaders could be re-opened (i. e. subReader.reopen() * returned a new instance != subReader), then a new MultiReader instance @@ -106,7 +106,7 @@ * @throws CorruptIndexException if the index is corrupt * @throws IOException if there is a low-level IO error */ - public IndexReader reopen() throws CorruptIndexException, IOException { + protected IndexReader doReopenOrClone(boolean doClone) throws CorruptIndexException, IOException { ensureOpen(); boolean reopened = false; @@ -116,7 +116,10 @@ boolean success = false; try { for (int i = 0; i < subReaders.length; i++) { - newSubReaders[i] = subReaders[i].reopen(); + if (doClone) + newSubReaders[i] = (IndexReader) subReaders[i].clone(); + else + newSubReaders[i] = subReaders[i].reopen(); // if at least one of the subreaders was updated we remember that // and return a new MultiReader if (newSubReaders[i] != subReaders[i]) { Index: src/java/org/apache/lucene/index/DirectoryIndexReader.java =================================================================== --- src/java/org/apache/lucene/index/DirectoryIndexReader.java (revision 673635) +++ src/java/org/apache/lucene/index/DirectoryIndexReader.java (working copy) @@ -112,23 +112,32 @@ return (DirectoryIndexReader) finder.doBody(commit.getSegmentsFileName()); } } - - public final synchronized IndexReader reopen() throws CorruptIndexException, IOException { + + protected final synchronized IndexReader doReopenOrClone(final boolean doClone) throws CorruptIndexException, IOException { ensureOpen(); - if (this.hasChanges || this.isCurrent()) { - // this has changes, therefore we have the lock and don't need to reopen - // OR: the index in the directory hasn't changed - nothing to do here - return this; + if (doClone) { + if (hasChanges) + throw new IllegalStateException("cannot clone() a reader with uncommitted changes"); + } else { + if (hasChanges || isCurrent()) + // If we have changes we have the write lock and we + // are already "current", or the 
index in the + // directory hasn't changed - nothing to do here + return this; } return (DirectoryIndexReader) new SegmentInfos.FindSegmentsFile(directory) { protected Object doBody(String segmentFileName) throws CorruptIndexException, IOException { - SegmentInfos infos = new SegmentInfos(); - infos.read(directory, segmentFileName); - - DirectoryIndexReader newReader = doReopen(infos); + SegmentInfos infos = null; + if (doClone) { + infos = (SegmentInfos) segmentInfos.clone(); + } else { + infos = new SegmentInfos(); + infos.read(directory, segmentFileName); + } + DirectoryIndexReader newReader = doReopenOrClone(infos, doClone); if (DirectoryIndexReader.this != newReader) { newReader.init(directory, infos, closeDirectory); @@ -143,7 +152,7 @@ /** * Re-opens the index using the passed-in SegmentInfos */ - protected abstract DirectoryIndexReader doReopen(SegmentInfos infos) throws CorruptIndexException, IOException; + protected abstract DirectoryIndexReader doReopenOrClone(SegmentInfos infos, boolean doClone) throws CorruptIndexException, IOException; public void setDeletionPolicy(IndexDeletionPolicy deletionPolicy) { this.deletionPolicy = deletionPolicy; Index: src/java/org/apache/lucene/index/FieldsReader.java =================================================================== --- src/java/org/apache/lucene/index/FieldsReader.java (revision 673635) +++ src/java/org/apache/lucene/index/FieldsReader.java (working copy) @@ -130,6 +130,34 @@ } /** + * For use by clone + * @param fieldInfos + * @param cloneableFieldsStream + * @param fieldsStream + * @param indexStream + * @param numTotalDocs + * @param size + * @param format + * @param formatSize + */ + private FieldsReader(FieldInfos fieldInfos, IndexInput cloneableFieldsStream, IndexInput fieldsStream, + IndexInput indexStream, int numTotalDocs, int size, int format, int formatSize) { + this.fieldInfos = fieldInfos; + this.cloneableFieldsStream = cloneableFieldsStream; + this.fieldsStream = fieldsStream; + 
this.indexStream = indexStream; + this.numTotalDocs = numTotalDocs; + this.size = size; + this.format = format; + this.formatSize = formatSize; + } + + public Object clone() { + return new FieldsReader(fieldInfos, (IndexInput)cloneableFieldsStream.clone(), (IndexInput)cloneableFieldsStream.clone(), + (IndexInput)indexStream.clone(), numTotalDocs, size, format, formatSize); + } + + /** * @throws AlreadyClosedException if this FieldsReader is closed */ protected final void ensureOpen() throws AlreadyClosedException { Index: src/java/org/apache/lucene/index/FilterIndexReader.java =================================================================== --- src/java/org/apache/lucene/index/FilterIndexReader.java (revision 673635) +++ src/java/org/apache/lucene/index/FilterIndexReader.java (working copy) @@ -104,6 +104,10 @@ this.in = in; } + public Object clone() { + return new FilterIndexReader(in); + } + public Directory directory() { return in.directory(); } Index: src/java/org/apache/lucene/index/ParallelReader.java =================================================================== --- src/java/org/apache/lucene/index/ParallelReader.java (revision 673635) +++ src/java/org/apache/lucene/index/ParallelReader.java (working copy) @@ -142,7 +142,7 @@ * @throws CorruptIndexException if the index is corrupt * @throws IOException if there is a low-level IO error */ - public IndexReader reopen() throws CorruptIndexException, IOException { + protected IndexReader doReopenOrClone(boolean doClone) throws CorruptIndexException, IOException { ensureOpen(); boolean reopened = false; @@ -155,7 +155,11 @@ for (int i = 0; i < readers.size(); i++) { IndexReader oldReader = (IndexReader) readers.get(i); - IndexReader newReader = oldReader.reopen(); + final IndexReader newReader; + if (doClone) + newReader = (IndexReader) oldReader.clone(); + else + newReader = oldReader.reopen(); newReaders.add(newReader); // if at least one of the subreaders was updated we remember that // and return a 
new MultiReader Index: src/java/org/apache/lucene/index/SegmentReader.java =================================================================== --- src/java/org/apache/lucene/index/SegmentReader.java (revision 673635) +++ src/java/org/apache/lucene/index/SegmentReader.java (working copy) @@ -45,12 +45,13 @@ private int readBufferSize; FieldInfos fieldInfos; - private FieldsReader fieldsReader; + private FieldsReader fieldsReaderCloneable; TermInfosReader tis; TermVectorsReader termVectorsReaderOrig = null; - ThreadLocal termVectorsLocal = new ThreadLocal(); - + TermVectorsReaderLocal termVectorsLocal; + FieldsReaderLocal fieldsReaderLocal; + BitVector deletedDocs = null; private boolean deletedDocsDirty = false; private boolean normsDirty = false; @@ -265,6 +266,10 @@ return instance; } + /** So a subclass can do its own initializing. */ + protected void doInitialize() { + } + private void initialize(SegmentInfo si, int readBufferSize, boolean doOpenStores) throws CorruptIndexException, IOException { segment = si.name; this.si = si; @@ -306,12 +311,12 @@ fieldsSegment = segment; if (doOpenStores) { - fieldsReader = new FieldsReader(storeDir, fieldsSegment, fieldInfos, readBufferSize, + fieldsReaderCloneable = new FieldsReader(storeDir, fieldsSegment, fieldInfos, readBufferSize, si.getDocStoreOffset(), si.docCount); - + fieldsReaderLocal = new FieldsReaderLocal(fieldsReaderCloneable); // Verify two sources of "maxDoc" agree: - if (si.getDocStoreOffset() == -1 && fieldsReader.size() != si.docCount) { - throw new CorruptIndexException("doc counts differ for segment " + si.name + ": fieldsReader shows " + fieldsReader.size() + " but segmentInfo shows " + si.docCount); + if (si.getDocStoreOffset() == -1 && fieldsReaderCloneable.size() != si.docCount) { + throw new CorruptIndexException("doc counts differ for segment " + si.name + ": fieldsReader shows " + fieldsReaderCloneable.size() + " but segmentInfo shows " + si.docCount); } } @@ -332,7 +337,9 @@ else vectorsSegment 
= segment; termVectorsReaderOrig = new TermVectorsReader(storeDir, vectorsSegment, fieldInfos, readBufferSize, si.getDocStoreOffset(), si.docCount); + termVectorsLocal = new TermVectorsReaderLocal(termVectorsReaderOrig); } + doInitialize(); success = true; } finally { @@ -364,26 +371,37 @@ assert si.getDelCount() == 0; } - protected synchronized DirectoryIndexReader doReopen(SegmentInfos infos) throws CorruptIndexException, IOException { + protected synchronized DirectoryIndexReader doReopenOrClone(SegmentInfos infos, boolean doClone) throws CorruptIndexException, IOException { + + if (deletedDocsDirty || normsDirty) { + throw new IllegalStateException("Cannot clone IndexReader with pending changes deletedDocsDirty: "+deletedDocsDirty+" normsDirty: "+normsDirty); + } + DirectoryIndexReader newReader; if (infos.size() == 1) { SegmentInfo si = infos.info(0); if (segment.equals(si.name) && si.getUseCompoundFile() == SegmentReader.this.si.getUseCompoundFile()) { - newReader = reopenSegment(si); + newReader = reopenSegment(si, doClone); } else { // segment not referenced anymore, reopen not possible // or segment format changed newReader = SegmentReader.get(infos, infos.info(0), false); } } else { - return new MultiSegmentReader(directory, infos, closeDirectory, new SegmentReader[] {this}, null, null); + return new MultiSegmentReader(directory, infos, closeDirectory, new SegmentReader[] {this}, null, null, doClone); } return newReader; } - synchronized SegmentReader reopenSegment(SegmentInfo si) throws CorruptIndexException, IOException { + protected BitVector cloneDeletedDocs() { + if (this.deletedDocs == null) return null; + BitVector bitVector = new BitVector(this.deletedDocs.getBits(), this.deletedDocs.size(), this.deletedDocs.count()); + return bitVector; + } + + synchronized SegmentReader reopenSegment(SegmentInfo si, boolean doClone) throws CorruptIndexException, IOException { boolean deletionsUpToDate = (this.si.hasDeletions() == si.hasDeletions()) && 
(!si.hasDeletions() || this.si.getDelFileName().equals(si.getDelFileName())); boolean normsUpToDate = true; @@ -399,13 +417,16 @@ } } - if (normsUpToDate && deletionsUpToDate) { + if ((normsUpToDate && deletionsUpToDate) && !doClone) { return this; } - - // clone reader - SegmentReader clone = new SegmentReader(); + SegmentReader clone; + try { + clone = (SegmentReader)IMPL.newInstance(); + } catch (Exception e) { + throw new RuntimeException("cannot load SegmentReader class: " + e, e); + } boolean success = false; try { clone.directory = directory; @@ -414,13 +435,14 @@ clone.readBufferSize = readBufferSize; clone.cfsReader = cfsReader; clone.storeCFSReader = storeCFSReader; - + clone.fieldsReaderCloneable = fieldsReaderCloneable; clone.fieldInfos = fieldInfos; clone.tis = tis; clone.freqStream = freqStream; clone.proxStream = proxStream; clone.termVectorsReaderOrig = termVectorsReaderOrig; - + clone.fieldsReaderLocal = fieldsReaderLocal; + clone.termVectorsLocal = termVectorsLocal; // we have to open a new FieldsReader, because it is not thread-safe // and can thus not be shared among multiple SegmentReaders @@ -440,19 +462,19 @@ storeDir = cfsReader; } } - - if (fieldsReader != null) { - clone.fieldsReader = new FieldsReader(storeDir, fieldsSegment, fieldInfos, readBufferSize, - si.getDocStoreOffset(), si.docCount); - } - if (!deletionsUpToDate) { // load deleted docs clone.deletedDocs = null; clone.loadDeletedDocs(); } else { - clone.deletedDocs = this.deletedDocs; + if (doClone) { + if (this.deletedDocs != null) { + clone.deletedDocs = cloneDeletedDocs(); + } + } else { + clone.deletedDocs = this.deletedDocs; + } } clone.norms = new HashMap(); @@ -553,10 +575,6 @@ undeleteAll = false; } - FieldsReader getFieldsReader() { - return fieldsReader; - } - protected void doClose() throws IOException { boolean hasReferencedReader = (referencedSegmentReader != null); @@ -576,17 +594,14 @@ singleNormStream = null; } - // re-opened SegmentReaders have their own instance 
of FieldsReader - if (fieldsReader != null) { - fieldsReader.close(); - } - if (!hasReferencedReader) { // close everything, nothing is shared anymore with other readers if (tis != null) { tis.close(); } - + if (fieldsReaderCloneable != null) { + fieldsReaderCloneable.close(); + } if (freqStream != null) freqStream.close(); if (proxStream != null) @@ -661,11 +676,12 @@ * @throws CorruptIndexException if the index is corrupt * @throws IOException if there is a low-level IO error */ - public synchronized Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException { + public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException { ensureOpen(); if (isDeleted(n)) throw new IllegalArgumentException ("attempt to access a deleted document"); + FieldsReader fieldsReader = getFieldsReader(); return fieldsReader.doc(n, fieldSelector); } @@ -910,16 +926,43 @@ } /** + * Create a clone from the initial FieldsReader and store it in the ThreadLocal. + * @return FieldsReader + */ + FieldsReader getFieldsReader() { + return (FieldsReader) fieldsReaderLocal.get(); + } + + public static class FieldsReaderLocal extends ThreadLocal { + private FieldsReader fieldsReaderCloneable; + + public FieldsReaderLocal(FieldsReader fieldsReaderCloneable) { + this.fieldsReaderCloneable = fieldsReaderCloneable; + } + + protected Object initialValue() { + return fieldsReaderCloneable.clone(); + } + } + + public static class TermVectorsReaderLocal extends ThreadLocal { + private TermVectorsReader termVectorsReaderOrig; + + public TermVectorsReaderLocal(TermVectorsReader termVectorsReaderOrig) { + this.termVectorsReaderOrig = termVectorsReaderOrig; + } + + protected Object initialValue() { + return termVectorsReaderOrig.clone(); + } + } + + /** * Create a clone from the initial TermVectorsReader and store it in the ThreadLocal. 
 * @return TermVectorsReader */ private TermVectorsReader getTermVectorsReader() { - TermVectorsReader tvReader = (TermVectorsReader)termVectorsLocal.get(); - if (tvReader == null) { - tvReader = (TermVectorsReader)termVectorsReaderOrig.clone(); - termVectorsLocal.set(tvReader); - } - return tvReader; + return (TermVectorsReader) termVectorsLocal.get(); } /** Return a term frequency vector for the specified document and field. The Index: src/java/org/apache/lucene/index/IndexReader.java =================================================================== --- src/java/org/apache/lucene/index/IndexReader.java (revision 673635) +++ src/java/org/apache/lucene/index/IndexReader.java (working copy) @@ -54,7 +54,7 @@ @version $Id$ */ -public abstract class IndexReader { +public abstract class IndexReader implements Cloneable { /** * Constants describing field properties, for example used for @@ -270,9 +270,21 @@ * @throws IOException if there is a low-level IO error */ public synchronized IndexReader reopen() throws CorruptIndexException, IOException { - throw new UnsupportedOperationException("This reader does not support reopen()."); + return doReopenOrClone(false); } + + public Object clone() { + try { + return doReopenOrClone(true); + } catch (Exception exception) { + throw new RuntimeException(exception); + } + } + protected IndexReader doReopenOrClone(boolean doClone) throws CorruptIndexException, IOException { + throw new UnsupportedOperationException("This reader does not support reopen() or clone()"); + } + /** * Returns the directory associated with this index.
The Default * implementation returns the directory specified by subclasses when Index: src/java/org/apache/lucene/index/MultiSegmentReader.java =================================================================== --- src/java/org/apache/lucene/index/MultiSegmentReader.java (revision 673635) +++ src/java/org/apache/lucene/index/MultiSegmentReader.java (working copy) @@ -70,7 +70,7 @@ } /** This contructor is only used for {@link #reopen()} */ - MultiSegmentReader(Directory directory, SegmentInfos infos, boolean closeDirectory, SegmentReader[] oldReaders, int[] oldStarts, Map oldNormsCache) throws IOException { + MultiSegmentReader(Directory directory, SegmentInfos infos, boolean closeDirectory, SegmentReader[] oldReaders, int[] oldStarts, Map oldNormsCache, boolean doClone) throws IOException { super(directory, infos, closeDirectory); // we put the old SegmentReaders in a map, that allows us @@ -108,7 +108,7 @@ // this is a new reader; in case we hit an exception we can close it safely newReader = SegmentReader.get(infos.info(i)); } else { - newReader = (SegmentReader) newReaders[i].reopenSegment(infos.info(i)); + newReader = (SegmentReader) newReaders[i].reopenSegment(infos.info(i), doClone); } if (newReader == newReaders[i]) { // this reader will be shared between the old and the new one, @@ -193,14 +193,14 @@ starts[subReaders.length] = maxDoc; } - protected synchronized DirectoryIndexReader doReopen(SegmentInfos infos) throws CorruptIndexException, IOException { + protected synchronized DirectoryIndexReader doReopenOrClone(SegmentInfos infos, boolean doClone) throws CorruptIndexException, IOException { if (infos.size() == 1) { // The index has only one segment now, so we can't refresh the MultiSegmentReader. 
// Return a new SegmentReader instead SegmentReader newReader = SegmentReader.get(infos, infos.info(0), false); return newReader; } else { - return new MultiSegmentReader(directory, infos, closeDirectory, subReaders, starts, normsCache); + return new MultiSegmentReader(directory, infos, closeDirectory, subReaders, starts, normsCache, doClone); } }