Index: lucene/CHANGES.txt =================================================================== --- lucene/CHANGES.txt (revision 984234) +++ lucene/CHANGES.txt (working copy) @@ -555,6 +555,12 @@ * LUCENE-2589: Add a VariableSizedIntIndexInput, which, when used w/ Sep*, makes it simple to take any variable sized int block coders (like Simple9/16) and use them in a codec. (Mike McCandless) + +* LUCENE-2597: Add oal.index.SlowMultiReaderWrapper, to wrap a + composite reader (eg MultiReader or DirectoryReader), making it + pretend it's an atomic reader. This is a convenience class (you can + use MultiFields static methods directly, instead) if you need to use + the flex APIs directly on a composite reader. (Mike McCandless) Optimizations Index: lucene/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java (revision 984234) +++ lucene/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java (working copy) @@ -17,27 +17,33 @@ * limitations under the License. 
*/ +import java.util.Random; import java.io.IOException; -import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.index.SlowMultiReaderWrapper; import org.apache.lucene.store.Directory; import org.apache.lucene.store.MockRAMDirectory; -import org.apache.lucene.store.MockRAMDirectory; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.OpenBitSet; import org.apache.lucene.util.OpenBitSetDISI; public class TestCachingWrapperFilter extends LuceneTestCase { + Random rand; + + @Override + public void setUp() throws Exception { + super.setUp(); + rand = newRandom(); + } + public void testCachingWorks() throws Exception { Directory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer())); + RandomIndexWriter writer = new RandomIndexWriter(rand, dir); writer.close(); IndexReader reader = IndexReader.open(dir, true); @@ -62,8 +68,7 @@ public void testNullDocIdSet() throws Exception { Directory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer())); + RandomIndexWriter writer = new RandomIndexWriter(rand, dir); writer.close(); IndexReader reader = IndexReader.open(dir, true); @@ -84,8 +89,7 @@ public void testNullDocIdSetIterator() throws Exception { Directory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer())); + RandomIndexWriter writer = new RandomIndexWriter(rand, dir); writer.close(); IndexReader reader = IndexReader.open(dir, true); @@ -125,19 +129,21 @@ public 
void testIsCacheAble() throws Exception { Directory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); + RandomIndexWriter writer = new RandomIndexWriter(rand, dir); + writer.addDocument(new Document()); writer.close(); IndexReader reader = IndexReader.open(dir, true); + IndexReader slowReader = SlowMultiReaderWrapper.wrap(reader); // not cacheable: - assertDocIdSetCacheable(reader, new QueryWrapperFilter(new TermQuery(new Term("test","value"))), false); + assertDocIdSetCacheable(slowReader, new QueryWrapperFilter(new TermQuery(new Term("test","value"))), false); // returns default empty docidset, always cacheable: - assertDocIdSetCacheable(reader, NumericRangeFilter.newIntRange("test", Integer.valueOf(10000), Integer.valueOf(-10000), true, true), true); + assertDocIdSetCacheable(slowReader, NumericRangeFilter.newIntRange("test", Integer.valueOf(10000), Integer.valueOf(-10000), true, true), true); // is cacheable: - assertDocIdSetCacheable(reader, FieldCacheRangeFilter.newIntRange("test", Integer.valueOf(10), Integer.valueOf(20), true, true), true); + assertDocIdSetCacheable(slowReader, FieldCacheRangeFilter.newIntRange("test", Integer.valueOf(10), Integer.valueOf(20), true, true), true); // a openbitset filter is always cacheable - assertDocIdSetCacheable(reader, new Filter() { + assertDocIdSetCacheable(slowReader, new Filter() { @Override public DocIdSet getDocIdSet(IndexReader reader) { return new OpenBitSet(); @@ -149,8 +155,13 @@ public void testEnforceDeletions() throws Exception { Directory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); - IndexReader reader = writer.getReader(); + RandomIndexWriter writer = new RandomIndexWriter(rand, dir); + + // NOTE: cannot use writer.getReader because RIW (on + // flipping a coin) may give us a newly opened reader, + // but we use .reopen on 
this reader below and expect to + // (must) get an NRT reader: + IndexReader reader = writer.w.getReader(); IndexSearcher searcher = new IndexSearcher(reader); // add a doc, refresh the reader, and check that its there Index: lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java (revision 984234) +++ lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java (working copy) @@ -36,6 +36,7 @@ import org.apache.lucene.index.DocsAndPositionsEnum; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.index.SlowMultiReaderWrapper; import org.apache.lucene.index.Term; import org.apache.lucene.queryParser.QueryParser; import org.apache.lucene.store.MockRAMDirectory; @@ -266,7 +267,7 @@ // only one doc has "a" assertEquals(tp.NO_MORE_DOCS, tp.nextDoc()); - IndexSearcher is = new IndexSearcher(r); + IndexSearcher is = new IndexSearcher(SlowMultiReaderWrapper.wrap(r)); SpanTermQuery stq1 = new SpanTermQuery(new Term("content", "a")); SpanTermQuery stq2 = new SpanTermQuery(new Term("content", "k")); Index: lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java (revision 984234) +++ lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java (working copy) @@ -35,10 +35,10 @@ import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.analysis.tokenattributes.PayloadAttribute; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Payload; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; +import org.apache.lucene.index.SlowMultiReaderWrapper; import 
org.apache.lucene.store.MockRAMDirectory; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -130,7 +130,7 @@ reader = writer.getReader(); writer.close(); - searcher = new IndexSearcher(reader); + searcher = new IndexSearcher(SlowMultiReaderWrapper.wrap(reader)); searcher.setSimilarity(similarity); } Index: lucene/src/test/org/apache/lucene/search/payloads/PayloadHelper.java =================================================================== --- lucene/src/test/org/apache/lucene/search/payloads/PayloadHelper.java (revision 984234) +++ lucene/src/test/org/apache/lucene/search/payloads/PayloadHelper.java (working copy) @@ -26,6 +26,8 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.util.English; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.SlowMultiReaderWrapper; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Similarity; import static org.apache.lucene.util.LuceneTestCaseJ4.TEST_VERSION_CURRENT; @@ -46,6 +48,8 @@ public static final String MULTI_FIELD = "multiField"; public static final String FIELD = "field"; + public IndexReader reader; + public final class PayloadAnalyzer extends Analyzer { @@ -106,6 +110,8 @@ public IndexSearcher setUp(Similarity similarity, int numDocs) throws IOException { MockRAMDirectory directory = new MockRAMDirectory(); PayloadAnalyzer analyzer = new PayloadAnalyzer(); + + // TODO randomize this IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig( TEST_VERSION_CURRENT, analyzer).setSimilarity(similarity)); // writer.infoStream = System.out; @@ -116,11 +122,15 @@ doc.add(new Field(NO_PAYLOAD_FIELD, English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED)); writer.addDocument(doc); } - //writer.optimize(); + reader = writer.getReader(); writer.close(); - IndexSearcher searcher = new IndexSearcher(directory, true); + IndexSearcher searcher = new 
IndexSearcher(SlowMultiReaderWrapper.wrap(reader)); searcher.setSimilarity(similarity); return searcher; } + + public void tearDown() throws Exception { + reader.close(); + } } Index: lucene/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java =================================================================== --- lucene/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java (revision 984234) +++ lucene/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java (working copy) @@ -21,6 +21,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.SlowMultiReaderWrapper; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.queryParser.QueryParser; @@ -62,7 +63,7 @@ } reader = writer.getReader(); writer.close(); - searcher = new IndexSearcher(reader); + searcher = new IndexSearcher(SlowMultiReaderWrapper.wrap(reader)); } protected String[] docFields = { Index: lucene/src/test/org/apache/lucene/search/spans/TestBasics.java =================================================================== --- lucene/src/test/org/apache/lucene/search/spans/TestBasics.java (revision 984234) +++ lucene/src/test/org/apache/lucene/search/spans/TestBasics.java (working copy) @@ -25,6 +25,7 @@ import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.index.SlowMultiReaderWrapper; import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; @@ -69,7 +70,7 @@ writer.addDocument(doc); } reader = writer.getReader(); - searcher = new IndexSearcher(reader); + searcher = new IndexSearcher(SlowMultiReaderWrapper.wrap(reader)); writer.close(); } Index: lucene/src/test/org/apache/lucene/search/spans/TestSpans.java 
=================================================================== --- lucene/src/test/org/apache/lucene/search/spans/TestSpans.java (revision 984234) +++ lucene/src/test/org/apache/lucene/search/spans/TestSpans.java (working copy) @@ -26,12 +26,12 @@ import org.apache.lucene.search.Scorer; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.Searcher; -import org.apache.lucene.store.MockRAMDirectory; import org.apache.lucene.store.Directory; import org.apache.lucene.store.MockRAMDirectory; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.SlowMultiReaderWrapper; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; @@ -197,7 +197,7 @@ makeSpanTermQuery("t3") }, slop, ordered); - Spans spans = snq.getSpans(searcher.getIndexReader()); + Spans spans = snq.getSpans(SlowMultiReaderWrapper.wrap(searcher.getIndexReader())); assertTrue("first range", spans.next()); assertEquals("first doc", 11, spans.doc()); @@ -223,7 +223,7 @@ makeSpanTermQuery("u2") }, 0, false); - Spans spans = snq.getSpans(searcher.getIndexReader()); + Spans spans = snq.getSpans(SlowMultiReaderWrapper.wrap(searcher.getIndexReader())); assertTrue("Does not have next and it should", spans.next()); assertEquals("doc", 4, spans.doc()); assertEquals("start", 1, spans.start()); @@ -259,7 +259,7 @@ }, 1, false); - spans = snq.getSpans(searcher.getIndexReader()); + spans = snq.getSpans(SlowMultiReaderWrapper.wrap(searcher.getIndexReader())); assertTrue("Does not have next and it should", spans.next()); assertEquals("doc", 4, spans.doc()); assertEquals("start", 0, spans.start()); @@ -317,7 +317,7 @@ for (int i = 0; i < terms.length; i++) { sqa[i] = makeSpanTermQuery(terms[i]); } - return (new SpanOrQuery(sqa)).getSpans(searcher.getIndexReader()); + return (new 
SpanOrQuery(sqa)).getSpans(SlowMultiReaderWrapper.wrap(searcher.getIndexReader())); } private void tstNextSpans(Spans spans, int doc, int start, int end) @@ -422,7 +422,7 @@ } }; - Scorer spanScorer = snq.weight(searcher).scorer(searcher.getIndexReader(), true, false); + Scorer spanScorer = snq.weight(searcher).scorer(SlowMultiReaderWrapper.wrap(searcher.getIndexReader()), true, false); assertTrue("first doc", spanScorer.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); assertEquals("first doc number", spanScorer.docID(), 11); Index: lucene/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java (revision 984234) +++ lucene/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java (working copy) @@ -24,6 +24,7 @@ import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.index.SlowMultiReaderWrapper; import org.apache.lucene.index.Term; import org.apache.lucene.search.CheckHits; import org.apache.lucene.search.IndexSearcher; @@ -112,7 +113,7 @@ field("last", "jones") })); reader = writer.getReader(); writer.close(); - searcher = new IndexSearcher(reader); + searcher = new IndexSearcher(SlowMultiReaderWrapper.wrap(reader)); } @Override Index: lucene/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java =================================================================== --- lucene/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java (revision 984234) +++ lucene/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java (working copy) @@ -22,9 +22,9 @@ import java.util.Collection; import java.util.HashSet; import java.util.Set; +import java.util.Random; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.MockAnalyzer; import 
org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.analysis.TokenStream; @@ -34,9 +34,9 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.CorruptIndexException; +import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.SlowMultiReaderWrapper; import org.apache.lucene.index.Payload; import org.apache.lucene.index.Term; import org.apache.lucene.search.DefaultSimilarity; @@ -54,6 +54,8 @@ private IndexSearcher searcher; private Similarity similarity = new DefaultSimilarity(); protected IndexReader indexReader; + private IndexReader closeIndexReader; + private Random rand; public TestPayloadSpans(String s) { super(s); @@ -62,6 +64,7 @@ @Override protected void setUp() throws Exception { super.setUp(); + rand = newRandom(); PayloadHelper helper = new PayloadHelper(); searcher = helper.setUp(similarity, 1000); indexReader = searcher.getIndexReader(); @@ -109,27 +112,22 @@ clauses[1] = new SpanTermQuery(new Term(PayloadHelper.FIELD, "three")); SpanQuery spq = new SpanNearQuery(clauses, 5, true); SpanNotQuery snq = new SpanNotQuery(spq, new SpanTermQuery(new Term(PayloadHelper.FIELD, "two"))); - checkSpans(snq.getSpans(getSpanNotSearcher().getIndexReader()), 1,new int[]{2}); - } - - public IndexSearcher getSpanNotSearcher() - throws IOException { + + + MockRAMDirectory directory = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig( - TEST_VERSION_CURRENT, new PayloadAnalyzer()).setSimilarity( - similarity)); + RandomIndexWriter writer = new RandomIndexWriter(rand, directory, + newIndexWriterConfig(rand, TEST_VERSION_CURRENT, new PayloadAnalyzer()).setSimilarity(similarity)); Document doc = new Document(); doc.add(new 
Field(PayloadHelper.FIELD, "one two three one four three", Field.Store.YES, Field.Index.ANALYZED)); writer.addDocument(doc); - + IndexReader reader = writer.getReader(); writer.close(); - IndexSearcher searcher = new IndexSearcher(directory, true); - searcher.setSimilarity(similarity); - return searcher; - + checkSpans(snq.getSpans(SlowMultiReaderWrapper.wrap(reader)), 1,new int[]{2}); + reader.close(); } public void testNestedSpans() throws Exception { @@ -185,6 +183,7 @@ spans = nestedSpanNearQuery.getSpans(searcher.getIndexReader()); assertTrue("spans is null and it shouldn't be", spans != null); checkSpans(spans, 2, new int[]{3,3}); + closeIndexReader.close(); } public void testFirstClauseWithoutPayload() throws Exception { @@ -215,6 +214,7 @@ spans = nestedSpanNearQuery.getSpans(searcher.getIndexReader()); assertTrue("spans is null and it shouldn't be", spans != null); checkSpans(spans, 1, new int[]{3}); + closeIndexReader.close(); } public void testHeavilyNestedSpanQuery() throws Exception { @@ -250,20 +250,23 @@ spans = nestedSpanNearQuery.getSpans(searcher.getIndexReader()); assertTrue("spans is null and it shouldn't be", spans != null); checkSpans(spans, 2, new int[]{8, 8}); + closeIndexReader.close(); } public void testShrinkToAfterShortestMatch() throws CorruptIndexException, LockObtainFailedException, IOException { MockRAMDirectory directory = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig( - TEST_VERSION_CURRENT, new TestPayloadAnalyzer())); + RandomIndexWriter writer = new RandomIndexWriter(rand, directory, + newIndexWriterConfig(rand, TEST_VERSION_CURRENT, new TestPayloadAnalyzer())); + Document doc = new Document(); doc.add(new Field("content", new StringReader("a b c d e f g h i j a k"))); writer.addDocument(doc); + + IndexReader reader = writer.getReader(); + IndexSearcher is = new IndexSearcher(SlowMultiReaderWrapper.wrap(reader)); writer.close(); - IndexSearcher is = new IndexSearcher(directory, 
true); - SpanTermQuery stq1 = new SpanTermQuery(new Term("content", "a")); SpanTermQuery stq2 = new SpanTermQuery(new Term("content", "k")); SpanQuery[] sqs = { stq1, stq2 }; @@ -284,20 +287,22 @@ assertEquals(2, payloadSet.size()); assertTrue(payloadSet.contains("a:Noise:10")); assertTrue(payloadSet.contains("k:Noise:11")); + reader.close(); } public void testShrinkToAfterShortestMatch2() throws CorruptIndexException, LockObtainFailedException, IOException { MockRAMDirectory directory = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig( - TEST_VERSION_CURRENT, new TestPayloadAnalyzer())); + RandomIndexWriter writer = new RandomIndexWriter(rand, directory, + newIndexWriterConfig(rand, TEST_VERSION_CURRENT, new TestPayloadAnalyzer())); + Document doc = new Document(); doc.add(new Field("content", new StringReader("a b a d k f a h i k a k"))); writer.addDocument(doc); + IndexReader reader = writer.getReader(); + IndexSearcher is = new IndexSearcher(SlowMultiReaderWrapper.wrap(reader)); writer.close(); - IndexSearcher is = new IndexSearcher(directory, true); - SpanTermQuery stq1 = new SpanTermQuery(new Term("content", "a")); SpanTermQuery stq2 = new SpanTermQuery(new Term("content", "k")); SpanQuery[] sqs = { stq1, stq2 }; @@ -317,20 +322,22 @@ assertEquals(2, payloadSet.size()); assertTrue(payloadSet.contains("a:Noise:10")); assertTrue(payloadSet.contains("k:Noise:11")); + reader.close(); } public void testShrinkToAfterShortestMatch3() throws CorruptIndexException, LockObtainFailedException, IOException { MockRAMDirectory directory = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig( - TEST_VERSION_CURRENT, new TestPayloadAnalyzer())); + RandomIndexWriter writer = new RandomIndexWriter(rand, directory, + newIndexWriterConfig(rand, TEST_VERSION_CURRENT, new TestPayloadAnalyzer())); + Document doc = new Document(); doc.add(new Field("content", new StringReader("j k a l f k k p a 
t a k l k t a"))); writer.addDocument(doc); + IndexReader reader = writer.getReader(); + IndexSearcher is = new IndexSearcher(SlowMultiReaderWrapper.wrap(reader)); writer.close(); - IndexSearcher is = new IndexSearcher(directory, true); - SpanTermQuery stq1 = new SpanTermQuery(new Term("content", "a")); SpanTermQuery stq2 = new SpanTermQuery(new Term("content", "k")); SpanQuery[] sqs = { stq1, stq2 }; @@ -356,23 +363,23 @@ } assertTrue(payloadSet.contains("a:Noise:10")); assertTrue(payloadSet.contains("k:Noise:11")); + reader.close(); } public void testPayloadSpanUtil() throws Exception { MockRAMDirectory directory = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig( - TEST_VERSION_CURRENT, new PayloadAnalyzer()).setSimilarity( - similarity)); + RandomIndexWriter writer = new RandomIndexWriter(rand, directory, + newIndexWriterConfig(rand, TEST_VERSION_CURRENT, new PayloadAnalyzer()).setSimilarity(similarity)); + Document doc = new Document(); doc.add(new Field(PayloadHelper.FIELD,"xx rr yy mm pp", Field.Store.YES, Field.Index.ANALYZED)); writer.addDocument(doc); + IndexReader reader = writer.getReader(); writer.close(); + IndexSearcher searcher = new IndexSearcher(SlowMultiReaderWrapper.wrap(reader)); - IndexSearcher searcher = new IndexSearcher(directory, true); - - IndexReader reader = searcher.getIndexReader(); - PayloadSpanUtil psu = new PayloadSpanUtil(reader); + PayloadSpanUtil psu = new PayloadSpanUtil(searcher.getIndexReader()); Collection payloads = psu.getPayloadsForQuery(new TermQuery(new Term(PayloadHelper.FIELD, "rr"))); if(VERBOSE) @@ -381,7 +388,7 @@ if(VERBOSE) System.out.println(new String(bytes)); } - + reader.close(); } private void checkSpans(Spans spans, int expectedNumSpans, int expectedNumPayloads, @@ -420,8 +427,8 @@ private IndexSearcher getSearcher() throws Exception { MockRAMDirectory directory = new MockRAMDirectory(); String[] docs = new String[]{"xx rr yy mm pp","xx yy mm rr pp", "nopayload 
qq ss pp np", "one two three four five six seven eight nine ten eleven", "nine one two three four five six seven eight eleven ten"}; - IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig( - TEST_VERSION_CURRENT, new PayloadAnalyzer()).setSimilarity(similarity)); + RandomIndexWriter writer = new RandomIndexWriter(rand, directory, + newIndexWriterConfig(rand, TEST_VERSION_CURRENT, new PayloadAnalyzer()).setSimilarity(similarity)); Document doc = null; for(int i = 0; i < docs.length; i++) { @@ -431,9 +438,10 @@ writer.addDocument(doc); } + closeIndexReader = writer.getReader(); writer.close(); - IndexSearcher searcher = new IndexSearcher(directory, true); + IndexSearcher searcher = new IndexSearcher(SlowMultiReaderWrapper.wrap(closeIndexReader)); return searcher; } Index: lucene/src/test/org/apache/lucene/search/TestTermScorer.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestTermScorer.java (revision 984234) +++ lucene/src/test/org/apache/lucene/search/TestTermScorer.java (working copy) @@ -27,6 +27,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; +import org.apache.lucene.index.SlowMultiReaderWrapper; import org.apache.lucene.store.MockRAMDirectory; public class TestTermScorer extends LuceneTestCase { @@ -57,7 +58,7 @@ } indexReader = writer.getReader(); writer.close(); - indexSearcher = new IndexSearcher(indexReader); + indexSearcher = new IndexSearcher(SlowMultiReaderWrapper.wrap(indexReader)); } @Override Index: lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java (revision 984234) +++ lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java (working copy) @@ -22,6 +22,7 @@ import org.apache.lucene.document.Document; 
import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.SlowMultiReaderWrapper; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.store.Directory; @@ -147,7 +148,7 @@ r = writer.getReader(); writer.close(); - s = new IndexSearcher(r); + s = new IndexSearcher(SlowMultiReaderWrapper.wrap(r)); s.setSimilarity(sim); } @@ -167,7 +168,7 @@ QueryUtils.check(dq, s); final Weight dw = dq.weight(s); - final Scorer ds = dw.scorer(r, true, false); + final Scorer ds = dw.scorer(s.getIndexReader(), true, false); final boolean skipOk = ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS; if (skipOk) { fail("firsttime skipTo found a match? ... " @@ -183,7 +184,7 @@ QueryUtils.check(dq, s); final Weight dw = dq.weight(s); - final Scorer ds = dw.scorer(r, true, false); + final Scorer ds = dw.scorer(s.getIndexReader(), true, false); assertTrue("firsttime skipTo found no match", ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS); assertEquals("found wrong docid", "d4", r.document(ds.docID()).get("id")); Index: lucene/src/test/org/apache/lucene/search/TestSpanQueryFilter.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestSpanQueryFilter.java (revision 984234) +++ lucene/src/test/org/apache/lucene/search/TestSpanQueryFilter.java (working copy) @@ -22,6 +22,7 @@ import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.index.SlowMultiReaderWrapper; import org.apache.lucene.index.Term; import org.apache.lucene.search.spans.SpanTermQuery; import org.apache.lucene.store.Directory; @@ -50,7 +51,7 @@ SpanTermQuery query = new SpanTermQuery(new Term("field", English.intToEnglish(10).trim())); SpanQueryFilter filter = new SpanQueryFilter(query); - SpanFilterResult result = filter.bitSpans(reader); + 
SpanFilterResult result = filter.bitSpans(SlowMultiReaderWrapper.wrap(reader)); DocIdSet docIdSet = result.getDocIdSet(); assertTrue("docIdSet is null and it shouldn't be", docIdSet != null); assertContainsDocId("docIdSet doesn't contain docId 10", docIdSet, 10); Index: lucene/src/test/org/apache/lucene/index/RandomIndexWriter.java =================================================================== --- lucene/src/test/org/apache/lucene/index/RandomIndexWriter.java (revision 984234) +++ lucene/src/test/org/apache/lucene/index/RandomIndexWriter.java (working copy) @@ -90,6 +90,9 @@ public void addDocument(Document doc) throws IOException { w.addDocument(doc); if (docCount++ == flushAt) { + if (LuceneTestCaseJ4.VERBOSE) { + System.out.println("RIW.addDocument: now doing a commit"); + } w.commit(); flushAt += _TestUtil.nextInt(r, 10, 1000); } Index: lucene/src/java/org/apache/lucene/search/spans/SpanTermQuery.java =================================================================== --- lucene/src/java/org/apache/lucene/search/spans/SpanTermQuery.java (revision 984234) +++ lucene/src/java/org/apache/lucene/search/spans/SpanTermQuery.java (working copy) @@ -20,8 +20,6 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; import org.apache.lucene.index.DocsAndPositionsEnum; -import org.apache.lucene.index.MultiFields; -import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.ToStringUtils; import java.io.IOException; @@ -83,17 +81,14 @@ @Override public Spans getSpans(final IndexReader reader) throws IOException { - // NOTE: debateably, the caller should never pass in a - // multi reader... 
- final DocsAndPositionsEnum postings = MultiFields.getTermPositionsEnum(reader, - MultiFields.getDeletedDocs(reader), - term.field(), - term.bytes()); + final DocsAndPositionsEnum postings = reader.termPositionsEnum(reader.getDeletedDocs(), + term.field(), + term.bytes()); if (postings != null) { return new TermSpans(postings, term); } else { - if (MultiFields.getTermDocsEnum(reader, MultiFields.getDeletedDocs(reader), term.field(), term.bytes()) != null) { + if (reader.termDocsEnum(reader.getDeletedDocs(), term.field(), term.bytes()) != null) { // term does exist, but has no positions throw new IllegalStateException("field \"" + term.field() + "\" was indexed with Field.omitTermFreqAndPositions=true; cannot run SpanTermQuery (term=" + term.text() + ")"); } else { Index: lucene/src/java/org/apache/lucene/search/MultiPhraseQuery.java =================================================================== --- lucene/src/java/org/apache/lucene/search/MultiPhraseQuery.java (revision 984234) +++ lucene/src/java/org/apache/lucene/search/MultiPhraseQuery.java (working copy) @@ -23,7 +23,6 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; import org.apache.lucene.index.DocsEnum; -import org.apache.lucene.index.MultiFields; import org.apache.lucene.index.DocsAndPositionsEnum; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.ToStringUtils; @@ -171,7 +170,7 @@ if (termArrays.size() == 0) // optimize zero-term case return null; - final Bits delDocs = MultiFields.getDeletedDocs(reader); + final Bits delDocs = reader.getDeletedDocs(); PhraseQuery.PostingsAndFreq[] postingsFreqs = new PhraseQuery.PostingsAndFreq[termArrays.size()]; @@ -191,22 +190,22 @@ docFreq += reader.docFreq(terms[termIdx]); } } else { - final BytesRef text = new BytesRef(terms[0].text()); + final Term term = terms[0]; postingsEnum = reader.termPositionsEnum(delDocs, - terms[0].field(), - text); + term.field(), + term.bytes()); if (postingsEnum == null) { - 
if (MultiFields.getTermDocsEnum(reader, delDocs, terms[0].field(), text) != null) { + if (reader.termDocsEnum(delDocs, term.field(), term.bytes()) != null) { // term does exist, but has no positions - throw new IllegalStateException("field \"" + terms[0].field() + "\" was indexed with Field.omitTermFreqAndPositions=true; cannot run PhraseQuery (term=" + terms[0].text() + ")"); + throw new IllegalStateException("field \"" + term.field() + "\" was indexed with Field.omitTermFreqAndPositions=true; cannot run PhraseQuery (term=" + term.text() + ")"); } else { // term does not exist return null; } } - docFreq = reader.docFreq(terms[0].field(), text); + docFreq = reader.docFreq(term.field(), term.bytes()); } postingsFreqs[pos] = new PhraseQuery.PostingsAndFreq(postingsEnum, docFreq, positions.get(pos).intValue()); @@ -497,7 +496,7 @@ public UnionDocsAndPositionsEnum(IndexReader indexReader, Term[] terms) throws IOException { List docsEnums = new LinkedList(); - final Bits delDocs = MultiFields.getDeletedDocs(indexReader); + final Bits delDocs = indexReader.getDeletedDocs(); for (int i = 0; i < terms.length; i++) { DocsAndPositionsEnum postings = indexReader.termPositionsEnum(delDocs, terms[i].field(), @@ -505,7 +504,7 @@ if (postings != null) { docsEnums.add(postings); } else { - if (MultiFields.getTermDocsEnum(indexReader, delDocs, terms[i].field(), terms[i].bytes()) != null) { + if (indexReader.termDocsEnum(delDocs, terms[i].field(), terms[i].bytes()) != null) { // term does exist, but has no positions throw new IllegalStateException("field \"" + terms[i].field() + "\" was indexed with Field.omitTermFreqAndPositions=true; cannot run PhraseQuery (term=" + terms[i].text() + ")"); } Index: lucene/src/java/org/apache/lucene/search/PrefixQuery.java =================================================================== --- lucene/src/java/org/apache/lucene/search/PrefixQuery.java (revision 984234) +++ lucene/src/java/org/apache/lucene/search/PrefixQuery.java (working copy) @@ 
-48,6 +48,8 @@ protected TermsEnum getTermsEnum(IndexReader reader) throws IOException { if (prefix.bytes().length == 0) { // no prefix -- match all terms for this field: + // NOTE: for now, MultiTermQuery enums terms at the + // MultiReader level, so we must use MultiFields here: final Terms terms = MultiFields.getTerms(reader, getField()); return (terms != null) ? terms.iterator() : TermsEnum.EMPTY; } Index: lucene/src/java/org/apache/lucene/search/PhraseQuery.java =================================================================== --- lucene/src/java/org/apache/lucene/search/PhraseQuery.java (revision 984234) +++ lucene/src/java/org/apache/lucene/search/PhraseQuery.java (working copy) @@ -23,10 +23,8 @@ import java.util.Arrays; import org.apache.lucene.index.Term; -import org.apache.lucene.util.BytesRef; import org.apache.lucene.index.DocsAndPositionsEnum; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.MultiFields; import org.apache.lucene.search.Explanation.IDFExplanation; import org.apache.lucene.util.ToStringUtils; import org.apache.lucene.util.Bits; @@ -181,17 +179,16 @@ return null; PostingsAndFreq[] postingsFreqs = new PostingsAndFreq[terms.size()]; - final Bits delDocs = MultiFields.getDeletedDocs(reader); + final Bits delDocs = reader.getDeletedDocs(); for (int i = 0; i < terms.size(); i++) { final Term t = terms.get(i); - DocsAndPositionsEnum postingsEnum = MultiFields.getTermPositionsEnum(reader, - delDocs, - t.field(), - t.bytes()); + DocsAndPositionsEnum postingsEnum = reader.termPositionsEnum(delDocs, + t.field(), + t.bytes()); // PhraseQuery on a field that did not index // positions. 
if (postingsEnum == null) { - if (MultiFields.getTermDocsEnum(reader, delDocs, t.field(), t.bytes()) != null) { + if (reader.termDocsEnum(delDocs, t.field(), t.bytes()) != null) { // term does exist, but has no positions throw new IllegalStateException("field \"" + t.field() + "\" was indexed with Field.omitTermFreqAndPositions=true; cannot run PhraseQuery (term=" + t.text() + ")"); } else { Index: lucene/src/java/org/apache/lucene/search/TermRangeQuery.java =================================================================== --- lucene/src/java/org/apache/lucene/search/TermRangeQuery.java (revision 984234) +++ lucene/src/java/org/apache/lucene/search/TermRangeQuery.java (working copy) @@ -135,8 +135,8 @@ return TermsEnum.EMPTY; } if ((lowerTerm == null || (collator == null && includeLower && "".equals(lowerTerm))) && upperTerm == null) { - // NOTE: debateably, the caller should never pass in a - // multi reader... + // NOTE: for now, MultiTermQuery enums terms at the + // MultiReader level, so we must use MultiFields here: final Terms terms = MultiFields.getTerms(reader, field); return (terms != null) ? terms.iterator() : null; } Index: lucene/src/java/org/apache/lucene/search/TermQuery.java =================================================================== --- lucene/src/java/org/apache/lucene/search/TermQuery.java (revision 984234) +++ lucene/src/java/org/apache/lucene/search/TermQuery.java (working copy) @@ -22,8 +22,6 @@ import org.apache.lucene.index.DocsEnum; import org.apache.lucene.index.Term; -import org.apache.lucene.index.MultiFields; -import org.apache.lucene.util.BytesRef; import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.Explanation.IDFExplanation; import org.apache.lucene.util.ToStringUtils; @@ -73,9 +71,10 @@ @Override public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException { - // NOTE: debateably, the caller should never pass in a - // multi reader... 
- DocsEnum docs = MultiFields.getTermDocsEnum(reader, MultiFields.getDeletedDocs(reader), term.field(), term.bytes()); + DocsEnum docs = reader.termDocsEnum(reader.getDeletedDocs(), + term.field(), + term.bytes()); + if (docs == null) { return null; } @@ -118,7 +117,7 @@ Explanation tfExplanation = new Explanation(); int tf = 0; - DocsEnum docs = reader.termDocsEnum(MultiFields.getDeletedDocs(reader), term.field(), term.bytes()); + DocsEnum docs = reader.termDocsEnum(reader.getDeletedDocs(), term.field(), term.bytes()); if (docs != null) { int newDoc = docs.advance(doc); if (newDoc == doc) { Index: lucene/src/java/org/apache/lucene/search/AutomatonQuery.java =================================================================== --- lucene/src/java/org/apache/lucene/search/AutomatonQuery.java (revision 984234) +++ lucene/src/java/org/apache/lucene/search/AutomatonQuery.java (working copy) @@ -92,6 +92,8 @@ // matches all possible strings if (BasicOperations.isTotal(automaton)) { + // NOTE: for now, MultiTermQuery enums terms at the + // MultiReader level, so we must use MultiFields here: return MultiFields.getTerms(reader, getField()).iterator(); } Index: lucene/src/java/org/apache/lucene/index/IndexWriter.java =================================================================== --- lucene/src/java/org/apache/lucene/index/IndexWriter.java (revision 984234) +++ lucene/src/java/org/apache/lucene/index/IndexWriter.java (working copy) @@ -4267,6 +4267,21 @@ } } + // called only from assert + private boolean filesExist(SegmentInfos toSync) throws IOException { + Collection files = toSync.files(directory, false); + for(final String fileName: files) { + assert directory.fileExists(fileName): "file " + fileName + " does not exist"; + // If this trips it means we are missing a call to + // .checkpoint somewhere, because by the time we + // are called, deleter should know about every + // file referenced by the current head + // segmentInfos: + assert deleter.exists(fileName); + 
} + return true; + } + /** Walk through all files referenced by the current * segmentInfos and ask the Directory to sync each file, * if it wasn't already. If that succeeds, then we @@ -4275,10 +4290,8 @@ private void startCommit(long sizeInBytes, Map commitUserData) throws IOException { assert testPoint("startStartCommit"); + assert pendingCommit == null; - // TODO: as of LUCENE-2095, we can simplify this method, - // since only 1 thread can be in here at once - if (hitOOM) { throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot commit"); } @@ -4288,7 +4301,7 @@ if (infoStream != null) message("startCommit(): start sizeInBytes=" + sizeInBytes); - SegmentInfos toSync = null; + final SegmentInfos toSync; final long myChangeCount; synchronized(this) { @@ -4303,9 +4316,7 @@ // First, we clone & incref the segmentInfos we intend // to sync, then, without locking, we sync() each file - // referenced by toSync, in the background. Multiple - // threads can be doing this at once, if say a large - // merge and a small merge finish at the same time: + // referenced by toSync, in the background. 
if (infoStream != null) message("startCommit index=" + segString(segmentInfos) + " changeCount=" + changeCount); @@ -4319,78 +4330,41 @@ deleter.incRef(toSync, false); myChangeCount = changeCount; - - Collection files = toSync.files(directory, false); - for(final String fileName: files) { - assert directory.fileExists(fileName): "file " + fileName + " does not exist"; - // If this trips it means we are missing a call to - // .checkpoint somewhere, because by the time we - // are called, deleter should know about every - // file referenced by the current head - // segmentInfos: - assert deleter.exists(fileName); - } + assert filesExist(toSync); } assert testPoint("midStartCommit"); - boolean setPending = false; - try { + // This call can take a long time -- 10s of seconds + // or more: directory.sync(toSync.files(directory, false)); assert testPoint("midStartCommit2"); synchronized(this) { - // If someone saved a newer version of segments file - // since I first started syncing my version, I can - // safely skip saving myself since I've been - // superseded: - while(true) { - if (myChangeCount <= lastCommitChangeCount) { - if (infoStream != null) { - message("sync superseded by newer infos"); - } - break; - } else if (pendingCommit == null) { - // My turn to commit + assert pendingCommit == null; - if (segmentInfos.getGeneration() > toSync.getGeneration()) - toSync.updateGeneration(segmentInfos); + if (segmentInfos.getGeneration() > toSync.getGeneration()) + toSync.updateGeneration(segmentInfos); - boolean success = false; - try { - - // Exception here means nothing is prepared - // (this method unwinds everything it did on - // an exception) - try { - toSync.prepareCommit(directory); - } finally { - // Have our master segmentInfos record the - // generations we just prepared. We do this - // on error or success so we don't - // double-write a segments_N file. 
- segmentInfos.updateGeneration(toSync); - } - - assert pendingCommit == null; - setPending = true; - pendingCommit = toSync; - pendingCommitChangeCount = myChangeCount; - success = true; - } finally { - if (!success && infoStream != null) - message("hit exception committing segments file"); - } - break; - } else { - // Must wait for other commit to complete - doWait(); - } + // Exception here means nothing is prepared + // (this method unwinds everything it did on + // an exception) + try { + toSync.prepareCommit(directory); + } finally { + // Have our master segmentInfos record the + // generations we just prepared. We do this + // on error or success so we don't + // double-write a segments_N file. + segmentInfos.updateGeneration(toSync); } + + pendingCommit = toSync; + pendingCommitChangeCount = myChangeCount; } if (infoStream != null) @@ -4400,8 +4374,13 @@ } finally { synchronized(this) { - if (!setPending) + if (pendingCommit == null) { + if (infoStream != null) { + message("hit exception committing segments file"); + } + deleter.decRef(toSync); + } } } } catch (OutOfMemoryError oom) { Index: lucene/src/java/org/apache/lucene/index/SlowMultiReaderWrapper.java =================================================================== --- lucene/src/java/org/apache/lucene/index/SlowMultiReaderWrapper.java (revision 0) +++ lucene/src/java/org/apache/lucene/index/SlowMultiReaderWrapper.java (revision 0) @@ -0,0 +1,81 @@ +package org.apache.lucene.index; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.io.IOException; +import java.util.List; +import java.util.ArrayList; +import org.apache.lucene.util.Bits; +import org.apache.lucene.util.ReaderUtil; + +/** + * This class forces a composite reader (eg a {@link + * MultiReader} or {@link DirectoryReader} or any other + * IndexReader subclass that returns non-null from {@link + * IndexReader#getSequentialSubReaders}) to emulate an + * atomic reader. This requires implementing the postings + * APIs on-the-fly, by stepping through the sub-readers to + * merge fields/terms, appending docs, etc. + * + *

If you ever hit an UnsupportedOperationException saying + * "please use MultiFields.XXX instead", the simple + * but non-performant workaround is to wrap your reader + * using this class.

+ * + *

 NOTE: this class almost always results in a + * performance hit. If this is important to your use case, + * it's better to get the sequential sub readers (see {@link + * ReaderUtil#gatherSubReaders}) instead, and iterate through them + * yourself.

+ */ + +public final class SlowMultiReaderWrapper extends FilterIndexReader { + /** This method may return the reader back, if the + * incoming reader is already atomic. */ + public static IndexReader wrap(IndexReader reader) { + final List subs = new ArrayList(); + ReaderUtil.gatherSubReaders(subs, reader); + if (subs == null) { + // already an atomic reader + return reader; + } else if (subs.size() == 1) { + return subs.get(0); + } else { + return new SlowMultiReaderWrapper(reader); + } + } + + private SlowMultiReaderWrapper(IndexReader other) { + super(other); + } + + @Override + public Fields fields() throws IOException { + return MultiFields.getFields(in); + } + + @Override + public Bits getDeletedDocs() throws IOException { + return MultiFields.getDeletedDocs(in); + } + + @Override + public void doClose() throws IOException { + throw new UnsupportedOperationException("please call close on the original reader instead"); + } +} Index: lucene/src/java/org/apache/lucene/util/ReaderUtil.java =================================================================== --- lucene/src/java/org/apache/lucene/util/ReaderUtil.java (revision 984234) +++ lucene/src/java/org/apache/lucene/util/ReaderUtil.java (working copy) @@ -62,7 +62,7 @@ try { new Gather(reader) { @Override - protected void add(int base, IndexReader r) { + protected void add(int base, IndexReader r) { allSubReaders.add(r); } }.run(); Index: lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/CartesianShapeFilter.java =================================================================== --- lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/CartesianShapeFilter.java (revision 984234) +++ lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/CartesianShapeFilter.java (working copy) @@ -21,7 +21,6 @@ import org.apache.lucene.index.DocsEnum; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.MultiFields; import org.apache.lucene.search.Filter; import 
org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; @@ -47,7 +46,7 @@ @Override public DocIdSet getDocIdSet(final IndexReader reader) throws IOException { - final Bits delDocs = MultiFields.getDeletedDocs(reader); + final Bits delDocs = reader.getDeletedDocs(); final List area = shape.getArea(); final int sz = area.size(); @@ -59,7 +58,7 @@ return new DocIdSet() { @Override public DocIdSetIterator iterator() throws IOException { - return MultiFields.getTermDocsEnum(reader, delDocs, fieldName, bytesRef); + return reader.termDocsEnum(delDocs, fieldName, bytesRef); } @Override @@ -72,7 +71,7 @@ for (int i =0; i< sz; i++) { double boxId = area.get(i).doubleValue(); NumericUtils.longToPrefixCoded(NumericUtils.doubleToSortableLong(boxId), 0, bytesRef); - final DocsEnum docsEnum = MultiFields.getTermDocsEnum(reader, delDocs, fieldName, bytesRef); + final DocsEnum docsEnum = reader.termDocsEnum(delDocs, fieldName, bytesRef); if (docsEnum == null) continue; // iterate through all documents // which have this boxId Index: lucene/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java =================================================================== --- lucene/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java (revision 984234) +++ lucene/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java (working copy) @@ -25,6 +25,7 @@ import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.store.MockRAMDirectory; +import org.apache.lucene.index.SlowMultiReaderWrapper; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.OpenBitSet; @@ -58,8 +59,10 @@ doc.add(new Field(fieldName,""+term,Field.Store.YES,Field.Index.NOT_ANALYZED)); w.addDocument(doc); } - IndexReader reader = w.getReader(); + IndexReader mainReader = w.getReader(); w.close(); + + IndexReader reader = SlowMultiReaderWrapper.wrap(mainReader); TermsFilter tf=new 
TermsFilter(); tf.addTerm(new Term(fieldName,"19")); @@ -78,7 +81,7 @@ bits = (OpenBitSet)tf.getDocIdSet(reader); assertEquals("Must match 2", 2, bits.cardinality()); - reader.close(); + mainReader.close(); rd.close(); } } Index: lucene/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java =================================================================== --- lucene/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java (revision 984234) +++ lucene/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java (working copy) @@ -26,12 +26,14 @@ import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.index.SlowMultiReaderWrapper; import org.apache.lucene.index.Term; import org.apache.lucene.store.MockRAMDirectory; import org.apache.lucene.util.LuceneTestCase; public class BooleanFilterTest extends LuceneTestCase { private MockRAMDirectory directory; + private IndexReader mainReader; private IndexReader reader; @Override @@ -46,13 +48,14 @@ addDoc(writer, "guest", "020", "20050101","Y"); addDoc(writer, "admin", "020", "20050101","Maybe"); addDoc(writer, "admin guest", "030", "20050101","N"); - reader = writer.getReader(); + mainReader = writer.getReader(); + reader = SlowMultiReaderWrapper.wrap(mainReader); writer.close(); } @Override protected void tearDown() throws Exception { - reader.close(); + mainReader.close(); directory.close(); super.tearDown(); } Index: lucene/contrib/queries/src/java/org/apache/lucene/search/TermsFilter.java =================================================================== --- lucene/contrib/queries/src/java/org/apache/lucene/search/TermsFilter.java (revision 984234) +++ lucene/contrib/queries/src/java/org/apache/lucene/search/TermsFilter.java (working copy) @@ -27,7 +27,6 @@ import org.apache.lucene.index.DocsEnum; import org.apache.lucene.index.Terms; import 
org.apache.lucene.index.TermsEnum; -import org.apache.lucene.index.MultiFields; import org.apache.lucene.index.Fields; import org.apache.lucene.util.OpenBitSet; import org.apache.lucene.util.BytesRef; @@ -60,9 +59,9 @@ @Override public DocIdSet getDocIdSet(IndexReader reader) throws IOException { OpenBitSet result=new OpenBitSet(reader.maxDoc()); - Fields fields = MultiFields.getFields(reader); + Fields fields = reader.fields(); BytesRef br = new BytesRef(); - Bits delDocs = MultiFields.getDeletedDocs(reader); + Bits delDocs = reader.getDeletedDocs(); if (fields != null) { String lastField = null; Terms termsC = null;