Index: CHANGES.txt =================================================================== --- CHANGES.txt (revision 786579) +++ CHANGES.txt (working copy) @@ -40,6 +40,17 @@ values internally in certain places, so if you have hits with such scores it will cause problems. (Shai Erera via Mike McCandless) + 2. LUCENE-1630: Deprecate Weight in favor of QueryWeight: added + matching methods to Searcher to take QueryWeight and deprecated + those taking Weight. If you have a Weight implementation, you can + turn it into a QueryWeight with QueryWeightWrapper (will be + removed in 3.0). All of the Weight-based methods are implemented + by wrapping the given Weight and delegating to the QueryWeight variants. + Going forward, Searchable will be kept for convenience only and may + be changed between minor releases without any deprecation + process. It is not recommended to implement it; rather, extend + Searcher. (Shai Erera via Mike McCandless) + Changes in runtime behavior 1. LUCENE-1424: QueryParser now by default uses constant score query @@ -212,6 +223,24 @@ NumericRangeQuery and its new indexing format for numeric or date values. (Uwe Schindler) +23. LUCENE-1630: Deprecate Weight in favor of QueryWeight, which adds + a scorer(IndexReader, boolean /* scoreDocsInOrder */, boolean /* + topScorer */) method instead of scorer(IndexReader) (now + deprecated). The new method is used by IndexSearcher to match + the doc ID ordering of the Scorer to that of the Collector. Some Scorers + (like BooleanScorer) are much more efficient if out-of-order + document scoring is allowed by a Collector. Collector must now + implement acceptsDocsOutOfOrder. If you write a Collector which + does not care about doc ID ordering, it is recommended that you + return true. QueryWeight has the scoresDocsOutOfOrder method, + which by default returns false. If you create a QueryWeight which + will score documents out of order when requested, you should + override that method to return true. Also deprecated + BooleanQuery's setAllowDocsOutOfOrder and getAllowDocsOutOfOrder + as they are not needed anymore. BooleanQuery will now score docs + out of order when used with a Collector that can accept docs out + of order. (Shai Erera via Mike McCandless) + Bug fixes 1.
LUCENE-1415: MultiPhraseQuery has incorrect hashCode() and equals() Index: src/test/org/apache/lucene/search/TestTimeLimitingCollector.java =================================================================== --- src/test/org/apache/lucene/search/TestTimeLimitingCollector.java (revision 786579) +++ src/test/org/apache/lucene/search/TestTimeLimitingCollector.java (working copy) @@ -332,6 +332,10 @@ docBase = base; } + public boolean acceptsDocsOutOfOrder() { + return false; + } + } } Index: src/test/org/apache/lucene/search/TestMultiTermConstantScore.java =================================================================== --- src/test/org/apache/lucene/search/TestMultiTermConstantScore.java (revision 786579) +++ src/test/org/apache/lucene/search/TestMultiTermConstantScore.java (working copy) @@ -180,6 +180,9 @@ public void setNextReader(IndexReader reader, int docBase) { base = docBase; } + public boolean acceptsDocsOutOfOrder() { + return true; + } }); // Index: src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java =================================================================== --- src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java (revision 786579) +++ src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java (working copy) @@ -17,23 +17,18 @@ * limitations under the License. */ -import org.apache.lucene.search.Explanation; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.Scorer; -import org.apache.lucene.search.Weight; -import org.apache.lucene.search.CheckHits; -import org.apache.lucene.store.RAMDirectory; - -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.Term; - import org.apache.lucene.analysis.WhitespaceAnalyzer; - import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; - +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.Term; import org.apache.lucene.queryParser.QueryParser; - +import org.apache.lucene.search.CheckHits; +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.QueryWeight; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.LuceneTestCase; public class TestNearSpansOrdered extends LuceneTestCase { @@ -163,8 +158,8 @@ */ public void testSpanNearScorerSkipTo1() throws Exception { SpanNearQuery q = makeQuery(); - Weight w = q.createWeight(searcher); - Scorer s = w.scorer(searcher.getIndexReader()); + QueryWeight w = q.queryWeight(searcher); + Scorer s = w.scorer(searcher.getIndexReader(), true, false); assertEquals(1, s.advance(1)); } /** @@ -173,8 +168,8 @@ */ public void testSpanNearScorerExplain() throws Exception { SpanNearQuery q = makeQuery(); - Weight w = q.createWeight(searcher); - Scorer s = w.scorer(searcher.getIndexReader()); + QueryWeight w = q.queryWeight(searcher); + Scorer s = w.scorer(searcher.getIndexReader(), true, false); Explanation e = s.explain(1); assertTrue("Scorer explanation value for doc#1 isn't positive: " + e.toString(), Index: src/test/org/apache/lucene/search/spans/JustCompileSearchSpans.java =================================================================== --- src/test/org/apache/lucene/search/spans/JustCompileSearchSpans.java (revision 786579) +++ src/test/org/apache/lucene/search/spans/JustCompileSearchSpans.java (working copy) @@ -21,6 +21,7 @@ import java.util.Collection; import org.apache.lucene.index.IndexReader; +import 
org.apache.lucene.search.QueryWeight; import org.apache.lucene.search.Similarity; import org.apache.lucene.search.Weight; @@ -69,6 +70,7 @@ throw new UnsupportedOperationException(UNSUPPORTED_MSG); } + /** @deprecated delete in 3.0. */ public Collection getTerms() { throw new UnsupportedOperationException(UNSUPPORTED_MSG); } @@ -113,10 +115,16 @@ static final class JustCompileSpanScorer extends SpanScorer { + /** @deprecated delete in 3.0 */ protected JustCompileSpanScorer(Spans spans, Weight weight, Similarity similarity, byte[] norms) throws IOException { super(spans, weight, similarity, norms); } + + protected JustCompileSpanScorer(Spans spans, QueryWeight weight, + Similarity similarity, byte[] norms) throws IOException { + super(spans, weight, similarity, norms); + } protected boolean setFreqCurrentDoc() throws IOException { throw new UnsupportedOperationException(UNSUPPORTED_MSG); Index: src/test/org/apache/lucene/search/CheckHits.java =================================================================== --- src/test/org/apache/lucene/search/CheckHits.java (revision 786579) +++ src/test/org/apache/lucene/search/CheckHits.java (working copy) @@ -17,15 +17,15 @@ * limitations under the License. */ -import org.apache.lucene.store.Directory; -import org.apache.lucene.index.IndexReader; - -import junit.framework.TestCase; - import java.io.IOException; import java.util.Set; import java.util.TreeSet; +import junit.framework.Assert; + +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.store.Directory; + public class CheckHits { /** @@ -55,9 +55,9 @@ if (ignore.contains(new Integer(doc))) continue; Explanation exp = searcher.explain(q, doc); - TestCase.assertNotNull("Explanation of [["+d+"]] for #"+doc+" is null", + Assert.assertNotNull("Explanation of [["+d+"]] for #"+doc+" is null", exp); - TestCase.assertEquals("Explanation of [["+d+"]] for #"+doc+ + Assert.assertEquals("Explanation of [["+d+"]] for #"+doc+ " doesn't indicate non-match: " + exp.toString(), 0.0f, exp.getValue(), 0.0f); } @@ -95,12 +95,14 @@ public void collect(int doc) { actual.add(new Integer(doc + base)); } - public void setNextReader(IndexReader reader, int docBase) { base = docBase; } + public boolean acceptsDocsOutOfOrder() { + return true; + } }); - TestCase.assertEquals(query.toString(defaultFieldName), correct, actual); + Assert.assertEquals(query.toString(defaultFieldName), correct, actual); QueryUtils.check(query,searcher); } @@ -126,7 +128,7 @@ int[] results) throws IOException { if (searcher instanceof IndexSearcher) { - QueryUtils.check(query,(IndexSearcher)searcher); + QueryUtils.check(query,searcher); } ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; @@ -141,7 +143,7 @@ actual.add(new Integer(hits[i].doc)); } - TestCase.assertEquals(query.toString(defaultFieldName), correct, actual); + Assert.assertEquals(query.toString(defaultFieldName), correct, actual); QueryUtils.check(query,searcher); } @@ -149,9 +151,9 @@ /** Tests that a Hits has an expected order of documents */ public static void checkDocIds(String mes, int[] results, ScoreDoc[] hits) throws IOException { - TestCase.assertEquals(mes + " nr of hits", hits.length, results.length); + Assert.assertEquals(mes + " nr of hits", hits.length, results.length); for (int i = 0; i < results.length; i++) { - TestCase.assertEquals(mes + " doc nrs for hit " + i, results[i], hits[i].doc); + Assert.assertEquals(mes + " doc nrs for hit " + i, results[i], hits[i].doc); } } @@ -173,11 +175,11 @@ public static void checkEqual(Query query, 
ScoreDoc[] hits1, ScoreDoc[] hits2) throws IOException { final float scoreTolerance = 1.0e-6f; if (hits1.length != hits2.length) { - TestCase.fail("Unequal lengths: hits1="+hits1.length+",hits2="+hits2.length); + Assert.fail("Unequal lengths: hits1="+hits1.length+",hits2="+hits2.length); } for (int i = 0; i < hits1.length; i++) { if (hits1[i].doc != hits2[i].doc) { - TestCase.fail("Hit " + i + " docnumbers don't match\n" + Assert.fail("Hit " + i + " docnumbers don't match\n" + hits2str(hits1, hits2,0,0) + "for query:" + query.toString()); } @@ -185,7 +187,7 @@ if ((hits1[i].doc != hits2[i].doc) || Math.abs(hits1[i].score - hits2[i].score) > scoreTolerance) { - TestCase.fail("Hit " + i + ", doc nrs " + hits1[i].doc + " and " + hits2[i].doc + Assert.fail("Hit " + i + ", doc nrs " + hits1[i].doc + " and " + hits2[i].doc + "\nunequal : " + hits1[i].score + "\n and: " + hits2[i].score + "\nfor query:" + query.toString()); @@ -294,7 +296,7 @@ boolean deep, Explanation expl) { float value = expl.getValue(); - TestCase.assertEquals(q+": score(doc="+doc+")="+score+ + Assert.assertEquals(q+": score(doc="+doc+")="+score+ " != explanationScore="+value+" Explanation: "+expl, score,value,EXPLAIN_SCORE_TOLERANCE_DELTA); @@ -331,7 +333,7 @@ } } } - TestCase.assertTrue( + Assert.assertTrue( q+": multi valued explanation description=\""+descr +"\" must be 'max of plus x times others' or end with 'product of'" +" or 'sum of:' or 'max of:' - "+expl, @@ -356,9 +358,9 @@ } else if (maxTimesOthers) { combined = max + x * (sum - max); } else { - TestCase.assertTrue("should never get here!",false); + Assert.assertTrue("should never get here!",false); } - TestCase.assertEquals(q+": actual subDetails combined=="+combined+ + Assert.assertEquals(q+": actual subDetails combined=="+combined+ " != value="+value+" Explanation: "+expl, combined,value,EXPLAIN_SCORE_TOLERANCE_DELTA); } @@ -466,14 +468,15 @@ ("exception in hitcollector of [["+d+"]] for #"+doc, e); } - TestCase.assertNotNull("Explanation of [["+d+"]] for #"+doc+" is null", - exp); + Assert.assertNotNull("Explanation of [["+d+"]] for #"+doc+" is null", exp); verifyExplanation(d,doc,scorer.score(),deep,exp); } public void setNextReader(IndexReader reader, int docBase) { base = docBase; } - + public boolean acceptsDocsOutOfOrder() { + return true; + } } } Index: src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java =================================================================== --- src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java (revision 786579) +++ src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java (working copy) @@ -99,6 +99,10 @@ this.scorer = new ScoreCachingWrappingScorer(scorer); } + public boolean acceptsDocsOutOfOrder() { + return true; + } + } private static final float[] scores = new float[] { 0.7767749f, 1.7839992f, Index: src/test/org/apache/lucene/search/TestTopDocsCollector.java =================================================================== --- src/test/org/apache/lucene/search/TestTopDocsCollector.java (revision 786579) +++ src/test/org/apache/lucene/search/TestTopDocsCollector.java (working copy) @@ -69,6 +69,10 @@ // Don't do anything. Assign scores in random } + public boolean acceptsDocsOutOfOrder() { + return true; + } + } // Scores array to be used by MyTopDocsCollector. 
If it is changed, MAX_SCORE Index: src/test/org/apache/lucene/search/TestTermScorer.java =================================================================== --- src/test/org/apache/lucene/search/TestTermScorer.java (revision 786579) +++ src/test/org/apache/lucene/search/TestTermScorer.java (working copy) @@ -70,7 +70,7 @@ Term allTerm = new Term(FIELD, "all"); TermQuery termQuery = new TermQuery(allTerm); - Weight weight = termQuery.weight(indexSearcher); + QueryWeight weight = termQuery.queryWeight(indexSearcher); TermScorer ts = new TermScorer(weight, indexReader.termDocs(allTerm), indexSearcher.getSimilarity(), @@ -98,6 +98,9 @@ public void setNextReader(IndexReader reader, int docBase) { base = docBase; } + public boolean acceptsDocsOutOfOrder() { + return true; + } }); assertTrue("docs Size: " + docs.size() + " is not: " + 2, docs.size() == 2); TestHit doc0 = (TestHit) docs.get(0); @@ -129,7 +132,7 @@ Term allTerm = new Term(FIELD, "all"); TermQuery termQuery = new TermQuery(allTerm); - Weight weight = termQuery.weight(indexSearcher); + QueryWeight weight = termQuery.queryWeight(indexSearcher); TermScorer ts = new TermScorer(weight, indexReader.termDocs(allTerm), indexSearcher.getSimilarity(), @@ -146,14 +149,14 @@ Term allTerm = new Term(FIELD, "all"); TermQuery termQuery = new TermQuery(allTerm); - Weight weight = termQuery.weight(indexSearcher); + QueryWeight weight = termQuery.queryWeight(indexSearcher); TermScorer ts = new TermScorer(weight, indexReader.termDocs(allTerm), indexSearcher.getSimilarity(), indexReader.norms(FIELD)); assertTrue("Didn't skip", ts.advance(3) != DocIdSetIterator.NO_MORE_DOCS); //The next doc should be doc 5 - assertTrue("doc should be number 5", ts.doc() == 5); + assertTrue("doc should be number 5", ts.docID() == 5); } public void testExplain() throws Exception @@ -161,7 +164,7 @@ Term allTerm = new Term(FIELD, "all"); TermQuery termQuery = new TermQuery(allTerm); - Weight weight = termQuery.weight(indexSearcher); + QueryWeight weight = termQuery.queryWeight(indexSearcher); TermScorer ts = new TermScorer(weight, indexReader.termDocs(allTerm), indexSearcher.getSimilarity(), @@ -179,7 +182,7 @@ Term dogsTerm = new Term(FIELD, "dogs"); termQuery = new TermQuery(dogsTerm); - weight = termQuery.weight(indexSearcher); + weight = termQuery.queryWeight(indexSearcher); ts = new TermScorer(weight, indexReader.termDocs(dogsTerm), indexSearcher.getSimilarity(), indexReader.norms(FIELD)); Index: src/test/org/apache/lucene/search/TestSetNorm.java =================================================================== --- src/test/org/apache/lucene/search/TestSetNorm.java (revision 786579) +++ src/test/org/apache/lucene/search/TestSetNorm.java (working copy) @@ -76,6 +76,9 @@ public void setNextReader(IndexReader reader, int docBase) { base = docBase; } + public boolean acceptsDocsOutOfOrder() { + return true; + } }); float lastScore = 0.0f; Index: src/test/org/apache/lucene/search/TestScorerPerf.java =================================================================== --- src/test/org/apache/lucene/search/TestScorerPerf.java (revision 786579) +++ src/test/org/apache/lucene/search/TestScorerPerf.java (working copy) @@ -114,6 +114,9 @@ public void setNextReader(IndexReader reader, int base) { docBase = base; } + public boolean acceptsDocsOutOfOrder() { + return true; + } } Index: src/test/org/apache/lucene/search/JustCompileSearch.java =================================================================== --- src/test/org/apache/lucene/search/JustCompileSearch.java (revision 
786579) +++ src/test/org/apache/lucene/search/JustCompileSearch.java (working copy) @@ -38,8 +38,12 @@ private static final String UNSUPPORTED_MSG = "unsupported: used for back-compat testing only !"; - static final class JustCompileSearchable implements Searchable { + static final class JustCompileSearcher extends Searcher { + protected QueryWeight createQueryWeight(Query query) throws IOException { + throw new UnsupportedOperationException(UNSUPPORTED_MSG); + } + public void close() throws IOException { throw new UnsupportedOperationException(UNSUPPORTED_MSG); } @@ -48,68 +52,49 @@ throw new UnsupportedOperationException(UNSUPPORTED_MSG); } - public Document doc(int n, FieldSelector fieldSelector) - throws CorruptIndexException, IOException { - throw new UnsupportedOperationException(UNSUPPORTED_MSG); - } - - public int docFreq(Term term) throws IOException { - throw new UnsupportedOperationException(UNSUPPORTED_MSG); - } - public int[] docFreqs(Term[] terms) throws IOException { throw new UnsupportedOperationException(UNSUPPORTED_MSG); } - public Explanation explain(Weight weight, int doc) throws IOException { + public Explanation explain(Query query, int doc) throws IOException { throw new UnsupportedOperationException(UNSUPPORTED_MSG); } - public int maxDoc() throws IOException { + public Similarity getSimilarity() { throw new UnsupportedOperationException(UNSUPPORTED_MSG); } - public Query rewrite(Query query) throws IOException { + public void search(Query query, Collector results) throws IOException { throw new UnsupportedOperationException(UNSUPPORTED_MSG); } - public void search(Weight weight, Filter filter, HitCollector results) + public void search(Query query, Filter filter, Collector results) throws IOException { throw new UnsupportedOperationException(UNSUPPORTED_MSG); } - public void search(Weight weight, Filter filter, Collector collector) - throws IOException { + public TopDocs search(Query query, Filter filter, int n) throws IOException { throw new UnsupportedOperationException(UNSUPPORTED_MSG); } - - public TopDocs search(Weight weight, Filter filter, int n) + + public TopFieldDocs search(Query query, Filter filter, int n, Sort sort) throws IOException { throw new UnsupportedOperationException(UNSUPPORTED_MSG); } - - public TopFieldDocs search(Weight weight, Filter filter, int n, Sort sort) - throws IOException { + + public TopDocs search(Query query, int n) throws IOException { throw new UnsupportedOperationException(UNSUPPORTED_MSG); } - - } - - static final class JustCompileSearcher extends Searcher { - - public void close() throws IOException { + + public void setSimilarity(Similarity similarity) { throw new UnsupportedOperationException(UNSUPPORTED_MSG); } - - public Document doc(int i) throws CorruptIndexException, IOException { - throw new UnsupportedOperationException(UNSUPPORTED_MSG); - } - + public int docFreq(Term term) throws IOException { throw new UnsupportedOperationException(UNSUPPORTED_MSG); } - public Explanation explain(Weight weight, int doc) throws IOException { + public Explanation explain(QueryWeight weight, int doc) throws IOException { throw new UnsupportedOperationException(UNSUPPORTED_MSG); } @@ -121,26 +106,21 @@ throw new UnsupportedOperationException(UNSUPPORTED_MSG); } - public void search(Weight weight, Filter filter, HitCollector results) + public void search(QueryWeight weight, Filter filter, Collector results) throws IOException { throw new UnsupportedOperationException(UNSUPPORTED_MSG); } - public void search(Weight weight, Filter 
filter, Collector results) + public TopDocs search(QueryWeight weight, Filter filter, int n) throws IOException { throw new UnsupportedOperationException(UNSUPPORTED_MSG); } - public TopDocs search(Weight weight, Filter filter, int n) + public TopFieldDocs search(QueryWeight weight, Filter filter, int n, Sort sort) throws IOException { throw new UnsupportedOperationException(UNSUPPORTED_MSG); } - public TopFieldDocs search(Weight weight, Filter filter, int n, Sort sort) - throws IOException { - throw new UnsupportedOperationException(UNSUPPORTED_MSG); - } - public Document doc(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException { throw new UnsupportedOperationException(UNSUPPORTED_MSG); @@ -163,6 +143,10 @@ throw new UnsupportedOperationException(UNSUPPORTED_MSG); } + public boolean acceptsDocsOutOfOrder() { + throw new UnsupportedOperationException(UNSUPPORTED_MSG); + } + } static final class JustCompileDocIdSet extends DocIdSet { @@ -459,7 +443,7 @@ static final class JustCompilePhraseScorer extends PhraseScorer { - JustCompilePhraseScorer(Weight weight, TermPositions[] tps, int[] offsets, + JustCompilePhraseScorer(QueryWeight weight, TermPositions[] tps, int[] offsets, Similarity similarity, byte[] norms) { super(weight, tps, offsets, similarity, norms); } @@ -580,9 +564,13 @@ throw new UnsupportedOperationException(UNSUPPORTED_MSG); } + public boolean acceptsDocsOutOfOrder() { + throw new UnsupportedOperationException(UNSUPPORTED_MSG); + } + } - static final class JustCompileWeight implements Weight { + static final class JustCompileWeight extends QueryWeight { public Explanation explain(IndexReader reader, int doc) throws IOException { throw new UnsupportedOperationException(UNSUPPORTED_MSG); @@ -600,6 +588,7 @@ throw new UnsupportedOperationException(UNSUPPORTED_MSG); } + /** @deprecated delete in 3.0 */ public Scorer scorer(IndexReader reader) throws IOException { throw new UnsupportedOperationException(UNSUPPORTED_MSG); } @@ -607,6 +596,11 @@ public float sumOfSquaredWeights() throws IOException { throw new UnsupportedOperationException(UNSUPPORTED_MSG); } + + public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) + throws IOException { + throw new UnsupportedOperationException(UNSUPPORTED_MSG); + } } Index: src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java =================================================================== --- src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java (revision 786579) +++ src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java (working copy) @@ -134,8 +134,8 @@ QueryUtils.check(dq,s); - final Weight dw = dq.weight(s); - final Scorer ds = dw.scorer(r); + final QueryWeight dw = dq.queryWeight(s); + final Scorer ds = dw.scorer(r, true, false); final boolean skipOk = ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS; if (skipOk) { fail("firsttime skipTo found a match? ... 
" + r.document(ds.docID()).get("id")); @@ -149,40 +149,37 @@ QueryUtils.check(dq,s); - final Weight dw = dq.weight(s); - final Scorer ds = dw.scorer(r); + final QueryWeight dw = dq.queryWeight(s); + final Scorer ds = dw.scorer(r, true, false); assertTrue("firsttime skipTo found no match", ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS); assertEquals("found wrong docid", "d4", r.document(ds.docID()).get("id")); } + public void testSimpleEqualScores1() throws Exception { + DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.0f); + q.add(tq("hed","albino")); + q.add(tq("hed","elephant")); + QueryUtils.check(q,s); - public void testSimpleEqualScores1() throws Exception { + ScoreDoc[] h = s.search(q, null, 1000).scoreDocs; - DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.0f); - q.add(tq("hed","albino")); - q.add(tq("hed","elephant")); - QueryUtils.check(q,s); + try { + assertEquals("all docs should match " + q.toString(), + 4, h.length); - ScoreDoc[] h = s.search(q, null, 1000).scoreDocs; - - try { - assertEquals("all docs should match " + q.toString(), - 4, h.length); - - float score = h[0].score; - for (int i = 1; i < h.length; i++) { - assertEquals("score #" + i + " is not the same", - score, h[i].score, SCORE_COMP_THRESH); - } - } catch (Error e) { - printHits("testSimpleEqualScores1",h,s); - throw e; - } - - + float score = h[0].score; + for (int i = 1; i < h.length; i++) { + assertEquals("score #" + i + " is not the same", + score, h[i].score, SCORE_COMP_THRESH); + } + } catch (Error e) { + printHits("testSimpleEqualScores1",h,s); + throw e; } + } + public void testSimpleEqualScores2() throws Exception { DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.0f); Index: src/test/org/apache/lucene/search/TestSimilarity.java =================================================================== --- src/test/org/apache/lucene/search/TestSimilarity.java (revision 786579) +++ src/test/org/apache/lucene/search/TestSimilarity.java (working copy) @@ -74,9 +74,7 @@ Term b = new Term("field", "b"); Term c = new Term("field", "c"); - searcher.search - (new TermQuery(b), - new Collector() { + searcher.search(new TermQuery(b), new Collector() { private Scorer scorer; public void setScorer(Scorer scorer) throws IOException { this.scorer = scorer; @@ -85,15 +83,16 @@ assertTrue(scorer.score() == 1.0f); } public void setNextReader(IndexReader reader, int docBase) {} + public boolean acceptsDocsOutOfOrder() { + return true; + } }); BooleanQuery bq = new BooleanQuery(); bq.add(new TermQuery(a), BooleanClause.Occur.SHOULD); bq.add(new TermQuery(b), BooleanClause.Occur.SHOULD); //System.out.println(bq.toString("field")); - searcher.search - (bq, - new Collector() { + searcher.search(bq, new Collector() { private int base = 0; private Scorer scorer; public void setScorer(Scorer scorer) throws IOException { @@ -106,6 +105,9 @@ public void setNextReader(IndexReader reader, int docBase) { base = docBase; } + public boolean acceptsDocsOutOfOrder() { + return true; + } }); PhraseQuery pq = new PhraseQuery(); @@ -124,13 +126,14 @@ assertTrue(scorer.score() == 1.0f); } public void setNextReader(IndexReader reader, int docBase) {} + public boolean acceptsDocsOutOfOrder() { + return true; + } }); pq.setSlop(2); //System.out.println(pq.toString("field")); - searcher.search - (pq, - new Collector() { + searcher.search(pq, new Collector() { private Scorer scorer; public void setScorer(Scorer scorer) throws IOException { this.scorer = scorer; @@ -140,6 +143,9 @@ assertTrue(scorer.score() == 2.0f); } public void 
setNextReader(IndexReader reader, int docBase) {} + public boolean acceptsDocsOutOfOrder() { + return true; + } }); } } Index: src/test/org/apache/lucene/search/TestCustomSearcherSort.java =================================================================== --- src/test/org/apache/lucene/search/TestCustomSearcherSort.java (revision 786579) +++ src/test/org/apache/lucene/search/TestCustomSearcherSort.java (working copy) @@ -122,9 +122,8 @@ new SortField("publicationDate_"), SortField.FIELD_SCORE }); - Searcher searcher = - new MultiSearcher(new Searchable[] { - new CustomSearcher (index, 2)}); + Searcher searcher = new MultiSearcher(new Searcher[] { new CustomSearcher( + index, 2) }); // search and check hits matchHits(searcher, custSort); } Index: src/test/org/apache/lucene/search/TestDocBoost.java =================================================================== --- src/test/org/apache/lucene/search/TestDocBoost.java (revision 786579) +++ src/test/org/apache/lucene/search/TestDocBoost.java (working copy) @@ -80,6 +80,9 @@ public void setNextReader(IndexReader reader, int docBase) { base = docBase; } + public boolean acceptsDocsOutOfOrder() { + return true; + } }); float lastScore = 0.0f; Index: src/test/org/apache/lucene/search/QueryUtils.java =================================================================== --- src/test/org/apache/lucene/search/QueryUtils.java (revision 786579) +++ src/test/org/apache/lucene/search/QueryUtils.java (working copy) @@ -105,7 +105,7 @@ * @throws IOException if serialization check fail. */ private static void checkSerialization(Query q, Searcher s) throws IOException { - Weight w = q.weight(s); + QueryWeight w = q.queryWeight(s); try { ByteArrayOutputStream bos = new ByteArrayOutputStream(); ObjectOutputStream oos = new ObjectOutputStream(bos); @@ -150,8 +150,8 @@ //System.out.print("Order:");for (int i = 0; i < order.length; i++) System.out.print(order[i]==skip_op ? " skip()":" next()"); System.out.println(); final int opidx[] = {0}; - final Weight w = q.weight(s); - final Scorer scorer = w.scorer(s.getIndexReader()); + final QueryWeight w = q.queryWeight(s); + final Scorer scorer = w.scorer(s.getIndexReader(), true, false); // FUTURE: ensure scorer.doc()==-1 @@ -200,6 +200,9 @@ public void setNextReader(IndexReader reader, int docBase) { base = docBase; } + public boolean acceptsDocsOutOfOrder() { + return true; + } }); // make sure next call to scorer is false. 
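As a point of reference for the Collector changes above: under the patched API every Collector must answer acceptsDocsOutOfOrder(), as the anonymous collectors in these tests now do. The sketch below is not part of this patch; the class name CountingCollector is illustrative only, and it simply shows a minimal stand-alone Collector against the patched API.

import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.Scorer;

/**
 * Minimal Collector sketch for the patched API. The only new requirement is
 * acceptsDocsOutOfOrder(), which tells IndexSearcher whether this collector
 * may be fed documents out of doc ID order (enabling e.g. BooleanScorer for
 * pure disjunctions).
 */
public class CountingCollector extends Collector {

  private int count;
  private int docBase;

  public void setScorer(Scorer scorer) throws IOException {
    // this collector ignores scores
  }

  public void collect(int doc) throws IOException {
    count++; // doc is relative to the current docBase
  }

  public void setNextReader(IndexReader reader, int docBase) throws IOException {
    this.docBase = docBase;
  }

  public boolean acceptsDocsOutOfOrder() {
    return true; // counting does not depend on doc ID order
  }

  public int getCount() {
    return count;
  }
}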
@@ -228,8 +231,8 @@ float score = scorer.score(); try { for (int i=lastDoc[0]+1; i<=doc; i++) { - Weight w = q.weight(s); - Scorer scorer = w.scorer(s.getIndexReader()); + QueryWeight w = q.queryWeight(s); + Scorer scorer = w.scorer(s.getIndexReader(), true, false); Assert.assertTrue("query collected "+doc+" but skipTo("+i+") says no more docs!",scorer.advance(i) != DocIdSetIterator.NO_MORE_DOCS); Assert.assertEquals("query collected "+doc+" but skipTo("+i+") got to "+scorer.docID(),doc,scorer.docID()); float skipToScore = scorer.score(); @@ -244,9 +247,12 @@ public void setNextReader(IndexReader reader, int docBase) { base = docBase; } + public boolean acceptsDocsOutOfOrder() { + return false; + } }); - Weight w = q.weight(s); - Scorer scorer = w.scorer(s.getIndexReader()); + QueryWeight w = q.queryWeight(s); + Scorer scorer = w.scorer(s.getIndexReader(), true, false); boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS; if (more) Assert.assertFalse("query's last doc was "+lastDoc[0]+" but skipTo("+(lastDoc[0]+1)+") got to "+scorer.docID(),more); Index: src/test/org/apache/lucene/index/TestOmitTf.java =================================================================== --- src/test/org/apache/lucene/index/TestOmitTf.java (revision 786579) +++ src/test/org/apache/lucene/index/TestOmitTf.java (working copy) @@ -384,5 +384,8 @@ public void setNextReader(IndexReader reader, int docBase) { this.docBase = docBase; } + public boolean acceptsDocsOutOfOrder() { + return true; + } } } Index: src/test/org/apache/lucene/index/TestIndexReaderReopen.java =================================================================== --- src/test/org/apache/lucene/index/TestIndexReaderReopen.java (revision 786579) +++ src/test/org/apache/lucene/index/TestIndexReaderReopen.java (working copy) @@ -1073,7 +1073,6 @@ protected void setUp() throws Exception { - // TODO Auto-generated method stub super.setUp(); String tempDir = System.getProperty("java.io.tmpdir"); if (tempDir == null) Index: src/java/org/apache/lucene/search/ConstantScoreQuery.java =================================================================== --- src/java/org/apache/lucene/search/ConstantScoreQuery.java (revision 786579) +++ src/java/org/apache/lucene/search/ConstantScoreQuery.java (working copy) @@ -50,11 +50,11 @@ // but may not be OK for highlighting } - protected class ConstantWeight implements Weight { + protected class ConstantWeight extends QueryWeight { private Similarity similarity; private float queryNorm; private float queryWeight; - + public ConstantWeight(Searcher searcher) { this.similarity = getSimilarity(searcher); } @@ -77,13 +77,13 @@ queryWeight *= this.queryNorm; } - public Scorer scorer(IndexReader reader) throws IOException { + public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException { return new ConstantScorer(similarity, reader, this); } public Explanation explain(IndexReader reader, int doc) throws IOException { - ConstantScorer cs = (ConstantScorer)scorer(reader); + ConstantScorer cs = (ConstantScorer) scorer(reader, true, false); boolean exists = cs.docIdSetIterator.advance(doc) == doc; ComplexExplanation result = new ComplexExplanation(); @@ -110,7 +110,7 @@ final float theScore; int doc = -1; - public ConstantScorer(Similarity similarity, IndexReader reader, Weight w) throws IOException { + public ConstantScorer(Similarity similarity, IndexReader reader, QueryWeight w) throws IOException { super(similarity); theScore = w.getValue(); 
docIdSetIterator = filter.getDocIdSet(reader).iterator(); @@ -152,7 +152,7 @@ } } - protected Weight createWeight(Searcher searcher) { + public QueryWeight createQueryWeight(Searcher searcher) { return new ConstantScoreQuery.ConstantWeight(searcher); } Index: src/java/org/apache/lucene/search/QueryWeightWrapper.java =================================================================== --- src/java/org/apache/lucene/search/QueryWeightWrapper.java (revision 0) +++ src/java/org/apache/lucene/search/QueryWeightWrapper.java (revision 0) @@ -0,0 +1,68 @@ +package org.apache.lucene.search; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.io.IOException; + +import org.apache.lucene.index.IndexReader; + +/** + * A wrapper class for the deprecated {@link Weight}. + * Please re-implement any custom Weight classes as {@link + * QueryWeight} instead. + * + * @deprecated will be removed in 3.0 + */ +public class QueryWeightWrapper extends QueryWeight { + + private Weight weight; + + public QueryWeightWrapper(Weight weight) { + this.weight = weight; + } + + public Explanation explain(IndexReader reader, int doc) throws IOException { + return weight.explain(reader, doc); + } + + public Query getQuery() { + return weight.getQuery(); + } + + public float getValue() { + return weight.getValue(); + } + + public void normalize(float norm) { + weight.normalize(norm); + } + + public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) + throws IOException { + return weight.scorer(reader); + } + + public float sumOfSquaredWeights() throws IOException { + return weight.sumOfSquaredWeights(); + } + + public Scorer scorer(IndexReader reader) throws IOException { + return weight.scorer(reader); + } + +} Property changes on: src/java/org/apache/lucene/search/QueryWeightWrapper.java ___________________________________________________________________ Added: svn:eol-style + native Index: src/java/org/apache/lucene/search/BooleanScorer2.java =================================================================== --- src/java/org/apache/lucene/search/BooleanScorer2.java (revision 786579) +++ src/java/org/apache/lucene/search/BooleanScorer2.java (working copy) @@ -30,9 +30,10 @@ *
Uses ConjunctionScorer, DisjunctionScorer, ReqOptScorer and ReqExclScorer. */ class BooleanScorer2 extends Scorer { - private ArrayList requiredScorers = new ArrayList(); - private ArrayList optionalScorers = new ArrayList(); - private ArrayList prohibitedScorers = new ArrayList(); + + private final List requiredScorers; + private final List optionalScorers; + private final List prohibitedScorers; private class Coordinator { float[] coordFactors = null; @@ -54,90 +55,52 @@ /** The scorer to which all scoring will be delegated, * except for computing and using the coordination factor. */ - private Scorer countingSumScorer = null; + private final Scorer countingSumScorer; /** The number of optionalScorers that need to match (if there are any) */ private final int minNrShouldMatch; - /** Whether it is allowed to return documents out of order. - * This can accelerate the scoring of disjunction queries. - */ - private boolean allowDocsOutOfOrder; - private int doc = -1; - /** Create a BooleanScorer2. - * @param similarity The similarity to be used. - * @param minNrShouldMatch The minimum number of optional added scorers - * that should match during the search. - * In case no required scorers are added, - * at least one of the optional scorers will have to - * match during the search. - * @param allowDocsOutOfOrder Whether it is allowed to return documents out of order. - * This can accelerate the scoring of disjunction queries. + /** + * Creates a {@link Scorer} with the given similarity and lists of required, + * prohibited and optional scorers. If no required scorers are added, at least + * one of the optional scorers will have to match during the search. + * + * @param similarity + * The similarity to be used. + * @param minNrShouldMatch + * The minimum number of optional added scorers that should match + * during the search. In case no required scorers are added, at least + * one of the optional scorers will have to match during the search. + * @param required + * the list of required scorers. + * @param prohibited + * the list of prohibited scorers. + * @param optional + * the list of optional scorers. */ - public BooleanScorer2(Similarity similarity, int minNrShouldMatch, boolean allowDocsOutOfOrder) throws IOException { + public BooleanScorer2(Similarity similarity, int minNrShouldMatch, + List required, List prohibited, List optional) throws IOException { super(similarity); if (minNrShouldMatch < 0) { throw new IllegalArgumentException("Minimum number of optional scorers should not be negative"); } coordinator = new Coordinator(); this.minNrShouldMatch = minNrShouldMatch; - this.allowDocsOutOfOrder = allowDocsOutOfOrder; - } - /** Create a BooleanScorer2. - * In no required scorers are added, - * at least one of the optional scorers will have to match during the search. - * @param similarity The similarity to be used. - * @param minNrShouldMatch The minimum number of optional added scorers - * that should match during the search. - * In case no required scorers are added, - * at least one of the optional scorers will have to - * match during the search. - */ - public BooleanScorer2(Similarity similarity, int minNrShouldMatch) throws IOException { - this(similarity, minNrShouldMatch, false); - } - - /** Create a BooleanScorer2. - * In no required scorers are added, - * at least one of the optional scorers will have to match during the search. - * @param similarity The similarity to be used.
- */ - public BooleanScorer2(Similarity similarity) throws IOException { - this(similarity, 0, false); - } + optionalScorers = optional; + coordinator.maxCoord += optional.size(); - public void add(final Scorer scorer, boolean required, boolean prohibited) throws IOException { - if (!prohibited) { - coordinator.maxCoord++; - } - - if (required) { - if (prohibited) { - throw new IllegalArgumentException("scorer cannot be required and prohibited"); - } - requiredScorers.add(scorer); - } else if (prohibited) { - prohibitedScorers.add(scorer); - } else { - optionalScorers.add(scorer); - } - } - - /** Initialize the match counting scorer that sums all the - * scores.

- * When "counting" is used in a name it means counting the number - * of matching scorers.
- * When "sum" is used in a name it means score value summing - * over the matching scorers - */ - private void initCountingSumScorer() throws IOException { + requiredScorers = required; + coordinator.maxCoord += required.size(); + + prohibitedScorers = prohibited; + coordinator.init(); countingSumScorer = makeCountingSumScorer(); } - + /** Count a scorer as a single match. */ private class SingleMatchScorer extends Scorer { private Scorer scorer; @@ -333,19 +296,10 @@ *
When this method is used the {@link #explain(int)} method should not be used. */ public void score(Collector collector) throws IOException { - if (allowDocsOutOfOrder && requiredScorers.size() == 0 - && prohibitedScorers.size() < 32) { - new BooleanScorer(getSimilarity(), minNrShouldMatch, optionalScorers, - prohibitedScorers).score(collector); - } else { - if (countingSumScorer == null) { - initCountingSumScorer(); - } - collector.setScorer(this); - int doc; - while ((doc = countingSumScorer.nextDoc()) != NO_MORE_DOCS) { - collector.collect(doc); - } + collector.setScorer(this); + int doc; + while ((doc = countingSumScorer.nextDoc()) != NO_MORE_DOCS) { + collector.collect(doc); } } @@ -386,9 +340,6 @@ } public int nextDoc() throws IOException { - if (countingSumScorer == null) { - initCountingSumScorer(); - } return doc = countingSumScorer.nextDoc(); } @@ -404,9 +355,6 @@ } public int advance(int target) throws IOException { - if (countingSumScorer == null) { - initCountingSumScorer(); - } return doc = countingSumScorer.advance(target); } Index: src/java/org/apache/lucene/search/MatchAllDocsQuery.java =================================================================== --- src/java/org/apache/lucene/search/MatchAllDocsQuery.java (revision 786579) +++ src/java/org/apache/lucene/search/MatchAllDocsQuery.java (working copy) @@ -49,7 +49,7 @@ final byte[] norms; private int doc = -1; - MatchAllScorer(IndexReader reader, Similarity similarity, Weight w, + MatchAllScorer(IndexReader reader, Similarity similarity, QueryWeight w, byte[] norms) throws IOException { super(similarity); this.termDocs = reader.termDocs(null); @@ -93,7 +93,7 @@ } } - private class MatchAllDocsWeight implements Weight { + private class MatchAllDocsWeight extends QueryWeight { private Similarity similarity; private float queryWeight; private float queryNorm; @@ -124,7 +124,7 @@ queryWeight *= this.queryNorm; } - public Scorer scorer(IndexReader reader) throws IOException { + public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException { return new MatchAllScorer(reader, similarity, this, normsField != null ? reader.norms(normsField) : null); } @@ -142,7 +142,7 @@ } } - protected Weight createWeight(Searcher searcher) { + public QueryWeight createQueryWeight(Searcher searcher) { return new MatchAllDocsWeight(searcher); } Index: src/java/org/apache/lucene/search/DisjunctionMaxQuery.java =================================================================== --- src/java/org/apache/lucene/search/DisjunctionMaxQuery.java (revision 786579) +++ src/java/org/apache/lucene/search/DisjunctionMaxQuery.java (working copy) @@ -86,7 +86,7 @@ } /* The Weight for DisjunctionMaxQuery's, used to normalize, score and explain these queries */ - private class DisjunctionMaxWeight implements Weight { + private class DisjunctionMaxWeight extends QueryWeight { private Similarity similarity; // The similarity which we are associated. private ArrayList weights = new ArrayList(); // The Weight's for our subqueries, in 1-1 correspondence with disjuncts @@ -94,8 +94,9 @@ /* Construct the Weight for this Query searched by searcher. Recursively construct subquery weights. 
*/ public DisjunctionMaxWeight(Searcher searcher) throws IOException { this.similarity = searcher.getSimilarity(); - for (int i = 0; i < disjuncts.size(); i++) - weights.add(((Query) disjuncts.get(i)).createWeight(searcher)); + for (Iterator iter = disjuncts.iterator(); iter.hasNext();) { + weights.add(((Query) iter.next()).createQueryWeight(searcher)); + } } /* Return our associated DisjunctionMaxQuery */ @@ -107,28 +108,32 @@ /* Compute the sub of squared weights of us applied to our subqueries. Used for normalization. */ public float sumOfSquaredWeights() throws IOException { float max = 0.0f, sum = 0.0f; - for (int i = 0; i < weights.size(); i++) { - float sub = ((Weight) weights.get(i)).sumOfSquaredWeights(); + for (Iterator iter = weights.iterator(); iter.hasNext();) { + float sub = ((QueryWeight) iter.next()).sumOfSquaredWeights(); sum += sub; max = Math.max(max, sub); + } - return (((sum - max) * tieBreakerMultiplier * tieBreakerMultiplier) + max) * getBoost() * getBoost(); + float boost = getBoost(); + return (((sum - max) * tieBreakerMultiplier * tieBreakerMultiplier) + max) * boost * boost; } /* Apply the computed normalization factor to our subqueries */ public void normalize(float norm) { norm *= getBoost(); // Incorporate our boost - for (int i = 0 ; i < weights.size(); i++) - ((Weight) weights.get(i)).normalize(norm); + for (Iterator iter = weights.iterator(); iter.hasNext();) { + ((QueryWeight) iter.next()).normalize(norm); + } } /* Create the scorer used to score our associated DisjunctionMaxQuery */ - public Scorer scorer(IndexReader reader) throws IOException { + public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, + boolean topScorer) throws IOException { Scorer[] scorers = new Scorer[weights.size()]; int idx = 0; for (Iterator iter = weights.iterator(); iter.hasNext();) { - Weight w = (Weight) iter.next(); - Scorer subScorer = w.scorer(reader); + QueryWeight w = (QueryWeight) iter.next(); + Scorer subScorer = w.scorer(reader, true, false); if (subScorer == null) { return null; } else if (subScorer.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { @@ -142,12 +147,12 @@ /* Explain the score we computed for doc */ public Explanation explain(IndexReader reader, int doc) throws IOException { - if ( disjuncts.size() == 1) return ((Weight) weights.get(0)).explain(reader,doc); + if (disjuncts.size() == 1) return ((QueryWeight) weights.get(0)).explain(reader,doc); ComplexExplanation result = new ComplexExplanation(); float max = 0.0f, sum = 0.0f; result.setDescription(tieBreakerMultiplier == 0.0f ? 
"max of:" : "max plus " + tieBreakerMultiplier + " times others of:"); - for (int i = 0 ; i < weights.size(); i++) { - Explanation e = ((Weight) weights.get(i)).explain(reader, doc); + for (Iterator iter = weights.iterator(); iter.hasNext();) { + Explanation e = ((QueryWeight) iter.next()).explain(reader, doc); if (e.isMatch()) { result.setMatch(Boolean.TRUE); result.addDetail(e); @@ -155,14 +160,14 @@ max = Math.max(max, e.getValue()); } } - result.setValue(max + (sum - max)*tieBreakerMultiplier); + result.setValue(max + (sum - max) * tieBreakerMultiplier); return result; } - + } // end of DisjunctionMaxWeight inner class - /* Create the Weight used to score us */ - protected Weight createWeight(Searcher searcher) throws IOException { + /* Create the QueryWeight used to score us */ + public QueryWeight createQueryWeight(Searcher searcher) throws IOException { return new DisjunctionMaxWeight(searcher); } @@ -170,7 +175,8 @@ * @param reader the IndexReader we query * @return an optimized copy of us (which may not be a copy if there is nothing to optimize) */ public Query rewrite(IndexReader reader) throws IOException { - if (disjuncts.size() == 1) { + int numDisjunctions = disjuncts.size(); + if (numDisjunctions == 1) { Query singleton = (Query) disjuncts.get(0); Query result = singleton.rewrite(reader); if (getBoost() != 1.0f) { @@ -180,7 +186,7 @@ return result; } DisjunctionMaxQuery clone = null; - for (int i = 0 ; i < disjuncts.size(); i++) { + for (int i = 0 ; i < numDisjunctions; i++) { Query clause = (Query) disjuncts.get(i); Query rewrite = clause.rewrite(reader); if (rewrite != clause) { @@ -200,15 +206,13 @@ return clone; } - // inherit javadoc public void extractTerms(Set terms) { - for (int i = 0; i < disjuncts.size(); i++) { - ((Query)disjuncts.get(i)).extractTerms(terms); - } + for (Iterator iter = disjuncts.iterator(); iter.hasNext();) { + ((Query) iter.next()).extractTerms(terms); + } } - /** Prettyprint us. * @param field the field to which we are applied * @return a string that shows what we do, of the form "(disjunct1 | disjunct2 | ... | disjunctn)^boost" @@ -216,7 +220,8 @@ public String toString(String field) { StringBuffer buffer = new StringBuffer(); buffer.append("("); - for (int i = 0 ; i < disjuncts.size(); i++) { + int numDisjunctions = disjuncts.size(); + for (int i = 0 ; i < numDisjunctions; i++) { Query subquery = (Query) disjuncts.get(i); if (subquery instanceof BooleanQuery) { // wrap sub-bools in parens buffer.append("("); @@ -224,7 +229,7 @@ buffer.append(")"); } else buffer.append(subquery.toString(field)); - if (i != disjuncts.size()-1) buffer.append(" | "); + if (i != numDisjunctions-1) buffer.append(" | "); } buffer.append(")"); if (tieBreakerMultiplier != 0.0f) { Index: src/java/org/apache/lucene/search/TopScoreDocCollector.java =================================================================== --- src/java/org/apache/lucene/search/TopScoreDocCollector.java (revision 786579) +++ src/java/org/apache/lucene/search/TopScoreDocCollector.java (working copy) @@ -55,6 +55,10 @@ pqTop.score = score; pqTop = (ScoreDoc) pq.updateTop(); } + + public boolean acceptsDocsOutOfOrder() { + return false; + } } // Assumes docs are scored out of order. 
@@ -74,6 +78,10 @@ pqTop.score = score; pqTop = (ScoreDoc) pq.updateTop(); } + + public boolean acceptsDocsOutOfOrder() { + return true; + } } /** Index: src/java/org/apache/lucene/search/payloads/BoostingTermQuery.java =================================================================== --- src/java/org/apache/lucene/search/payloads/BoostingTermQuery.java (revision 786579) +++ src/java/org/apache/lucene/search/payloads/BoostingTermQuery.java (working copy) @@ -41,29 +41,23 @@ */ public class BoostingTermQuery extends SpanTermQuery{ - public BoostingTermQuery(Term term) { super(term); } - - protected Weight createWeight(Searcher searcher) throws IOException { + public QueryWeight createQueryWeight(Searcher searcher) throws IOException { return new BoostingTermWeight(this, searcher); } - protected class BoostingTermWeight extends SpanWeight implements Weight { + protected class BoostingTermWeight extends SpanWeight { - public BoostingTermWeight(BoostingTermQuery query, Searcher searcher) throws IOException { super(query, searcher); } - - - - public Scorer scorer(IndexReader reader) throws IOException { - return new BoostingSpanScorer((TermSpans)query.getSpans(reader), this, similarity, - reader.norms(query.getField())); + public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException { + return new BoostingSpanScorer((TermSpans) query.getSpans(reader), this, + similarity, reader.norms(query.getField())); } protected class BoostingSpanScorer extends SpanScorer { @@ -74,7 +68,7 @@ protected float payloadScore; private int payloadsSeen; - public BoostingSpanScorer(TermSpans spans, Weight weight, + public BoostingSpanScorer(TermSpans spans, QueryWeight weight, Similarity similarity, byte[] norms) throws IOException { super(spans, weight, similarity, norms); positions = spans.getPositions(); Index: src/java/org/apache/lucene/search/Searcher.java =================================================================== --- src/java/org/apache/lucene/search/Searcher.java (revision 786579) +++ src/java/org/apache/lucene/search/Searcher.java (working copy) @@ -19,15 +19,17 @@ import java.io.IOException; +import org.apache.lucene.document.Document; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.Term; -import org.apache.lucene.document.Document; -/** An abstract base class for search implementations. - * Implements the main search methods. +/** + * An abstract base class for search implementations. Implements the main search + * methods. * - *

Note that you can only access Hits from a Searcher as long as it is - * not yet closed, otherwise an IOException will be thrown. + *

+ * Note that you can only access hits from a Searcher as long as it is not yet + * closed, otherwise an IOException will be thrown. */ public abstract class Searcher implements Searchable { @@ -87,7 +89,7 @@ */ public TopFieldDocs search(Query query, Filter filter, int n, Sort sort) throws IOException { - return search(createWeight(query), filter, n, sort); + return search(createQueryWeight(query), filter, n, sort); } /** Lower-level search API. @@ -107,7 +109,7 @@ */ public void search(Query query, HitCollector results) throws IOException { - search(query, (Filter)null, results); + search(createQueryWeight(query), null, new HitCollectorWrapper(results)); } /** Lower-level search API. @@ -125,7 +127,7 @@ */ public void search(Query query, Collector results) throws IOException { - search(query, (Filter)null, results); + search(createQueryWeight(query), null, results); } /** Lower-level search API. @@ -147,7 +149,7 @@ */ public void search(Query query, Filter filter, HitCollector results) throws IOException { - search(createWeight(query), filter, results); + search(createQueryWeight(query), filter, new HitCollectorWrapper(results)); } /** Lower-level search API. @@ -168,7 +170,7 @@ */ public void search(Query query, Filter filter, Collector results) throws IOException { - search(createWeight(query), filter, results); + search(createQueryWeight(query), filter, results); } /** Finds the top n @@ -178,7 +180,7 @@ */ public TopDocs search(Query query, Filter filter, int n) throws IOException { - return search(createWeight(query), filter, n); + return search(createQueryWeight(query), filter, n); } /** Finds the top n @@ -200,7 +202,7 @@ * entire index. */ public Explanation explain(Query query, int doc) throws IOException { - return explain(createWeight(query), doc); + return explain(createQueryWeight(query), doc); } /** The Similarity implementation used by this searcher. */ @@ -213,7 +215,7 @@ public void setSimilarity(Similarity similarity) { this.similarity = similarity; } - + /** Expert: Return the Similarity implementation used by this Searcher. * *

This defaults to the current value of {@link Similarity#getDefault()}. @@ -224,11 +226,16 @@ /** * creates a weight for query - * @return new weight + * + * @deprecated use {@link #createQueryWeight(Query)} instead. */ protected Weight createWeight(Query query) throws IOException { - return query.weight(this); + return createQueryWeight(query); } + + protected QueryWeight createQueryWeight(Query query) throws IOException { + return query.queryWeight(this); + } // inherit javadoc public int[] docFreqs(Term[] terms) throws IOException { @@ -245,15 +252,34 @@ /** * @deprecated use {@link #search(Weight, Filter, Collector)} instead. */ - abstract public void search(Weight weight, Filter filter, HitCollector results) throws IOException; - abstract public void search(Weight weight, Filter filter, Collector results) throws IOException; + public void search(Weight weight, Filter filter, HitCollector results) throws IOException { + search(new QueryWeightWrapper(weight), filter, new HitCollectorWrapper(results)); + } + /** @deprecated delete in 3.0. */ + public void search(Weight weight, Filter filter, Collector collector) + throws IOException { + search(new QueryWeightWrapper(weight), filter, collector); + } + abstract public void search(QueryWeight weight, Filter filter, Collector results) throws IOException; abstract public void close() throws IOException; abstract public int docFreq(Term term) throws IOException; abstract public int maxDoc() throws IOException; - abstract public TopDocs search(Weight weight, Filter filter, int n) throws IOException; + /** @deprecated use {@link #search(QueryWeight, Filter, int)} instead. */ + public TopDocs search(Weight weight, Filter filter, int n) throws IOException { + return search(new QueryWeightWrapper(weight), filter, n); + } + abstract public TopDocs search(QueryWeight weight, Filter filter, int n) throws IOException; abstract public Document doc(int i) throws CorruptIndexException, IOException; abstract public Query rewrite(Query query) throws IOException; - abstract public Explanation explain(Weight weight, int doc) throws IOException; - abstract public TopFieldDocs search(Weight weight, Filter filter, int n, Sort sort) throws IOException; + /** @deprecated use {@link #explain(QueryWeight, int)} instead. */ + public Explanation explain(Weight weight, int doc) throws IOException { + return explain(new QueryWeightWrapper(weight), doc); + } + abstract public Explanation explain(QueryWeight weight, int doc) throws IOException; + /** @deprecated use {@link #search(QueryWeight, Filter, int, Sort)} instead. */ + public TopFieldDocs search(Weight weight, Filter filter, int n, Sort sort) throws IOException { + return search(new QueryWeightWrapper(weight), filter, n, sort); + } + abstract public TopFieldDocs search(QueryWeight weight, Filter filter, int n, Sort sort) throws IOException; /* End patch for GCJ bug #15411. 
*/ } Index: src/java/org/apache/lucene/search/spans/FieldMaskingSpanQuery.java =================================================================== --- src/java/org/apache/lucene/search/spans/FieldMaskingSpanQuery.java (revision 786579) +++ src/java/org/apache/lucene/search/spans/FieldMaskingSpanQuery.java (working copy) @@ -23,9 +23,9 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWeight; import org.apache.lucene.search.Searcher; import org.apache.lucene.search.Similarity; -import org.apache.lucene.search.Weight; import org.apache.lucene.util.ToStringUtils; /** @@ -97,6 +97,7 @@ return maskedQuery.getPayloadSpans(reader); } + /** @deprecated use {@link #extractTerms(Set)} instead. */ public Collection getTerms() { return maskedQuery.getTerms(); } @@ -105,8 +106,8 @@ maskedQuery.extractTerms(terms); } - protected Weight createWeight(Searcher searcher) throws IOException { - return maskedQuery.createWeight(searcher); + public QueryWeight createQueryWeight(Searcher searcher) throws IOException { + return maskedQuery.createQueryWeight(searcher); } public Similarity getSimilarity(Searcher searcher) { Index: src/java/org/apache/lucene/search/spans/SpanWeight.java =================================================================== --- src/java/org/apache/lucene/search/spans/SpanWeight.java (revision 786579) +++ src/java/org/apache/lucene/search/spans/SpanWeight.java (working copy) @@ -29,7 +29,7 @@ /** * Expert-only. Public for use by other weight implementations */ -public class SpanWeight implements Weight { +public class SpanWeight extends QueryWeight { protected Similarity similarity; protected float value; protected float idf; @@ -63,10 +63,9 @@ value = queryWeight * idf; // idf for document } - public Scorer scorer(IndexReader reader) throws IOException { - return new SpanScorer(query.getSpans(reader), this, - similarity, - reader.norms(query.getField())); + public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException { + return new SpanScorer(query.getSpans(reader), this, similarity, reader + .norms(query.getField())); } public Explanation explain(IndexReader reader, int doc) @@ -115,7 +114,7 @@ fieldExpl.setDescription("fieldWeight("+field+":"+query.toString(field)+ " in "+doc+"), product of:"); - Explanation tfExpl = scorer(reader).explain(doc); + Explanation tfExpl = scorer(reader, true, false).explain(doc); fieldExpl.addDetail(tfExpl); fieldExpl.addDetail(idfExpl); Index: src/java/org/apache/lucene/search/spans/SpanScorer.java =================================================================== --- src/java/org/apache/lucene/search/spans/SpanScorer.java (revision 786579) +++ src/java/org/apache/lucene/search/spans/SpanScorer.java (working copy) @@ -17,19 +17,21 @@ * limitations under the License. */ +import java.io.IOException; + import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.QueryWeight; +import org.apache.lucene.search.QueryWeightWrapper; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Similarity; import org.apache.lucene.search.Weight; -import java.io.IOException; - /** * Public for extension only. 
*/ public class SpanScorer extends Scorer { protected Spans spans; - protected Weight weight; + protected QueryWeight weight; protected byte[] norms; protected float value; @@ -40,8 +42,14 @@ protected int doc; protected float freq; + /** @deprecated use {@link #SpanScorer(Spans, QueryWeight, Similarity, byte[])} instead.*/ protected SpanScorer(Spans spans, Weight weight, Similarity similarity, byte[] norms) throws IOException { + this(spans, new QueryWeightWrapper(weight), similarity, norms); + } + + protected SpanScorer(Spans spans, QueryWeight weight, Similarity similarity, byte[] norms) + throws IOException { super(similarity); this.spans = spans; this.norms = norms; Index: src/java/org/apache/lucene/search/spans/SpanQuery.java =================================================================== --- src/java/org/apache/lucene/search/spans/SpanQuery.java (revision 786579) +++ src/java/org/apache/lucene/search/spans/SpanQuery.java (working copy) @@ -17,15 +17,15 @@ * limitations under the License. */ +import java.io.IOException; +import java.util.Collection; + import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWeight; import org.apache.lucene.search.Searcher; import org.apache.lucene.search.Weight; -import java.io.IOException; -import java.util.Collection; -import java.util.Set; - /** Base class for span-based queries. */ public abstract class SpanQuery extends Query { /** Expert: Returns the matches for this query in an index. Used internally @@ -46,7 +46,7 @@ */ public PayloadSpans getPayloadSpans(IndexReader reader) throws IOException{ return null; - }; + } /** Returns the name of the field matched by this query.*/ public abstract String getField(); @@ -57,9 +57,13 @@ */ public abstract Collection getTerms(); + /** @deprecated delete in 3.0. */ protected Weight createWeight(Searcher searcher) throws IOException { + return createQueryWeight(searcher); + } + + public QueryWeight createQueryWeight(Searcher searcher) throws IOException { return new SpanWeight(this, searcher); } } - Index: src/java/org/apache/lucene/search/SloppyPhraseScorer.java =================================================================== --- src/java/org/apache/lucene/search/SloppyPhraseScorer.java (revision 786579) +++ src/java/org/apache/lucene/search/SloppyPhraseScorer.java (working copy) @@ -28,7 +28,7 @@ private PhrasePositions tmpPos[]; // for flipping repeating pps. private boolean checkedRepeats; - SloppyPhraseScorer(Weight weight, TermPositions[] tps, int[] offsets, Similarity similarity, + SloppyPhraseScorer(QueryWeight weight, TermPositions[] tps, int[] offsets, Similarity similarity, int slop, byte[] norms) { super(weight, tps, offsets, similarity, norms); this.slop = slop; Index: src/java/org/apache/lucene/search/QueryWeight.java =================================================================== --- src/java/org/apache/lucene/search/QueryWeight.java (revision 0) +++ src/java/org/apache/lucene/search/QueryWeight.java (revision 0) @@ -0,0 +1,119 @@ +package org.apache.lucene.search; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.io.IOException; +import java.io.Serializable; + +import org.apache.lucene.index.IndexReader; + +/** + * Expert: Calculate query weights and build query scorers. + *

+ * The purpose of {@link QueryWeight} is to ensure searching does not + * modify a {@link Query}, so that a {@link Query} instance can be reused.
+ * {@link Searcher} dependent state of the query should reside in the + * {@link QueryWeight}.
+ * {@link IndexReader} dependent state should reside in the {@link Scorer}. + *

+ * A QueryWeight is used in the following way: + *

    + *
  1. A QueryWeight is constructed by a top-level query, given a + * Searcher ({@link Query#createQueryWeight(Searcher)}). + *
  2. The {@link #sumOfSquaredWeights()} method is called on the + * QueryWeight to compute the query normalization factor + * {@link Similarity#queryNorm(float)} of the query clauses contained in the + * query. + *
  3. The query normalization factor is passed to {@link #normalize(float)}. At + * this point the weighting is complete. + *
  4. A Scorer is constructed by {@link #scorer(IndexReader)}. + *
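+ * For illustration, a minimal sketch of this flow, essentially what
+ * {@link Query#queryWeight(Searcher)} does (the query, searcher and reader
+ * variables are assumed to be in scope):
+ * <pre>
+ *   Query rewritten = searcher.rewrite(query);
+ *   QueryWeight weight = rewritten.createQueryWeight(searcher);
+ *   float sum = weight.sumOfSquaredWeights();
+ *   weight.normalize(query.getSimilarity(searcher).queryNorm(sum));
+ *   Scorer scorer = weight.scorer(reader, true, false); // in-order, used as an iterator
+ * </pre>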
+ * + * @since 2.9 + */ +public abstract class QueryWeight implements Weight, Serializable { + + /** An explanation of the score computation for the named document. */ + public abstract Explanation explain(IndexReader reader, int doc) throws IOException; + + /** The query that this concerns. */ + public abstract Query getQuery(); + + /** The weight for this query. */ + public abstract float getValue(); + + /** Assigns the query normalization factor to this. */ + public abstract void normalize(float norm); + + /** + * @deprecated use {@link #scorer(IndexReader, boolean, boolean)} instead. + * Currently this defaults to asking a scorer in out-of-order + * mode, but will be removed in 3.0. + */ + public Scorer scorer(IndexReader reader) throws IOException { + return scorer(reader, true, false); + } + + /** + * Returns a {@link Scorer} which scores documents in/out-of order according + * to scoreDocsInOrder. + *

+ * NOTE: even if scoreDocsInOrder is false, it is + * recommended to check whether the returned Scorer indeed scores + * documents out of order (i.e., call {@link #scoresDocsOutOfOrder()}), as some + * Scorer implementations will always return documents in-order. + * + * @param reader + * the {@link IndexReader} for which to return the {@link Scorer}. + * @param scoreDocsInOrder + * specifies whether in-order scoring of documents is required. Note + * that if set to false (i.e., out-of-order scoring is required), + * this method can return whatever scoring mode it supports, as every + * in-order scorer is also an out-of-order one. However, an + * out-of-order scorer may not support {@link Scorer#nextDoc()} + * and/or {@link Scorer#advance(int)}, therefore it is recommended to + * request an in-order scorer if use of these methods is required. + * @param topScorer + * specifies whether the returned {@link Scorer} will be used as a + * top scorer or as an iterator. I.e., if true, + * {@link Scorer#score(Collector)} will be called; if false, + * {@link Scorer#nextDoc()} and/or {@link Scorer#advance(int)} will + * be called. + * @return a {@link Scorer} which scores documents in/out-of order. + * @throws IOException + */ + public abstract Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, + boolean topScorer) throws IOException; + + /** The sum of squared weights of contained query clauses. */ + public abstract float sumOfSquaredWeights() throws IOException; + + /** + * Returns true iff this implementation scores docs only out of order. This + * method is used in conjunction with {@link Collector}'s + * {@link Collector#acceptsDocsOutOfOrder() acceptsDocsOutOfOrder} and + * {@link #scorer(org.apache.lucene.index.IndexReader, boolean, boolean)} to + * create a matching {@link Scorer} instance for a given {@link Collector}, or + * vice versa. + *
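+ * For example, a searcher can mate the two roughly as follows (weight,
+ * reader and collector are illustrative variables):
+ * <pre>
+ *   // request a scorer whose orderness matches the given collector
+ *   Scorer scorer = weight.scorer(reader, !collector.acceptsDocsOutOfOrder(), true);
+ *   scorer.score(collector);
+ *   // or, vice versa, create a collector that matches this weight's scorers, e.g.
+ *   // TopScoreDocCollector.create(nDocs, !weight.scoresDocsOutOfOrder())
+ * </pre>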

+ * NOTE: the default implementation returns false, i.e. + * the Scorer scores documents in-order. + */ + public boolean scoresDocsOutOfOrder() { return false; } + +} Property changes on: src/java/org/apache/lucene/search/QueryWeight.java ___________________________________________________________________ Added: svn:eol-styte + native Index: src/java/org/apache/lucene/search/MultiPhraseQuery.java =================================================================== --- src/java/org/apache/lucene/search/MultiPhraseQuery.java (revision 786579) +++ src/java/org/apache/lucene/search/MultiPhraseQuery.java (working copy) @@ -123,7 +123,7 @@ } - private class MultiPhraseWeight implements Weight { + private class MultiPhraseWeight extends QueryWeight { private Similarity similarity; private float value; private float idf; @@ -158,7 +158,7 @@ value = queryWeight * idf; // idf for document } - public Scorer scorer(IndexReader reader) throws IOException { + public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException { if (termArrays.size() == 0) // optimize zero-term case return null; @@ -217,7 +217,7 @@ fieldExpl.setDescription("fieldWeight("+getQuery()+" in "+doc+ "), product of:"); - Explanation tfExpl = scorer(reader).explain(doc); + Explanation tfExpl = scorer(reader, true, false).explain(doc); fieldExpl.addDetail(tfExpl); fieldExpl.addDetail(idfExpl); @@ -261,7 +261,7 @@ } } - protected Weight createWeight(Searcher searcher) throws IOException { + public QueryWeight createQueryWeight(Searcher searcher) throws IOException { return new MultiPhraseWeight(searcher); } Index: src/java/org/apache/lucene/search/FilteredQuery.java =================================================================== --- src/java/org/apache/lucene/search/FilteredQuery.java (revision 786579) +++ src/java/org/apache/lucene/search/FilteredQuery.java (working copy) @@ -54,16 +54,14 @@ this.filter = filter; } - - /** * Returns a Weight that applies the filter to the enclosed query's Weight. * This is accomplished by overriding the Scorer returned by the Weight. */ - protected Weight createWeight (final Searcher searcher) throws IOException { - final Weight weight = query.createWeight (searcher); + public QueryWeight createQueryWeight(final Searcher searcher) throws IOException { + final QueryWeight weight = query.createQueryWeight (searcher); final Similarity similarity = query.getSimilarity(searcher); - return new Weight() { + return new QueryWeight() { private float value; // pass these methods through to enclosed query's weight @@ -99,8 +97,9 @@ public Query getQuery() { return FilteredQuery.this; } // return a filtering scorer - public Scorer scorer (IndexReader indexReader) throws IOException { - final Scorer scorer = weight.scorer(indexReader); + public Scorer scorer(IndexReader indexReader, boolean scoreDocsInOrder, boolean topScorer) + throws IOException { + final Scorer scorer = weight.scorer(indexReader, scoreDocsInOrder, false); final DocIdSetIterator docIdSetIterator = filter.getDocIdSet(indexReader).iterator(); return new Scorer(similarity) { Index: src/java/org/apache/lucene/search/Collector.java =================================================================== --- src/java/org/apache/lucene/search/Collector.java (revision 786579) +++ src/java/org/apache/lucene/search/Collector.java (working copy) @@ -121,6 +121,8 @@ * *

NOTE: This API is experimental and might change * in incompatible ways in the next release.

+ * + * @since 2.9 */ public abstract class Collector { @@ -156,5 +158,17 @@ * @param docBase */ public abstract void setNextReader(IndexReader reader, int docBase) throws IOException; + + /** + * Returns true iff this {@link Collector} can accept documents given to + * {@link #collect(int)} out of order. + *
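+ * For example, a minimal hit-counting collector that does not care about doc
+ * order might look like this (a sketch; the count field is illustrative):
+ * <pre>
+ *   Collector hitCounter = new Collector() {
+ *     private int count;
+ *     public void setScorer(Scorer scorer) {}   // scores are not needed here
+ *     public void collect(int doc) { count++; }
+ *     public void setNextReader(IndexReader reader, int docBase) {}
+ *     public boolean acceptsDocsOutOfOrder() { return true; }
+ *   };
+ * </pre>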

+ * NOTE: some collectors can work in either mode, with a more efficient + * implementation for in-order docs collection. If your collector can work in + * either mode, it is recommended that you create two variants of it, since + * some queries work much faster if out-of-order collection is supported by a + * {@link Collector}. + */ + public abstract boolean acceptsDocsOutOfOrder(); } Index: src/java/org/apache/lucene/search/Searchable.java =================================================================== --- src/java/org/apache/lucene/search/Searchable.java (revision 786579) +++ src/java/org/apache/lucene/search/Searchable.java (working copy) @@ -17,25 +17,32 @@ * limitations under the License. */ +import java.io.IOException; + import org.apache.lucene.document.Document; import org.apache.lucene.document.FieldSelector; -import org.apache.lucene.index.IndexReader; // for javadoc +import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.Term; -import org.apache.lucene.index.CorruptIndexException; -import java.io.IOException; // for javadoc - -/** The interface for search implementations. - * - *

Searchable is the abstract network protocol for searching. - * Implementations provide search over a single index, over multiple - * indices, and over indices on remote servers. - * - *

Queries, filters and sort criteria are designed to be compact so that - * they may be efficiently passed to a remote index, with only the top-scoring - * hits being returned, rather than every matching hit. +/** + * The interface for search implementations. + * + *

+ * Searchable is the abstract network protocol for searching. Implementations + * provide search over a single index, over multiple indices, and over indices + * on remote servers. + * + *

+ * Queries, filters and sort criteria are designed to be compact so that they + * may be efficiently passed to a remote index, with only the top-scoring hits + * being returned, rather than every matching hit. + * + * NOTE: this interface is kept public for convenience. Since it is not + * expected to be implemented directly, it may be changed unexpectedly between + * releases. */ public interface Searchable { + /** Lower-level search API. * *

{@link HitCollector#collect(int,float)} is called for every non-zero @@ -51,7 +58,7 @@ * @param filter if non-null, used to permit documents to be collected. * @param results to receive hits * @throws BooleanQuery.TooManyClauses - * @deprecated use {@link #search(Weight, Filter, Collector)} instead. + * @deprecated use {@link #search(QueryWeight, Filter, Collector)} instead. */ void search(Weight weight, Filter filter, HitCollector results) throws IOException; @@ -75,9 +82,33 @@ * @param collector * to receive hits * @throws BooleanQuery.TooManyClauses + * + * @deprecated use {@link #search(QueryWeight, Filter, Collector)} instead. */ void search(Weight weight, Filter filter, Collector collector) throws IOException; + /** + * Lower-level search API. + * + *

+ * {@link Collector#collect(int)} is called for every document.
+ * Collector-based access to remote indexes is discouraged. + * + *
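+ * A minimal sketch of a call (assuming a Searcher searcher, a Query query and
+ * a Collector collector):
+ * <pre>
+ *   searcher.search(query.queryWeight(searcher), null, collector);
+ * </pre>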

+ * Applications should only use this if they need all of the matching + * documents. The high-level search API ({@link Searcher#search(Query)}) is + * usually more efficient, as it skips non-high-scoring hits. + * + * @param weight + * to match documents + * @param filter + * if non-null, used to permit documents to be collected. + * @param collector + * to receive hits + * @throws BooleanQuery.TooManyClauses + */ + void search(QueryWeight weight, Filter filter, Collector collector) throws IOException; + /** Frees resources associated with this Searcher. * Be careful not to call this method while you are still using objects * like {@link Hits}. @@ -86,7 +117,7 @@ /** Expert: Returns the number of documents containing term. * Called by search code to compute term weights. - * @see IndexReader#docFreq(Term) + * @see org.apache.lucene.index.IndexReader#docFreq(Term) */ int docFreq(Term term) throws IOException; @@ -98,7 +129,7 @@ /** Expert: Returns one greater than the largest possible document number. * Called by search code to compute term weights. - * @see IndexReader#maxDoc() + * @see org.apache.lucene.index.IndexReader#maxDoc() */ int maxDoc() throws IOException; @@ -110,12 +141,24 @@ *

Applications should usually call {@link Searcher#search(Query)} or * {@link Searcher#search(Query,Filter)} instead. * @throws BooleanQuery.TooManyClauses + * @deprecated use {@link #search(QueryWeight, Filter, int)} instead. */ TopDocs search(Weight weight, Filter filter, int n) throws IOException; + + /** Expert: Low-level search implementation. Finds the top n + * hits for query, applying filter if non-null. + * + *

Called by {@link Hits}. + * + *

Applications should usually call {@link Searcher#search(Query)} or + * {@link Searcher#search(Query,Filter)} instead. + * @throws BooleanQuery.TooManyClauses + */ + TopDocs search(QueryWeight weight, Filter filter, int n) throws IOException; /** Expert: Returns the stored fields of document i. * Called by {@link HitCollector} implementations. - * @see IndexReader#document(int) + * @see org.apache.lucene.index.IndexReader#document(int) * @throws CorruptIndexException if the index is corrupt * @throws IOException if there is a low-level IO error */ @@ -136,7 +179,7 @@ * @throws CorruptIndexException if the index is corrupt * @throws IOException if there is a low-level IO error * - * @see IndexReader#document(int, FieldSelector) + * @see org.apache.lucene.index.IndexReader#document(int, FieldSelector) * @see org.apache.lucene.document.Fieldable * @see org.apache.lucene.document.FieldSelector * @see org.apache.lucene.document.SetBasedFieldSelector @@ -159,10 +202,23 @@ * entire index. *

Applications should call {@link Searcher#explain(Query, int)}. * @throws BooleanQuery.TooManyClauses + * @deprecated use {@link #explain(QueryWeight, int)} instead. */ Explanation explain(Weight weight, int doc) throws IOException; + + /** Expert: low-level implementation method + * Returns an Explanation that describes how doc scored against + * weight. + * + *

This is intended to be used in developing Similarity implementations, + * and, for good performance, should not be displayed with every hit. + * Computing an explanation is as expensive as executing the query over the + * entire index. + *

Applications should call {@link Searcher#explain(Query, int)}. + * @throws BooleanQuery.TooManyClauses + */ + Explanation explain(QueryWeight weight, int doc) throws IOException; - // TODO: change the javadoc in 3.0 to remove the last NOTE section. /** Expert: Low-level search implementation with arbitrary sorting. Finds * the top n hits for query, applying * filter if non-null, and sorting the hits by the criteria in @@ -171,15 +227,23 @@ *

Applications should usually call {@link * Searcher#search(Query,Filter,Sort)} instead. * - * NOTE: currently, this method tracks document scores and sets them in - * the returned {@link FieldDoc}, however in 3.0 it will move to not track - * document scores. If document scores tracking is still needed, you can use - * {@link #search(Weight, Filter, Collector)} and pass in a - * {@link TopFieldCollector} instance. - * * @throws BooleanQuery.TooManyClauses + * @deprecated use {@link #search(QueryWeight, Filter, int, Sort)} instead. */ TopFieldDocs search(Weight weight, Filter filter, int n, Sort sort) throws IOException; + + /** Expert: Low-level search implementation with arbitrary sorting. Finds + * the top n hits for query, applying + * filter if non-null, and sorting the hits by the criteria in + * sort. + * + *

Applications should usually call {@link + * Searcher#search(Query,Filter,Sort)} instead. + * + * @throws BooleanQuery.TooManyClauses + */ + TopFieldDocs search(QueryWeight weight, Filter filter, int n, Sort sort) + throws IOException; } Index: src/java/org/apache/lucene/search/PositiveScoresOnlyCollector.java =================================================================== --- src/java/org/apache/lucene/search/PositiveScoresOnlyCollector.java (revision 786579) +++ src/java/org/apache/lucene/search/PositiveScoresOnlyCollector.java (working copy) @@ -26,7 +26,6 @@ * {@link Collector} and makes sure only documents with * scores > 0 are collected. */ - public class PositiveScoresOnlyCollector extends Collector { final private Collector c; @@ -53,4 +52,8 @@ c.setScorer(this.scorer); } + public boolean acceptsDocsOutOfOrder() { + return c.acceptsDocsOutOfOrder(); + } + } Index: src/java/org/apache/lucene/search/PhraseScorer.java =================================================================== --- src/java/org/apache/lucene/search/PhraseScorer.java (revision 786579) +++ src/java/org/apache/lucene/search/PhraseScorer.java (working copy) @@ -19,7 +19,7 @@ import java.io.IOException; -import org.apache.lucene.index.*; +import org.apache.lucene.index.TermPositions; /** Expert: Scoring functionality for phrase queries. *
A document is considered matching if it contains the phrase-query terms @@ -32,7 +32,7 @@ * means a match. */ abstract class PhraseScorer extends Scorer { - private Weight weight; + private QueryWeight weight; protected byte[] norms; protected float value; @@ -43,7 +43,7 @@ private float freq; //prhase frequency in current doc as computed by phraseFreq(). - PhraseScorer(Weight weight, TermPositions[] tps, int[] offsets, + PhraseScorer(QueryWeight weight, TermPositions[] tps, int[] offsets, Similarity similarity, byte[] norms) { super(similarity); this.norms = norms; Index: src/java/org/apache/lucene/search/TimeLimitingCollector.java =================================================================== --- src/java/org/apache/lucene/search/TimeLimitingCollector.java (revision 786579) +++ src/java/org/apache/lucene/search/TimeLimitingCollector.java (working copy) @@ -216,4 +216,8 @@ collector.setScorer(scorer); } + public boolean acceptsDocsOutOfOrder() { + return collector.acceptsDocsOutOfOrder(); + } + } Index: src/java/org/apache/lucene/search/Query.java =================================================================== --- src/java/org/apache/lucene/search/Query.java (revision 786579) +++ src/java/org/apache/lucene/search/Query.java (working copy) @@ -80,24 +80,50 @@ return toString(""); } - /** Expert: Constructs an appropriate Weight implementation for this query. - * - *

Only implemented by primitive queries, which re-write to themselves. + /** + * Expert: Constructs an appropriate Weight implementation for this query. + * + *

+ * Only implemented by primitive queries, which re-write to themselves. + * @deprecated use {@link #createQueryWeight(Searcher)} instead. */ protected Weight createWeight(Searcher searcher) throws IOException { + return createQueryWeight(searcher); + } + + /** + * Expert: Constructs an appropriate {@link QueryWeight} implementation for + * this query. + * + *

+ * Only implemented by primitive queries, which re-write to themselves. + */ + public QueryWeight createQueryWeight(Searcher searcher) throws IOException { throw new UnsupportedOperationException(); } - /** Expert: Constructs and initializes a Weight for a top-level query. */ - public Weight weight(Searcher searcher) - throws IOException { + /** + * Expert: Constructs and initializes a Weight for a top-level query. + * + * @deprecated use {@link #queryWeight(Searcher)} instead. + */ + public Weight weight(Searcher searcher) throws IOException { + return queryWeight(searcher); + } + + /** + * Expert: Constructs and initializes a {@link QueryWeight} for a top-level + * query. + */ + public QueryWeight queryWeight(Searcher searcher) throws IOException { Query query = searcher.rewrite(this); - Weight weight = query.createWeight(searcher); + QueryWeight weight = query.createQueryWeight(searcher); float sum = weight.sumOfSquaredWeights(); float norm = getSimilarity(searcher).queryNorm(sum); weight.normalize(norm); return weight; } + /** Expert: called to re-write queries into primitive queries. For example, * a PrefixQuery will be rewritten into a BooleanQuery that consists @@ -106,6 +132,7 @@ public Query rewrite(IndexReader reader) throws IOException { return this; } + /** Expert: called when re-writing queries under MultiSearcher. * @@ -151,6 +178,7 @@ result.add((Query) it.next(), BooleanClause.Occur.SHOULD); return result; } + /** * Expert: adds all terms occuring in this query to the terms set. Only @@ -162,6 +190,7 @@ // needs to be implemented by query subclasses throw new UnsupportedOperationException(); } + /** Expert: merges the clauses of a set of BooleanQuery's into a single @@ -187,6 +216,7 @@ } return result; } + /** Expert: Returns the Similarity implementation to be used for this query. * Subclasses may override this method to specify their own Similarity @@ -199,7 +229,7 @@ /** Returns a clone of this query. */ public Object clone() { try { - return (Query)super.clone(); + return super.clone(); } catch (CloneNotSupportedException e) { throw new RuntimeException("Clone not supported: " + e.getMessage()); } Index: src/java/org/apache/lucene/search/BooleanQuery.java =================================================================== --- src/java/org/apache/lucene/search/BooleanQuery.java (revision 786579) +++ src/java/org/apache/lucene/search/BooleanQuery.java (working copy) @@ -30,7 +30,6 @@ */ public class BooleanQuery extends Query { - private static int maxClauseCount = 1024; /** Thrown when an attempt is made to add more than {@link @@ -173,7 +172,7 @@ /** Returns the list of clauses in this query. 
*/ public List clauses() { return clauses; } - private class BooleanWeight implements Weight { + private class BooleanWeight extends QueryWeight { protected Similarity similarity; protected ArrayList weights; @@ -183,7 +182,7 @@ weights = new ArrayList(clauses.size()); for (int i = 0 ; i < clauses.size(); i++) { BooleanClause c = (BooleanClause)clauses.get(i); - weights.add(c.getQuery().createWeight(searcher)); + weights.add(c.getQuery().createQueryWeight(searcher)); } } @@ -194,7 +193,7 @@ float sum = 0.0f; for (int i = 0 ; i < weights.size(); i++) { BooleanClause c = (BooleanClause)clauses.get(i); - Weight w = (Weight)weights.get(i); + QueryWeight w = (QueryWeight)weights.get(i); // call sumOfSquaredWeights for all clauses in case of side effects float s = w.sumOfSquaredWeights(); // sum sub weights if (!c.isProhibited()) @@ -210,39 +209,13 @@ public void normalize(float norm) { norm *= getBoost(); // incorporate boost - for (int i = 0 ; i < weights.size(); i++) { - Weight w = (Weight)weights.get(i); + for (Iterator iter = weights.iterator(); iter.hasNext();) { + QueryWeight w = (QueryWeight) iter.next(); // normalize all clauses, (even if prohibited in case of side affects) w.normalize(norm); } } - /** - * @return Returns BooleanScorer2 that uses and provides advance(), and - * scores documents in document number order. - */ - public Scorer scorer(IndexReader reader) throws IOException { - // TODO (3.0): instantiate either BS or BS2, according to - // allowDocsOutOfOrder (basically, try to inline BS2.score(Collector)'s - // logic. - - BooleanScorer2 result = new BooleanScorer2(similarity, - minNrShouldMatch, - allowDocsOutOfOrder); - - for (int i = 0 ; i < weights.size(); i++) { - BooleanClause c = (BooleanClause)clauses.get(i); - Weight w = (Weight)weights.get(i); - Scorer subScorer = w.scorer(reader); - if (subScorer != null) - result.add(subScorer, c.isRequired(), c.isProhibited()); - else if (c.isRequired()) - return null; - } - - return result; - } - public Explanation explain(IndexReader reader, int doc) throws IOException { final int minShouldMatch = @@ -256,7 +229,7 @@ int shouldMatchCount = 0; for (int i = 0 ; i < weights.size(); i++) { BooleanClause c = (BooleanClause)clauses.get(i); - Weight w = (Weight)weights.get(i); + QueryWeight w = (QueryWeight)weights.get(i); Explanation e = w.explain(reader, doc); if (!c.isProhibited()) maxCoord++; if (e.isMatch()) { @@ -310,41 +283,101 @@ return result; } } + + public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) + throws IOException { + List required = new ArrayList(); + List prohibited = new ArrayList(); + List optional = new ArrayList(); + for (Iterator wIter = weights.iterator(), cIter = clauses.iterator(); wIter.hasNext();) { + QueryWeight w = (QueryWeight) wIter.next(); + BooleanClause c = (BooleanClause) cIter.next(); + Scorer subScorer = w.scorer(reader, true, false); + if (subScorer == null) { + return null; + } else if (c.isRequired()) { + required.add(subScorer); + } else if (c.isProhibited()) { + prohibited.add(subScorer); + } else { + optional.add(subScorer); + } + } + + // Check if we can return a BooleanScorer + scoreDocsInOrder |= !allowDocsOutOfOrder; // until it is removed, factor in the static setting. 
+ if (!scoreDocsInOrder && topScorer && required.size() == 0 && prohibited.size() < 32) { + return new BooleanScorer(similarity, minNrShouldMatch, optional, prohibited); + } + + // Return a BooleanScorer2 + return new BooleanScorer2(similarity, minNrShouldMatch, required, prohibited, optional); + } + + public boolean scoresDocsOutOfOrder() { + int numProhibited = 0; + for (Iterator cIter = clauses.iterator(); cIter.hasNext();) { + BooleanClause c = (BooleanClause) cIter.next(); + if (c.isRequired()) { + return false; // BS2 (in-order) will be used by scorer() + } else if (c.isProhibited()) { + ++numProhibited; + } + } + + if (numProhibited > 32) { // cannot use BS + return false; + } + + // scorer() will return an out-of-order scorer if requested. + return true; + } + } - /** Whether hit docs may be collected out of docid order. */ - private static boolean allowDocsOutOfOrder = false; + /** + * Whether hit docs may be collected out of docid order. + * + * @deprecated this will not be needed anymore, as + * {@link QueryWeight#scoresDocsOutOfOrder()} is used. + */ + private static boolean allowDocsOutOfOrder = true; /** - * Expert: Indicates whether hit docs may be collected out of docid - * order. - * + * Expert: Indicates whether hit docs may be collected out of docid order. + * *

* Background: although the contract of the Scorer class requires that * documents be iterated in order of doc id, this was not true in early - * versions of Lucene. Many pieces of functionality in the current - * Lucene code base have undefined behavior if this contract is not - * upheld, but in some specific simple cases may be faster. (For - * example: disjunction queries with less than 32 prohibited clauses; - * This setting has no effect for other queries.) + * versions of Lucene. Many pieces of functionality in the current Lucene code + * base have undefined behavior if this contract is not upheld, but in some + * specific simple cases may be faster. (For example: disjunction queries with + * less than 32 prohibited clauses; This setting has no effect for other + * queries.) *

- * + * *

- * Specifics: By setting this option to true, docid N might be scored - * for a single segment before docid N-1. Across multiple segments, - * docs may be scored out of order regardless of this setting - it only - * applies to scoring a single segment. + * Specifics: By setting this option to true, docid N might be scored for a + * single segment before docid N-1. Across multiple segments, docs may be + * scored out of order regardless of this setting - it only applies to scoring + * a single segment. * * Being static, this setting is system wide. *
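+ * A rough migration sketch: rather than toggling this global setting, a
+ * {@link Collector} now declares per instance whether it can handle
+ * out-of-order docs, and a matching scorer is chosen per search, e.g.
+ * <pre>
+ *   public boolean acceptsDocsOutOfOrder() { return true; } // in your Collector subclass
+ * </pre>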

+ * + * @deprecated this is not needed anymore, as + * {@link QueryWeight#scoresDocsOutOfOrder()} is used. */ public static void setAllowDocsOutOfOrder(boolean allow) { allowDocsOutOfOrder = allow; - } - + } + /** * Whether hit docs may be collected out of docid order. + * * @see #setAllowDocsOutOfOrder(boolean) + * @deprecated this is not needed anymore, as + * {@link QueryWeight#scoresDocsOutOfOrder()} is used. */ public static boolean getAllowDocsOutOfOrder() { return allowDocsOutOfOrder; @@ -364,7 +397,7 @@ return getAllowDocsOutOfOrder(); } - protected Weight createWeight(Searcher searcher) throws IOException { + public QueryWeight createQueryWeight(Searcher searcher) throws IOException { return new BooleanWeight(searcher); } Index: src/java/org/apache/lucene/search/function/ValueSourceQuery.java =================================================================== --- src/java/org/apache/lucene/search/function/ValueSourceQuery.java (revision 786579) +++ src/java/org/apache/lucene/search/function/ValueSourceQuery.java (working copy) @@ -62,7 +62,7 @@ // no terms involved here } - private class ValueSourceWeight implements Weight { + private class ValueSourceWeight extends QueryWeight { Similarity similarity; float queryNorm; float queryWeight; @@ -93,14 +93,13 @@ queryWeight *= this.queryNorm; } - /*(non-Javadoc) @see org.apache.lucene.search.Weight#scorer(org.apache.lucene.index.IndexReader) */ - public Scorer scorer(IndexReader reader) throws IOException { + public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException { return new ValueSourceScorer(similarity, reader, this); } /*(non-Javadoc) @see org.apache.lucene.search.Weight#explain(org.apache.lucene.index.IndexReader, int) */ public Explanation explain(IndexReader reader, int doc) throws IOException { - return scorer(reader).explain(doc); + return scorer(reader, true, false).explain(doc); } } @@ -173,12 +172,10 @@ } } - /*(non-Javadoc) @see org.apache.lucene.search.Query#createWeight(org.apache.lucene.search.Searcher) */ - protected Weight createWeight(Searcher searcher) { + public QueryWeight createQueryWeight(Searcher searcher) { return new ValueSourceQuery.ValueSourceWeight(searcher); } - /* (non-Javadoc) @see org.apache.lucene.search.Query#toString(java.lang.String) */ public String toString(String field) { return valSrc.toString() + ToStringUtils.boost(getBoost()); } Index: src/java/org/apache/lucene/search/function/CustomScoreQuery.java =================================================================== --- src/java/org/apache/lucene/search/function/CustomScoreQuery.java (revision 786579) +++ src/java/org/apache/lucene/search/function/CustomScoreQuery.java (working copy) @@ -24,10 +24,10 @@ import org.apache.lucene.search.ComplexExplanation; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWeight; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Searcher; import org.apache.lucene.search.Similarity; -import org.apache.lucene.search.Weight; import org.apache.lucene.util.ToStringUtils; /** @@ -271,19 +271,18 @@ //=========================== W E I G H T ============================ - private class CustomWeight implements Weight { + private class CustomWeight extends QueryWeight { Similarity similarity; - Weight subQueryWeight; - Weight[] valSrcWeights; + QueryWeight subQueryWeight; + QueryWeight[] valSrcWeights; boolean qStrict; public CustomWeight(Searcher searcher) throws IOException { 
this.similarity = getSimilarity(searcher); - this.subQueryWeight = subQuery.weight(searcher); - this.subQueryWeight = subQuery.weight(searcher); - this.valSrcWeights = new Weight[valSrcQueries.length]; + this.subQueryWeight = subQuery.queryWeight(searcher); + this.valSrcWeights = new QueryWeight[valSrcQueries.length]; for(int i = 0; i < valSrcQueries.length; i++) { - this.valSrcWeights[i] = valSrcQueries[i].createWeight(searcher); + this.valSrcWeights[i] = valSrcQueries[i].createQueryWeight(searcher); } this.qStrict = strict; } @@ -325,20 +324,28 @@ } } - /*(non-Javadoc) @see org.apache.lucene.search.Weight#scorer(org.apache.lucene.index.IndexReader) */ - public Scorer scorer(IndexReader reader) throws IOException { - Scorer subQueryScorer = subQueryWeight.scorer(reader); + public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException { + // Pass true for "scoresDocsInOrder", because we + // require in-order scoring, even if caller does not, + // since we call advance on the valSrcScorers. Pass + // false for "topScorer" because we will not invoke + // score(Collector) on these scorers: + Scorer subQueryScorer = subQueryWeight.scorer(reader, true, false); Scorer[] valSrcScorers = new Scorer[valSrcWeights.length]; for(int i = 0; i < valSrcScorers.length; i++) { - valSrcScorers[i] = valSrcWeights[i].scorer(reader); + valSrcScorers[i] = valSrcWeights[i].scorer(reader, true, false); } return new CustomScorer(similarity, reader, this, subQueryScorer, valSrcScorers); } - /*(non-Javadoc) @see org.apache.lucene.search.Weight#explain(org.apache.lucene.index.IndexReader, int) */ public Explanation explain(IndexReader reader, int doc) throws IOException { - return scorer(reader).explain(doc); + return scorer(reader, true, false).explain(doc); } + + public boolean scoresDocsOutOfOrder() { + return false; + } + } @@ -435,8 +442,7 @@ } } - /*(non-Javadoc) @see org.apache.lucene.search.Query#createWeight(org.apache.lucene.search.Searcher) */ - protected Weight createWeight(Searcher searcher) throws IOException { + public QueryWeight createQueryWeight(Searcher searcher) throws IOException { return new CustomWeight(searcher); } Index: src/java/org/apache/lucene/search/PhraseQuery.java =================================================================== --- src/java/org/apache/lucene/search/PhraseQuery.java (revision 786579) +++ src/java/org/apache/lucene/search/PhraseQuery.java (working copy) @@ -106,7 +106,7 @@ return result; } - private class PhraseWeight implements Weight { + private class PhraseWeight extends QueryWeight { private Similarity similarity; private float value; private float idf; @@ -136,7 +136,7 @@ value = queryWeight * idf; // idf for document } - public Scorer scorer(IndexReader reader) throws IOException { + public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException { if (terms.size() == 0) // optimize zero-term case return null; @@ -209,7 +209,7 @@ fieldExpl.setDescription("fieldWeight("+field+":"+query+" in "+doc+ "), product of:"); - Explanation tfExpl = scorer(reader).explain(doc); + Explanation tfExpl = scorer(reader, true, false).explain(doc); fieldExpl.addDetail(tfExpl); fieldExpl.addDetail(idfExpl); @@ -237,12 +237,12 @@ } } - protected Weight createWeight(Searcher searcher) throws IOException { + public QueryWeight createQueryWeight(Searcher searcher) throws IOException { if (terms.size() == 1) { // optimize one-term case Term term = (Term)terms.get(0); Query termQuery = new 
TermQuery(term); termQuery.setBoost(getBoost()); - return termQuery.createWeight(searcher); + return termQuery.createQueryWeight(searcher); } return new PhraseWeight(searcher); } Index: src/java/org/apache/lucene/search/HitCollectorWrapper.java =================================================================== --- src/java/org/apache/lucene/search/HitCollectorWrapper.java (revision 786579) +++ src/java/org/apache/lucene/search/HitCollectorWrapper.java (working copy) @@ -25,7 +25,9 @@ * Wrapper for ({@link HitCollector}) implementations, which * simply re-bases the incoming docID before calling {@link * HitCollector#collect}. - * @deprecated this class will be removed when {@link HitCollector} is removed. + * @deprecated this class will be removed when {@link + * HitCollector} is removed. Please migrate custom + * HitCollectors to the new {@link Collector} class. */ public class HitCollectorWrapper extends Collector { private HitCollector collector; @@ -47,4 +49,9 @@ public void setScorer(Scorer scorer) throws IOException { this.scorer = scorer; } + + public boolean acceptsDocsOutOfOrder() { + return false; + } + } Index: src/java/org/apache/lucene/search/BooleanScorer.java =================================================================== --- src/java/org/apache/lucene/search/BooleanScorer.java (revision 786579) +++ src/java/org/apache/lucene/search/BooleanScorer.java (working copy) @@ -92,9 +92,15 @@ public void setNextReader(IndexReader reader, int docBase) { // not needed by this implementation } + public void setScorer(Scorer scorer) throws IOException { this.scorer = scorer; } + + public boolean acceptsDocsOutOfOrder() { + return true; + } + } // An internal class which is used in score(Collector, int) for setting the Index: src/java/org/apache/lucene/search/TermQuery.java =================================================================== --- src/java/org/apache/lucene/search/TermQuery.java (revision 786579) +++ src/java/org/apache/lucene/search/TermQuery.java (working copy) @@ -31,7 +31,7 @@ public class TermQuery extends Query { private Term term; - private class TermWeight implements Weight { + private class TermWeight extends QueryWeight { private Similarity similarity; private float value; private float idf; @@ -60,14 +60,13 @@ value = queryWeight * idf; // idf for document } - public Scorer scorer(IndexReader reader) throws IOException { + public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException { TermDocs termDocs = reader.termDocs(term); if (termDocs == null) return null; - return new TermScorer(this, termDocs, similarity, - reader.norms(term.field())); + return new TermScorer(this, termDocs, similarity, reader.norms(term.field())); } public Explanation explain(IndexReader reader, int doc) @@ -104,7 +103,7 @@ fieldExpl.setDescription("fieldWeight("+term+" in "+doc+ "), product of:"); - Explanation tfExpl = scorer(reader).explain(doc); + Explanation tfExpl = scorer(reader, true, false).explain(doc); fieldExpl.addDetail(tfExpl); fieldExpl.addDetail(idfExpl); @@ -142,7 +141,7 @@ /** Returns the term of this query. 
*/ public Term getTerm() { return term; } - protected Weight createWeight(Searcher searcher) throws IOException { + public QueryWeight createQueryWeight(Searcher searcher) throws IOException { return new TermWeight(searcher); } Index: src/java/org/apache/lucene/search/MultiSearcher.java =================================================================== --- src/java/org/apache/lucene/search/MultiSearcher.java (revision 786579) +++ src/java/org/apache/lucene/search/MultiSearcher.java (working copy) @@ -35,11 +35,12 @@ * or {@link #search(Query,Filter)} methods. */ public class MultiSearcher extends Searcher { - /** - * Document Frequency cache acting as a Dummy-Searcher. - * This class is no full-fledged Searcher, but only supports - * the methods necessary to initialize Weights. - */ + + /** + * Document Frequency cache acting as a Dummy-Searcher. This class is no + * full-fledged Searcher, but only supports the methods necessary to + * initialize Weights. + */ private static class CachedDfSource extends Searcher { private Map dfMap; // Map from Terms to corresponding doc freqs private int maxDoc; // document count @@ -93,34 +94,28 @@ throw new UnsupportedOperationException(); } - public Explanation explain(Weight weight,int doc) { + public Explanation explain(QueryWeight weight,int doc) { throw new UnsupportedOperationException(); } - /** @deprecated use {@link #search(Weight, Filter, Collector)} instead. */ - public void search(Weight weight, Filter filter, HitCollector results) { + public void search(QueryWeight weight, Filter filter, Collector results) { throw new UnsupportedOperationException(); } - public void search(Weight weight, Filter filter, Collector collector) { + public TopDocs search(QueryWeight weight,Filter filter,int n) { throw new UnsupportedOperationException(); } - public TopDocs search(Weight weight,Filter filter,int n) { + public TopFieldDocs search(QueryWeight weight,Filter filter,int n,Sort sort) { throw new UnsupportedOperationException(); } - - public TopFieldDocs search(Weight weight,Filter filter,int n,Sort sort) { - throw new UnsupportedOperationException(); - } } - private Searchable[] searchables; private int[] starts; private int maxDoc = 0; - /** Creates a searcher which searches searchables. */ + /** Creates a searcher which searches searchers. 
*/ public MultiSearcher(Searchable[] searchables) throws IOException { this.searchables = searchables; @@ -136,7 +131,7 @@ public Searchable[] getSearchables() { return searchables; } - + protected int[] getStarts() { return starts; } @@ -200,8 +195,8 @@ return maxDoc; } - public TopDocs search(Weight weight, Filter filter, int nDocs) - throws IOException { + public TopDocs search(QueryWeight weight, Filter filter, int nDocs) + throws IOException { HitQueue hq = new HitQueue(nDocs, false); int totalHits = 0; @@ -211,10 +206,10 @@ totalHits += docs.totalHits; // update totalHits ScoreDoc[] scoreDocs = docs.scoreDocs; for (int j = 0; j < scoreDocs.length; j++) { // merge scoreDocs into hq - ScoreDoc scoreDoc = scoreDocs[j]; + ScoreDoc scoreDoc = scoreDocs[j]; scoreDoc.doc += starts[i]; // convert doc if(!hq.insert(scoreDoc)) - break; // no more scores > minScore + break; // no more scores > minScore } } @@ -227,7 +222,7 @@ return new TopDocs(totalHits, scoreDocs, maxScore); } - public TopFieldDocs search (Weight weight, Filter filter, int n, Sort sort) + public TopFieldDocs search (QueryWeight weight, Filter filter, int n, Sort sort) throws IOException { FieldDocSortedHitQueue hq = null; int totalHits = 0; @@ -269,14 +264,7 @@ } // inherit javadoc - /** @deprecated use {@link #search(Weight, Filter, Collector)} instead. */ - public void search(Weight weight, Filter filter, final HitCollector results) - throws IOException { - search(weight, filter, new HitCollectorWrapper(results)); - } - - // inherit javadoc - public void search(Weight weight, Filter filter, final Collector collector) + public void search(QueryWeight weight, Filter filter, final Collector collector) throws IOException { for (int i = 0; i < searchables.length; i++) { @@ -292,6 +280,9 @@ public void setNextReader(IndexReader reader, int docBase) throws IOException { collector.setNextReader(reader, start + docBase); } + public boolean acceptsDocsOutOfOrder() { + return collector.acceptsDocsOutOfOrder(); + } }; searchables[i].search(weight, filter, hc); @@ -306,9 +297,9 @@ return queries[0].combine(queries); } - public Explanation explain(Weight weight, int doc) throws IOException { + public Explanation explain(QueryWeight weight, int doc) throws IOException { int i = subSearcher(doc); // find searcher index - return searchables[i].explain(weight,doc-starts[i]); // dispatch to searcher + return searchables[i].explain(weight, doc - starts[i]); // dispatch to searcher } /** @@ -326,7 +317,7 @@ * * @return rewritten queries */ - protected Weight createWeight(Query original) throws IOException { + protected QueryWeight createQueryWeight(Query original) throws IOException { // step 1 Query rewrittenQuery = rewrite(original); @@ -354,7 +345,7 @@ int numDocs = maxDoc(); CachedDfSource cacheSim = new CachedDfSource(dfMap, numDocs, getSimilarity()); - return rewrittenQuery.weight(cacheSim); + return rewrittenQuery.queryWeight(cacheSim); } } Index: src/java/org/apache/lucene/search/Hits.java =================================================================== --- src/java/org/apache/lucene/search/Hits.java (revision 786579) +++ src/java/org/apache/lucene/search/Hits.java (working copy) @@ -19,8 +19,8 @@ import java.io.IOException; import java.util.ConcurrentModificationException; +import java.util.Iterator; import java.util.Vector; -import java.util.Iterator; import org.apache.lucene.document.Document; import org.apache.lucene.index.CorruptIndexException; @@ -53,7 +53,7 @@ * */ public final class Hits { - private Weight weight; + private 
QueryWeight weight; private Searcher searcher; private Filter filter = null; private Sort sort = null; @@ -73,7 +73,7 @@ boolean debugCheckedForDeletions = false; // for test purposes. Hits(Searcher s, Query q, Filter f) throws IOException { - weight = q.weight(s); + weight = q.queryWeight(s); searcher = s; filter = f; nDeletions = countDeletions(s); @@ -82,7 +82,7 @@ } Hits(Searcher s, Query q, Filter f, Sort o) throws IOException { - weight = q.weight(s); + weight = q.queryWeight(s); searcher = s; filter = f; sort = o; Index: src/java/org/apache/lucene/search/QueryWrapperFilter.java =================================================================== --- src/java/org/apache/lucene/search/QueryWrapperFilter.java (revision 786579) +++ src/java/org/apache/lucene/search/QueryWrapperFilter.java (working copy) @@ -61,15 +61,18 @@ public void setNextReader(IndexReader reader, int docBase) { base = docBase; } + public boolean acceptsDocsOutOfOrder() { + return true; + } }); return bits; } public DocIdSet getDocIdSet(final IndexReader reader) throws IOException { - final Weight weight = query.weight(new IndexSearcher(reader)); + final QueryWeight weight = query.queryWeight(new IndexSearcher(reader)); return new DocIdSet() { public DocIdSetIterator iterator() throws IOException { - return weight.scorer(reader); + return weight.scorer(reader, true, false); } }; } Index: src/java/org/apache/lucene/search/TopFieldCollector.java =================================================================== --- src/java/org/apache/lucene/search/TopFieldCollector.java (revision 786579) +++ src/java/org/apache/lucene/search/TopFieldCollector.java (working copy) @@ -135,6 +135,11 @@ } } } + + public boolean acceptsDocsOutOfOrder() { + return true; + } + } /* @@ -240,6 +245,11 @@ } } } + + public boolean acceptsDocsOutOfOrder() { + return true; + } + } /* @@ -341,8 +351,12 @@ comparator.setBottom(bottom.slot); } } + } + + public boolean acceptsDocsOutOfOrder() { + return true; + } - } } /* @@ -489,6 +503,11 @@ } } } + + public boolean acceptsDocsOutOfOrder() { + return true; + } + } /* @@ -632,6 +651,11 @@ } } } + + public boolean acceptsDocsOutOfOrder() { + return true; + } + } /* @@ -781,6 +805,11 @@ this.scorer = scorer; super.setScorer(scorer); } + + public boolean acceptsDocsOutOfOrder() { + return true; + } + } private static final ScoreDoc[] EMPTY_SCOREDOCS = new ScoreDoc[0]; @@ -925,4 +954,8 @@ return new TopFieldDocs(totalHits, results, ((FieldValueHitQueue) pq).getFields(), maxScore); } + public boolean acceptsDocsOutOfOrder() { + return false; + } + } Index: src/java/org/apache/lucene/search/ParallelMultiSearcher.java =================================================================== --- src/java/org/apache/lucene/search/ParallelMultiSearcher.java (revision 786579) +++ src/java/org/apache/lucene/search/ParallelMultiSearcher.java (working copy) @@ -33,11 +33,11 @@ private Searchable[] searchables; private int[] starts; - /** Creates a searcher which searches searchables. */ + /** Creates a searchable which searches searchables. */ public ParallelMultiSearcher(Searchable[] searchables) throws IOException { super(searchables); - this.searchables=searchables; - this.starts=getStarts(); + this.searchables = searchables; + this.starts = getStarts(); } /** @@ -52,24 +52,16 @@ * Searchable, waits for each search to complete and merge * the results back together. 
*/ - public TopDocs search(Weight weight, Filter filter, int nDocs) + public TopDocs search(QueryWeight weight, Filter filter, int nDocs) throws IOException { HitQueue hq = new HitQueue(nDocs, false); int totalHits = 0; MultiSearcherThread[] msta = new MultiSearcherThread[searchables.length]; - for (int i = 0; i < searchables.length; i++) { // search each searcher + for (int i = 0; i < searchables.length; i++) { // search each searchable // Assume not too many searchables and cost of creating a thread is by far inferior to a search - msta[i] = - new MultiSearcherThread( - searchables[i], - weight, - filter, - nDocs, - hq, - i, - starts, - "MultiSearcher thread #" + (i + 1)); + msta[i] = new MultiSearcherThread(searchables[i], weight, filter, nDocs, + hq, i, starts, "MultiSearcher thread #" + (i + 1)); msta[i].start(); } @@ -105,25 +97,16 @@ * Searchable, waits for each search to complete and merges * the results back together. */ - public TopFieldDocs search(Weight weight, Filter filter, int nDocs, Sort sort) + public TopFieldDocs search(QueryWeight weight, Filter filter, int nDocs, Sort sort) throws IOException { // don't specify the fields - we'll wait to do this until we get results FieldDocSortedHitQueue hq = new FieldDocSortedHitQueue (null, nDocs); int totalHits = 0; MultiSearcherThread[] msta = new MultiSearcherThread[searchables.length]; - for (int i = 0; i < searchables.length; i++) { // search each searcher + for (int i = 0; i < searchables.length; i++) { // search each searchable // Assume not too many searchables and cost of creating a thread is by far inferior to a search - msta[i] = - new MultiSearcherThread( - searchables[i], - weight, - filter, - nDocs, - hq, - sort, - i, - starts, - "MultiSearcher thread #" + (i + 1)); + msta[i] = new MultiSearcherThread(searchables[i], weight, filter, nDocs, + hq, sort, i, starts, "MultiSearcher thread #" + (i + 1)); msta[i].start(); } @@ -156,28 +139,6 @@ } /** Lower-level search API. - * - *

{@link HitCollector#collect(int,float)} is called for every matching - * document. - * - *

Applications should only use this if they need all of the - * matching documents. The high-level search API ({@link - * Searcher#search(Query)}) is usually more efficient, as it skips - * non-high-scoring hits. - * - * @param weight to match documents - * @param filter if non-null, a bitset used to eliminate some documents - * @param results to receive hits - * - * @todo parallelize this one too - * @deprecated use {@link #search(Weight, Filter, Collector)} instead. - */ - public void search(Weight weight, Filter filter, final HitCollector results) - throws IOException { - search(weight, filter, new HitCollectorWrapper(results)); - } - - /** Lower-level search API. * *

{@link Collector#collect(int)} is called for every matching document. * @@ -192,7 +153,7 @@ * * @todo parallelize this one too */ - public void search(Weight weight, Filter filter, final Collector collector) + public void search(QueryWeight weight, Filter filter, final Collector collector) throws IOException { for (int i = 0; i < searchables.length; i++) { @@ -205,10 +166,12 @@ public void collect(int doc) throws IOException { collector.collect(doc); } - public void setNextReader(IndexReader reader, int docBase) throws IOException { collector.setNextReader(reader, start + docBase); } + public boolean acceptsDocsOutOfOrder() { + return collector.acceptsDocsOutOfOrder(); + } }; searchables[i].search(weight, filter, hc); @@ -231,7 +194,7 @@ class MultiSearcherThread extends Thread { private Searchable searchable; - private Weight weight; + private QueryWeight weight; private Filter filter; private int nDocs; private TopDocs docs; @@ -241,15 +204,8 @@ private IOException ioe; private Sort sort; - public MultiSearcherThread( - Searchable searchable, - Weight weight, - Filter filter, - int nDocs, - HitQueue hq, - int i, - int[] starts, - String name) { + public MultiSearcherThread(Searchable searchable, QueryWeight weight, Filter filter, + int nDocs, HitQueue hq, int i, int[] starts, String name) { super(name); this.searchable = searchable; this.weight = weight; @@ -260,16 +216,9 @@ this.starts = starts; } - public MultiSearcherThread( - Searchable searchable, - Weight weight, - Filter filter, - int nDocs, - FieldDocSortedHitQueue hq, - Sort sort, - int i, - int[] starts, - String name) { + public MultiSearcherThread(Searchable searchable, QueryWeight weight, + Filter filter, int nDocs, FieldDocSortedHitQueue hq, Sort sort, int i, + int[] starts, String name) { super(name); this.searchable = searchable; this.weight = weight; @@ -298,7 +247,7 @@ TopFieldDocs docsFields = (TopFieldDocs) docs; // If one of the Sort fields is FIELD_DOC, need to fix its values, so that // it will break ties by doc Id properly. Otherwise, it will compare to - // 'relative' doc Ids, that belong to two different searchers. + // 'relative' doc Ids, that belong to two different searchables. for (int j = 0; j < docsFields.fields.length; j++) { if (docsFields.fields[j].getType() == SortField.DOC) { // iterate over the score docs and change their fields value Index: src/java/org/apache/lucene/search/IndexSearcher.java =================================================================== --- src/java/org/apache/lucene/search/IndexSearcher.java (revision 786579) +++ src/java/org/apache/lucene/search/IndexSearcher.java (working copy) @@ -161,38 +161,33 @@ } // inherit javadoc - public TopDocs search(Weight weight, Filter filter, final int nDocs) - throws IOException { + public TopDocs search(QueryWeight weight, Filter filter, final int nDocs) throws IOException { - if (nDocs <= 0) // null might be returned from hq.top() below. + if (nDocs <= 0) { throw new IllegalArgumentException("nDocs must be > 0"); + } - // TODO: The following should be changed to first obtain a Scorer and then ask it - // if it's going to return in-order or out-of-order docs, and create TSDC - // accordingly. 
- TopScoreDocCollector collector = TopScoreDocCollector.create(nDocs, false); + TopScoreDocCollector collector = TopScoreDocCollector.create(nDocs, !weight.scoresDocsOutOfOrder()); search(weight, filter, collector); return collector.topDocs(); } - // inherit javadoc - public TopFieldDocs search(Weight weight, Filter filter, final int nDocs, - Sort sort) - throws IOException { + public TopFieldDocs search(QueryWeight weight, Filter filter, + final int nDocs, Sort sort) throws IOException { return search(weight, filter, nDocs, sort, true); } /** - * Just like {@link #search(Weight, Filter, int, Sort)}, but you choose + * Just like {@link #search(QueryWeight, Filter, int, Sort)}, but you choose * whether or not the fields in the returned {@link FieldDoc} instances should * be set by specifying fillFields.
 
   /**
-   * Just like {@link #search(Weight, Filter, int, Sort)}, but you choose
+   * Just like {@link #search(QueryWeight, Filter, int, Sort)}, but you choose
    * whether or not the fields in the returned {@link FieldDoc} instances should
    * be set by specifying fillFields.
    * NOTE: currently, this method tracks document scores and sets them in
    * the returned {@link FieldDoc}, however in 3.0 it will move to not track
    * document scores. If document scores tracking is still needed, you can use
-   * {@link #search(Weight, Filter, Collector)} and pass in a
+   * {@link #search(QueryWeight, Filter, Collector)} and pass in a
    * {@link TopFieldCollector} instance.
    */
-  public TopFieldDocs search(Weight weight, Filter filter, final int nDocs,
+  public TopFieldDocs search(QueryWeight weight, Filter filter, final int nDocs,
       Sort sort, boolean fillFields)
       throws IOException {
@@ -222,51 +217,51 @@
       TopDocCollector collector = new TopFieldDocCollector(reader, sort, nDocs);
       HitCollectorWrapper hcw = new HitCollectorWrapper(collector);
       hcw.setNextReader(reader, 0);
-      doSearch(reader, weight, filter, hcw);
+      if (filter == null) {
+        Scorer scorer = weight.scorer(reader, true, true);
+        scorer.score(hcw);
+      } else {
+        searchWithFilter(reader, weight, filter, hcw);
+      }
       return (TopFieldDocs) collector.topDocs();
     }
 
-    // Search each sub-reader
-    // TODO: The following should be changed to first obtain a Scorer and then ask it
-    // if it's going to return in-order or out-of-order docs, and create TSDC
-    // accordingly.
+
     TopFieldCollector collector = TopFieldCollector.create(sort, nDocs,
-        fillFields, fieldSortDoTrackScores, fieldSortDoMaxScore, false);
+        fillFields, fieldSortDoTrackScores, fieldSortDoMaxScore, !weight.scoresDocsOutOfOrder());
     search(weight, filter, collector);
     return (TopFieldDocs) collector.topDocs();
   }
 
-  // inherit javadoc
-  /** @deprecated use {@link #search(Weight, Filter, Collector)} instead. */
-  public void search(Weight weight, Filter filter, HitCollector results)
+  public void search(QueryWeight weight, Filter filter, Collector collector)
       throws IOException {
-    search(weight, filter, new HitCollectorWrapper(results));
-  }
-
-  // inherit javadoc
-  public void search(Weight weight, Filter filter, Collector collector)
-      throws IOException {
-    for (int i = 0; i < subReaders.length; i++) { // search each subreader
-      collector.setNextReader(subReaders[i], docStarts[i]);
-      doSearch(subReaders[i], weight, filter, collector);
+    if (filter == null) {
+      for (int i = 0; i < subReaders.length; i++) { // search each subreader
+        collector.setNextReader(subReaders[i], docStarts[i]);
+        Scorer scorer = weight.scorer(subReaders[i], !collector.acceptsDocsOutOfOrder(), true);
+        scorer.score(collector);
+      }
+    } else {
+      for (int i = 0; i < subReaders.length; i++) { // search each subreader
+        collector.setNextReader(subReaders[i], docStarts[i]);
+        searchWithFilter(subReaders[i], weight, filter, collector);
+      }
     }
   }
-
-  private void doSearch(IndexReader reader, Weight weight, Filter filter,
-      final Collector collector) throws IOException {
-    Scorer scorer = weight.scorer(reader);
-    if (scorer == null)
-      return;
+  private void searchWithFilter(IndexReader reader, QueryWeight weight,
+      final Filter filter, final Collector collector) throws IOException {
-    int docID = scorer.docID();
-    assert docID == -1 || docID == DocIdSetIterator.NO_MORE_DOCS;
+    assert filter != null;
-    if (filter == null) {
-      scorer.score(collector);
+    Scorer scorer = weight.scorer(reader, true, false);
+    if (scorer == null) {
       return;
     }
+    int docID = scorer.docID();
+    assert docID == -1 || docID == DocIdSetIterator.NO_MORE_DOCS;
+    // CHECKME: use ConjunctionScorer here?
     DocIdSetIterator filterIter = filter.getDocIdSet(reader).iterator();
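As the javadoc above notes, callers that still need per-document scores once tracking is dropped can pass their own TopFieldCollector to search(QueryWeight, Filter, Collector). A hedged sketch follows; searcher, query, sort and the count of 10 are assumed, and the comments only describe the boolean arguments as used here.

  // Sketch: collect sorted hits while keeping document scores filled in.
  static TopFieldDocs sortedHitsWithScores(IndexSearcher searcher, Query query, Sort sort)
      throws IOException {
    QueryWeight weight = query.queryWeight(searcher);
    TopFieldCollector collector = TopFieldCollector.create(sort, 10,
        true,  // fillFields
        true,  // track document scores
        false, // track max score
        !weight.scoresDocsOutOfOrder());
    searcher.search(weight, null, collector);
    return (TopFieldDocs) collector.topDocs();
  }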
@@ -300,7 +295,7 @@
     return query;
   }
 
-  public Explanation explain(Weight weight, int doc) throws IOException {
+  public Explanation explain(QueryWeight weight, int doc) throws IOException {
     return weight.explain(reader, doc);
   }
 
Index: src/java/org/apache/lucene/search/ExactPhraseScorer.java
===================================================================
--- src/java/org/apache/lucene/search/ExactPhraseScorer.java (revision 786579)
+++ src/java/org/apache/lucene/search/ExactPhraseScorer.java (working copy)
@@ -22,8 +22,8 @@
 
 final class ExactPhraseScorer extends PhraseScorer {
 
-  ExactPhraseScorer(Weight weight, TermPositions[] tps, int[] offsets, Similarity similarity,
-                    byte[] norms) {
+  ExactPhraseScorer(QueryWeight weight, TermPositions[] tps, int[] offsets,
+      Similarity similarity, byte[] norms) {
     super(weight, tps, offsets, similarity, norms);
   }
 
@@ -43,13 +43,13 @@
       while (first.position < last.position) { // scan forward in first
         do {
           if (!first.nextPosition())
-            return (float)freq;
+            return freq;
         } while (first.position < last.position);
         firstToLast();
       }
       freq++; // all equal: a match
     } while (last.nextPosition());
-    return (float)freq;
+    return freq;
   }
 }
Index: src/java/org/apache/lucene/search/TermScorer.java
===================================================================
--- src/java/org/apache/lucene/search/TermScorer.java (revision 786579)
+++ src/java/org/apache/lucene/search/TermScorer.java (working copy)
@@ -27,7 +27,7 @@
 
   private static final float[] SIM_NORM_DECODER = Similarity.getNormDecoder();
 
-  private Weight weight;
+  private QueryWeight weight;
   private TermDocs termDocs;
   private byte[] norms;
   private float weightValue;
@@ -41,13 +41,41 @@
   private static final int SCORE_CACHE_SIZE = 32;
   private float[] scoreCache = new float[SCORE_CACHE_SIZE];
 
-  /** Construct a TermScorer.
-   * @param weight The weight of the Term in the query.
-   * @param td An iterator over the documents matching the Term.
-   * @param similarity The Similarity implementation to be used for score computations.
-   * @param norms The field norms of the document fields for the Term.
+  /**
+   * Construct a TermScorer.
+   * 
+   * @param weight
+   *          The weight of the Term in the query.
+   * @param td
+   *          An iterator over the documents matching the Term.
+   * @param similarity
+   *          The Similarity implementation to be used for score
+   *          computations.
+   * @param norms
+   *          The field norms of the document fields for the Term.
+   * 
+   * @deprecated use delete in 3.0, kept around for TestTermScorer in tag which
+   *             creates TermScorer directly, and cannot pass in a QueryWeight
+   *             object.
    */
-  TermScorer(Weight weight, TermDocs td, Similarity similarity,
+  TermScorer(Weight weight, TermDocs td, Similarity similarity, byte[] norms) {
+    this(new QueryWeightWrapper(weight), td, similarity, norms);
+  }
+
+  /**
+   * Construct a TermScorer.
+   * 
+   * @param weight
+   *          The weight of the Term in the query.
+   * @param td
+   *          An iterator over the documents matching the Term.
+   * @param similarity
+   *          The Similarity implementation to be used for score
+   *          computations.
+   * @param norms
+   *          The field norms of the document fields for the Term.
+   */
+  TermScorer(QueryWeight weight, TermDocs td, Similarity similarity,
       byte[] norms) {
     super(similarity);
     this.weight = weight;
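Code that still produces a plain Weight (for example a Query whose createWeight has not been migrated yet) can bridge to the new API with the same wrapper the deprecated constructor above uses. A sketch, assuming legacyWeight and reader already exist; QueryWeightWrapper is only a stop-gap and goes away in 3.0.

  // Sketch: adapt a legacy Weight and obtain a Scorer through the new API.
  static Scorer adaptAndScore(Weight legacyWeight, IndexReader reader) throws IOException {
    QueryWeight adapted = new QueryWeightWrapper(legacyWeight);
    return adapted.scorer(reader, true /* scoreDocsInOrder */, false /* topScorer */);
  }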
@@ -194,7 +222,7 @@
    * @param doc The document number for the explanation.
    */
   public Explanation explain(int doc) throws IOException {
-    TermQuery query = (TermQuery)weight.getQuery();
+    TermQuery query = (TermQuery) weight.getQuery();
     Explanation tfExplanation = new Explanation();
     int tf = 0;
     while (pointer < pointerMax) {
Index: src/java/org/apache/lucene/search/Weight.java
===================================================================
--- src/java/org/apache/lucene/search/Weight.java (revision 786579)
+++ src/java/org/apache/lucene/search/Weight.java (working copy)
@@ -40,6 +40,8 @@
  *     At this point the weighting is complete.
  * <li>A Scorer is constructed by {@link #scorer(IndexReader)}.
  *
+ *
+ * @deprecated use {@link QueryWeight} instead.
  */
 public interface Weight extends java.io.Serializable {
   /** The query that this concerns. */
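To make the deprecation concrete, the single-argument scorer call mentioned in the javadoc above is replaced by the three-argument form on QueryWeight. A sketch, assuming w is an existing Weight, qw a QueryWeight and reader an IndexReader; the boolean values simply mirror what IndexSearcher passes elsewhere in this patch.

  Scorer before = w.scorer(reader);   // deprecated Weight API
  Scorer after = qw.scorer(reader,
      true,   // scoreDocsInOrder
      false); // topScorer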
Index: src/java/org/apache/lucene/index/DocumentsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/DocumentsWriter.java (revision 786579)
+++ src/java/org/apache/lucene/index/DocumentsWriter.java (working copy)
@@ -17,28 +17,28 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
+import java.io.PrintStream;
+import java.text.NumberFormat;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map.Entry;
+
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.document.Document;
-import org.apache.lucene.search.Similarity;
+import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
-import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.QueryWeight;
 import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.Weight;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.util.ArrayUtil;
 
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.ArrayList;
-import java.util.Map.Entry;
-import java.text.NumberFormat;
-
 /**
  * This class accepts multiple added documents and directly
  * writes a single segment file.  It does this more
@@ -172,7 +172,7 @@
     void setNext(DocWriter next) {
       this.next = next;
     }
-  };
+  }
 
   /**
    * The IndexingChain must define the {@link #getChain(DocumentsWriter)} method
@@ -303,7 +303,7 @@
   synchronized void setAllowMinus1Position() {
     for(int i=0;i= limit)
@@ -1144,7 +1144,7 @@
 
   /* Initial chunks size of the shared byte[] blocks used to store postings data */
   final static int BYTE_BLOCK_SHIFT = 15;
-  final static int BYTE_BLOCK_SIZE = (int) (1 << BYTE_BLOCK_SHIFT);
+  final static int BYTE_BLOCK_SIZE = 1 << BYTE_BLOCK_SHIFT;
   final static int BYTE_BLOCK_MASK = BYTE_BLOCK_SIZE - 1;
   final static int BYTE_BLOCK_NOT_MASK = ~BYTE_BLOCK_MASK;
@@ -1187,7 +1187,7 @@
 
   /* Initial chunks size of the shared int[] blocks used to store postings data */
   final static int INT_BLOCK_SHIFT = 13;
-  final static int INT_BLOCK_SIZE = (int) (1 << INT_BLOCK_SHIFT);
+  final static int INT_BLOCK_SIZE = 1 << INT_BLOCK_SHIFT;
   final static int INT_BLOCK_MASK = INT_BLOCK_SIZE - 1;
 
   private ArrayList freeIntBlocks = new ArrayList();
@@ -1234,7 +1234,7 @@
 
   /* Initial chunk size of the shared char[] blocks used to store term text */
   final static int CHAR_BLOCK_SHIFT = 14;
-  final static int CHAR_BLOCK_SIZE = (int) (1 << CHAR_BLOCK_SHIFT);
+  final static int CHAR_BLOCK_SIZE = 1 << CHAR_BLOCK_SHIFT;
   final static int CHAR_BLOCK_MASK = CHAR_BLOCK_SIZE - 1;
 
   final static int MAX_TERM_LENGTH = CHAR_BLOCK_SIZE-1;
@@ -1283,7 +1283,7 @@
   void balanceRAM() {
 
     // We flush when we've used our target usage
-    final long flushTrigger = (long) ramBufferSize;
+    final long flushTrigger = ramBufferSize;
 
     if (numBytesAlloc > freeTrigger) {
Index: contrib/remote/src/java/org/apache/lucene/search/RemoteSearchable.java
===================================================================
--- contrib/remote/src/java/org/apache/lucene/search/RemoteSearchable.java (revision 786579)
+++ contrib/remote/src/java/org/apache/lucene/search/RemoteSearchable.java (working copy)
@@ -48,11 +48,16 @@
 
   /** @deprecated use {@link #search(Weight, Filter, Collector)} instead.
    */
   public void search(Weight weight, Filter filter, HitCollector results)
       throws IOException {
-    local.search(weight, filter, results);
+    search(new QueryWeightWrapper(weight), filter, new HitCollectorWrapper(results));
   }
 
   public void search(Weight weight, Filter filter, Collector results)
       throws IOException {
+    search(new QueryWeightWrapper(weight), filter, results);
+  }
+
+  public void search(QueryWeight weight, Filter filter, Collector results)
+      throws IOException {
     local.search(weight, filter, results);
   }
 
@@ -74,12 +79,20 @@
   }
 
   public TopDocs search(Weight weight, Filter filter, int n) throws IOException {
+    return search(new QueryWeightWrapper(weight), filter, n);
+  }
+
+  public TopDocs search(QueryWeight weight, Filter filter, int n) throws IOException {
     return local.search(weight, filter, n);
   }
-
-  public TopFieldDocs search (Weight weight, Filter filter, int n, Sort sort)
+  public TopFieldDocs search(Weight weight, Filter filter, int n, Sort sort)
       throws IOException {
+    return search(new QueryWeightWrapper(weight), filter, n, sort);
+  }
+
+  public TopFieldDocs search(QueryWeight weight, Filter filter, int n, Sort sort)
+      throws IOException {
     return local.search (weight, filter, n, sort);
   }
 
@@ -96,6 +109,10 @@
   }
 
   public Explanation explain(Weight weight, int doc) throws IOException {
+    return explain(new QueryWeightWrapper(weight), doc);
+  }
+
+  public Explanation explain(QueryWeight weight, int doc) throws IOException {
     return local.explain(weight, doc);
   }
 
Index: contrib/miscellaneous/src/test/org/apache/lucene/index/TestFieldNormModifier.java
===================================================================
--- contrib/miscellaneous/src/test/org/apache/lucene/index/TestFieldNormModifier.java (revision 786579)
+++ contrib/miscellaneous/src/test/org/apache/lucene/index/TestFieldNormModifier.java (working copy)
@@ -146,6 +146,9 @@
       public void setScorer(Scorer scorer) throws IOException {
         this.scorer = scorer;
       }
+      public boolean acceptsDocsOutOfOrder() {
+        return true;
+      }
     });
 
     searcher.close();
@@ -174,6 +177,9 @@
       public void setScorer(Scorer scorer) throws IOException {
         this.scorer = scorer;
       }
+      public boolean acceptsDocsOutOfOrder() {
+        return true;
+      }
     });
 
     searcher.close();
@@ -219,6 +225,9 @@
       public void setScorer(Scorer scorer) throws IOException {
         this.scorer = scorer;
       }
+      public boolean acceptsDocsOutOfOrder() {
+        return true;
+      }
     });
 
     searcher.close();
Index: contrib/miscellaneous/src/test/org/apache/lucene/misc/TestLengthNormModifier.java
===================================================================
--- contrib/miscellaneous/src/test/org/apache/lucene/misc/TestLengthNormModifier.java (revision 786579)
+++ contrib/miscellaneous/src/test/org/apache/lucene/misc/TestLengthNormModifier.java (working copy)
@@ -153,6 +153,9 @@
       public void setScorer(Scorer scorer) throws IOException {
         this.scorer = scorer;
       }
+      public boolean acceptsDocsOutOfOrder() {
+        return true;
+      }
     });
 
     searcher.close();
@@ -187,6 +190,9 @@
       public void setScorer(Scorer scorer) throws IOException {
         this.scorer = scorer;
       }
+      public boolean acceptsDocsOutOfOrder() {
+        return true;
+      }
     });
 
     searcher.close();
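The test hunks above all add the new Collector method. For reference, a stand-alone Collector under the new contract could look like the following sketch; the class name and the counting behaviour are purely illustrative.

  // Sketch: a Collector that only counts hits and does not care about order,
  // so it returns true from acceptsDocsOutOfOrder.
  public class CountingCollector extends Collector {
    private int count;
    public void setScorer(Scorer scorer) {}                       // scores not needed
    public void setNextReader(IndexReader reader, int docBase) {} // no per-segment state
    public void collect(int doc) { count++; }
    public boolean acceptsDocsOutOfOrder() { return true; }
    public int getCount() { return count; }
  }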