terms, IndexSearcher searcher) throws IOException {
+ @Override public IDFExplanation idfExplain(PerReaderTermState[] terms, IndexSearcher searcher) throws IOException {
return new IDFExplanation() {
@Override
public float getIdf() {
Index: lucene/src/java/org/apache/lucene/search/ScoringRewrite.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/ScoringRewrite.java (revision 1086097)
+++ lucene/src/java/org/apache/lucene/search/ScoringRewrite.java (working copy)
@@ -151,12 +151,12 @@
if (e < 0 ) {
// duplicate term: update docFreq
final int pos = (-e)-1;
- array.termState[pos].register(state, readerContext.ord, termsEnum.docFreq());
+ array.termState[pos].register(state, readerContext.ord, termsEnum.docFreq(), termsEnum.totalTermFreq());
assert array.boost[pos] == boostAtt.getBoost() : "boost should be equal in all segment TermsEnums";
} else {
// new entry: we populate the entry initially
array.boost[e] = boostAtt.getBoost();
- array.termState[e] = new PerReaderTermState(topReaderContext, state, readerContext.ord, termsEnum.docFreq());
+ array.termState[e] = new PerReaderTermState(topReaderContext, state, readerContext.ord, termsEnum.docFreq(), termsEnum.totalTermFreq());
ScoringRewrite.this.checkMaxClauseCount(terms.size());
}
return true;
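
The register() calls above now carry termsEnum.totalTermFreq() alongside docFreq(), so the per-reader state aggregates both statistics across segments. A minimal sketch of the accumulation this assumes (the class below is hypothetical, not the actual PerReaderTermState source; note that a codec may report totalTermFreq() as -1 when it does not track it):

    // Hypothetical accumulator illustrating the assumed register() contract.
    final class TermStatsAccumulator {
      private int docFreq;
      private long totalTermFreq;

      // called once per segment in which the term occurs
      void register(int segDocFreq, long segTotalTermFreq) {
        docFreq += segDocFreq;
        // if any segment reports -1 (statistic unavailable), the aggregate stays -1
        if (totalTermFreq == -1 || segTotalTermFreq == -1) {
          totalTermFreq = -1;
        } else {
          totalTermFreq += segTotalTermFreq;
        }
      }

      int docFreq() { return docFreq; }
      long totalTermFreq() { return totalTermFreq; }
    }
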
Index: lucene/src/java/org/apache/lucene/search/Similarity.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/Similarity.java (revision 1086097)
+++ lucene/src/java/org/apache/lucene/search/Similarity.java (working copy)
@@ -19,12 +19,11 @@
import java.io.IOException;
-import java.util.Collection;
import org.apache.lucene.index.FieldInvertState;
-import org.apache.lucene.index.Term;
+import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.search.Explanation.IDFExplanation;
-import org.apache.lucene.util.SmallFloat;
+import org.apache.lucene.util.PerReaderTermState;
/**
@@ -34,493 +33,6 @@
* Overriding computation of these components is a convenient
* way to alter Lucene scoring.
*
- * Suggested reading:
- *
- * Introduction To Information Retrieval, Chapter 6.
- *
- *
- * The following describes how Lucene scoring evolves from
- * underlying information retrieval models to (efficient) implementation.
- * We first brief on VSM Score,
- * then derive from it Lucene's Conceptual Scoring Formula,
- * from which, finally, evolves Lucene's Practical Scoring Function
- * (the latter is connected directly with Lucene classes and methods).
- *
- *
- * Lucene combines
- *
- * Boolean model (BM) of Information Retrieval
- * with
- *
- * Vector Space Model (VSM) of Information Retrieval -
- * documents "approved" by BM are scored by VSM.
- *
- *
- * In VSM, documents and queries are represented as
- * weighted vectors in a multi-dimensional space,
- * where each distinct index term is a dimension,
- * and weights are
- * Tf-idf values.
- *
- *
- * VSM does not require weights to be Tf-idf values,
- * but Tf-idf values are believed to produce search results of high quality,
- * and so Lucene is using Tf-idf.
- * Tf and Idf are described in more detail below,
- * but for now, for completeness, let's just say that
- * for given term t and document (or query) x,
- * Tf(t,x) varies with the number of occurrences of term t in x
- * (when one increases so does the other) and
- * idf(t) similarly varies with the inverse of the
- * number of index documents containing term t.
- *
- *
- * VSM score of document d for query q is the
- * Cosine Similarity
- * of the weighted query vectors V(q) and V(d):
- *
- *     cosine-similarity(q,d)  =  V(q) · V(d) / ( |V(q)| |V(d)| )        (VSM Score)
- *
- * Where V(q) · V(d) is the
- * dot product
- * of the weighted vectors,
- * and |V(q)| and |V(d)| are their
- * Euclidean norms.
- *
- * Note: the above equation can be viewed as the dot product of
- * the normalized weighted vectors, in the sense that dividing
- * V(q) by its euclidean norm is normalizing it to a unit vector.
- *
- *
- * Lucene refines VSM score for both search quality and usability:
- *
- * - Normalizing V(d) to the unit vector is known to be problematic in that
- * it removes all document length information.
- * For some documents removing this info is probably ok,
- * e.g. a document made by duplicating a certain paragraph 10 times,
- * especially if that paragraph is made of distinct terms.
- * But for a document which contains no duplicated paragraphs,
- * this might be wrong.
- * To avoid this problem, a different document length normalization
- * factor is used, which normalizes to a vector equal to or larger
- * than the unit vector: doc-len-norm(d).
- *
- *
- * - At indexing, users can specify that certain documents are more
- * important than others, by assigning a document boost.
- * For this, the score of each document is also multiplied by its boost value
- * doc-boost(d).
- *
- *
- * - Lucene is field based, hence each query term applies to a single
- * field, document length normalization is by the length of the certain field,
- * and in addition to document boost there are also document fields boosts.
- *
- *
- * - The same field can be added to a document during indexing several times,
- * and so the boost of that field is the multiplication of the boosts of
- * the separate additions (or parts) of that field within the document.
- *
- *
- * - At search time users can specify boosts to each query, sub-query, and
- * each query term, hence the contribution of a query term to the score of
- * a document is multiplied by the boost of that query term query-boost(q).
- *
- *
- * - A document may match a multi term query without containing all
- * the terms of that query (this is correct for some of the queries),
- * and users can further reward documents matching more query terms
- * through a coordination factor, which is usually larger when
- * more terms are matched: coord-factor(q,d).
- *
- *
- *
- * Under the simplifying assumption of a single field in the index,
- * we get Lucene's Conceptual scoring formula:
- *
- *
- *     score(q,d)  =  coord-factor(q,d) · query-boost(q) ·
- *                    ( V(q) · V(d) / |V(q)| ) · doc-len-norm(d) · doc-boost(d)
- *
- *                                      (Lucene Conceptual Scoring Formula)
- *
- * The conceptual formula is a simplification in the sense that (1) terms and documents
- * are fielded and (2) boosts are usually per query term rather than per query.
- *
- *
- * We now describe how Lucene implements this conceptual scoring formula, and
- * derive from it Lucene's Practical Scoring Function.
- *
- *
- * For efficient score computation some scoring components
- * are computed and aggregated in advance:
- *
- *
- * - Query-boost for the query (actually for each query term)
- * is known when search starts.
- *
- *
- * - Query Euclidean norm |V(q)| can be computed when search starts,
- * as it is independent of the document being scored.
- * From a search optimization perspective, it is a valid question
- * why bother to normalize the query at all, because all
- * scored documents will be multiplied by the same |V(q)|,
- * and hence document ranks (their order by score) will not
- * be affected by this normalization.
- * There are two good reasons to keep this normalization:
- *
- * - Recall that
- *
- * Cosine Similarity can be used to find how similar
- * two documents are. One can use Lucene for e.g.
- * clustering, and use a document as a query to compute
- * its similarity to other documents.
- * In this use case it is important that the score of document d3
- * for query d1 is comparable to the score of document d3
- * for query d2. In other words, scores of a document for two
- * distinct queries should be comparable.
- * There are other applications that may require this.
- * And this is exactly what normalizing the query vector V(q)
- * provides: comparability (to a certain extent) of two or more queries.
- *
- *
- * - Applying query normalization on the scores helps to keep the
- * scores around the unit vector, hence preventing loss of score data
- * because of floating point precision limitations.
- *
- *
- *
- *
- * - Document length norm doc-len-norm(d) and document
- * boost doc-boost(d) are known at indexing time.
- * They are computed in advance and their multiplication
- * is saved as a single value in the index: norm(d).
- * (In the equations below, norm(t in d) means norm(field(t) in doc d)
- * where field(t) is the field associated with term t.)
- *
- *
- *
- * Lucene's Practical Scoring Function is derived from the above;
- * it relates to the conceptual formula as follows:
- *
- *     score(q,d)  =  coord(q,d) · queryNorm(q) ·
- *                    ∑ ( tf(t in d) · idf(t)² · t.getBoost() · norm(t,d) )
- *                    t in q
- *
- *                                      (Lucene Practical Scoring Function)
- *
- * where
- *
- * -
- *
- * tf(t in d)
- * correlates to the term's frequency,
- * defined as the number of times term t appears in the currently scored document d.
- * Documents that have more occurrences of a given term receive a higher score.
- * Note that tf(t in q) is assumed to be 1 and therefore it does not appear in this equation.
- * However, if a query contains the same term twice, there will be
- * two term-queries with that same term and hence the computation would still be correct (although
- * not very efficient).
- * The default computation for tf(t in d) in
- * {@link org.apache.lucene.search.DefaultSimilarity#tf(float) DefaultSimilarity} is:
- *
- *
- *     {@link org.apache.lucene.search.DefaultSimilarity#tf(float) tf(t in d)}  =  frequency½
- *
- *
- * -
- *
- * idf(t) stands for Inverse Document Frequency. This value
- * correlates to the inverse of docFreq
- * (the number of documents in which the term t appears).
- * This means rarer terms give a higher contribution to the total score.
- * idf(t) appears for t in both the query and the document,
- * hence it is squared in the equation.
- * The default computation for idf(t) in
- * {@link org.apache.lucene.search.DefaultSimilarity#idf(int, int) DefaultSimilarity} is:
- *
- *
- *     {@link org.apache.lucene.search.DefaultSimilarity#idf(int, int) idf(t)}  =  1 + log ( numDocs / (docFreq + 1) )
- *
- *
- * -
- *
- * coord(q,d)
- * is a score factor based on how many of the query terms are found in the specified document.
- * Typically, a document that contains more of the query's terms will receive a higher score
- * than another document with fewer query terms.
- * This is a search time factor computed in
- * {@link SimilarityProvider#coord(int, int) coord(q,d)}
- * by the SimilarityProvider in effect at search time.
- *
- *
- *
- * -
- *
- * queryNorm(q)
- *
- * is a normalizing factor used to make scores between queries comparable.
- * This factor does not affect document ranking (since all ranked documents are multiplied by the same factor),
- * but rather just attempts to make scores from different queries (or even different indexes) comparable.
- * This is a search time factor computed by the SimilarityProvider in effect at search time.
- *
- * The default computation in
- * {@link org.apache.lucene.search.DefaultSimilarityProvider#queryNorm(float) DefaultSimilarityProvider}
- * produces a Euclidean norm:
- *
- *
- *     queryNorm(q)  =  {@link org.apache.lucene.search.DefaultSimilarityProvider#queryNorm(float) queryNorm(sumOfSquaredWeights)}  =  1 / sumOfSquaredWeights½
- *
- *
- * The sum of squared weights (of the query terms) is
- * computed by the query {@link org.apache.lucene.search.Weight} object.
- * For example, a {@link org.apache.lucene.search.BooleanQuery}
- * computes this value as:
- *
- *
- *     {@link org.apache.lucene.search.Weight#sumOfSquaredWeights() sumOfSquaredWeights}  =
- *         {@link org.apache.lucene.search.Query#getBoost() q.getBoost()}² · ∑ ( idf(t) · t.getBoost() )²
- *                                                                         t in q
- *
- *
- *
- *
- * -
- *
- * t.getBoost()
- * is a search time boost of term t in the query q as
- * specified in the query text
- * (see query syntax),
- * or as set by application calls to
- * {@link org.apache.lucene.search.Query#setBoost(float) setBoost()}.
- * Notice that there is really no direct API for accessing the boost of one term in a multi-term query;
- * rather, multiple terms are represented in a query as multiple
- * {@link org.apache.lucene.search.TermQuery TermQuery} objects,
- * and so the boost of a term in the query is accessible by calling the sub-query
- * {@link org.apache.lucene.search.Query#getBoost() getBoost()}.
- *
- *
- *
- * -
- *
- * norm(t,d) encapsulates a few (indexing time) boost and length factors:
- *
- *
- * - Document boost - set by calling
- * {@link org.apache.lucene.document.Document#setBoost(float) doc.setBoost()}
- * before adding the document to the index.
- *
- * - Field boost - set by calling
- * {@link org.apache.lucene.document.Fieldable#setBoost(float) field.setBoost()}
- * before adding the field to a document.
- *
- * - lengthNorm - computed
- * when the document is added to the index in accordance with the number of tokens
- * of this field in the document, so that shorter fields contribute more to the score.
- * LengthNorm is computed by the Similarity class in effect at indexing.
- *
- *
- * The {@link #computeNorm} method is responsible for
- * combining all of these factors into a single float.
- *
- *
- * When a document is added to the index, all the above factors are multiplied.
- * If the document has multiple fields with the same name, all their boosts are multiplied together:
- *
- *
- *     norm(t,d)  =  {@link org.apache.lucene.document.Document#getBoost() doc.getBoost()} · lengthNorm ·
- *                   ∏ {@link org.apache.lucene.document.Fieldable#getBoost() f.getBoost}()
- *                   field f in d named as t
- *
- *
- * However the resulting norm value is {@link #encodeNormValue(float) encoded} as a single byte
- * before being stored.
- * At search time, the norm byte value is read from the index
- * {@link org.apache.lucene.store.Directory directory} and
- * {@link #decodeNormValue(byte) decoded} back to a float norm value.
- * This encoding/decoding, while reducing index size, comes with the price of
- * precision loss - it is not guaranteed that decode(encode(x)) = x.
- * For instance, decode(encode(0.89)) = 0.75.
- *
- * Compression of norm values to a single byte saves memory at search time,
- * because once a field is referenced at search time, its norms - for
- * all documents - are maintained in memory.
- *
- * The rationale supporting such lossy compression of norm values is that
- * given the difficulty (and inaccuracy) users have in expressing their true information
- * need in a query, only big differences matter.
- *
- * Last, note that search time is too late to modify this norm part of scoring, e.g. by
- * using a different {@link Similarity} for search.
- *
- *
- *
- *
* @see org.apache.lucene.index.IndexWriterConfig#setSimilarityProvider(SimilarityProvider)
* @see IndexSearcher#setSimilarityProvider(SimilarityProvider)
*/
@@ -528,20 +40,10 @@
public static final int NO_DOC_ID_PROVIDED = -1;
- /** Cache of decoded bytes. */
- private static final float[] NORM_TABLE = new float[256];
-
- static {
- for (int i = 0; i < 256; i++)
- NORM_TABLE[i] = SmallFloat.byte315ToFloat((byte)i);
- }
-
/** Decodes a normalization factor stored in an index.
* @see #encodeNormValue(float)
*/
- public float decodeNormValue(byte b) {
- return NORM_TABLE[b & 0xFF]; // & 0xFF maps negative bytes to positive above 127
- }
+ public abstract float decodeNormValue(byte b);
/**
* Computes the normalization value for a field, given the accumulated
@@ -569,40 +71,11 @@
public abstract float computeNorm(FieldInvertState state);
/** Encodes a normalization factor for storage in an index.
- *
- * The encoding uses a three-bit mantissa, a five-bit exponent, and
- * the zero-exponent point at 15, thus
- * representing values from around 7x10^9 to 2x10^-9 with about one
- * significant decimal digit of accuracy. Zero is also represented.
- * Negative numbers are rounded up to zero. Values too large to represent
- * are rounded down to the largest representable value. Positive values too
- * small to represent are rounded up to the smallest positive representable
- * value.
+ *
* @see org.apache.lucene.document.Field#setBoost(float)
* @see org.apache.lucene.util.SmallFloat
*/
- public byte encodeNormValue(float f) {
- return SmallFloat.floatToByte315(f);
- }
-
- /** Computes a score factor based on a term or phrase's frequency in a
- * document. This value is multiplied by the {@link #idf(int, int)}
- * factor for each term in the query and these products are then summed to
- * form the initial score for a document.
- *
- *
- * Terms and phrases repeated in a document indicate the topic of the
- * document, so implementations of this method usually return larger values
- * when freq is large, and smaller values when freq
- * is small.
- *
- *
- * The default implementation calls {@link #tf(float)}.
- *
- * @param freq the frequency of a term within a document
- * @return a score factor based on a term's within-document frequency
- */
- public float tf(int freq) {
- return tf((float)freq);
- }
+ public abstract byte encodeNormValue(float f);
/** Computes the amount of a sloppy phrase match, based on an edit distance.
* This value is summed for each sloppy phrase match in a document to form
@@ -619,125 +92,7 @@
*/
public abstract float sloppyFreq(int distance);
- /** Computes a score factor based on a term or phrase's frequency in a
- * document. This value is multiplied by the {@link #idf(int, int)}
- * factor for each term in the query and these products are then summed to
- * form the initial score for a document.
- *
- *
- * Terms and phrases repeated in a document indicate the topic of the
- * document, so implementations of this method usually return larger values
- * when freq is large, and smaller values when freq
- * is small.
- *
- * @param freq the frequency of a term within a document
- * @return a score factor based on a term's within-document frequency
- */
- public abstract float tf(float freq);
-
/**
- * Computes a score factor for a simple term and returns an explanation
- * for that score factor.
- *
- *
- * The default implementation uses:
- *
- *
- * idf(docFreq, searcher.maxDoc());
- *
- *
- * Note that {@link IndexSearcher#maxDoc()} is used instead of
- * {@link org.apache.lucene.index.IndexReader#numDocs() IndexReader#numDocs()} because also
- * {@link IndexSearcher#docFreq(Term)} is used, and when the latter
- * is inaccurate, so is {@link IndexSearcher#maxDoc()}, and in the same direction.
- * In addition, {@link IndexSearcher#maxDoc()} is more efficient to compute.
- *
- * @param term the term in question
- * @param searcher the document collection being searched
- * @param docFreq externally computed docFreq for this term
- * @return an IDFExplain object that includes both an idf score factor
- *         and an explanation for the term.
- * @throws IOException
- */
- public IDFExplanation idfExplain(final Term term, final IndexSearcher searcher, int docFreq) throws IOException {
- final int df = docFreq;
- final int max = searcher.maxDoc();
- final float idf = idf(df, max);
- return new IDFExplanation() {
- @Override
- public String explain() {
- return "idf(docFreq=" + df +
- ", maxDocs=" + max + ")";
- }
- @Override
- public float getIdf() {
- return idf;
- }};
- }
-
- /**
- * This method forwards to {@link
- * #idfExplain(Term,IndexSearcher,int)} by passing
- * searcher.docFreq(term) as the docFreq.
- */
- public IDFExplanation idfExplain(final Term term, final IndexSearcher searcher) throws IOException {
- return idfExplain(term, searcher, searcher.docFreq(term));
- }
-
- /**
- * Computes a score factor for a phrase.
- *
- *
- * The default implementation sums the idf factor for
- * each term in the phrase.
- *
- * @param terms the terms in the phrase
- * @param searcher the document collection being searched
- * @return an IDFExplain object that includes both an idf
- * score factor for the phrase and an explanation
- * for each term.
- * @throws IOException
- */
- public IDFExplanation idfExplain(Collection<Term> terms, IndexSearcher searcher) throws IOException {
- final int max = searcher.maxDoc();
- float idf = 0.0f;
- final StringBuilder exp = new StringBuilder();
- for (final Term term : terms ) {
- final int df = searcher.docFreq(term);
- idf += idf(df, max);
- exp.append(" ");
- exp.append(term.text());
- exp.append("=");
- exp.append(df);
- }
- final float fIdf = idf;
- return new IDFExplanation() {
- @Override
- public float getIdf() {
- return fIdf;
- }
- @Override
- public String explain() {
- return exp.toString();
- }
- };
- }
-
- /** Computes a score factor based on a term's document frequency (the number
- * of documents which contain the term). This value is multiplied by the
- * {@link #tf(int)} factor for each term in the query and these products are
- * then summed to form the initial score for a document.
- *
- * Terms that occur in fewer documents are better indicators of topic, so
- * implementations of this method usually return larger values for rare terms,
- * and smaller values for common terms.
- *
- * @param docFreq the number of documents which contain the term
- * @param numDocs the total number of documents in the collection
- * @return a score factor based on the term's document frequency
- */
- public abstract float idf(int docFreq, int numDocs);
-
- /**
* Calculate a scoring factor based on the data in the payload. Overriding implementations
* are responsible for interpreting what is in the payload. Lucene makes no assumptions about
* what is in the byte array.
@@ -758,5 +113,17 @@
{
return 1;
}
-
+
+ public abstract IDFExplanation computeWeight(IndexSearcher searcher, String fieldName, PerReaderTermState... termStats) throws IOException;
+
+ public abstract ExactDocScorer exactDocScorer(Weight weight, String fieldName, AtomicReaderContext context) throws IOException;
+ public abstract SloppyDocScorer sloppyDocScorer(Weight weight, String fieldName, AtomicReaderContext context) throws IOException;
+
+ public abstract class ExactDocScorer {
+ public abstract float score(int doc, int freq);
+ }
+
+ public abstract class SloppyDocScorer {
+ public abstract float score(int doc, float freq);
+ }
}
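
With tf()/idf() and the norm table gone, Similarity is reduced to the abstract hooks above: computeWeight() folds collection-level term statistics into an IDFExplanation once per query, and exactDocScorer()/sloppyDocScorer() return per-segment scorers that replace the old inline tf-times-norm arithmetic. A minimal sketch of a non-TF/IDF implementation against this API; ConstantSimilarity and its score-everything-1.0 behavior are invented for illustration, only the abstract signatures come from the patch:

    import java.io.IOException;
    import org.apache.lucene.index.FieldInvertState;
    import org.apache.lucene.index.IndexReader.AtomicReaderContext;
    import org.apache.lucene.search.Explanation.IDFExplanation;
    import org.apache.lucene.util.PerReaderTermState;

    // Hypothetical: every matching document scores 1.0, ignoring tf, idf and norms.
    public class ConstantSimilarity extends Similarity {
      @Override public float computeNorm(FieldInvertState state) { return state.getBoost(); }
      @Override public byte encodeNormValue(float f) { return (byte) 0; }
      @Override public float decodeNormValue(byte b) { return 1f; }
      @Override public float sloppyFreq(int distance) { return 1f; }

      @Override
      public IDFExplanation computeWeight(IndexSearcher searcher, String fieldName,
                                          PerReaderTermState... termStats) throws IOException {
        return new IDFExplanation() {
          @Override public float getIdf() { return 1f; }
          @Override public String explain() { return "constant(1)"; }
        };
      }

      @Override
      public ExactDocScorer exactDocScorer(Weight weight, String fieldName,
                                           AtomicReaderContext context) throws IOException {
        return new ExactDocScorer() {
          @Override public float score(int doc, int freq) { return 1f; }
        };
      }

      @Override
      public SloppyDocScorer sloppyDocScorer(Weight weight, String fieldName,
                                             AtomicReaderContext context) throws IOException {
        return new SloppyDocScorer() {
          @Override public float score(int doc, float freq) { return 1f; }
        };
      }
    }
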
Index: lucene/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java (revision 1086097)
+++ lucene/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java (working copy)
@@ -145,7 +145,7 @@
@Override
public Scorer scorer(AtomicReaderContext context, ScorerContext scorerContext) throws IOException {
return new PayloadNearSpanScorer(query.getSpans(context), this,
- similarity, context.reader.norms(query.getField()));
+ similarity, query.getField(), context);
}
}
@@ -155,8 +155,8 @@
private int payloadsSeen;
protected PayloadNearSpanScorer(Spans spans, Weight weight,
- Similarity similarity, byte[] norms) throws IOException {
- super(spans, weight, similarity, norms);
+ Similarity similarity, String field, AtomicReaderContext context) throws IOException {
+ super(spans, weight, similarity, field, context);
this.spans = spans;
}
Index: lucene/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java (revision 1086097)
+++ lucene/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java (working copy)
@@ -76,7 +76,7 @@
@Override
public Scorer scorer(AtomicReaderContext context, ScorerContext scorerContext) throws IOException {
return new PayloadTermSpanScorer((TermSpans) query.getSpans(context),
- this, similarity, context.reader.norms(query.getField()));
+ this, similarity, query.getField(), context);
}
protected class PayloadTermSpanScorer extends SpanScorer {
@@ -86,8 +86,8 @@
private final TermSpans termSpans;
public PayloadTermSpanScorer(TermSpans spans, Weight weight,
- Similarity similarity, byte[] norms) throws IOException {
- super(spans, weight, similarity, norms);
+ Similarity similarity, String field, AtomicReaderContext context) throws IOException {
+ super(spans, weight, similarity, field, context);
termSpans = spans;
}
Index: lucene/src/java/org/apache/lucene/search/spans/SpanWeight.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/spans/SpanWeight.java (revision 1086097)
+++ lucene/src/java/org/apache/lucene/search/spans/SpanWeight.java (working copy)
@@ -18,13 +18,15 @@
*/
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.*;
import org.apache.lucene.search.Explanation.IDFExplanation;
+import org.apache.lucene.util.PerReaderTermState;
import java.io.IOException;
-import java.util.HashSet;
import java.util.Set;
+import java.util.TreeSet;
/**
* Expert-only. Public for use by other weight implementations
@@ -45,10 +47,14 @@
this.similarity = searcher.getSimilarityProvider().get(query.getField());
this.query = query;
- terms=new HashSet<Term>();
+ terms=new TreeSet<Term>();
query.extractTerms(terms);
-
- idfExp = similarity.idfExplain(terms, searcher);
+ final ReaderContext context = searcher.getTopReaderContext();
+ final PerReaderTermState states[] = new PerReaderTermState[terms.size()];
+ int i = 0;
+ for (Term term : terms)
+ states[i++] = PerReaderTermState.build(context, term, true);
+ idfExp = similarity.computeWeight(searcher, query.getField(), states);
idf = idfExp.getIdf();
}
@@ -73,8 +79,7 @@
@Override
public Scorer scorer(AtomicReaderContext context, ScorerContext scorerContext) throws IOException {
- return new SpanScorer(query.getSpans(context), this, similarity, context.reader
- .norms(query.getField()));
+ return new SpanScorer(query.getSpans(context), this, similarity, query.getField(), context);
}
@Override
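
Two things happen in the SpanWeight constructor above: terms are now collected into a TreeSet rather than a HashSet, presumably so that PerReaderTermState.build() walks the terms dictionary in sorted order, and one PerReaderTermState is built per extracted term before the batch is handed to computeWeight(). Pulled out as a standalone helper, given the imports already present in SpanWeight.java (the buildStates name is hypothetical; the body mirrors the constructor):

    // Build one per-reader term-statistics object per query term.
    static PerReaderTermState[] buildStates(IndexSearcher searcher, Set<Term> terms)
        throws IOException {
      final ReaderContext context = searcher.getTopReaderContext();
      final PerReaderTermState[] states = new PerReaderTermState[terms.size()];
      int i = 0;
      for (Term term : terms) {
        // 'true' asks build() to cache each segment's TermState for later seeking
        states[i++] = PerReaderTermState.build(context, term, true);
      }
      return states;
    }
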
Index: lucene/src/java/org/apache/lucene/search/spans/SpanScorer.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/spans/SpanScorer.java (revision 1086097)
+++ lucene/src/java/org/apache/lucene/search/spans/SpanScorer.java (working copy)
@@ -19,7 +19,9 @@
import java.io.IOException;
+import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.TFIDFSimilarity;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Similarity;
@@ -29,22 +31,21 @@
*/
public class SpanScorer extends Scorer {
protected Spans spans;
- protected byte[] norms;
- protected float value;
protected boolean more = true;
protected int doc;
protected float freq;
protected final Similarity similarity;
+ protected final Similarity.SloppyDocScorer docScorer;
- protected SpanScorer(Spans spans, Weight weight, Similarity similarity, byte[] norms)
+ protected SpanScorer(Spans spans, Weight weight, Similarity similarity, String field, AtomicReaderContext context)
throws IOException {
super(weight);
this.similarity = similarity;
+ this.docScorer = similarity.sloppyDocScorer(weight, field, context);
this.spans = spans;
- this.norms = norms;
- this.value = weight.getValue();
+
if (this.spans.next()) {
doc = -1;
} else {
@@ -94,8 +95,7 @@
@Override
public float score() throws IOException {
- float raw = similarity.tf(freq) * value; // raw score
- return norms == null? raw : raw * similarity.decodeNormValue(norms[doc]); // normalize
+ return docScorer.score(doc, freq);
}
@Override
@@ -105,15 +105,18 @@
/** This method is no longer an official member of {@link Scorer},
* but it is needed by SpanWeight to build an explanation. */
+ // nocommit: die
protected Explanation explain(final int doc) throws IOException {
Explanation tfExplanation = new Explanation();
int expDoc = advance(doc);
float phraseFreq = (expDoc == doc) ? freq : 0.0f;
- tfExplanation.setValue(similarity.tf(phraseFreq));
- tfExplanation.setDescription("tf(phraseFreq=" + phraseFreq + ")");
-
+ if (similarity instanceof TFIDFSimilarity) {
+ TFIDFSimilarity tfidf = (TFIDFSimilarity) similarity;
+ tfExplanation.setValue(tfidf.tf(phraseFreq));
+ tfExplanation.setDescription("tf(phraseFreq=" + phraseFreq + ")");
+ }
return tfExplanation;
}
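
SpanScorer.score() now delegates entirely to the SloppyDocScorer obtained from the Similarity. For the TF/IDF case such a scorer would reproduce the tf * weight * norm product this patch deletes; a sketch of how a TFIDFSimilarity subclass might build one, where the weight.getValue() and context.reader.norms() calls are lifted from the removed code and the rest is an assumption:

    // Inside a TFIDFSimilarity subclass; a sketch, not the patch's implementation.
    @Override
    public SloppyDocScorer sloppyDocScorer(Weight weight, String fieldName,
                                           AtomicReaderContext context) throws IOException {
      final float value = weight.getValue();                // queryBoost * idf * queryNorm
      final byte[] norms = context.reader.norms(fieldName); // null if norms are omitted
      return new SloppyDocScorer() {
        @Override
        public float score(int doc, float freq) {
          final float raw = tf(freq) * value;               // raw score
          return norms == null ? raw : raw * decodeNormValue(norms[doc]); // normalize
        }
      };
    }
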
Index: lucene/src/java/org/apache/lucene/search/SloppyPhraseScorer.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/SloppyPhraseScorer.java (revision 1086097)
+++ lucene/src/java/org/apache/lucene/search/SloppyPhraseScorer.java (working copy)
@@ -20,16 +20,20 @@
import java.io.IOException;
import java.util.HashMap;
+import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+
final class SloppyPhraseScorer extends PhraseScorer {
private int slop;
private PhrasePositions repeats[];
private PhrasePositions tmpPos[]; // for flipping repeating pps.
private boolean checkedRepeats;
-
+ private final Similarity similarity;
+
SloppyPhraseScorer(Weight weight, PhraseQuery.PostingsAndFreq[] postings, Similarity similarity,
- int slop, byte[] norms) {
- super(weight, postings, similarity, norms);
+ int slop, String field, AtomicReaderContext context) throws IOException {
+ super(weight, postings, similarity, field, context);
this.slop = slop;
+ this.similarity = similarity;
}
/**
Index: lucene/src/java/org/apache/lucene/search/MultiPhraseQuery.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/MultiPhraseQuery.java (revision 1086097)
+++ lucene/src/java/org/apache/lucene/search/MultiPhraseQuery.java (working copy)
@@ -22,12 +22,14 @@
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.search.Explanation.IDFExplanation;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.PerReaderTermState;
import org.apache.lucene.util.ToStringUtils;
import org.apache.lucene.util.PriorityQueue;
import org.apache.lucene.util.Bits;
@@ -140,15 +142,16 @@
public MultiPhraseWeight(IndexSearcher searcher)
throws IOException {
this.similarity = searcher.getSimilarityProvider().get(field);
-
+ final ReaderContext context = searcher.getTopReaderContext();
+
// compute idf
- ArrayList<Term> allTerms = new ArrayList<Term>();
+ ArrayList<PerReaderTermState> allTerms = new ArrayList<PerReaderTermState>();
for(final Term[] terms: termArrays) {
for (Term term: terms) {
- allTerms.add(term);
+ allTerms.add(PerReaderTermState.build(context, term, true));
}
}
- idfExp = similarity.idfExplain(allTerms, searcher);
+ idfExp = similarity.computeWeight(searcher, field, allTerms.toArray(new PerReaderTermState[allTerms.size()]));
idf = idfExp.getIdf();
}
@@ -223,8 +226,7 @@
}
if (slop == 0) {
- ExactPhraseScorer s = new ExactPhraseScorer(this, postingsFreqs, similarity,
- reader.norms(field));
+ ExactPhraseScorer s = new ExactPhraseScorer(this, postingsFreqs, similarity, field, context);
if (s.noDocs) {
return null;
} else {
@@ -232,13 +234,18 @@
}
} else {
return new SloppyPhraseScorer(this, postingsFreqs, similarity,
- slop, reader.norms(field));
+ slop, field, context);
}
}
@Override
public Explanation explain(AtomicReaderContext context, int doc)
throws IOException {
+ //nocommit: fix explains
+ if (!(similarity instanceof TFIDFSimilarity))
+ return new ComplexExplanation();
+ final TFIDFSimilarity similarity = (TFIDFSimilarity) this.similarity;
+
ComplexExplanation result = new ComplexExplanation();
result.setDescription("weight("+getQuery()+" in "+doc+"), product of:");
Index: lucene/src/java/org/apache/lucene/search/PhraseScorer.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/PhraseScorer.java (revision 1086097)
+++ lucene/src/java/org/apache/lucene/search/PhraseScorer.java (working copy)
@@ -19,6 +19,8 @@
import java.io.IOException;
+import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+
/** Expert: Scoring functionality for phrase queries.
*
A document is considered matching if it contains the phrase-query terms
* at "valid" positions. What "valid positions" are
@@ -40,14 +42,12 @@
private float freq; //phrase frequency in current doc as computed by phraseFreq().
- protected final Similarity similarity;
+ protected final Similarity.SloppyDocScorer docScorer;
PhraseScorer(Weight weight, PhraseQuery.PostingsAndFreq[] postings,
- Similarity similarity, byte[] norms) {
+ Similarity similarity, String field, AtomicReaderContext context) throws IOException {
super(weight);
- this.similarity = similarity;
- this.norms = norms;
- this.value = weight.getValue();
+ docScorer = similarity.sloppyDocScorer(weight, field, context);
// convert tps to a list of phrase positions.
// note: phrase-position differs from term-position in that its position
@@ -107,9 +107,7 @@
@Override
public float score() throws IOException {
- //System.out.println("scoring " + first.doc);
- float raw = similarity.tf(freq) * value; // raw score
- return norms == null ? raw : raw * similarity.decodeNormValue(norms[first.doc]); // normalize
+ return docScorer.score(first.doc, freq);
}
@Override
Index: lucene/src/java/org/apache/lucene/search/PhraseQuery.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/PhraseQuery.java (revision 1086097)
+++ lucene/src/java/org/apache/lucene/search/PhraseQuery.java (working copy)
@@ -22,10 +22,14 @@
import java.util.ArrayList;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader.ReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.TermState;
+import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.Explanation.IDFExplanation;
+import org.apache.lucene.util.PerReaderTermState;
import org.apache.lucene.util.ToStringUtils;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.Bits;
@@ -143,12 +147,16 @@
private float queryNorm;
private float queryWeight;
private IDFExplanation idfExp;
+ private transient PerReaderTermState states[];
public PhraseWeight(IndexSearcher searcher)
throws IOException {
this.similarity = searcher.getSimilarityProvider().get(field);
-
- idfExp = similarity.idfExplain(terms, searcher);
+ final ReaderContext context = searcher.getTopReaderContext();
+ states = new PerReaderTermState[terms.size()];
+ for (int i = 0; i < terms.size(); i++)
+ states[i] = PerReaderTermState.build(context, terms.get(i), true);
+ idfExp = similarity.computeWeight(searcher, field, states);
idf = idfExp.getIdf();
}
@@ -183,21 +191,29 @@
final Bits delDocs = reader.getDeletedDocs();
for (int i = 0; i < terms.size(); i++) {
final Term t = terms.get(i);
+ final TermState state = states[i].get(context.ord);
+ if (state == null) /* term doesn't exist in this segment */
+ return null;
DocsAndPositionsEnum postingsEnum = reader.termPositionsEnum(delDocs,
t.field(),
- t.bytes());
+ t.bytes(),
+ state);
// PhraseQuery on a field that did not index
// positions.
if (postingsEnum == null) {
- if (reader.termDocsEnum(delDocs, t.field(), t.bytes()) != null) {
+ if (reader.termDocsEnum(delDocs, t.field(), t.bytes(), state) != null) {
// term does exist, but has no positions
throw new IllegalStateException("field \"" + t.field() + "\" was indexed with Field.omitTermFreqAndPositions=true; cannot run PhraseQuery (term=" + t.text() + ")");
} else {
// term does not exist
+ // nocommit: should be impossible, state should be null?
return null;
}
}
- postingsFreqs[i] = new PostingsAndFreq(postingsEnum, reader.docFreq(t.field(), t.bytes()), positions.get(i).intValue());
+ // get the docFreq without seeking
+ TermsEnum te = reader.fields().terms(field).getThreadTermsEnum();
+ te.seek(t.bytes(), state);
+ postingsFreqs[i] = new PostingsAndFreq(postingsEnum, te.docFreq(), positions.get(i).intValue());
}
// sort by increasing docFreq order
@@ -206,8 +222,7 @@
}
if (slop == 0) { // optimize exact case
- ExactPhraseScorer s = new ExactPhraseScorer(this, postingsFreqs, similarity,
- reader.norms(field));
+ ExactPhraseScorer s = new ExactPhraseScorer(this, postingsFreqs, similarity, field, context);
if (s.noDocs) {
return null;
} else {
@@ -215,15 +230,18 @@
}
} else {
return
- new SloppyPhraseScorer(this, postingsFreqs, similarity, slop,
- reader.norms(field));
+ new SloppyPhraseScorer(this, postingsFreqs, similarity, slop, field, context);
}
}
@Override
public Explanation explain(AtomicReaderContext context, int doc)
throws IOException {
-
+ //nocommit: fix explains
+ if (!(similarity instanceof TFIDFSimilarity))
+ return new ComplexExplanation();
+ final TFIDFSimilarity similarity = (TFIDFSimilarity) this.similarity;
+
ComplexExplanation result = new ComplexExplanation();
result.setDescription("weight("+getQuery()+" in "+doc+"), product of:");
Index: lucene/src/java/org/apache/lucene/search/TopTermsRewrite.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/TopTermsRewrite.java (revision 1086097)
+++ lucene/src/java/org/apache/lucene/search/TopTermsRewrite.java (working copy)
@@ -101,14 +101,14 @@
if (t != null) {
// if the term is already in the PQ, only update docFreq of term in PQ
assert t.boost == boost : "boost should be equal in all segment TermsEnums";
- t.termState.register(state, readerContext.ord, termsEnum.docFreq());
+ t.termState.register(state, readerContext.ord, termsEnum.docFreq(), termsEnum.totalTermFreq());
} else {
// add new entry in PQ, we must clone the term, else it may get overwritten!
st.bytes.copy(bytes);
st.boost = boost;
visitedTerms.put(st.bytes, st);
assert st.termState.docFreq() == 0;
- st.termState.register(state, readerContext.ord, termsEnum.docFreq());
+ st.termState.register(state, readerContext.ord, termsEnum.docFreq(), termsEnum.totalTermFreq());
stQueue.offer(st);
// possibly drop entries from queue
if (stQueue.size() > maxSize) {
Index: lucene/src/java/org/apache/lucene/search/ConstantScoreAutoRewrite.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/ConstantScoreAutoRewrite.java (revision 1086097)
+++ lucene/src/java/org/apache/lucene/search/ConstantScoreAutoRewrite.java (working copy)
@@ -141,9 +141,9 @@
assert termState != null;
if (pos < 0) {
pos = (-pos)-1;
- array.termState[pos].register(termState, readerContext.ord, termsEnum.docFreq());
+ array.termState[pos].register(termState, readerContext.ord, termsEnum.docFreq(), termsEnum.totalTermFreq());
} else {
- array.termState[pos] = new PerReaderTermState(topReaderContext, termState, readerContext.ord, termsEnum.docFreq());
+ array.termState[pos] = new PerReaderTermState(topReaderContext, termState, readerContext.ord, termsEnum.docFreq(), termsEnum.totalTermFreq());
}
return true;
}
Index: lucene/src/java/org/apache/lucene/search/TermQuery.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/TermQuery.java (revision 1086097)
+++ lucene/src/java/org/apache/lucene/search/TermQuery.java (working copy)
@@ -50,16 +50,12 @@
private final IDFExplanation idfExp;
private transient PerReaderTermState termStates;
- public TermWeight(IndexSearcher searcher, PerReaderTermState termStates, int docFreq)
+ public TermWeight(IndexSearcher searcher, PerReaderTermState termStates)
throws IOException {
assert termStates != null : "PerReaderTermState must not be null";
this.termStates = termStates;
this.similarity = searcher.getSimilarityProvider().get(term.field());
- if (docFreq != -1) {
- idfExp = similarity.idfExplain(term, searcher, docFreq);
- } else {
- idfExp = similarity.idfExplain(term, searcher);
- }
+ idfExp = similarity.computeWeight(searcher, term.field(), termStates);
idf = idfExp.getIdf();
}
@@ -98,7 +94,7 @@
}
final DocsEnum docs = reader.termDocsEnum(reader.getDeletedDocs(), field, term.bytes(), state);
assert docs != null;
- return new TermScorer(this, docs, similarity, context.reader.norms(field));
+ return new TermScorer(this, docs, similarity, field, context);
}
private boolean termNotInReader(IndexReader reader, String field, BytesRef bytes) throws IOException {
@@ -110,6 +106,11 @@
@Override
public Explanation explain(AtomicReaderContext context, int doc)
throws IOException {
+ //nocommit: fix explains
+ if (!(similarity instanceof TFIDFSimilarity))
+ return new ComplexExplanation();
+ final TFIDFSimilarity similarity = (TFIDFSimilarity) this.similarity;
+
final IndexReader reader = context.reader;
ComplexExplanation result = new ComplexExplanation();
@@ -214,20 +215,20 @@
@Override
public Weight createWeight(IndexSearcher searcher) throws IOException {
final ReaderContext context = searcher.getTopReaderContext();
- final int weightDocFreq;
final PerReaderTermState termState;
if (perReaderTermState == null || perReaderTermState.topReaderContext != context) {
// make TermQuery single-pass if we don't have a PRTS or if the context differs!
termState = PerReaderTermState.build(context, term, true); // cache term lookups!
- // we must not ignore the given docFreq - if set use the given value
- weightDocFreq = docFreq == -1 ? termState.docFreq() : docFreq;
} else {
// PRTS was pre-build for this IS
termState = this.perReaderTermState;
- weightDocFreq = docFreq;
}
+
+ // we must not ignore the given docFreq - if set use the given value (lie)
+ if (docFreq != -1)
+ termState.setDocFreq(docFreq);
- return new TermWeight(searcher, termState, weightDocFreq);
+ return new TermWeight(searcher, termState);
}
@Override
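
createWeight() now funnels everything through PerReaderTermState: the state is built (or reused) once per top-level reader context, and an externally supplied docFreq overrides the aggregated statistic via setDocFreq() instead of being threaded through the TermWeight constructor. The flow, sketched with searcher, term and externalDocFreq as placeholders:

    final ReaderContext context = searcher.getTopReaderContext();
    final PerReaderTermState termState = PerReaderTermState.build(context, term, true);
    if (externalDocFreq != -1) {
      // deliberately "lie" about the statistic, e.g. when a distributed caller
      // has already computed a global docFreq
      termState.setDocFreq(externalDocFreq);
    }
    final Weight weight = new TermWeight(searcher, termState);
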
Index: lucene/src/java/org/apache/lucene/search/TFIDFSimilarity.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/TFIDFSimilarity.java (revision 0)
+++ lucene/src/java/org/apache/lucene/search/TFIDFSimilarity.java (revision 0)
@@ -0,0 +1,787 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+import java.io.IOException;
+import java.io.Serializable;
+
+import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Explanation.IDFExplanation;
+import org.apache.lucene.util.PerReaderTermState;
+import org.apache.lucene.util.SmallFloat;
+
+
+/**
+ * Expert: Scoring API.
+ *
+ * Similarity defines the components of Lucene scoring.
+ * Overriding computation of these components is a convenient
+ * way to alter Lucene scoring.
+ *
+ *
+ * Suggested reading:
+ *
+ * Introduction To Information Retrieval, Chapter 6.
+ *
+ *
+ * The following describes how Lucene scoring evolves from
+ * underlying information retrieval models to (efficient) implementation.
+ * We first brief on VSM Score,
+ * then derive from it Lucene's Conceptual Scoring Formula,
+ * from which, finally, evolves Lucene's Practical Scoring Function
+ * (the latter is connected directly with Lucene classes and methods).
+ *
+ *
+ * Lucene combines
+ *
+ * Boolean model (BM) of Information Retrieval
+ * with
+ *
+ * Vector Space Model (VSM) of Information Retrieval -
+ * documents "approved" by BM are scored by VSM.
+ *
+ *
+ * In VSM, documents and queries are represented as
+ * weighted vectors in a multi-dimensional space,
+ * where each distinct index term is a dimension,
+ * and weights are
+ * Tf-idf values.
+ *
+ *
+ * VSM does not require weights to be Tf-idf values,
+ * but Tf-idf values are believed to produce search results of high quality,
+ * and so Lucene is using Tf-idf.
+ * Tf and Idf are described in more detail below,
+ * but for now, for completeness, let's just say that
+ * for given term t and document (or query) x,
+ * Tf(t,x) varies with the number of occurrences of term t in x
+ * (when one increases so does the other) and
+ * idf(t) similarly varies with the inverse of the
+ * number of index documents containing term t.
+ *
+ *
+ * VSM score of document d for query q is the
+ * Cosine Similarity
+ * of the weighted query vectors V(q) and V(d):
+ *
+ *     cosine-similarity(q,d)  =  V(q) · V(d) / ( |V(q)| |V(d)| )        (VSM Score)
+ *
+ * Where V(q) · V(d) is the
+ * dot product
+ * of the weighted vectors,
+ * and |V(q)| and |V(d)| are their
+ * Euclidean norms.
+ *
+ * Note: the above equation can be viewed as the dot product of
+ * the normalized weighted vectors, in the sense that dividing
+ * V(q) by its euclidean norm is normalizing it to a unit vector.
+ *
+ *
+ * Lucene refines VSM score for both search quality and usability:
+ *
+ * - Normalizing V(d) to the unit vector is known to be problematic in that
+ * it removes all document length information.
+ * For some documents removing this info is probably ok,
+ * e.g. a document made by duplicating a certain paragraph 10 times,
+ * especially if that paragraph is made of distinct terms.
+ * But for a document which contains no duplicated paragraphs,
+ * this might be wrong.
+ * To avoid this problem, a different document length normalization
+ * factor is used, which normalizes to a vector equal to or larger
+ * than the unit vector: doc-len-norm(d).
+ *
+ *
+ * - At indexing, users can specify that certain documents are more
+ * important than others, by assigning a document boost.
+ * For this, the score of each document is also multiplied by its boost value
+ * doc-boost(d).
+ *
+ *
+ * - Lucene is field based, hence each query term applies to a single
+ * field, document length normalization is by the length of the certain field,
+ * and in addition to document boost there are also document fields boosts.
+ *
+ *
+ * - The same field can be added to a document during indexing several times,
+ * and so the boost of that field is the multiplication of the boosts of
+ * the separate additions (or parts) of that field within the document.
+ *
+ *
+ * - At search time users can specify boosts to each query, sub-query, and
+ * each query term, hence the contribution of a query term to the score of
+ * a document is multiplied by the boost of that query term query-boost(q).
+ *
+ *
+ * - A document may match a multi term query without containing all
+ * the terms of that query (this is correct for some of the queries),
+ * and users can further reward documents matching more query terms
+ * through a coordination factor, which is usually larger when
+ * more terms are matched: coord-factor(q,d).
+ *
+ *
+ *
+ * Under the simplifying assumption of a single field in the index,
+ * we get Lucene's Conceptual scoring formula:
+ *
+ *
+ *     score(q,d)  =  coord-factor(q,d) · query-boost(q) ·
+ *                    ( V(q) · V(d) / |V(q)| ) · doc-len-norm(d) · doc-boost(d)
+ *
+ *                                      (Lucene Conceptual Scoring Formula)
+ *
+ * The conceptual formula is a simplification in the sense that (1) terms and documents
+ * are fielded and (2) boosts are usually per query term rather than per query.
+ *
+ *
+ * We now describe how Lucene implements this conceptual scoring formula, and
+ * derive from it Lucene's Practical Scoring Function.
+ *
+ *
+ * For efficient score computation some scoring components
+ * are computed and aggregated in advance:
+ *
+ *
+ * - Query-boost for the query (actually for each query term)
+ * is known when search starts.
+ *
+ *
+ * - Query Euclidean norm |V(q)| can be computed when search starts,
+ * as it is independent of the document being scored.
+ * From a search optimization perspective, it is a valid question
+ * why bother to normalize the query at all, because all
+ * scored documents will be multiplied by the same |V(q)|,
+ * and hence document ranks (their order by score) will not
+ * be affected by this normalization.
+ * There are two good reasons to keep this normalization:
+ *
+ * - Recall that
+ *
+ * Cosine Similarity can be used to find how similar
+ * two documents are. One can use Lucene for e.g.
+ * clustering, and use a document as a query to compute
+ * its similarity to other documents.
+ * In this use case it is important that the score of document d3
+ * for query d1 is comparable to the score of document d3
+ * for query d2. In other words, scores of a document for two
+ * distinct queries should be comparable.
+ * There are other applications that may require this.
+ * And this is exactly what normalizing the query vector V(q)
+ * provides: comparability (to a certain extent) of two or more queries.
+ *
+ *
+ * - Applying query normalization on the scores helps to keep the
+ * scores around the unit vector, hence preventing loss of score data
+ * because of floating point precision limitations.
+ *
+ *
+ *
+ *
+ * - Document length norm doc-len-norm(d) and document
+ * boost doc-boost(d) are known at indexing time.
+ * They are computed in advance and their multiplication
+ * is saved as a single value in the index: norm(d).
+ * (In the equations below, norm(t in d) means norm(field(t) in doc d)
+ * where field(t) is the field associated with term t.)
+ *
+ *
+ *
+ * Lucene's Practical Scoring Function is derived from the above;
+ * it relates to the conceptual formula as follows:
+ *
+ *     score(q,d)  =  coord(q,d) · queryNorm(q) ·
+ *                    ∑ ( tf(t in d) · idf(t)² · t.getBoost() · norm(t,d) )
+ *                    t in q
+ *
+ *                                      (Lucene Practical Scoring Function)
+ *
+ * where
+ *
+ * -
+ *
+ * tf(t in d)
+ * correlates to the term's frequency,
+ * defined as the number of times term t appears in the currently scored document d.
+ * Documents that have more occurrences of a given term receive a higher score.
+ * Note that tf(t in q) is assumed to be 1 and therefore it does not appear in this equation.
+ * However, if a query contains the same term twice, there will be
+ * two term-queries with that same term and hence the computation would still be correct (although
+ * not very efficient).
+ * The default computation for tf(t in d) in
+ * {@link org.apache.lucene.search.DefaultSimilarity#tf(float) DefaultSimilarity} is:
+ *
+ *
+ *     {@link org.apache.lucene.search.DefaultSimilarity#tf(float) tf(t in d)}  =  frequency½
+ *
+ *
+ * -
+ *
+ * idf(t) stands for Inverse Document Frequency. This value
+ * correlates to the inverse of docFreq
+ * (the number of documents in which the term t appears).
+ * This means rarer terms give a higher contribution to the total score.
+ * idf(t) appears for t in both the query and the document,
+ * hence it is squared in the equation.
+ * The default computation for idf(t) in
+ * {@link org.apache.lucene.search.DefaultSimilarity#idf(int, int) DefaultSimilarity} is:
+ *
+ *
+ *     {@link org.apache.lucene.search.DefaultSimilarity#idf(int, int) idf(t)}  =  1 + log ( numDocs / (docFreq + 1) )
+ *
+ *
+ * -
+ *
+ * coord(q,d)
+ * is a score factor based on how many of the query terms are found in the specified document.
+ * Typically, a document that contains more of the query's terms will receive a higher score
+ * than another document with fewer query terms.
+ * This is a search time factor computed in
+ * {@link SimilarityProvider#coord(int, int) coord(q,d)}
+ * by the SimilarityProvider in effect at search time.
+ *
+ *
+ *
+ * -
+ *
+ * queryNorm(q)
+ *
+ * is a normalizing factor used to make scores between queries comparable.
+ * This factor does not affect document ranking (since all ranked documents are multiplied by the same factor),
+ * but rather just attempts to make scores from different queries (or even different indexes) comparable.
+ * This is a search time factor computed by the SimilarityProvider in effect at search time.
+ *
+ * The default computation in
+ * {@link org.apache.lucene.search.DefaultSimilarityProvider#queryNorm(float) DefaultSimilarityProvider}
+ * produces a Euclidean norm:
+ *
+ *
+ *     queryNorm(q)  =  {@link org.apache.lucene.search.DefaultSimilarityProvider#queryNorm(float) queryNorm(sumOfSquaredWeights)}  =  1 / sumOfSquaredWeights½
+ *
+ *
+ * The sum of squared weights (of the query terms) is
+ * computed by the query {@link org.apache.lucene.search.Weight} object.
+ * For example, a {@link org.apache.lucene.search.BooleanQuery}
+ * computes this value as:
+ *
+ *
+ *     {@link org.apache.lucene.search.Weight#sumOfSquaredWeights() sumOfSquaredWeights}  =
+ *         {@link org.apache.lucene.search.Query#getBoost() q.getBoost()}² · ∑ ( idf(t) · t.getBoost() )²
+ *                                                                         t in q
+ *
+ *
+ *
+ *
+ * -
+ *
+ * t.getBoost()
+ * is a search time boost of term t in the query q as
+ * specified in the query text
+ * (see query syntax),
+ * or as set by application calls to
+ * {@link org.apache.lucene.search.Query#setBoost(float) setBoost()}.
+ * Notice that there is really no direct API for accessing the boost of one term in a multi-term query;
+ * rather, multiple terms are represented in a query as multiple
+ * {@link org.apache.lucene.search.TermQuery TermQuery} objects,
+ * and so the boost of a term in the query is accessible by calling the sub-query
+ * {@link org.apache.lucene.search.Query#getBoost() getBoost()}.
+ *
+ *
+ *
+ * -
+ *
+ * norm(t,d) encapsulates a few (indexing time) boost and length factors:
+ *
+ *
+ * - Document boost - set by calling
+ * {@link org.apache.lucene.document.Document#setBoost(float) doc.setBoost()}
+ * before adding the document to the index.
+ *
+ * - Field boost - set by calling
+ * {@link org.apache.lucene.document.Fieldable#setBoost(float) field.setBoost()}
+ * before adding the field to a document.
+ *
+ * - lengthNorm - computed
+ * when the document is added to the index in accordance with the number of tokens
+ * of this field in the document, so that shorter fields contribute more to the score.
+ * LengthNorm is computed by the Similarity class in effect at indexing.
+ *
+ *
+ * The {@link #computeNorm} method is responsible for
+ * combining all of these factors into a single float.
+ *
+ *
+ * When a document is added to the index, all the above factors are multiplied.
+ * If the document has multiple fields with the same name, all their boosts are multiplied together:
+ *
+ *
+ *
+ *     norm(t,d)  =  {@link org.apache.lucene.document.Document#getBoost() doc.getBoost()} · lengthNorm ·
+ *                   ∏ {@link org.apache.lucene.document.Fieldable#getBoost() f.getBoost}()
+ *                   field f in d named as t
+ *
+ *
+ * However the resulting norm value is {@link #encodeNormValue(float) encoded} as a single byte
+ * before being stored.
+ * At search time, the norm byte value is read from the index
+ * {@link org.apache.lucene.store.Directory directory} and
+ * {@link #decodeNormValue(byte) decoded} back to a float norm value.
+ * This encoding/decoding, while reducing index size, comes with the price of
+ * precision loss - it is not guaranteed that decode(encode(x)) = x.
+ * For instance, decode(encode(0.89)) = 0.75.
+ *
+ * Compression of norm values to a single byte saves memory at search time,
+ * because once a field is referenced at search time, its norms - for
+ * all documents - are maintained in memory.
+ *
+ * The rationale supporting such lossy compression of norm values is that,
+ * given the difficulty (and inaccuracy) with which users express their true information
+ * need in a query, only big differences matter.
+ *
+ * Last, note that search time is too late to modify this norm part of scoring, e.g. by
+ * using a different {@link Similarity} for search.
+ *
+ *
+ *
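+ *
+ * A minimal sketch of this round-trip loss, using the
+ * {@link org.apache.lucene.util.SmallFloat} encoding that
+ * {@link #encodeNormValue(float)} and {@link #decodeNormValue(byte)}
+ * delegate to by default:
+ *
+ *   byte b  = SmallFloat.floatToByte315(0.89f); // encode for storage
+ *   float v = SmallFloat.byte315ToFloat(b);     // decodes to 0.75f, not 0.89f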
+ *
+ * @see org.apache.lucene.index.IndexWriterConfig#setSimilarityProvider(SimilarityProvider)
+ * @see IndexSearcher#setSimilarityProvider(SimilarityProvider)
+ */
+public abstract class TFIDFSimilarity extends Similarity implements Serializable {
+
+ /** Computes a score factor based on a term or phrase's frequency in a
+ * document. This value is multiplied by the {@link #idf(int, int)}
+ * factor for each term in the query and these products are then summed to
+ * form the initial score for a document.
+ *
+ * Terms and phrases repeated in a document indicate the topic of the
+ * document, so implementations of this method usually return larger values
+ * when freq is large, and smaller values when freq
+ * is small.
+ *
+ *
+ * The default implementation calls {@link #tf(float)}.
+ *
+ * @param freq the frequency of a term within a document
+ * @return a score factor based on a term's within-document frequency
+ */
+ public float tf(int freq) {
+ return tf((float)freq);
+ }
+
+ /** Computes a score factor based on a term or phrase's frequency in a
+ * document. This value is multiplied by the {@link #idf(int, int)}
+ * factor for each term in the query and these products are then summed to
+ * form the initial score for a document.
+ *
+ *
+ * Terms and phrases repeated in a document indicate the topic of the
+ * document, so implementations of this method usually return larger values
+ * when freq is large, and smaller values when freq
+ * is small.
+ *
+ * @param freq the frequency of a term within a document
+ * @return a score factor based on a term's within-document frequency
+ */
+ public abstract float tf(float freq);
+
+ /**
+ * Computes a score factor for a simple term and returns an explanation
+ * for that score factor.
+ *
+ *
+ * The default implementation uses:
+ *
+ *
+ * idf(docFreq, searcher.maxDoc());
+ *
+ *
+ * Note that {@link IndexSearcher#maxDoc()} is used instead of
+ * {@link org.apache.lucene.index.IndexReader#numDocs() IndexReader#numDocs()} because
+ * {@link IndexSearcher#docFreq(Term)} is also used, and when the latter
+ * is inaccurate, so is {@link IndexSearcher#maxDoc()}, and in the same direction.
+ * In addition, {@link IndexSearcher#maxDoc()} is more efficient to compute.
+ *
+ * @param stats statistics for the term, including its externally computed docFreq
+ * @param searcher the document collection being searched
+ * @return an IDFExplanation object that includes both an idf score factor
+ *         and an explanation for the term.
+ * @throws IOException
+ */
+ public IDFExplanation idfExplain(PerReaderTermState stats, final IndexSearcher searcher) throws IOException {
+ final int df = stats.docFreq();
+ final int max = searcher.maxDoc();
+ final float idf = idf(df, max);
+ return new IDFExplanation() {
+ @Override
+ public String explain() {
+ return "idf(docFreq=" + df +
+ ", maxDocs=" + max + ")";
+ }
+ @Override
+ public float getIdf() {
+ return idf;
+ }};
+ }
+
+ /**
+ * Computes a score factor for a phrase.
+ *
+ *
+ * The default implementation sums the idf factor for
+ * each term in the phrase.
+ *
+ * @param stats statistics for each term in the phrase
+ * @param searcher the document collection being searched
+ * @return an IDFExplanation object that includes both an idf
+ *         score factor for the phrase and an explanation
+ *         for each term.
+ * @throws IOException
+ */
+ public IDFExplanation idfExplain(final PerReaderTermState stats[], IndexSearcher searcher) throws IOException {
+ final int max = searcher.maxDoc();
+ float idf = 0.0f;
+ final StringBuilder exp = new StringBuilder();
+ for (final PerReaderTermState stat : stats ) {
+ final int df = stat.docFreq();
+ idf += idf(df, max);
+ exp.append(" ");
+ exp.append(df);
+ }
+ final float fIdf = idf;
+ return new IDFExplanation() {
+ @Override
+ public float getIdf() {
+ return fIdf;
+ }
+ @Override
+ public String explain() {
+ return exp.toString();
+ }
+ };
+ }
+
+ /** Computes a score factor based on a term's document frequency (the number
+ * of documents which contain the term). This value is multiplied by the
+ * {@link #tf(int)} factor for each term in the query and these products are
+ * then summed to form the initial score for a document.
+ *
+ *
+ * Terms that occur in fewer documents are better indicators of topic, so
+ * implementations of this method usually return larger values for rare terms,
+ * and smaller values for common terms.
+ *
+ * @param docFreq the number of documents which contain the term
+ * @param numDocs the total number of documents in the collection
+ * @return a score factor based on the term's document frequency
+ */
+ public abstract float idf(int docFreq, int numDocs);
+
+ /** Cache of decoded bytes. */
+ private static final float[] NORM_TABLE = new float[256];
+
+ static {
+ for (int i = 0; i < 256; i++)
+ NORM_TABLE[i] = SmallFloat.byte315ToFloat((byte)i);
+ }
+
+ /** Decodes a normalization factor stored in an index.
+ * @see #encodeNormValue(float)
+ */
+ @Override
+ public float decodeNormValue(byte b) {
+ return NORM_TABLE[b & 0xFF]; // & 0xFF maps negative bytes to positive above 127
+ }
+
+ /** Encodes a normalization factor for storage in an index.
+ *
+ *
+ * The encoding uses a three-bit mantissa, a five-bit exponent, and
+ * the zero-exponent point at 15, thus
+ * representing values from around 7x10^9 to 2x10^-9 with about one
+ * significant decimal digit of accuracy. Zero is also represented.
+ * Negative numbers are rounded up to zero. Values too large to represent
+ * are rounded down to the largest representable value. Positive values too
+ * small to represent are rounded up to the smallest positive representable
+ * value.
+ * @see org.apache.lucene.document.Field#setBoost(float)
+ * @see org.apache.lucene.util.SmallFloat
+ */
+ @Override
+ public byte encodeNormValue(float f) {
+ return SmallFloat.floatToByte315(f);
+ }
+
+ @Override
+ public final IDFExplanation computeWeight(IndexSearcher searcher, String fieldName,
+ PerReaderTermState... termStats) throws IOException {
+ return termStats.length == 1
+ ? idfExplain(termStats[0], searcher)
+ : idfExplain(termStats, searcher);
+ }
+
+ @Override
+ public final ExactDocScorer exactDocScorer(Weight weight, String fieldName, AtomicReaderContext context) throws IOException {
+ final byte norms[] = context.reader.norms(fieldName);
+ return norms == null
+ ? new RawExactTFIDFDocScorer(weight.getValue())
+ : new ExactTFIDFDocScorer(weight.getValue(), norms);
+ }
+
+ @Override
+ public final SloppyDocScorer sloppyDocScorer(Weight weight, String fieldName, AtomicReaderContext context) throws IOException {
+ final byte norms[] = context.reader.norms(fieldName);
+ return norms == null
+ ? new RawSloppyTFIDFDocScorer(weight.getValue())
+ : new SloppyTFIDFDocScorer(weight.getValue(), norms);
+ }
+
+ // nocommit: below are specialized classes, we should test if it really helps to avoid the 'if' for omitNorms, etc
+ // nocommit: make SCORE_CACHE_SIZE dynamic when available? (e.g. totalTermFreq / docFreq)
+ // nocommit: make configurable?
+ private final class ExactTFIDFDocScorer extends ExactDocScorer {
+ private final float weightValue;
+ private final byte[] norms;
+ private static final int SCORE_CACHE_SIZE = 32;
+ private float[] scoreCache = new float[SCORE_CACHE_SIZE];
+
+ ExactTFIDFDocScorer(float weightValue, byte norms[]) {
+ this.weightValue = weightValue;
+ this.norms = norms;
+ for (int i = 0; i < SCORE_CACHE_SIZE; i++)
+ scoreCache[i] = tf(i) * weightValue;
+ }
+
+ @Override
+ public float score(int doc, int freq) {
+ float raw = // compute tf(f)*weight
+ freq < SCORE_CACHE_SIZE // check cache
+ ? scoreCache[freq] // cache hit
+ : tf(freq)*weightValue; // cache miss
+
+ return raw * decodeNormValue(norms[doc]); // normalize for field
+ }
+ }
+
+ private final class RawExactTFIDFDocScorer extends ExactDocScorer {
+ private final float weightValue;
+ private static final int SCORE_CACHE_SIZE = 32;
+ private float[] scoreCache = new float[SCORE_CACHE_SIZE];
+
+ RawExactTFIDFDocScorer(float weightValue) {
+ this.weightValue = weightValue;
+ for (int i = 0; i < SCORE_CACHE_SIZE; i++)
+ scoreCache[i] = tf(i) * weightValue;
+ }
+
+ @Override
+ public float score(int doc, int freq) {
+ return freq < SCORE_CACHE_SIZE // check cache
+ ? scoreCache[freq] // cache hit
+ : tf(freq)*weightValue; // cache miss
+ }
+ }
+
+ private final class SloppyTFIDFDocScorer extends SloppyDocScorer {
+ private final float weightValue;
+ private final byte[] norms;
+
+ SloppyTFIDFDocScorer(float weightValue, byte norms[]) {
+ this.weightValue = weightValue;
+ this.norms = norms;
+ }
+
+ @Override
+ public float score(int doc, float freq) {
+ return tf(freq) * weightValue * decodeNormValue(norms[doc]); // compute tf(f)*weight * normalize for field
+ }
+ }
+
+ private final class RawSloppyTFIDFDocScorer extends SloppyDocScorer {
+ private final float weightValue;
+
+ RawSloppyTFIDFDocScorer(float weightValue) {
+ this.weightValue = weightValue;
+ }
+
+ @Override
+ public float score(int doc, float freq) {
+ return tf(freq)*weightValue; // compute tf(f)*weight
+ }
+ }
+}
Property changes on: lucene\src\java\org\apache\lucene\search\TFIDFSimilarity.java
___________________________________________________________________
Added: svn:eol-style
+ native
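As a minimal sketch of how the new class is meant to be extended (the subclass name and tf formula below are hypothetical, chosen only to show the extension point; DefaultSimilarity is retrofitted onto TFIDFSimilarity later in this patch):

    import org.apache.lucene.search.DefaultSimilarity;

    // Hypothetical subclass: dampen tf growth for very frequent terms.
    public class SublinearTfSimilarity extends DefaultSimilarity {
      @Override
      public float tf(float freq) {
        // 1 + ln(freq) grows much more slowly than the default sqrt(freq)
        return freq > 0 ? 1f + (float) Math.log(freq) : 0f;
      }
    }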
Index: lucene/src/java/org/apache/lucene/search/ExactPhraseScorer.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/ExactPhraseScorer.java (revision 1086097)
+++ lucene/src/java/org/apache/lucene/search/ExactPhraseScorer.java (working copy)
@@ -21,14 +21,9 @@
import java.util.Arrays;
import org.apache.lucene.index.*;
+import org.apache.lucene.index.IndexReader.AtomicReaderContext;
final class ExactPhraseScorer extends Scorer {
- private final byte[] norms;
- private final float value;
-
- private static final int SCORE_CACHE_SIZE = 32;
- private final float[] scoreCache = new float[SCORE_CACHE_SIZE];
-
private final int endMinus1;
private final static int CHUNK = 4096;
@@ -60,14 +55,12 @@
private int docID = -1;
private int freq;
- private final Similarity similarity;
+ private final Similarity.ExactDocScorer docScorer;
ExactPhraseScorer(Weight weight, PhraseQuery.PostingsAndFreq[] postings,
- Similarity similarity, byte[] norms) throws IOException {
+ Similarity similarity, String field, AtomicReaderContext context) throws IOException {
super(weight);
- this.similarity = similarity;
- this.norms = norms;
- this.value = weight.getValue();
+ this.docScorer = similarity.exactDocScorer(weight, field, context);
chunkStates = new ChunkState[postings.length];
@@ -88,10 +81,6 @@
return;
}
}
-
- for (int i = 0; i < SCORE_CACHE_SIZE; i++) {
- scoreCache[i] = similarity.tf((float) i) * value;
- }
}
@Override
@@ -206,13 +195,7 @@
@Override
public float score() throws IOException {
- final float raw; // raw score
- if (freq < SCORE_CACHE_SIZE) {
- raw = scoreCache[freq];
- } else {
- raw = similarity.tf((float) freq) * value;
- }
- return norms == null ? raw : raw * similarity.decodeNormValue(norms[docID]); // normalize
+ return docScorer.score(docID, freq);
}
private int phraseFreq() throws IOException {
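The net effect of this hunk, and of the TermScorer one below, is that per-document scoring is delegated to a Similarity.ExactDocScorer instead of being inlined in each scorer. A minimal usage sketch, assuming weight, field, and context are in scope as in the constructors above:

    // Obtain the per-segment scorer once, at construction time...
    Similarity.ExactDocScorer docScorer = similarity.exactDocScorer(weight, field, context);
    // ...then score() becomes a single delegation, replacing the cached tf()*weight*norm math:
    float score = docScorer.score(docID, freq);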
Index: lucene/src/java/org/apache/lucene/search/TermScorer.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/TermScorer.java (revision 1086097)
+++ lucene/src/java/org/apache/lucene/search/TermScorer.java (working copy)
@@ -20,25 +20,22 @@
import java.io.IOException;
import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.IndexReader.AtomicReaderContext;
/** Expert: A Scorer for documents matching a Term.
*/
final class TermScorer extends Scorer {
private DocsEnum docsEnum;
- private byte[] norms;
- private float weightValue;
private int doc = -1;
private int freq;
private int pointer;
private int pointerMax;
- private static final int SCORE_CACHE_SIZE = 32;
- private float[] scoreCache = new float[SCORE_CACHE_SIZE];
private int[] docs;
private int[] freqs;
private final DocsEnum.BulkReadResult bulkResult;
- private final Similarity similarity;
+ private final Similarity.ExactDocScorer docScorer;
/**
* Construct a TermScorer.
@@ -53,16 +50,11 @@
* @param norms
* The field norms of the document fields for the Term.
*/
- TermScorer(Weight weight, DocsEnum td, Similarity similarity, byte[] norms) {
+ TermScorer(Weight weight, DocsEnum td, Similarity similarity, String fieldName, AtomicReaderContext context) throws IOException {
super(weight);
- this.similarity = similarity;
+ this.docScorer = similarity.exactDocScorer(weight, fieldName, context);
this.docsEnum = td;
- this.norms = norms;
- this.weightValue = weight.getValue();
bulkResult = td.getBulkResult();
-
- for (int i = 0; i < SCORE_CACHE_SIZE; i++)
- scoreCache[i] = similarity.tf(i) * weightValue;
}
@Override
@@ -134,12 +126,7 @@
@Override
public float score() {
assert doc != NO_MORE_DOCS;
- float raw = // compute tf(f)*weight
- freq < SCORE_CACHE_SIZE // check cache
- ? scoreCache[freq] // cache hit
- : similarity.tf(freq)*weightValue; // cache miss
-
- return norms == null ? raw : raw * similarity.decodeNormValue(norms[doc]); // normalize for field
+ return docScorer.score(doc, freq);
}
/**
Index: lucene/src/java/org/apache/lucene/search/DefaultSimilarity.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/DefaultSimilarity.java (revision 1086097)
+++ lucene/src/java/org/apache/lucene/search/DefaultSimilarity.java (working copy)
@@ -20,7 +20,7 @@
*/
/** Expert: Default scoring implementation. */
-public class DefaultSimilarity extends Similarity {
+public class DefaultSimilarity extends TFIDFSimilarity {
/** Implemented as
* state.getBoost()*lengthNorm(numTerms), where
@@ -39,7 +39,7 @@
numTerms = state.getLength();
return state.getBoost() * ((float) (1.0 / Math.sqrt(numTerms)));
}
-
+
/** Implemented as sqrt(freq). */
@Override
public float tf(float freq) {
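The only change to this file is the supertype (plus trailing whitespace). As a quick sanity check of the unchanged math, assuming the stock formulas tf = sqrt(freq) and idf = log(numDocs/(docFreq+1)) + 1 still hold at this revision (the numbers are hypothetical):

    // A term in 9 of 1000 documents, occurring 4 times in the scored document:
    float tf  = (float) Math.sqrt(4);                      // = 2.0
    float idf = (float) (Math.log(1000.0 / (9 + 1)) + 1);  // = ln(100) + 1 ≈ 5.605
    float partial = tf * idf;                              // ≈ 11.21, before boosts and norms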
Index: lucene/src/java/org/apache/lucene/util/PerReaderTermState.java
===================================================================
--- lucene/src/java/org/apache/lucene/util/PerReaderTermState.java (revision 1086097)
+++ lucene/src/java/org/apache/lucene/util/PerReaderTermState.java (working copy)
@@ -43,6 +43,7 @@
public final ReaderContext topReaderContext; // for asserting!
private final TermState[] states;
private int docFreq;
+ private long totalTermFreq;
/**
* Creates an empty {@link PerReaderTermState} from a {@link ReaderContext}
@@ -64,9 +65,9 @@
* Creates a {@link PerReaderTermState} with an initial {@link TermState},
* {@link IndexReader} pair.
*/
- public PerReaderTermState(ReaderContext context, TermState state, int ord, int docFreq) {
+ public PerReaderTermState(ReaderContext context, TermState state, int ord, int docFreq, long totalTermFreq) {
this(context);
- register(state, ord, docFreq);
+ register(state, ord, docFreq, totalTermFreq);
}
/**
@@ -92,7 +93,7 @@
final TermsEnum termsEnum = terms.getThreadTermsEnum(); // thread-private don't share!
if (SeekStatus.FOUND == termsEnum.seek(bytes, cache)) {
final TermState termState = termsEnum.termState();
- perReaderTermState.register(termState, leaves[i].ord, termsEnum.docFreq());
+ perReaderTermState.register(termState, leaves[i].ord, termsEnum.docFreq(), termsEnum.totalTermFreq());
}
}
}
@@ -113,12 +114,16 @@
* Registers and associates a {@link TermState} with an leaf ordinal. The leaf ordinal
* should be derived from a {@link ReaderContext}'s leaf ord.
*/
- public void register(TermState state, final int ord, final int docFreq) {
+ public void register(TermState state, final int ord, final int docFreq, final long totalTermFreq) {
assert state != null : "state must not be null";
assert ord >= 0 && ord < states.length;
assert states[ord] == null : "state for ord: " + ord
+ " already registered";
this.docFreq += docFreq;
+ if (this.totalTermFreq >= 0 && totalTermFreq >= 0)
+ this.totalTermFreq += totalTermFreq;
+ else
+ this.totalTermFreq = -1;
states[ord] = state;
}
@@ -138,11 +143,27 @@
/**
* Returns the accumulated document frequency of all {@link TermState}
- * instances passed to {@link #register(TermState, int, int)}.
+ * instances passed to {@link #register(TermState, int, int, long)}.
* @return the accumulated document frequency of all {@link TermState}
- * instances passed to {@link #register(TermState, int, int)}.
+ * instances passed to {@link #register(TermState, int, int, long)}.
*/
public int docFreq() {
return docFreq;
}
+
+ /**
+ * Returns the accumulated term frequency of all {@link TermState}
+ * instances passed to {@link #register(TermState, int, int, long)}.
+ * @return the accumulated term frequency of all {@link TermState}
+ * instances passed to {@link #register(TermState, int, int, long)}.
+ */
+ public long totalTermFreq() {
+ return totalTermFreq;
+ }
+
+ /** expert: only available for queries that want to lie about docfreq
+ * @lucene.internal */
+ public void setDocFreq(int docFreq) {
+ this.docFreq = docFreq;
+ }
}
\ No newline at end of file
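The register() change accumulates totalTermFreq across segments and collapses to -1 as soon as any segment reports it as unsupported. That sentinel rule in isolation (helper name hypothetical):

    // Hypothetical helper mirroring the sentinel logic in register():
    static long accumulateTotalTermFreq(long runningTotal, long segmentTotal) {
      // -1 means "unknown"; once any input is unknown, the sum is unknown.
      return (runningTotal >= 0 && segmentTotal >= 0) ? runningTotal + segmentTotal : -1;
    }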
Index: lucene/contrib/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java
===================================================================
--- lucene/contrib/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java (revision 1086097)
+++ lucene/contrib/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java (working copy)
@@ -21,6 +21,7 @@
import org.apache.lucene.search.DefaultSimilarity;
import org.apache.lucene.search.DefaultSimilarityProvider;
import org.apache.lucene.search.Similarity;
+import org.apache.lucene.search.TFIDFSimilarity;
import org.apache.lucene.search.SimilarityProvider;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.index.FieldInvertState;
@@ -170,8 +171,8 @@
SweetSpotSimilarity ss = new SweetSpotSimilarity();
- Similarity d = new DefaultSimilarity();
- Similarity s = ss;
+ TFIDFSimilarity d = new DefaultSimilarity();
+ TFIDFSimilarity s = ss;
// tf equal
@@ -222,7 +223,7 @@
};
ss.setHyperbolicTfFactors(3.3f, 7.7f, Math.E, 5.0f);
- Similarity s = ss;
+ TFIDFSimilarity s = ss;
for (int i = 1; i <=1000; i++) {
assertTrue("MIN tf: i="+i+" : s="+s.tf(i),
Index: lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java
===================================================================
--- lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java (revision 1086097)
+++ lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java (working copy)
@@ -44,6 +44,7 @@
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Similarity;
+import org.apache.lucene.search.TFIDFSimilarity;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.FSDirectory;
@@ -284,7 +285,8 @@
/**
* For idf() calculations.
*/
- private Similarity similarity;// = new DefaultSimilarity();
+ // nocommit? this is pretty much wired to tf-idf things...
+ private TFIDFSimilarity similarity;// = new DefaultSimilarity();
/**
* IndexReader to use
@@ -319,17 +321,17 @@
this(ir, new DefaultSimilarity());
}
- public MoreLikeThis(IndexReader ir, Similarity sim){
+ public MoreLikeThis(IndexReader ir, TFIDFSimilarity sim){
this.ir = ir;
this.similarity = sim;
}
- public Similarity getSimilarity() {
+ public TFIDFSimilarity getSimilarity() {
return similarity;
}
- public void setSimilarity(Similarity similarity) {
+ public void setSimilarity(TFIDFSimilarity similarity) {
this.similarity = similarity;
}
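With the field and constructors narrowed to TFIDFSimilarity, existing callers compile unchanged as long as they pass a tf-idf implementation. A minimal usage sketch (reader is assumed to be an open IndexReader):

    MoreLikeThis mlt = new MoreLikeThis(reader, new DefaultSimilarity());
    // or the one-arg form, which defaults to DefaultSimilarity as shown above:
    MoreLikeThis mltDefault = new MoreLikeThis(reader);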
Index: lucene/contrib/queries/src/java/org/apache/lucene/search/FuzzyLikeThisQuery.java
===================================================================
--- lucene/contrib/queries/src/java/org/apache/lucene/search/FuzzyLikeThisQuery.java (revision 1086097)
+++ lucene/contrib/queries/src/java/org/apache/lucene/search/FuzzyLikeThisQuery.java (working copy)
@@ -51,7 +51,8 @@
*/
public class FuzzyLikeThisQuery extends Query
{
- static Similarity sim=new DefaultSimilarity();
+ //nocommit? this query is pretty much hardcoded at TF/IDF
+ static TFIDFSimilarity sim=new DefaultSimilarity();
Query rewrittenQuery=null;
ArrayList fieldVals=new ArrayList();
Analyzer analyzer;