WARNING: This is
* exceptionally RAM intensive: it makes no effort to
* compress the postings data, storing terms as separate
- * byte[] and postings as separate int[], but as a result it
+ * byte[] and postings as separate int[], but as a result it
* gives substantial increase in search performance.
*
*
This postings format supports {@link TermsEnum#ord}
@@ -89,7 +88,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
public DirectPostingsFormat() {
this(DEFAULT_MIN_SKIP_COUNT, DEFAULT_LOW_FREQ_CUTOFF);
}
-
+
/** minSkipCount is how many terms in a row must have the
* same prefix before we put a skip pointer down. Terms
* with docFreq <= lowFreqCutoff will use a single int[]
@@ -100,7 +99,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
this.minSkipCount = minSkipCount;
this.lowFreqCutoff = lowFreqCutoff;
}
-
+
@Override
public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
return PostingsFormat.forName("Lucene50").fieldsConsumer(state);
@@ -161,7 +160,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
return sizeInBytes;
}
-
+
@Override
public Collection getChildResources() {
return Accountables.namedAccountables("field", fields);
@@ -206,9 +205,10 @@ public final class DirectPostingsFormat extends PostingsFormat {
@Override
public long ramBytesUsed() {
return BASE_RAM_BYTES_USED +
- ((postings!=null) ? RamUsageEstimator.sizeOf(postings) : 0) +
+ ((postings!=null) ? RamUsageEstimator.sizeOf(postings) : 0) +
((payloads!=null) ? RamUsageEstimator.sizeOf(payloads) : 0);
}
+
}
// TODO: maybe specialize into prx/no-prx/no-frq cases?
@@ -232,31 +232,32 @@ public final class DirectPostingsFormat extends PostingsFormat {
@Override
public long ramBytesUsed() {
- long sizeInBytes = BASE_RAM_BYTES_USED;
- sizeInBytes += (docIDs!=null)? RamUsageEstimator.sizeOf(docIDs) : 0;
- sizeInBytes += (freqs!=null)? RamUsageEstimator.sizeOf(freqs) : 0;
-
- if(positions != null) {
- sizeInBytes += RamUsageEstimator.shallowSizeOf(positions);
- for(int[] position : positions) {
- sizeInBytes += (position!=null) ? RamUsageEstimator.sizeOf(position) : 0;
- }
- }
-
- if (payloads != null) {
- sizeInBytes += RamUsageEstimator.shallowSizeOf(payloads);
- for(byte[][] payload : payloads) {
- if(payload != null) {
- sizeInBytes += RamUsageEstimator.shallowSizeOf(payload);
- for(byte[] pload : payload) {
- sizeInBytes += (pload!=null) ? RamUsageEstimator.sizeOf(pload) : 0;
- }
- }
- }
- }
-
- return sizeInBytes;
+ long sizeInBytes = BASE_RAM_BYTES_USED;
+ sizeInBytes += (docIDs!=null)? RamUsageEstimator.sizeOf(docIDs) : 0;
+ sizeInBytes += (freqs!=null)? RamUsageEstimator.sizeOf(freqs) : 0;
+
+ if(positions != null) {
+ sizeInBytes += RamUsageEstimator.shallowSizeOf(positions);
+ for(int[] position : positions) {
+ sizeInBytes += (position!=null) ? RamUsageEstimator.sizeOf(position) : 0;
+ }
+ }
+
+ if (payloads != null) {
+ sizeInBytes += RamUsageEstimator.shallowSizeOf(payloads);
+ for(byte[][] payload : payloads) {
+ if(payload != null) {
+ sizeInBytes += RamUsageEstimator.shallowSizeOf(payload);
+ for(byte[] pload : payload) {
+ sizeInBytes += (pload!=null) ? RamUsageEstimator.sizeOf(pload) : 0;
+ }
+ }
+ }
+ }
+
+ return sizeInBytes;
}
+
}
private final byte[] termBytes;
@@ -313,7 +314,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
terms = new TermAndSkip[numTerms];
termOffsets = new int[1+numTerms];
-
+
byte[] termBytes = new byte[1024];
this.minSkipCount = minSkipCount;
@@ -325,7 +326,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
BytesRef term;
DocsEnum docsEnum = null;
- DocsAndPositionsEnum docsAndPositionsEnum = null;
+ DocsEnum docsAndPositionsEnum = null;
final TermsEnum termsEnum = termsIn.iterator(null);
int termOffset = 0;
@@ -356,7 +357,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
termOffsets[count+1] = termOffset;
if (hasPos) {
- docsAndPositionsEnum = termsEnum.docsAndPositions(null, docsAndPositionsEnum);
+ docsAndPositionsEnum = termsEnum.docs(null, docsAndPositionsEnum, DocsEnum.FLAG_ALL);
} else {
docsEnum = termsEnum.docs(null, docsEnum);
}
@@ -412,7 +413,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
final int[] postings = scratch.get();
-
+
ent = new LowFreqTerm(postings, payloads, docFreq, (int) totalTermFreq);
} else {
final int[] docs = new int[docFreq];
@@ -524,14 +525,14 @@ public final class DirectPostingsFormat extends PostingsFormat {
sizeInBytes += ((skips!=null) ? RamUsageEstimator.sizeOf(skips) : 0);
sizeInBytes += ((skipOffsets!=null) ? RamUsageEstimator.sizeOf(skipOffsets) : 0);
sizeInBytes += ((sameCounts!=null) ? RamUsageEstimator.sizeOf(sameCounts) : 0);
-
+
if(terms!=null) {
sizeInBytes += RamUsageEstimator.shallowSizeOf(terms);
for(TermAndSkip termAndSkip : terms) {
sizeInBytes += (termAndSkip!=null) ? termAndSkip.ramBytesUsed() : 0;
}
}
-
+
return sizeInBytes;
}
@@ -547,7 +548,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
int upto = termOffsets[ord];
final int termLen = termOffsets[1+ord] - upto;
int otherUpto = other.offset;
-
+
final int stop = upto + Math.min(termLen, other.length);
while (upto < stop) {
int diff = (termBytes[upto++] & 0xFF) - (otherBytes[otherUpto++] & 0xFF);
@@ -555,7 +556,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
return diff;
}
}
-
+
// One is a prefix of the other, or, they are equal:
return termLen - other.length;
}
@@ -707,7 +708,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
public boolean hasPositions() {
return hasPos;
}
-
+
@Override
public boolean hasPayloads() {
return hasPayloads;
@@ -855,10 +856,26 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) {
+ public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
// TODO: implement reuse
// it's hairy!
+ if ((flags & DocsEnum.FLAG_POSITIONS) >= DocsEnum.FLAG_POSITIONS) {
+ if (!hasPos) {
+ return null;
+ }
+
+ if (terms[termOrd] instanceof LowFreqTerm) {
+ final LowFreqTerm term = ((LowFreqTerm) terms[termOrd]);
+ final int[] postings = term.postings;
+ final byte[] payloads = term.payloads;
+ return new LowFreqDocsAndPositionsEnum(liveDocs, hasOffsets, hasPayloads).reset(postings, payloads);
+ } else {
+ final HighFreqTerm term = (HighFreqTerm) terms[termOrd];
+ return new HighFreqDocsAndPositionsEnum(liveDocs, hasOffsets).reset(term.docIDs, term.freqs, term.positions, term.payloads);
+ }
+ }
+
if (terms[termOrd] instanceof LowFreqTerm) {
final int[] postings = ((LowFreqTerm) terms[termOrd]).postings;
if (hasFreq) {
@@ -927,25 +944,6 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
}
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) {
- if (!hasPos) {
- return null;
- }
-
- // TODO: implement reuse
- // it's hairy!
-
- if (terms[termOrd] instanceof LowFreqTerm) {
- final LowFreqTerm term = ((LowFreqTerm) terms[termOrd]);
- final int[] postings = term.postings;
- final byte[] payloads = term.payloads;
- return new LowFreqDocsAndPositionsEnum(liveDocs, hasOffsets, hasPayloads).reset(postings, payloads);
- } else {
- final HighFreqTerm term = (HighFreqTerm) terms[termOrd];
- return new HighFreqDocsAndPositionsEnum(liveDocs, hasOffsets).reset(term.docIDs, term.freqs, term.positions, term.payloads);
- }
- }
}
private final class DirectIntersectTermsEnum extends TermsEnum {
@@ -1203,7 +1201,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
// if (DEBUG) {
// System.out.println(" term=" + new BytesRef(termBytes, termOffset, termLength).utf8ToString() + " skips=" + Arrays.toString(skips));
// }
-
+
assert termOrd < state.changeOrd;
assert stateUpto <= termLength: "term.length=" + termLength + "; stateUpto=" + stateUpto;
@@ -1336,7 +1334,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
compiledAutomaton.automaton.initTransition(nextState, states[stateUpto].transition);
states[stateUpto].transitionUpto = -1;
states[stateUpto].transitionMax = -1;
-
+
if (stateUpto == termLength) {
// if (DEBUG) {
// System.out.println(" term ends after push");
@@ -1456,6 +1454,20 @@ public final class DirectPostingsFormat extends PostingsFormat {
public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) {
// TODO: implement reuse
// it's hairy!
+ if ((flags & DocsEnum.FLAG_POSITIONS) >= DocsEnum.FLAG_POSITIONS) {
+ if (!hasPos) {
+ return null;
+ }
+ if (terms[termOrd] instanceof LowFreqTerm) {
+ final LowFreqTerm term = ((LowFreqTerm) terms[termOrd]);
+ final int[] postings = term.postings;
+ final byte[] payloads = term.payloads;
+ return new LowFreqDocsAndPositionsEnum(liveDocs, hasOffsets, hasPayloads).reset(postings, payloads);
+ } else {
+ final HighFreqTerm term = (HighFreqTerm) terms[termOrd];
+ return new HighFreqDocsAndPositionsEnum(liveDocs, hasOffsets).reset(term.docIDs, term.freqs, term.positions, term.payloads);
+ }
+ }
if (terms[termOrd] instanceof LowFreqTerm) {
final int[] postings = ((LowFreqTerm) terms[termOrd]).postings;
@@ -1485,26 +1497,6 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
@Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) {
- if (!hasPos) {
- return null;
- }
-
- // TODO: implement reuse
- // it's hairy!
-
- if (terms[termOrd] instanceof LowFreqTerm) {
- final LowFreqTerm term = ((LowFreqTerm) terms[termOrd]);
- final int[] postings = term.postings;
- final byte[] payloads = term.payloads;
- return new LowFreqDocsAndPositionsEnum(liveDocs, hasOffsets, hasPayloads).reset(postings, payloads);
- } else {
- final HighFreqTerm term = (HighFreqTerm) terms[termOrd];
- return new HighFreqDocsAndPositionsEnum(liveDocs, hasOffsets).reset(term.docIDs, term.freqs, term.positions, term.payloads);
- }
- }
-
- @Override
public SeekStatus seekCeil(BytesRef term) {
throw new UnsupportedOperationException();
}
@@ -1573,12 +1565,43 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
@Override
+ public int nextPosition() throws IOException {
+ assert false : "Shouldn't be calling nextPosition() if you haven't asked for positions";
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public int advance(int target) throws IOException {
// Linear scan, but this is low-freq term so it won't
// be costly:
return slowAdvance(target);
}
-
+
@Override
public long cost() {
return postings.length;
@@ -1641,12 +1664,43 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
@Override
+ public int nextPosition() throws IOException {
+ assert false : "Shouldn't be calling nextPosition() if you haven't asked for positions";
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public int advance(int target) throws IOException {
// Linear scan, but this is low-freq term so it won't
// be costly:
return slowAdvance(target);
}
-
+
@Override
public long cost() {
return postings.length / 2;
@@ -1688,7 +1742,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
// System.out.println(" nextDoc freq=" + freq + " upto=" + upto + " vs " + postings.length);
// }
if (liveDocs == null) {
- if (upto < postings.length) {
+ if (upto < postings.length) {
freq = postings[upto+1];
assert freq > 0;
return postings[upto];
@@ -1725,12 +1779,43 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
@Override
+ public int nextPosition() throws IOException {
+ assert false : "should be using LowFreqDocsAndPositionsEnum";
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public int advance(int target) throws IOException {
// Linear scan, but this is low-freq term so it won't
// be costly:
return slowAdvance(target);
}
-
+
@Override
public long cost() {
// TODO: could do a better estimate
@@ -1738,7 +1823,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
}
- private final static class LowFreqDocsAndPositionsEnum extends DocsAndPositionsEnum {
+ private final static class LowFreqDocsAndPositionsEnum extends DocsEnum {
private int[] postings;
private final Bits liveDocs;
private final int posMult;
@@ -1749,6 +1834,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
private int docID;
private int freq;
private int skipPositions;
+ private int pos;
private int startOffset;
private int endOffset;
private int lastPayloadOffset;
@@ -1773,10 +1859,11 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
}
- public DocsAndPositionsEnum reset(int[] postings, byte[] payloadBytes) {
+ public DocsEnum reset(int[] postings, byte[] payloadBytes) {
this.postings = postings;
upto = 0;
skipPositions = 0;
+ pos = -1;
startOffset = -1;
endOffset = -1;
docID = -1;
@@ -1787,6 +1874,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
@Override
public int nextDoc() {
+ pos = -1;
if (hasPayloads) {
for(int i=0;i 0;
skipPositions--;
- final int pos = postings[upto++];
+ pos = postings[upto++];
if (hasOffsets) {
startOffset = postings[upto++];
endOffset = postings[upto++];
@@ -1859,6 +1947,16 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
@Override
+ public int startPosition() throws IOException {
+ return pos;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return pos;
+ }
+
+ @Override
public int startOffset() {
return startOffset;
}
@@ -1884,7 +1982,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
return null;
}
}
-
+
@Override
public long cost() {
// TODO: could do a better estimate
@@ -1957,6 +2055,36 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
@Override
+ public int nextPosition() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
public int advance(int target) {
/*
upto++;
@@ -2063,7 +2191,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
return docID = docIDs[upto];
}
}
-
+
@Override
public long cost() {
return docIDs.length;
@@ -2071,7 +2199,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
// TODO: specialize offsets and not
- private final static class HighFreqDocsAndPositionsEnum extends DocsAndPositionsEnum {
+ private final static class HighFreqDocsAndPositionsEnum extends DocsEnum {
private int[] docIDs;
private int[] freqs;
private int[][] positions;
@@ -2106,7 +2234,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
return liveDocs;
}
- public DocsAndPositionsEnum reset(int[] docIDs, int[] freqs, int[][] positions, byte[][][] payloads) {
+ public DocsEnum reset(int[] docIDs, int[] freqs, int[][] positions, byte[][][] payloads) {
this.docIDs = docIDs;
this.freqs = freqs;
this.positions = positions;
@@ -2120,7 +2248,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
upto++;
if (liveDocs == null) {
if (upto < docIDs.length) {
- posUpto = -posJump;
+ posUpto = -posJump;
curPositions = positions[upto];
return docID = docIDs[upto];
}
@@ -2151,10 +2279,21 @@ public final class DirectPostingsFormat extends PostingsFormat {
@Override
public int nextPosition() {
posUpto += posJump;
+ assert posUpto < curPositions.length;
+ return curPositions[posUpto];
+ }
+
+ @Override
+ public int startPosition() throws IOException {
return curPositions[posUpto];
}
@Override
+ public int endPosition() throws IOException {
+ return startPosition();
+ }
+
+ @Override
public int startOffset() {
if (hasOffsets) {
return curPositions[posUpto+1];
@@ -2301,7 +2440,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
return payload;
}
}
-
+
@Override
public long cost() {
return docIDs.length;
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java
index b5030ce..8db0154 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java
@@ -32,7 +32,6 @@ import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.codecs.FieldsProducer;
import org.apache.lucene.codecs.PostingsReaderBase;
import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
@@ -433,15 +432,6 @@ public class FSTOrdTermsReader extends FieldsProducer {
return postingsReader.docs(fieldInfo, state, liveDocs, reuse, flags);
}
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- if (!hasPositions()) {
- return null;
- }
- decodeMetaData();
- return postingsReader.docsAndPositions(fieldInfo, state, liveDocs, reuse, flags);
- }
-
// TODO: this can be achieved by making use of Util.getByOutput()
// and should have related tests
@Override
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java
index 23065b2..11c90e9 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java
@@ -31,7 +31,6 @@ import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.codecs.FieldsProducer;
import org.apache.lucene.codecs.PostingsReaderBase;
import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
@@ -297,15 +296,6 @@ public class FSTTermsReader extends FieldsProducer {
}
@Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- if (!hasPositions()) {
- return null;
- }
- decodeMetaData();
- return postingsReader.docsAndPositions(fieldInfo, state, liveDocs, reuse, flags);
- }
-
- @Override
public void seekExact(long ord) throws IOException {
throw new UnsupportedOperationException();
}
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesProducer.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesProducer.java
index f653606..1f4be0f 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesProducer.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesProducer.java
@@ -31,7 +31,6 @@ import org.apache.lucene.codecs.DocValuesProducer;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.DocValues;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
@@ -897,9 +896,5 @@ class MemoryDocValuesProducer extends DocValuesProducer {
throw new UnsupportedOperationException();
}
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- throw new UnsupportedOperationException();
- }
}
}
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
index c7ce7e1..00636be 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
@@ -31,7 +31,6 @@ import org.apache.lucene.codecs.FieldsProducer;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.codecs.TermStats;
import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
@@ -318,7 +317,7 @@ public final class MemoryPostingsFormat extends PostingsFormat {
long sumTotalTermFreq = 0;
long sumDocFreq = 0;
DocsEnum docsEnum = null;
- DocsAndPositionsEnum posEnum = null;
+ DocsEnum posEnum = null;
int enumFlags;
IndexOptions indexOptions = fieldInfo.getIndexOptions();
@@ -333,15 +332,16 @@ public final class MemoryPostingsFormat extends PostingsFormat {
enumFlags = DocsEnum.FLAG_FREQS;
} else if (writeOffsets == false) {
if (writePayloads) {
- enumFlags = DocsAndPositionsEnum.FLAG_PAYLOADS;
- } else {
- enumFlags = 0;
+ enumFlags = DocsEnum.FLAG_PAYLOADS;
+ }
+ else {
+ enumFlags = DocsEnum.FLAG_POSITIONS;
}
} else {
if (writePayloads) {
- enumFlags = DocsAndPositionsEnum.FLAG_PAYLOADS | DocsAndPositionsEnum.FLAG_OFFSETS;
+ enumFlags = DocsEnum.FLAG_PAYLOADS | DocsEnum.FLAG_OFFSETS;
} else {
- enumFlags = DocsAndPositionsEnum.FLAG_OFFSETS;
+ enumFlags = DocsEnum.FLAG_OFFSETS;
}
}
@@ -353,7 +353,7 @@ public final class MemoryPostingsFormat extends PostingsFormat {
termsWriter.postingsWriter.reset();
if (writePositions) {
- posEnum = termsEnum.docsAndPositions(null, posEnum, enumFlags);
+ posEnum = termsEnum.docs(null, posEnum, enumFlags);
docsEnum = posEnum;
} else {
docsEnum = termsEnum.docs(null, docsEnum, enumFlags);
@@ -545,14 +545,44 @@ public final class MemoryPostingsFormat extends PostingsFormat {
public int freq() {
return freq;
}
-
+
+ @Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
@Override
public long cost() {
return numDocs;
}
}
- private final static class FSTDocsAndPositionsEnum extends DocsAndPositionsEnum {
+ private final static class FSTDocsAndPositionsEnum extends DocsEnum {
private final boolean storePayloads;
private byte[] buffer = new byte[16];
private final ByteArrayDataInput in = new ByteArrayDataInput(buffer);
@@ -701,6 +731,16 @@ public final class MemoryPostingsFormat extends PostingsFormat {
}
@Override
+ public int startPosition() throws IOException {
+ return pos;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return pos;
+ }
+
+ @Override
public int startOffset() {
return startOffset;
}
@@ -808,6 +848,26 @@ public final class MemoryPostingsFormat extends PostingsFormat {
@Override
public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) {
+
+ if ((flags & DocsEnum.FLAG_POSITIONS) >= DocsEnum.FLAG_POSITIONS) {
+ if (field.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
+ return null;
+ }
+ boolean hasOffsets = field.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
+ decodeMetaData();
+ FSTDocsAndPositionsEnum docsAndPositionsEnum;
+ if (reuse == null || !(reuse instanceof FSTDocsAndPositionsEnum)) {
+ docsAndPositionsEnum = new FSTDocsAndPositionsEnum(field.hasPayloads(), hasOffsets);
+ } else {
+ docsAndPositionsEnum = (FSTDocsAndPositionsEnum) reuse;
+ if (!docsAndPositionsEnum.canReuse(field.hasPayloads(), hasOffsets)) {
+ docsAndPositionsEnum = new FSTDocsAndPositionsEnum(field.hasPayloads(), hasOffsets);
+ }
+ }
+ //System.out.println("D&P reset this=" + this);
+ return docsAndPositionsEnum.reset(postingsSpare, liveDocs, docFreq);
+ }
+
decodeMetaData();
FSTDocsEnum docsEnum;
@@ -823,27 +883,6 @@ public final class MemoryPostingsFormat extends PostingsFormat {
}
@Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) {
-
- boolean hasOffsets = field.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
- if (field.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
- return null;
- }
- decodeMetaData();
- FSTDocsAndPositionsEnum docsAndPositionsEnum;
- if (reuse == null || !(reuse instanceof FSTDocsAndPositionsEnum)) {
- docsAndPositionsEnum = new FSTDocsAndPositionsEnum(field.hasPayloads(), hasOffsets);
- } else {
- docsAndPositionsEnum = (FSTDocsAndPositionsEnum) reuse;
- if (!docsAndPositionsEnum.canReuse(field.hasPayloads(), hasOffsets)) {
- docsAndPositionsEnum = new FSTDocsAndPositionsEnum(field.hasPayloads(), hasOffsets);
- }
- }
- //System.out.println("D&P reset this=" + this);
- return docsAndPositionsEnum.reset(postingsSpare, liveDocs, docFreq);
- }
-
- @Override
public BytesRef term() {
return current.input;
}
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsReader.java
index 3a7591c..ba415a3 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsReader.java
@@ -17,16 +17,6 @@ package org.apache.lucene.codecs.simpletext;
* limitations under the License.
*/
-import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.DOC;
-import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.END;
-import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.END_OFFSET;
-import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.FIELD;
-import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.FREQ;
-import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.PAYLOAD;
-import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.POS;
-import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.START_OFFSET;
-import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.TERM;
-
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Collection;
@@ -37,7 +27,6 @@ import java.util.Map;
import java.util.TreeMap;
import org.apache.lucene.codecs.FieldsProducer;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
@@ -68,11 +57,21 @@ import org.apache.lucene.util.fst.PairOutputs;
import org.apache.lucene.util.fst.PositiveIntOutputs;
import org.apache.lucene.util.fst.Util;
+import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.DOC;
+import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.END;
+import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.END_OFFSET;
+import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.FIELD;
+import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.FREQ;
+import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.PAYLOAD;
+import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.POS;
+import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.START_OFFSET;
+import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.TERM;
+
class SimpleTextFieldsReader extends FieldsProducer {
private static final long BASE_RAM_BYTES_USED =
- RamUsageEstimator.shallowSizeOfInstance(SimpleTextFieldsReader.class)
- + RamUsageEstimator.shallowSizeOfInstance(TreeMap.class);
+ RamUsageEstimator.shallowSizeOfInstance(SimpleTextFieldsReader.class)
+ + RamUsageEstimator.shallowSizeOfInstance(TreeMap.class);
private final TreeMap fields;
private final IndexInput in;
@@ -93,12 +92,12 @@ class SimpleTextFieldsReader extends FieldsProducer {
}
}
}
-
+
private TreeMap readFields(IndexInput in) throws IOException {
ChecksumIndexInput input = new BufferedChecksumIndexInput(in);
BytesRefBuilder scratch = new BytesRefBuilder();
TreeMap fields = new TreeMap<>();
-
+
while (true) {
SimpleTextUtil.readLine(input, scratch);
if (scratch.get().equals(END)) {
@@ -206,9 +205,26 @@ class SimpleTextFieldsReader extends FieldsProducer {
public long totalTermFreq() {
return indexOptions == IndexOptions.DOCS ? -1 : totalTermFreq;
}
-
+
@Override
public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+
+ if (DocsEnum.requiresPositions(flags)) {
+ if (indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
+ // Positions were not indexed
+ return null;
+ }
+
+ SimpleTextDocsAndPositionsEnum docsAndPositionsEnum;
+ if (reuse != null && reuse instanceof SimpleTextDocsAndPositionsEnum && ((SimpleTextDocsAndPositionsEnum) reuse).canReuse(SimpleTextFieldsReader.this.in)) {
+ docsAndPositionsEnum = (SimpleTextDocsAndPositionsEnum) reuse;
+ } else {
+ docsAndPositionsEnum = new SimpleTextDocsAndPositionsEnum();
+ }
+ return docsAndPositionsEnum.reset(docsStart, liveDocs, indexOptions, docFreq);
+
+ }
+
SimpleTextDocsEnum docsEnum;
if (reuse != null && reuse instanceof SimpleTextDocsEnum && ((SimpleTextDocsEnum) reuse).canReuse(SimpleTextFieldsReader.this.in)) {
docsEnum = (SimpleTextDocsEnum) reuse;
@@ -218,22 +234,6 @@ class SimpleTextFieldsReader extends FieldsProducer {
return docsEnum.reset(docsStart, liveDocs, indexOptions == IndexOptions.DOCS, docFreq);
}
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
-
- if (indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
- // Positions were not indexed
- return null;
- }
-
- SimpleTextDocsAndPositionsEnum docsAndPositionsEnum;
- if (reuse != null && reuse instanceof SimpleTextDocsAndPositionsEnum && ((SimpleTextDocsAndPositionsEnum) reuse).canReuse(SimpleTextFieldsReader.this.in)) {
- docsAndPositionsEnum = (SimpleTextDocsAndPositionsEnum) reuse;
- } else {
- docsAndPositionsEnum = new SimpleTextDocsAndPositionsEnum();
- }
- return docsAndPositionsEnum.reset(docsStart, liveDocs, indexOptions, docFreq);
- }
}
private class SimpleTextDocsEnum extends DocsEnum {
@@ -246,7 +246,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
private final BytesRefBuilder scratch = new BytesRefBuilder();
private final CharsRefBuilder scratchUTF16 = new CharsRefBuilder();
private int cost;
-
+
public SimpleTextDocsEnum() {
this.inStart = SimpleTextFieldsReader.this.in;
this.in = this.inStart.clone();
@@ -277,6 +277,37 @@ class SimpleTextFieldsReader extends FieldsProducer {
}
@Override
+ public int nextPosition() throws IOException {
+ assert false : "Shouldn't be calling nextPosition() if you haven't asked for positions";
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public int nextDoc() throws IOException {
if (docID == NO_MORE_DOCS) {
return docID;
@@ -328,14 +359,14 @@ class SimpleTextFieldsReader extends FieldsProducer {
// Naive -- better to index skip data
return slowAdvance(target);
}
-
+
@Override
public long cost() {
return cost;
}
}
- private class SimpleTextDocsAndPositionsEnum extends DocsAndPositionsEnum {
+ private class SimpleTextDocsAndPositionsEnum extends DocsEnum {
private final IndexInput inStart;
private final IndexInput in;
private int docID = -1;
@@ -345,6 +376,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
private final BytesRefBuilder scratch2 = new BytesRefBuilder();
private final CharsRefBuilder scratchUTF16 = new CharsRefBuilder();
private final CharsRefBuilder scratchUTF16_2 = new CharsRefBuilder();
+ private int pos;
private BytesRef payload;
private long nextDocStart;
private boolean readOffsets;
@@ -437,7 +469,6 @@ class SimpleTextFieldsReader extends FieldsProducer {
@Override
public int nextPosition() throws IOException {
- final int pos;
if (readPositions) {
SimpleTextUtil.readLine(in, scratch);
assert StringHelper.startsWith(scratch.get(), POS): "got line=" + scratch.get().utf8ToString();
@@ -475,6 +506,16 @@ class SimpleTextFieldsReader extends FieldsProducer {
}
@Override
+ public int startPosition() throws IOException {
+ return pos;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return pos;
+ }
+
+ @Override
public int startOffset() throws IOException {
return startOffset;
}
@@ -488,7 +529,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
public BytesRef getPayload() {
return payload;
}
-
+
@Override
public long cost() {
return cost;
@@ -506,9 +547,9 @@ class SimpleTextFieldsReader extends FieldsProducer {
}
private static final long TERMS_BASE_RAM_BYTES_USED =
- RamUsageEstimator.shallowSizeOfInstance(SimpleTextTerms.class)
- + RamUsageEstimator.shallowSizeOfInstance(BytesRef.class)
- + RamUsageEstimator.shallowSizeOfInstance(CharsRef.class);
+ RamUsageEstimator.shallowSizeOfInstance(SimpleTextTerms.class)
+ + RamUsageEstimator.shallowSizeOfInstance(BytesRef.class)
+ + RamUsageEstimator.shallowSizeOfInstance(CharsRef.class);
private class SimpleTextTerms extends Terms implements Accountable {
private final long termsStart;
private final FieldInfo fieldInfo;
@@ -533,7 +574,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
final Builder>> b;
final PairOutputs outputsInner = new PairOutputs<>(posIntOutputs, posIntOutputs);
final PairOutputs> outputs = new PairOutputs<>(posIntOutputs,
- outputsInner);
+ outputsInner);
b = new Builder<>(FST.INPUT_TYPE.BYTE1, outputs);
IndexInput in = SimpleTextFieldsReader.this.in.clone();
in.seek(termsStart);
@@ -548,8 +589,8 @@ class SimpleTextFieldsReader extends FieldsProducer {
if (scratch.get().equals(END) || StringHelper.startsWith(scratch.get(), FIELD)) {
if (lastDocsStart != -1) {
b.add(Util.toIntsRef(lastTerm.get(), scratchIntsRef),
- outputs.newPair(lastDocsStart,
- outputsInner.newPair((long) docFreq, totalTermFreq)));
+ outputs.newPair(lastDocsStart,
+ outputsInner.newPair((long) docFreq, totalTermFreq)));
sumTotalTermFreq += totalTermFreq;
}
break;
@@ -565,7 +606,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
} else if (StringHelper.startsWith(scratch.get(), TERM)) {
if (lastDocsStart != -1) {
b.add(Util.toIntsRef(lastTerm.get(), scratchIntsRef), outputs.newPair(lastDocsStart,
- outputsInner.newPair((long) docFreq, totalTermFreq)));
+ outputsInner.newPair((long) docFreq, totalTermFreq)));
}
lastDocsStart = in.getFilePointer();
final int len = scratch.length() - TERM.length;
@@ -652,7 +693,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
public boolean hasPositions() {
return fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
}
-
+
@Override
public boolean hasPayloads() {
return fieldInfo.hasPayloads();
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsWriter.java
index 656713d..100c888 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsWriter.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsWriter.java
@@ -20,7 +20,6 @@ package org.apache.lucene.codecs.simpletext;
import java.io.IOException;
import org.apache.lucene.codecs.FieldsConsumer;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
@@ -33,7 +32,7 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
class SimpleTextFieldsWriter extends FieldsConsumer {
-
+
private IndexOutput out;
private final BytesRefBuilder scratch = new BytesRefBuilder();
private final SegmentWriteState writeState;
@@ -81,10 +80,10 @@ class SimpleTextFieldsWriter extends FieldsConsumer {
if (hasPositions) {
if (hasPayloads) {
- flags = flags | DocsAndPositionsEnum.FLAG_PAYLOADS;
+ flags = flags | DocsEnum.FLAG_PAYLOADS;
}
if (hasOffsets) {
- flags = flags | DocsAndPositionsEnum.FLAG_OFFSETS;
+ flags = flags | DocsEnum.FLAG_OFFSETS;
}
} else {
if (hasFreqs) {
@@ -93,7 +92,6 @@ class SimpleTextFieldsWriter extends FieldsConsumer {
}
TermsEnum termsEnum = terms.iterator(null);
- DocsAndPositionsEnum posEnum = null;
DocsEnum docsEnum = null;
// for each term in field
@@ -103,12 +101,8 @@ class SimpleTextFieldsWriter extends FieldsConsumer {
break;
}
- if (hasPositions) {
- posEnum = termsEnum.docsAndPositions(null, posEnum, flags);
- docsEnum = posEnum;
- } else {
- docsEnum = termsEnum.docs(null, docsEnum, flags);
- }
+ docsEnum = termsEnum.docs(null, docsEnum, flags);
+
assert docsEnum != null: "termsEnum=" + termsEnum + " hasPos=" + hasPositions + " flags=" + flags;
boolean wroteTerm = false;
@@ -154,15 +148,15 @@ class SimpleTextFieldsWriter extends FieldsConsumer {
// for each pos in field+term+doc
for(int i=0;i= startOffset;
assert startOffset >= lastStartOffset: "startOffset=" + startOffset + " lastStartOffset=" + lastStartOffset;
lastStartOffset = startOffset;
@@ -174,7 +168,7 @@ class SimpleTextFieldsWriter extends FieldsConsumer {
newline();
}
- BytesRef payload = posEnum.getPayload();
+ BytesRef payload = docsEnum.getPayload();
if (payload != null && payload.length > 0) {
assert payload.length != 0;
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java
index 641ff6c..5bf69b2 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java
@@ -25,7 +25,6 @@ import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.lucene.codecs.TermVectorsReader;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexFileNames;
@@ -59,15 +58,15 @@ import static org.apache.lucene.codecs.simpletext.SimpleTextTermVectorsWriter.*;
public class SimpleTextTermVectorsReader extends TermVectorsReader {
private static final long BASE_RAM_BYTES_USED =
- RamUsageEstimator.shallowSizeOfInstance(SimpleTextTermVectorsReader.class)
- + RamUsageEstimator.shallowSizeOfInstance(BytesRef.class)
- + RamUsageEstimator.shallowSizeOfInstance(CharsRef.class);
+ RamUsageEstimator.shallowSizeOfInstance(SimpleTextTermVectorsReader.class)
+ + RamUsageEstimator.shallowSizeOfInstance(BytesRef.class)
+ + RamUsageEstimator.shallowSizeOfInstance(CharsRef.class);
private long offsets[]; /* docid -> offset in .vec file */
private IndexInput in;
private BytesRefBuilder scratch = new BytesRefBuilder();
private CharsRefBuilder scratchUTF16 = new CharsRefBuilder();
-
+
public SimpleTextTermVectorsReader(Directory directory, SegmentInfo si, IOContext context) throws IOException {
boolean success = false;
try {
@@ -82,15 +81,15 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
}
readIndex(si.getDocCount());
}
-
+
// used by clone
SimpleTextTermVectorsReader(long offsets[], IndexInput in) {
this.offsets = offsets;
this.in = in;
}
-
- // we don't actually write a .tvx-like index, instead we read the
- // vectors file in entirety up-front and save the offsets
+
+ // we don't actually write a .tvx-like index, instead we read the
+ // vectors file in entirety up-front and save the offsets
// so we can seek to the data later.
private void readIndex(int maxDoc) throws IOException {
ChecksumIndexInput input = new BufferedChecksumIndexInput(in);
@@ -106,7 +105,7 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
SimpleTextUtil.checkFooter(input);
assert upto == offsets.length;
}
-
+
@Override
public Fields get(int doc) throws IOException {
SortedMap fields = new TreeMap<>();
@@ -122,30 +121,30 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
assert StringHelper.startsWith(scratch.get(), FIELD);
// skip fieldNumber:
parseIntAt(FIELD.length);
-
+
readLine();
assert StringHelper.startsWith(scratch.get(), FIELDNAME);
String fieldName = readString(FIELDNAME.length, scratch);
-
+
readLine();
assert StringHelper.startsWith(scratch.get(), FIELDPOSITIONS);
boolean positions = Boolean.parseBoolean(readString(FIELDPOSITIONS.length, scratch));
-
+
readLine();
assert StringHelper.startsWith(scratch.get(), FIELDOFFSETS);
boolean offsets = Boolean.parseBoolean(readString(FIELDOFFSETS.length, scratch));
-
+
readLine();
assert StringHelper.startsWith(scratch.get(), FIELDPAYLOADS);
boolean payloads = Boolean.parseBoolean(readString(FIELDPAYLOADS.length, scratch));
-
+
readLine();
assert StringHelper.startsWith(scratch.get(), FIELDTERMCOUNT);
int termCount = parseIntAt(FIELDTERMCOUNT.length);
-
+
SimpleTVTerms terms = new SimpleTVTerms(offsets, positions, payloads);
fields.put(fieldName, terms);
-
+
BytesRefBuilder term = new BytesRefBuilder();
for (int j = 0; j < termCount; j++) {
readLine();
@@ -154,14 +153,14 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
term.grow(termLength);
term.setLength(termLength);
System.arraycopy(scratch.bytes(), TERMTEXT.length, term.bytes(), 0, termLength);
-
+
SimpleTVPostings postings = new SimpleTVPostings();
terms.terms.put(term.toBytesRef(), postings);
-
+
readLine();
assert StringHelper.startsWith(scratch.get(), TERMFREQ);
postings.freq = parseIntAt(TERMFREQ.length);
-
+
if (positions || offsets) {
if (positions) {
postings.positions = new int[postings.freq];
@@ -169,12 +168,12 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
postings.payloads = new BytesRef[postings.freq];
}
}
-
+
if (offsets) {
postings.startOffsets = new int[postings.freq];
postings.endOffsets = new int[postings.freq];
}
-
+
for (int k = 0; k < postings.freq; k++) {
if (positions) {
readLine();
@@ -192,12 +191,12 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
}
}
}
-
+
if (offsets) {
readLine();
assert StringHelper.startsWith(scratch.get(), STARTOFFSET);
postings.startOffsets[k] = parseIntAt(STARTOFFSET.length);
-
+
readLine();
assert StringHelper.startsWith(scratch.get(), ENDOFFSET);
postings.endOffsets[k] = parseIntAt(ENDOFFSET.length);
@@ -216,11 +215,11 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
}
return new SimpleTextTermVectorsReader(offsets, in.clone());
}
-
+
@Override
public void close() throws IOException {
try {
- IOUtils.close(in);
+ IOUtils.close(in);
} finally {
in = null;
offsets = null;
@@ -230,20 +229,20 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
private void readLine() throws IOException {
SimpleTextUtil.readLine(in, scratch);
}
-
+
private int parseIntAt(int offset) {
scratchUTF16.copyUTF8Bytes(scratch.bytes(), offset, scratch.length()-offset);
return ArrayUtil.parseInt(scratchUTF16.chars(), 0, scratchUTF16.length());
}
-
+
private String readString(int offset, BytesRefBuilder scratch) {
scratchUTF16.copyUTF8Bytes(scratch.bytes(), offset, scratch.length()-offset);
return scratchUTF16.toString();
}
-
+
private class SimpleTVFields extends Fields {
private final SortedMap fields;
-
+
SimpleTVFields(SortedMap fields) {
this.fields = fields;
}
@@ -263,20 +262,20 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
return fields.size();
}
}
-
+
private static class SimpleTVTerms extends Terms {
final SortedMap terms;
final boolean hasOffsets;
final boolean hasPositions;
final boolean hasPayloads;
-
+
SimpleTVTerms(boolean hasOffsets, boolean hasPositions, boolean hasPayloads) {
this.hasOffsets = hasOffsets;
this.hasPositions = hasPositions;
this.hasPayloads = hasPayloads;
terms = new TreeMap<>();
}
-
+
@Override
public TermsEnum iterator(TermsEnum reuse) throws IOException {
// TODO: reuse
@@ -317,13 +316,13 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
public boolean hasPositions() {
return hasPositions;
}
-
+
@Override
public boolean hasPayloads() {
return hasPayloads;
}
}
-
+
private static class SimpleTVPostings {
private int freq;
private int positions[];
@@ -331,17 +330,17 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
private int endOffsets[];
private BytesRef payloads[];
}
-
+
private static class SimpleTVTermsEnum extends TermsEnum {
SortedMap terms;
Iterator> iterator;
Map.Entry current;
-
+
SimpleTVTermsEnum(SortedMap terms) {
this.terms = terms;
this.iterator = terms.entrySet().iterator();
}
-
+
@Override
public SeekStatus seekCeil(BytesRef text) throws IOException {
iterator = terms.tailMap(text).entrySet().iterator();
@@ -389,25 +388,26 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
@Override
public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+
+ if (DocsEnum.requiresPositions(flags)) {
+ SimpleTVPostings postings = current.getValue();
+ if (postings.positions == null && postings.startOffsets == null) {
+ return null;
+ }
+ // TODO: reuse
+ SimpleTVDocsAndPositionsEnum e = new SimpleTVDocsAndPositionsEnum();
+ e.reset(liveDocs, postings.positions, postings.startOffsets, postings.endOffsets, postings.payloads);
+ return e;
+ }
+
// TODO: reuse
SimpleTVDocsEnum e = new SimpleTVDocsEnum();
e.reset(liveDocs, (flags & DocsEnum.FLAG_FREQS) == 0 ? 1 : current.getValue().freq);
return e;
}
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- SimpleTVPostings postings = current.getValue();
- if (postings.positions == null && postings.startOffsets == null) {
- return null;
- }
- // TODO: reuse
- SimpleTVDocsAndPositionsEnum e = new SimpleTVDocsAndPositionsEnum();
- e.reset(liveDocs, postings.positions, postings.startOffsets, postings.endOffsets, postings.payloads);
- return e;
- }
}
-
+
// note: these two enum classes are exactly like the Default impl...
private static class SimpleTVDocsEnum extends DocsEnum {
private boolean didNext;
@@ -422,6 +422,37 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
}
@Override
+ public int nextPosition() throws IOException {
+ assert false;
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public int docID() {
return doc;
}
@@ -447,14 +478,14 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
this.doc = -1;
didNext = false;
}
-
+
@Override
public long cost() {
return 1;
}
}
-
- private static class SimpleTVDocsAndPositionsEnum extends DocsAndPositionsEnum {
+
+ private static class SimpleTVDocsAndPositionsEnum extends DocsEnum {
private boolean didNext;
private int doc = -1;
private int nextPos;
@@ -512,17 +543,27 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
@Override
public int nextPosition() {
- assert (positions != null && nextPos < positions.length) ||
- startOffsets != null && nextPos < startOffsets.length;
if (positions != null) {
+ assert nextPos < positions.length;
return positions[nextPos++];
} else {
+ assert nextPos < startOffsets.length;
nextPos++;
return -1;
}
}
@Override
+ public int startPosition() throws IOException {
+ return positions[nextPos-1];
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return positions[nextPos-1];
+ }
+
+ @Override
public int startOffset() {
if (startOffsets == null) {
return -1;
@@ -539,7 +580,7 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
return endOffsets[nextPos-1];
}
}
-
+
@Override
public long cost() {
return 1;
@@ -550,7 +591,7 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
public long ramBytesUsed() {
return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(offsets);
}
-
+
@Override
public String toString() {
return getClass().getSimpleName();
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/Token.java b/lucene/core/src/java/org/apache/lucene/analysis/Token.java
index f0a66f5..13cc5f2 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/Token.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/Token.java
@@ -20,7 +20,7 @@ package org.apache.lucene.analysis;
import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
import org.apache.lucene.analysis.tokenattributes.PackedTokenAttributeImpl;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
-import org.apache.lucene.index.DocsAndPositionsEnum; // for javadoc
+import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.util.Attribute;
import org.apache.lucene.util.AttributeFactory;
import org.apache.lucene.util.AttributeImpl;
@@ -43,7 +43,7 @@ import org.apache.lucene.util.BytesRef;
with type "eos". The default token type is "word".
A Token can optionally have metadata (a.k.a. payload) in the form of a variable
- length byte array. Use {@link DocsAndPositionsEnum#getPayload()} to retrieve the
+ length byte array. Use {@link DocsEnum#getPayload()} to retrieve the
payloads from the index.
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttribute.java b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttribute.java
index daf6d00..1641adc 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttribute.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttribute.java
@@ -17,7 +17,7 @@ package org.apache.lucene.analysis.tokenattributes;
* limitations under the License.
*/
-import org.apache.lucene.index.DocsAndPositionsEnum; // javadocs
+import org.apache.lucene.index.DocsEnum; // javadocs
import org.apache.lucene.util.Attribute;
import org.apache.lucene.util.BytesRef;
@@ -33,7 +33,7 @@ import org.apache.lucene.util.BytesRef;
* best to use the minimum number of bytes necessary. Some codec implementations
* may optimize payload storage when all payloads have the same length.
*
- * @see DocsAndPositionsEnum
+ * @see DocsEnum
*/
public interface PayloadAttribute extends Attribute {
/**
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttribute.java b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttribute.java
index 9afd2f9..a7a7cd9 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttribute.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttribute.java
@@ -43,7 +43,7 @@ import org.apache.lucene.util.Attribute;
*
*
*
- * @see org.apache.lucene.index.DocsAndPositionsEnum
+ * @see org.apache.lucene.index.DocsEnum
*/
public interface PositionIncrementAttribute extends Attribute {
/** Set the position increment. The default value is one.
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/PostingsReaderBase.java b/lucene/core/src/java/org/apache/lucene/codecs/PostingsReaderBase.java
index 5681c19..6b90ed2 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/PostingsReaderBase.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/PostingsReaderBase.java
@@ -20,7 +20,6 @@ package org.apache.lucene.codecs;
import java.io.Closeable;
import java.io.IOException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.SegmentReadState;
@@ -32,7 +31,7 @@ import org.apache.lucene.util.Bits;
/** The core terms dictionaries (BlockTermsReader,
* BlockTreeTermsReader) interact with a single instance
* of this class to manage creation of {@link DocsEnum} and
- * {@link DocsAndPositionsEnum} instances. It provides an
+ * {@link DocsEnum} instances. It provides an
* IndexInput (termsIn) where this class may read any
* previously stored data that it had written in its
* corresponding {@link PostingsWriterBase} at indexing
@@ -67,11 +66,6 @@ public abstract class PostingsReaderBase implements Closeable, Accountable {
/** Must fully consume state, since after this call that
* TermState may be reused. */
public abstract DocsEnum docs(FieldInfo fieldInfo, BlockTermState state, Bits skipDocs, DocsEnum reuse, int flags) throws IOException;
-
- /** Must fully consume state, since after this call that
- * TermState may be reused. */
- public abstract DocsAndPositionsEnum docsAndPositions(FieldInfo fieldInfo, BlockTermState state, Bits skipDocs, DocsAndPositionsEnum reuse,
- int flags) throws IOException;
/**
* Checks consistency of this reader.
@@ -81,7 +75,7 @@ public abstract class PostingsReaderBase implements Closeable, Accountable {
* @lucene.internal
*/
public abstract void checkIntegrity() throws IOException;
-
+
@Override
public abstract void close() throws IOException;
}
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/PostingsWriterBase.java b/lucene/core/src/java/org/apache/lucene/codecs/PostingsWriterBase.java
index 0dc7bb5..82063a0 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/PostingsWriterBase.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/PostingsWriterBase.java
@@ -17,12 +17,8 @@ package org.apache.lucene.codecs;
* limitations under the License.
*/
-import java.io.Closeable;
-import java.io.IOException;
-
import org.apache.lucene.codecs.blocktree.BlockTreeTermsWriter;
-import org.apache.lucene.index.DocsAndPositionsEnum; // javadocs
-import org.apache.lucene.index.DocsEnum; // javadocs
+import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.SegmentWriteState;
import org.apache.lucene.index.TermsEnum;
@@ -31,6 +27,9 @@ import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
+import java.io.Closeable;
+import java.io.IOException;
+
/**
* Class that plugs into term dictionaries, such as {@link
* BlockTreeTermsWriter}, and handles writing postings.
@@ -54,8 +53,8 @@ public abstract class PostingsWriterBase implements Closeable {
public abstract void init(IndexOutput termsOut, SegmentWriteState state) throws IOException;
/** Write all postings for one term; use the provided
- * {@link TermsEnum} to pull a {@link DocsEnum} or {@link
- * DocsAndPositionsEnum}. This method should not
+ * {@link TermsEnum} to pull a {@link DocsEnum}.
+ * This method should not
* re-position the {@code TermsEnum}! It is already
* positioned on the term that should be written. This
* method must set the bit in the provided {@link
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/PushPostingsWriterBase.java b/lucene/core/src/java/org/apache/lucene/codecs/PushPostingsWriterBase.java
index 35ebba1..d59b882 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/PushPostingsWriterBase.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/PushPostingsWriterBase.java
@@ -19,7 +19,6 @@ package org.apache.lucene.codecs;
import java.io.IOException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.IndexOptions;
@@ -43,7 +42,6 @@ public abstract class PushPostingsWriterBase extends PostingsWriterBase {
// Reused in writeTerm
private DocsEnum docsEnum;
- private DocsAndPositionsEnum posEnum;
private int enumFlags;
/** {@link FieldInfo} of current field being written. */
@@ -103,15 +101,15 @@ public abstract class PushPostingsWriterBase extends PostingsWriterBase {
enumFlags = DocsEnum.FLAG_FREQS;
} else if (writeOffsets == false) {
if (writePayloads) {
- enumFlags = DocsAndPositionsEnum.FLAG_PAYLOADS;
+ enumFlags = DocsEnum.FLAG_PAYLOADS;
} else {
- enumFlags = 0;
+ enumFlags = DocsEnum.FLAG_POSITIONS;
}
} else {
if (writePayloads) {
- enumFlags = DocsAndPositionsEnum.FLAG_PAYLOADS | DocsAndPositionsEnum.FLAG_OFFSETS;
+ enumFlags = DocsEnum.FLAG_PAYLOADS | DocsEnum.FLAG_OFFSETS;
} else {
- enumFlags = DocsAndPositionsEnum.FLAG_OFFSETS;
+ enumFlags = DocsEnum.FLAG_OFFSETS;
}
}
@@ -121,12 +119,7 @@ public abstract class PushPostingsWriterBase extends PostingsWriterBase {
@Override
public final BlockTermState writeTerm(BytesRef term, TermsEnum termsEnum, FixedBitSet docsSeen) throws IOException {
startTerm();
- if (writePositions == false) {
- docsEnum = termsEnum.docs(null, docsEnum, enumFlags);
- } else {
- posEnum = termsEnum.docsAndPositions(null, posEnum, enumFlags);
- docsEnum = posEnum;
- }
+ docsEnum = termsEnum.docs(null, docsEnum, enumFlags);
assert docsEnum != null;
int docFreq = 0;
@@ -149,13 +142,13 @@ public abstract class PushPostingsWriterBase extends PostingsWriterBase {
if (writePositions) {
for(int i=0;i= 0;
+ assert !hasPositions || pos >= 0 ;
addPosition(pos, startOffset, endOffset, payload);
}
}
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnum.java b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnum.java
index 952d226..fb42924 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnum.java
@@ -19,9 +19,7 @@ package org.apache.lucene.codecs.blocktree;
import java.io.IOException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.TermState;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.IndexInput;
@@ -208,17 +206,6 @@ final class IntersectTermsEnum extends TermsEnum {
return fr.parent.postingsReader.docs(fr.fieldInfo, currentFrame.termState, skipDocs, reuse, flags);
}
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- if (fr.fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
- // Positions were not indexed:
- return null;
- }
-
- currentFrame.decodeMetaData();
- return fr.parent.postingsReader.docsAndPositions(fr.fieldInfo, currentFrame.termState, skipDocs, reuse, flags);
- }
-
private int getState() {
int state = currentFrame.state;
for(int idx=0;idx= FLAG_POSITIONS);
+ }
+
private AttributeSource atts = null;
/** Sole constructor. (For invocation by subclass
@@ -64,4 +81,31 @@ public abstract class DocsEnum extends DocIdSetIterator {
if (atts == null) atts = new AttributeSource();
return atts;
}
+
+ /**
+ * Returns the next position. If there are no more
+ * positions, or the iterator does not support positions,
+ * this will return DocsEnum.NO_MORE_POSITIONS */
+ public abstract int nextPosition() throws IOException;
+
+ /** Returns current starting position, or NO_MORE_POSITIONS if not supported */
+ public abstract int startPosition() throws IOException;
+
+ /** Returns current ending position, or NO_MORE_POSITIONS if not supported */
+ public abstract int endPosition() throws IOException;
+
+ /** Returns start offset for the current position, or -1
+ * if offsets were not indexed. */
+ public abstract int startOffset() throws IOException;
+
+ /** Returns end offset for the current position, or -1 if
+ * offsets were not indexed. */
+ public abstract int endOffset() throws IOException;
+
+ /** Returns the payload at this position, or null if no
+ * payload was indexed. You should not modify anything
+ * (neither members of the returned BytesRef nor bytes
+ * in the byte[]). */
+ public abstract BytesRef getPayload() throws IOException;
+
}
diff --git a/lucene/core/src/java/org/apache/lucene/index/FilterLeafReader.java b/lucene/core/src/java/org/apache/lucene/index/FilterLeafReader.java
index 47422a9..9c748af 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FilterLeafReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FilterLeafReader.java
@@ -17,14 +17,14 @@ package org.apache.lucene.index;
* limitations under the License.
*/
-import java.io.IOException;
-import java.util.Iterator;
-
import org.apache.lucene.search.CachingWrapperFilter;
import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
+import java.io.IOException;
+import java.util.Iterator;
+
/** A FilterLeafReader contains another LeafReader, which it
* uses as its basic source of data, possibly transforming the data along the
* way or providing additional functionality. The class
@@ -219,10 +219,6 @@ public class FilterLeafReader extends LeafReader {
return in.docs(liveDocs, reuse, flags);
}
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- return in.docsAndPositions(liveDocs, reuse, flags);
- }
}
/** Base class for filtering {@link DocsEnum} implementations. */
@@ -267,55 +263,18 @@ public class FilterLeafReader extends LeafReader {
}
@Override
- public long cost() {
- return in.cost();
- }
- }
-
- /** Base class for filtering {@link DocsAndPositionsEnum} implementations. */
- public static class FilterDocsAndPositionsEnum extends DocsAndPositionsEnum {
- /** The underlying DocsAndPositionsEnum instance. */
- protected final DocsAndPositionsEnum in;
-
- /**
- * Create a new FilterDocsAndPositionsEnum
- * @param in the underlying DocsAndPositionsEnum instance.
- */
- public FilterDocsAndPositionsEnum(DocsAndPositionsEnum in) {
- if (in == null) {
- throw new NullPointerException("incoming DocsAndPositionsEnum cannot be null");
- }
- this.in = in;
- }
-
- @Override
- public AttributeSource attributes() {
- return in.attributes();
- }
-
- @Override
- public int docID() {
- return in.docID();
- }
-
- @Override
- public int freq() throws IOException {
- return in.freq();
- }
-
- @Override
- public int nextDoc() throws IOException {
- return in.nextDoc();
+ public int nextPosition() throws IOException {
+ return in.nextPosition();
}
@Override
- public int advance(int target) throws IOException {
- return in.advance(target);
+ public int startPosition() throws IOException {
+ return in.startPosition();
}
@Override
- public int nextPosition() throws IOException {
- return in.nextPosition();
+ public int endPosition() throws IOException {
+ return in.endPosition();
}
@Override
@@ -332,7 +291,7 @@ public class FilterLeafReader extends LeafReader {
public BytesRef getPayload() throws IOException {
return in.getPayload();
}
-
+
@Override
public long cost() {
return in.cost();
diff --git a/lucene/core/src/java/org/apache/lucene/index/FilteredTermsEnum.java b/lucene/core/src/java/org/apache/lucene/index/FilteredTermsEnum.java
index b6bfcc4..600c9b5 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FilteredTermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FilteredTermsEnum.java
@@ -182,11 +182,6 @@ public abstract class FilteredTermsEnum extends TermsEnum {
public DocsEnum docs(Bits bits, DocsEnum reuse, int flags) throws IOException {
return tenum.docs(bits, reuse, flags);
}
-
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits bits, DocsAndPositionsEnum reuse, int flags) throws IOException {
- return tenum.docsAndPositions(bits, reuse, flags);
- }
/** This enum does not support seeking!
* @throws UnsupportedOperationException In general, subclasses do not
diff --git a/lucene/core/src/java/org/apache/lucene/index/FreqProxFields.java b/lucene/core/src/java/org/apache/lucene/index/FreqProxFields.java
index fe5d31f..bf2e2ab 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FreqProxFields.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FreqProxFields.java
@@ -24,7 +24,7 @@ import java.util.List;
import java.util.Map;
import org.apache.lucene.index.FreqProxTermsWriterPerField.FreqProxPostingsArray;
-import org.apache.lucene.util.AttributeSource; // javadocs
+import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
@@ -235,6 +235,33 @@ class FreqProxFields extends Fields {
throw new IllegalArgumentException("liveDocs must be null");
}
+ if ((flags & DocsEnum.FLAG_POSITIONS) >= DocsEnum.FLAG_POSITIONS) {
+ FreqProxDocsAndPositionsEnum posEnum;
+
+ if (!terms.hasProx) {
+ // Caller wants positions but we didn't index them;
+ // don't lie:
+ throw new IllegalArgumentException("did not index positions");
+ }
+
+ if (!terms.hasOffsets && (flags & DocsEnum.FLAG_OFFSETS) == DocsEnum.FLAG_OFFSETS) {
+ // Caller wants offsets but we didn't index them;
+ // don't lie:
+ throw new IllegalArgumentException("did not index offsets");
+ }
+
+ if (reuse instanceof FreqProxDocsAndPositionsEnum) {
+ posEnum = (FreqProxDocsAndPositionsEnum) reuse;
+ if (posEnum.postingsArray != postingsArray) {
+ posEnum = new FreqProxDocsAndPositionsEnum(terms, postingsArray);
+ }
+ } else {
+ posEnum = new FreqProxDocsAndPositionsEnum(terms, postingsArray);
+ }
+ posEnum.reset(sortedTermIDs[ord]);
+ return posEnum;
+ }
+
FreqProxDocsEnum docsEnum;
if (!terms.hasFreq && (flags & DocsEnum.FLAG_FREQS) != 0) {
@@ -255,37 +282,6 @@ class FreqProxFields extends Fields {
return docsEnum;
}
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) {
- if (liveDocs != null) {
- throw new IllegalArgumentException("liveDocs must be null");
- }
- FreqProxDocsAndPositionsEnum posEnum;
-
- if (!terms.hasProx) {
- // Caller wants positions but we didn't index them;
- // don't lie:
- throw new IllegalArgumentException("did not index positions");
- }
-
- if (!terms.hasOffsets && (flags & DocsAndPositionsEnum.FLAG_OFFSETS) != 0) {
- // Caller wants offsets but we didn't index them;
- // don't lie:
- throw new IllegalArgumentException("did not index offsets");
- }
-
- if (reuse instanceof FreqProxDocsAndPositionsEnum) {
- posEnum = (FreqProxDocsAndPositionsEnum) reuse;
- if (posEnum.postingsArray != postingsArray) {
- posEnum = new FreqProxDocsAndPositionsEnum(terms, postingsArray);
- }
- } else {
- posEnum = new FreqProxDocsAndPositionsEnum(terms, postingsArray);
- }
- posEnum.reset(sortedTermIDs[ord]);
- return posEnum;
- }
-
/**
* Expert: Returns the TermsEnums internal state to position the TermsEnum
* without re-seeking the term dictionary.
@@ -348,6 +344,36 @@ class FreqProxFields extends Fields {
}
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public int nextDoc() throws IOException {
if (reader.eof()) {
if (ended) {
@@ -389,7 +415,7 @@ class FreqProxFields extends Fields {
}
}
- private static class FreqProxDocsAndPositionsEnum extends DocsAndPositionsEnum {
+ private static class FreqProxDocsAndPositionsEnum extends DocsEnum {
final FreqProxTermsWriterPerField terms;
final FreqProxPostingsArray postingsArray;
@@ -501,6 +527,16 @@ class FreqProxFields extends Fields {
}
@Override
+ public int startPosition() throws IOException {
+ return pos;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return pos;
+ }
+
+ @Override
public int startOffset() {
if (!readOffsets) {
throw new IllegalStateException("offsets were not indexed");
diff --git a/lucene/core/src/java/org/apache/lucene/index/LeafReader.java b/lucene/core/src/java/org/apache/lucene/index/LeafReader.java
index 2f9c604..df01c2e 100644
--- a/lucene/core/src/java/org/apache/lucene/index/LeafReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/LeafReader.java
@@ -19,7 +19,6 @@ package org.apache.lucene.index;
import java.io.IOException;
-import org.apache.lucene.index.IndexReader.ReaderClosedListener;
import org.apache.lucene.util.Bits;
/** {@code LeafReader} is an abstract class, providing an interface for accessing an
@@ -209,34 +208,21 @@ public abstract class LeafReader extends IndexReader {
* This will return null if either the field or
* term does not exist.
* @see TermsEnum#docs(Bits, DocsEnum) */
- public final DocsEnum termDocsEnum(Term term) throws IOException {
+ public final DocsEnum termDocsEnum(Term term, int flags) throws IOException {
assert term.field() != null;
assert term.bytes() != null;
final Terms terms = terms(term.field());
if (terms != null) {
final TermsEnum termsEnum = terms.iterator(null);
if (termsEnum.seekExact(term.bytes())) {
- return termsEnum.docs(getLiveDocs(), null);
+ return termsEnum.docs(getLiveDocs(), null, flags);
}
}
return null;
}
- /** Returns {@link DocsAndPositionsEnum} for the specified
- * term. This will return null if the
- * field or term does not exist or positions weren't indexed.
- * @see TermsEnum#docsAndPositions(Bits, DocsAndPositionsEnum) */
- public final DocsAndPositionsEnum termPositionsEnum(Term term) throws IOException {
- assert term.field() != null;
- assert term.bytes() != null;
- final Terms terms = terms(term.field());
- if (terms != null) {
- final TermsEnum termsEnum = terms.iterator(null);
- if (termsEnum.seekExact(term.bytes())) {
- return termsEnum.docsAndPositions(getLiveDocs(), null);
- }
- }
- return null;
+ public final DocsEnum termDocsEnum(Term term) throws IOException {
+ return termDocsEnum(term, DocsEnum.FLAG_FREQS);
}
/** Returns {@link NumericDocValues} for this field, or
diff --git a/lucene/core/src/java/org/apache/lucene/index/MappedMultiFields.java b/lucene/core/src/java/org/apache/lucene/index/MappedMultiFields.java
index fad0eed..b41e3d3 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MappedMultiFields.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MappedMultiFields.java
@@ -107,33 +107,29 @@ public class MappedMultiFields extends FilterFields {
if (liveDocs != null) {
throw new IllegalArgumentException("liveDocs must be null");
}
- MappingMultiDocsEnum mappingDocsEnum;
- if (reuse instanceof MappingMultiDocsEnum) {
- mappingDocsEnum = (MappingMultiDocsEnum) reuse;
- } else {
- mappingDocsEnum = new MappingMultiDocsEnum(mergeState);
- }
-
- MultiDocsEnum docsEnum = (MultiDocsEnum) in.docs(liveDocs, mappingDocsEnum.multiDocsEnum, flags);
- mappingDocsEnum.reset(docsEnum);
- return mappingDocsEnum;
- }
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- if (liveDocs != null) {
- throw new IllegalArgumentException("liveDocs must be null");
- }
MappingMultiDocsAndPositionsEnum mappingDocsAndPositionsEnum;
if (reuse instanceof MappingMultiDocsAndPositionsEnum) {
mappingDocsAndPositionsEnum = (MappingMultiDocsAndPositionsEnum) reuse;
} else {
mappingDocsAndPositionsEnum = new MappingMultiDocsAndPositionsEnum(mergeState);
}
-
- MultiDocsAndPositionsEnum docsAndPositionsEnum = (MultiDocsAndPositionsEnum) in.docsAndPositions(liveDocs, mappingDocsAndPositionsEnum.multiDocsAndPositionsEnum, flags);
+
+ MultiDocsAndPositionsEnum docsAndPositionsEnum = (MultiDocsAndPositionsEnum) in.docs(liveDocs, mappingDocsAndPositionsEnum.multiDocsAndPositionsEnum, flags);
mappingDocsAndPositionsEnum.reset(docsAndPositionsEnum);
return mappingDocsAndPositionsEnum;
+
+/*
+ MappingMultiDocsEnum mappingDocsEnum;
+ if (reuse instanceof MappingMultiDocsEnum) {
+ mappingDocsEnum = (MappingMultiDocsEnum) reuse;
+ } else {
+ mappingDocsEnum = new MappingMultiDocsEnum(mergeState);
+ }
+
+ MultiDocsEnum docsEnum = (MultiDocsEnum) in.docs(liveDocs, mappingDocsEnum.multiDocsEnum, flags);
+ mappingDocsEnum.reset(docsEnum);
+ return mappingDocsEnum;*/
}
}
}
diff --git a/lucene/core/src/java/org/apache/lucene/index/MappingMultiDocsAndPositionsEnum.java b/lucene/core/src/java/org/apache/lucene/index/MappingMultiDocsAndPositionsEnum.java
index 8fd316a..78eb05f 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MappingMultiDocsAndPositionsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MappingMultiDocsAndPositionsEnum.java
@@ -17,11 +17,11 @@ package org.apache.lucene.index;
* limitations under the License.
*/
+import java.io.IOException;
+
import org.apache.lucene.index.MultiDocsAndPositionsEnum.EnumWithSlice;
import org.apache.lucene.util.BytesRef;
-import java.io.IOException;
-
/**
* Exposes flex API, merged from flex API of sub-segments,
* remapping docIDs (this is used for segment merging).
@@ -29,12 +29,12 @@ import java.io.IOException;
* @lucene.experimental
*/
-final class MappingMultiDocsAndPositionsEnum extends DocsAndPositionsEnum {
+final class MappingMultiDocsAndPositionsEnum extends DocsEnum {
private MultiDocsAndPositionsEnum.EnumWithSlice[] subs;
int numSubs;
int upto;
MergeState.DocMap currentMap;
- DocsAndPositionsEnum current;
+ DocsEnum current;
int currentBase;
int doc = -1;
private MergeState mergeState;
@@ -115,6 +115,16 @@ final class MappingMultiDocsAndPositionsEnum extends DocsAndPositionsEnum {
}
@Override
+ public int startPosition() throws IOException {
+ return current.startPosition();
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return current.endPosition();
+ }
+
+ @Override
public int startOffset() throws IOException {
return current.startOffset();
}
diff --git a/lucene/core/src/java/org/apache/lucene/index/MappingMultiDocsEnum.java b/lucene/core/src/java/org/apache/lucene/index/MappingMultiDocsEnum.java
index 2aa9e5f..01cd894 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MappingMultiDocsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MappingMultiDocsEnum.java
@@ -17,10 +17,11 @@ package org.apache.lucene.index;
* limitations under the License.
*/
-import org.apache.lucene.index.MultiDocsEnum.EnumWithSlice;
-
import java.io.IOException;
+import org.apache.lucene.index.MultiDocsEnum.EnumWithSlice;
+import org.apache.lucene.util.BytesRef;
+
/**
* Exposes flex API, merged from flex API of sub-segments,
* remapping docIDs (this is used for segment merging).
@@ -70,6 +71,36 @@ final class MappingMultiDocsEnum extends DocsEnum {
}
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public int docID() {
return doc;
}
diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiDocsAndPositionsEnum.java b/lucene/core/src/java/org/apache/lucene/index/MultiDocsAndPositionsEnum.java
index 33e2127..5c9a5ab 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MultiDocsAndPositionsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MultiDocsAndPositionsEnum.java
@@ -17,31 +17,31 @@ package org.apache.lucene.index;
* limitations under the License.
*/
-import org.apache.lucene.util.BytesRef;
-
import java.io.IOException;
import java.util.Arrays;
+import org.apache.lucene.util.BytesRef;
+
/**
* Exposes flex API, merged from flex API of sub-segments.
*
* @lucene.experimental
*/
-public final class MultiDocsAndPositionsEnum extends DocsAndPositionsEnum {
+public final class MultiDocsAndPositionsEnum extends DocsEnum {
private final MultiTermsEnum parent;
- final DocsAndPositionsEnum[] subDocsAndPositionsEnum;
+ final DocsEnum[] subDocsAndPositionsEnum;
private final EnumWithSlice[] subs;
int numSubs;
int upto;
- DocsAndPositionsEnum current;
+ DocsEnum current;
int currentBase;
int doc = -1;
/** Sole constructor. */
public MultiDocsAndPositionsEnum(MultiTermsEnum parent, int subReaderCount) {
this.parent = parent;
- subDocsAndPositionsEnum = new DocsAndPositionsEnum[subReaderCount];
+ subDocsAndPositionsEnum = new DocsEnum[subReaderCount];
this.subs = new EnumWithSlice[subReaderCount];
for (int i = 0; i < subs.length; i++) {
subs[i] = new EnumWithSlice();
@@ -144,6 +144,16 @@ public final class MultiDocsAndPositionsEnum extends DocsAndPositionsEnum {
}
@Override
+ public int startPosition() throws IOException {
+ return current.startPosition();
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return current.endPosition();
+ }
+
+ @Override
public int startOffset() throws IOException {
return current.startOffset();
}
@@ -159,14 +169,14 @@ public final class MultiDocsAndPositionsEnum extends DocsAndPositionsEnum {
}
// TODO: implement bulk read more efficiently than super
- /** Holds a {@link DocsAndPositionsEnum} along with the
+ /** Holds a {@link DocsEnum} along with the
* corresponding {@link ReaderSlice}. */
public final static class EnumWithSlice {
EnumWithSlice() {
}
- /** {@link DocsAndPositionsEnum} for this sub-reader. */
- public DocsAndPositionsEnum docsAndPositionsEnum;
+ /** {@link DocsEnum} for this sub-reader. */
+ public DocsEnum docsAndPositionsEnum;
/** {@link ReaderSlice} describing how this sub-reader
* fits into the composite reader. */
diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiDocsEnum.java b/lucene/core/src/java/org/apache/lucene/index/MultiDocsEnum.java
index 082d266..d76e24e 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MultiDocsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MultiDocsEnum.java
@@ -21,6 +21,8 @@ package org.apache.lucene.index;
import java.io.IOException;
import java.util.Arrays;
+import org.apache.lucene.util.BytesRef;
+
/**
* Exposes {@link DocsEnum}, merged from {@link DocsEnum}
* API of sub-segments.
@@ -89,6 +91,36 @@ public final class MultiDocsEnum extends DocsEnum {
public int docID() {
return doc;
}
+
+ @Override
+ public int nextPosition() throws IOException {
+ return current.nextPosition();
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return current.startPosition();
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return current.endPosition();
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return current.startOffset();
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return current.endOffset();
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return current.getPayload();
+ }
@Override
public int advance(int target) throws IOException {
diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiFields.java b/lucene/core/src/java/org/apache/lucene/index/MultiFields.java
index 8a6dd0c..b203601 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MultiFields.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MultiFields.java
@@ -146,29 +146,29 @@ public final class MultiFields extends Fields {
return null;
}
- /** Returns {@link DocsAndPositionsEnum} for the specified
+ /** Returns {@link DocsEnum} for the specified
* field and term. This will return null if the field or
* term does not exist or positions were not indexed.
* @see #getTermPositionsEnum(IndexReader, Bits, String, BytesRef, int) */
- public static DocsAndPositionsEnum getTermPositionsEnum(IndexReader r, Bits liveDocs, String field, BytesRef term) throws IOException {
- return getTermPositionsEnum(r, liveDocs, field, term, DocsAndPositionsEnum.FLAG_OFFSETS | DocsAndPositionsEnum.FLAG_PAYLOADS);
+ public static DocsEnum getTermPositionsEnum(IndexReader r, Bits liveDocs, String field, BytesRef term) throws IOException {
+ return getTermPositionsEnum(r, liveDocs, field, term, DocsEnum.FLAG_OFFSETS | DocsEnum.FLAG_PAYLOADS);
}
- /** Returns {@link DocsAndPositionsEnum} for the specified
+ /** Returns {@link DocsEnum} for the specified
* field and term, with control over whether offsets and payloads are
* required. Some codecs may be able to optimize
* their implementation when offsets and/or payloads are not
* required. This will return null if the field or term does not
* exist or positions were not indexed. See {@link
- * TermsEnum#docsAndPositions(Bits,DocsAndPositionsEnum,int)}. */
- public static DocsAndPositionsEnum getTermPositionsEnum(IndexReader r, Bits liveDocs, String field, BytesRef term, int flags) throws IOException {
+ * TermsEnum#docs(Bits,DocsEnum,int)}. */
+ public static DocsEnum getTermPositionsEnum(IndexReader r, Bits liveDocs, String field, BytesRef term, int flags) throws IOException {
assert field != null;
assert term != null;
final Terms terms = getTerms(r, field);
if (terms != null) {
final TermsEnum termsEnum = terms.iterator(null);
if (termsEnum.seekExact(term)) {
- return termsEnum.docsAndPositions(liveDocs, null, flags);
+ return termsEnum.docs(liveDocs, null, flags);
}
}
return null;
diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiTermsEnum.java b/lucene/core/src/java/org/apache/lucene/index/MultiTermsEnum.java
index 6ae2c7c..110cc98 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MultiTermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MultiTermsEnum.java
@@ -17,14 +17,14 @@ package org.apache.lucene.index;
* limitations under the License.
*/
-import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.PriorityQueue;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.Bits;
-
import java.io.IOException;
import java.util.Arrays;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.PriorityQueue;
+
/**
* Exposes {@link TermsEnum} API, merged from {@link TermsEnum} API of sub-segments.
* This does a merge sort, by term text, of the sub-readers.
@@ -37,8 +37,7 @@ public final class MultiTermsEnum extends TermsEnum {
private final TermsEnumWithSlice[] subs; // all of our subs (one per sub-reader)
private final TermsEnumWithSlice[] currentSubs; // current subs that have at least one term for this field
private final TermsEnumWithSlice[] top;
- private final MultiDocsEnum.EnumWithSlice[] subDocs;
- private final MultiDocsAndPositionsEnum.EnumWithSlice[] subDocsAndPositions;
+ private final MultiDocsAndPositionsEnum.EnumWithSlice[] subDocs;
private BytesRef lastSeek;
private boolean lastSeekExact;
@@ -77,14 +76,11 @@ public final class MultiTermsEnum extends TermsEnum {
queue = new TermMergeQueue(slices.length);
top = new TermsEnumWithSlice[slices.length];
subs = new TermsEnumWithSlice[slices.length];
- subDocs = new MultiDocsEnum.EnumWithSlice[slices.length];
- subDocsAndPositions = new MultiDocsAndPositionsEnum.EnumWithSlice[slices.length];
+ subDocs = new MultiDocsAndPositionsEnum.EnumWithSlice[slices.length];
for(int i=0;iTerm enumerations are always ordered by
@@ -150,9 +150,10 @@ public abstract class TermsEnum implements BytesRefIterator {
}
/** Get {@link DocsEnum} for the current term, with
- * control over whether freqs are required. Do not
- * call this when the enum is unpositioned. This method
- * will not return null.
+ * control over whether freqs, positions, offsets or payloads
+ * are required. Do not call this when the enum is
+ * unpositioned. This method may return null if the postings
+ * information required is not available from the index
*
* @param liveDocs unset bits are documents that should not
* be returned
@@ -162,34 +163,6 @@ public abstract class TermsEnum implements BytesRefIterator {
* @see #docs(Bits, DocsEnum, int) */
public abstract DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException;
- /** Get {@link DocsAndPositionsEnum} for the current term.
- * Do not call this when the enum is unpositioned. This
- * method will return null if positions were not
- * indexed.
- *
- * @param liveDocs unset bits are documents that should not
- * be returned
- * @param reuse pass a prior DocsAndPositionsEnum for possible reuse
- * @see #docsAndPositions(Bits, DocsAndPositionsEnum, int) */
- public final DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse) throws IOException {
- return docsAndPositions(liveDocs, reuse, DocsAndPositionsEnum.FLAG_OFFSETS | DocsAndPositionsEnum.FLAG_PAYLOADS);
- }
-
- /** Get {@link DocsAndPositionsEnum} for the current term,
- * with control over whether offsets and payloads are
- * required. Some codecs may be able to optimize their
- * implementation when offsets and/or payloads are not required.
- * Do not call this when the enum is unpositioned. This
- * will return null if positions were not indexed.
-
- * @param liveDocs unset bits are documents that should not
- * be returned
- * @param reuse pass a prior DocsAndPositionsEnum for possible reuse
- * @param flags specifies which optional per-position values you
- * require; see {@link DocsAndPositionsEnum#FLAG_OFFSETS} and
- * {@link DocsAndPositionsEnum#FLAG_PAYLOADS}. */
- public abstract DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException;
-
/**
* Expert: Returns the TermsEnums internal state to position the TermsEnum
* without re-seeking the term dictionary.
@@ -250,11 +223,6 @@ public abstract class TermsEnum implements BytesRefIterator {
}
@Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) {
- throw new IllegalStateException("this method should never be called");
- }
-
- @Override
public BytesRef next() {
return null;
}
@@ -273,5 +241,6 @@ public abstract class TermsEnum implements BytesRefIterator {
public void seekExact(BytesRef term, TermState state) {
throw new IllegalStateException("this method should never be called");
}
+
};
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java b/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java
index bef19ca..cb97904 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java
@@ -17,6 +17,7 @@ package org.apache.lucene.search;
* limitations under the License.
*/
+import org.apache.lucene.index.DocsEnum;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
@@ -242,7 +243,7 @@ public class BooleanQuery extends Query implements Iterable {
for (Iterator wIter = weights.iterator(); wIter.hasNext();) {
Weight w = wIter.next();
BooleanClause c = cIter.next();
- if (w.scorer(context, context.reader().getLiveDocs()) == null) {
+ if (w.scorer(context, DocsEnum.FLAG_FREQS, context.reader().getLiveDocs()) == null) {
if (c.isRequired()) {
fail = true;
Explanation r = new Explanation(0.0f, "no match on required clause (" + c.getQuery().toString() + ")");
@@ -305,19 +306,19 @@ public class BooleanQuery extends Query implements Iterable {
}
@Override
- public BulkScorer bulkScorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public BulkScorer bulkScorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
if (minNrShouldMatch > 1) {
// TODO: (LUCENE-4872) in some cases BooleanScorer may be faster for minNrShouldMatch
// but the same is even true of pure conjunctions...
- return super.bulkScorer(context, acceptDocs);
+ return super.bulkScorer(context, flags, acceptDocs);
}
List optional = new ArrayList();
Iterator cIter = clauses.iterator();
for (Weight w : weights) {
BooleanClause c = cIter.next();
- BulkScorer subScorer = w.bulkScorer(context, acceptDocs);
+ BulkScorer subScorer = w.bulkScorer(context, flags, acceptDocs);
if (subScorer == null) {
if (c.isRequired()) {
return null;
@@ -326,10 +327,10 @@ public class BooleanQuery extends Query implements Iterable {
// TODO: there are some cases where BooleanScorer
// would handle conjunctions faster than
// BooleanScorer2...
- return super.bulkScorer(context, acceptDocs);
+ return super.bulkScorer(context, flags, acceptDocs);
} else if (c.isProhibited()) {
// TODO: there are some cases where BooleanScorer could do this faster
- return super.bulkScorer(context, acceptDocs);
+ return super.bulkScorer(context, flags, acceptDocs);
} else {
optional.add(subScorer);
}
@@ -343,7 +344,7 @@ public class BooleanQuery extends Query implements Iterable {
}
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs)
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs)
throws IOException {
// initially the user provided value,
// but if minNrShouldMatch == optional.size(),
@@ -356,7 +357,7 @@ public class BooleanQuery extends Query implements Iterable {
Iterator cIter = clauses.iterator();
for (Weight w : weights) {
BooleanClause c = cIter.next();
- Scorer subScorer = w.scorer(context, acceptDocs);
+ Scorer subScorer = w.scorer(context, flags, acceptDocs);
if (subScorer == null) {
if (c.isRequired()) {
return null;
@@ -433,6 +434,15 @@ public class BooleanQuery extends Query implements Iterable {
}
}
}
+
+ @Override
+ public String toString() {
+ StringBuffer sb = new StringBuffer("BooleanWeight[");
+ for (Weight weight : weights) {
+ sb.append(weight.toString()).append(",");
+ }
+ return sb.append("]").toString();
+ }
private Scorer req(List required, boolean disableCoord) {
if (required.size() == 1) {
diff --git a/lucene/core/src/java/org/apache/lucene/search/BooleanTopLevelScorers.java b/lucene/core/src/java/org/apache/lucene/search/BooleanTopLevelScorers.java
index 2c49ec7..721ade2 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BooleanTopLevelScorers.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BooleanTopLevelScorers.java
@@ -21,8 +21,6 @@ import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
-import org.apache.lucene.search.Scorer.ChildScorer;
-
/** Internal document-at-a-time scorers used to deal with stupid coord() computation */
class BooleanTopLevelScorers {
@@ -61,7 +59,7 @@ class BooleanTopLevelScorers {
private final Scorer req;
private final Scorer opt;
- CoordinatingConjunctionScorer(Weight weight, float coords[], Scorer req, int reqCount, Scorer opt) {
+ CoordinatingConjunctionScorer(Weight weight, float coords[], Scorer req, int reqCount, Scorer opt) throws IOException {
super(weight, new Scorer[] { req, opt });
this.coords = coords;
this.req = req;
diff --git a/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java b/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java
index d50bec3..b912b8e 100644
--- a/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java
@@ -17,15 +17,16 @@ package org.apache.lucene.search;
* limitations under the License.
*/
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.util.ArrayUtil;
-import org.apache.lucene.util.RamUsageEstimator;
-
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.RamUsageEstimator;
+
/**
* Caches all docs, and optionally also scores, coming from
* a search, and is then able to replay them to another
@@ -74,10 +75,41 @@ public abstract class CachingCollector extends FilterCollector {
public final int freq() { throw new UnsupportedOperationException(); }
@Override
+ public int nextPosition() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
public final int nextDoc() { throw new UnsupportedOperationException(); }
@Override
public long cost() { return 1; }
+
}
private static class NoScoreCachingCollector extends CachingCollector {
diff --git a/lucene/core/src/java/org/apache/lucene/search/Collector.java b/lucene/core/src/java/org/apache/lucene/search/Collector.java
index 0ac853f..448c08c 100644
--- a/lucene/core/src/java/org/apache/lucene/search/Collector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/Collector.java
@@ -72,5 +72,5 @@ public interface Collector {
* next atomic reader context
*/
LeafCollector getLeafCollector(LeafReaderContext context) throws IOException;
-
+
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java b/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java
index 3e81187..4b6dabc 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java
@@ -23,18 +23,21 @@ import java.util.Collection;
import java.util.Comparator;
import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
/** Scorer for conjunctions, sets of queries, all of which are required. */
class ConjunctionScorer extends Scorer {
+
protected int lastDoc = -1;
protected final DocsAndFreqs[] docsAndFreqs;
private final DocsAndFreqs lead;
private final float coord;
+ private final Scorer[] scorers; // to preserve order for positional queries
ConjunctionScorer(Weight weight, Scorer[] scorers) {
this(weight, scorers, 1f);
}
-
+
ConjunctionScorer(Weight weight, Scorer[] scorers, float coord) {
super(weight);
this.coord = coord;
@@ -52,6 +55,7 @@ class ConjunctionScorer extends Scorer {
});
lead = docsAndFreqs[0]; // least frequent DocsEnum leads the intersection
+ this.scorers = scorers;
}
private int doNext(int doc) throws IOException {
@@ -109,22 +113,52 @@ class ConjunctionScorer extends Scorer {
}
return sum * coord;
}
-
+
@Override
public int freq() {
return docsAndFreqs.length;
}
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public long cost() {
return lead.scorer.cost();
}
@Override
public Collection getChildren() {
- ArrayList children = new ArrayList<>(docsAndFreqs.length);
- for (DocsAndFreqs docs : docsAndFreqs) {
- children.add(new ChildScorer(docs.scorer, "MUST"));
+ ArrayList children = new ArrayList<>(scorers.length);
+ for (Scorer scorer : scorers) {
+ children.add(new ChildScorer(scorer, "MUST"));
}
return children;
}
@@ -133,7 +167,7 @@ class ConjunctionScorer extends Scorer {
final long cost;
final Scorer scorer;
int doc = -1;
-
+
DocsAndFreqs(Scorer scorer) {
this.scorer = scorer;
this.cost = scorer.cost();
diff --git a/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java b/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java
index e6c7a03..adae450 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java
@@ -17,17 +17,19 @@ package org.apache.lucene.search;
* limitations under the License.
*/
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.ToStringUtils;
-
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Set;
+import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.ToStringUtils;
+
/**
* A query that wraps another query or a filter and simply returns a constant score equal to the
* query boost for every document that matches the filter or query.
@@ -134,14 +136,13 @@ public class ConstantScoreQuery extends Query {
}
@Override
- public BulkScorer bulkScorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
- final DocIdSetIterator disi;
+ public BulkScorer bulkScorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
if (filter != null) {
assert query == null;
- return super.bulkScorer(context, acceptDocs);
+ return super.bulkScorer(context, flags, acceptDocs);
} else {
assert query != null && innerWeight != null;
- BulkScorer bulkScorer = innerWeight.bulkScorer(context, acceptDocs);
+ BulkScorer bulkScorer = innerWeight.bulkScorer(context, flags, acceptDocs);
if (bulkScorer == null) {
return null;
}
@@ -150,29 +151,32 @@ public class ConstantScoreQuery extends Query {
}
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
- final DocIdSetIterator disi;
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
if (filter != null) {
assert query == null;
final DocIdSet dis = filter.getDocIdSet(context, acceptDocs);
if (dis == null) {
return null;
}
- disi = dis.iterator();
+ final DocIdSetIterator disi = dis.iterator();
+ if (disi == null)
+ return null;
+ return new ConstantDocIdSetIteratorScorer(disi, this, queryWeight);
} else {
assert query != null && innerWeight != null;
- disi = innerWeight.scorer(context, acceptDocs);
+ Scorer scorer = innerWeight.scorer(context, flags, acceptDocs);
+ if (scorer == null) {
+ return null;
+ }
+ return new ConstantScoreScorer(scorer, queryWeight);
}
- if (disi == null) {
- return null;
- }
- return new ConstantScorer(disi, this, queryWeight);
+
}
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
- final Scorer cs = scorer(context, context.reader().getLiveDocs());
+ final Scorer cs = scorer(context, DocsEnum.FLAG_FREQS, context.reader().getLiveDocs());
final boolean exists = (cs != null && cs.advance(doc) == doc);
final ComplexExplanation result = new ComplexExplanation();
@@ -216,17 +220,46 @@ public class ConstantScoreQuery extends Query {
@Override
public void setScorer(Scorer scorer) throws IOException {
// we must wrap again here, but using the scorer passed in as parameter:
- in.setScorer(new ConstantScorer(scorer, weight, theScore));
+ in.setScorer(new ConstantScoreScorer(scorer, theScore));
}
};
}
}
- protected class ConstantScorer extends Scorer {
+ protected class ConstantScoreScorer extends FilterScorer {
+
+ private final float score;
+
+ public ConstantScoreScorer(Scorer wrapped, float score) {
+ super(wrapped);
+ this.score = score;
+ }
+
+ @Override
+ public int freq() throws IOException {
+ return 1;
+ }
+
+ @Override
+ public float score() throws IOException {
+ return score;
+ }
+
+ @Override
+ public Collection getChildren() {
+ if (query != null) {
+ return Collections.singletonList(new ChildScorer(in, "constant"));
+ } else {
+ return Collections.emptyList();
+ }
+ }
+ }
+
+ protected class ConstantDocIdSetIteratorScorer extends Scorer {
final DocIdSetIterator docIdSetIterator;
final float theScore;
- public ConstantScorer(DocIdSetIterator docIdSetIterator, Weight w, float theScore) {
+ public ConstantDocIdSetIteratorScorer(DocIdSetIterator docIdSetIterator, Weight w, float theScore) {
super(w);
this.theScore = theScore;
this.docIdSetIterator = docIdSetIterator;
@@ -254,10 +287,40 @@ public class ConstantScoreQuery extends Query {
}
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public int advance(int target) throws IOException {
return docIdSetIterator.advance(target);
}
-
+
@Override
public long cost() {
return docIdSetIterator.cost();
diff --git a/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java b/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java
index e27063a..5ab87a3 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java
@@ -16,6 +16,11 @@ package org.apache.lucene.search;
* limitations under the License.
*/
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.util.Bits;
+
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
@@ -23,11 +28,6 @@ import java.util.Iterator;
import java.util.List;
import java.util.Set;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.util.Bits;
-
/**
* A query that generates the union of documents produced by its subqueries, and that scores each document with the maximum
* score for that document as produced by any subquery, plus a tie breaking increment for any additional matching subqueries.
@@ -153,11 +153,11 @@ public class DisjunctionMaxQuery extends Query implements Iterable {
/** Create the scorer used to score our associated DisjunctionMaxQuery */
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
List scorers = new ArrayList<>();
for (Weight w : weights) {
// we will advance() subscorers
- Scorer subScorer = w.scorer(context, acceptDocs);
+ Scorer subScorer = w.scorer(context, flags, acceptDocs);
if (subScorer != null) {
scorers.add(subScorer);
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxScorer.java b/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxScorer.java
index b5d0a0d..e80242e 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxScorer.java
@@ -46,6 +46,7 @@ final class DisjunctionMaxScorer extends DisjunctionScorer {
DisjunctionMaxScorer(Weight weight, float tieBreakerMultiplier, Scorer[] subScorers) {
super(weight, subScorers);
this.tieBreakerMultiplier = tieBreakerMultiplier;
+
}
@Override
@@ -66,4 +67,5 @@ final class DisjunctionMaxScorer extends DisjunctionScorer {
protected float getFinal() {
return scoreMax + (scoreSum - scoreMax) * tieBreakerMultiplier;
}
+
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/DisjunctionScorer.java b/lucene/core/src/java/org/apache/lucene/search/DisjunctionScorer.java
index 5b7e2ff..2840c2b 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DisjunctionScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DisjunctionScorer.java
@@ -21,18 +21,20 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
+import org.apache.lucene.util.BytesRef;
+
/**
* Base class for Scorers that score disjunctions.
*/
abstract class DisjunctionScorer extends Scorer {
- private final Scorer subScorers[];
- private int numScorers;
+ protected final Scorer subScorers[];
/** The document number of the current match. */
protected int doc = -1;
+ protected int numScorers;
/** Number of matching scorers for the current match. */
protected int freq = -1;
-
+
protected DisjunctionScorer(Weight weight, Scorer subScorers[]) {
super(weight);
this.subScorers = subScorers;
@@ -115,6 +117,36 @@ abstract class DisjunctionScorer extends Scorer {
}
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public final long cost() {
long sum = 0;
for (int i = 0; i < numScorers; i++) {
@@ -167,7 +199,7 @@ abstract class DisjunctionScorer extends Scorer {
}
}
}
-
+
// if we haven't already computed freq + score, do so
private void visitScorers() throws IOException {
reset();
@@ -209,4 +241,5 @@ abstract class DisjunctionScorer extends Scorer {
/** Return final score */
protected abstract float getFinal();
+
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/DisjunctionSumScorer.java b/lucene/core/src/java/org/apache/lucene/search/DisjunctionSumScorer.java
index f291695..f775ad6 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DisjunctionSumScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DisjunctionSumScorer.java
@@ -19,13 +19,14 @@ package org.apache.lucene.search;
import java.io.IOException;
+
/** A Scorer for OR like queries, counterpart of ConjunctionScorer.
* This Scorer implements {@link Scorer#advance(int)} and uses advance() on the given Scorers.
*/
final class DisjunctionSumScorer extends DisjunctionScorer {
private double score;
private final float[] coord;
-
+
/** Construct a DisjunctionScorer.
* @param weight The weight to be used.
* @param subScorers Array of at least two subscorers.
@@ -50,4 +51,5 @@ final class DisjunctionSumScorer extends DisjunctionScorer {
protected float getFinal() {
return (float)score * coord[freq];
}
+
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/ExactPhraseScorer.java b/lucene/core/src/java/org/apache/lucene/search/ExactPhraseScorer.java
index e73b241..b82679c 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ExactPhraseScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ExactPhraseScorer.java
@@ -22,6 +22,7 @@ import java.util.Arrays;
import org.apache.lucene.index.*;
import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.util.BytesRef;
final class ExactPhraseScorer extends Scorer {
private final int endMinus1;
@@ -35,21 +36,21 @@ final class ExactPhraseScorer extends Scorer {
private final long cost;
private final static class ChunkState {
- final DocsAndPositionsEnum posEnum;
+ final DocsEnum posEnum;
final int offset;
int posUpto;
int posLimit;
int pos;
int lastPos;
- public ChunkState(DocsAndPositionsEnum posEnum, int offset) {
+ public ChunkState(DocsEnum posEnum, int offset) {
this.posEnum = posEnum;
this.offset = offset;
}
}
private final ChunkState[] chunkStates;
- private final DocsAndPositionsEnum lead;
+ private final DocsEnum lead;
private int docID = -1;
private int freq;
@@ -79,7 +80,7 @@ final class ExactPhraseScorer extends Scorer {
// TODO: don't dup this logic from conjunctionscorer :)
advanceHead: for(;;) {
for (int i = 1; i < chunkStates.length; i++) {
- final DocsAndPositionsEnum de = chunkStates[i].posEnum;
+ final DocsEnum de = chunkStates[i].posEnum;
if (de.docID() < doc) {
int d = de.advance(doc);
@@ -125,6 +126,36 @@ final class ExactPhraseScorer extends Scorer {
}
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public int docID() {
return docID;
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/FakeScorer.java b/lucene/core/src/java/org/apache/lucene/search/FakeScorer.java
index e2a50c8..4b0fbef 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FakeScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FakeScorer.java
@@ -17,14 +17,17 @@ package org.apache.lucene.search;
* limitations under the License.
*/
+import java.io.IOException;
import java.util.Collection;
+import org.apache.lucene.util.BytesRef;
+
/** Used by {@link BulkScorer}s that need to pass a {@link
* Scorer} to {@link LeafCollector#setScorer}. */
-final class FakeScorer extends Scorer {
- float score;
- int doc = -1;
- int freq = 1;
+public final class FakeScorer extends Scorer {
+ public float score;
+ public int doc = -1;
+ public int freq = 1;
public FakeScorer() {
super(null);
@@ -46,6 +49,36 @@ final class FakeScorer extends Scorer {
}
@Override
+ public int nextPosition() throws IOException {
+ throw new UnsupportedOperationException("FakeScorer doesn't support nextPosition()");
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ throw new UnsupportedOperationException("FakeScorer doesn't support startPosition()");
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ throw new UnsupportedOperationException("FakeScorer doesn't support endPosition()");
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ throw new UnsupportedOperationException("FakeScorer doesn't support startOffset()");
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ throw new UnsupportedOperationException("FakeScorer doesn't support endOffset()");
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ throw new UnsupportedOperationException("FakeScorer doesn't support getPayload()");
+ }
+
+ @Override
public int nextDoc() {
throw new UnsupportedOperationException("FakeScorer doesn't support nextDoc()");
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/FilterScorer.java b/lucene/core/src/java/org/apache/lucene/search/FilterScorer.java
index 88881bd..8a1ecfc 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FilterScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FilterScorer.java
@@ -18,9 +18,9 @@ package org.apache.lucene.search;
*/
import java.io.IOException;
-import java.util.Collection;
import org.apache.lucene.util.AttributeSource;
+import org.apache.lucene.util.BytesRef;
/**
* A {@code FilterScorer} contains another {@code Scorer}, which it
@@ -32,13 +32,18 @@ import org.apache.lucene.util.AttributeSource;
* further override some of these methods and may also provide additional
* methods and fields.
*/
-abstract class FilterScorer extends Scorer {
+public abstract class FilterScorer extends Scorer {
protected final Scorer in;
public FilterScorer(Scorer in) {
super(in.weight);
this.in = in;
}
+
+ public FilterScorer(Scorer in, Weight weight) {
+ super(weight);
+ this.in = in;
+ }
@Override
public float score() throws IOException {
@@ -61,6 +66,11 @@ abstract class FilterScorer extends Scorer {
}
@Override
+ public int nextPosition() throws IOException {
+ return in.nextPosition();
+ }
+
+ @Override
public int advance(int target) throws IOException {
return in.advance(target);
}
@@ -71,6 +81,31 @@ abstract class FilterScorer extends Scorer {
}
@Override
+ public int startPosition() throws IOException {
+ return in.startPosition();
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return in.endPosition();
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return in.startOffset();
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return in.endOffset();
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return in.getPayload();
+ }
+
+ @Override
public AttributeSource attributes() {
return in.attributes();
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/FilteredQuery.java b/lucene/core/src/java/org/apache/lucene/search/FilteredQuery.java
index c95f05b..ce77a68 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FilteredQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FilteredQuery.java
@@ -17,17 +17,17 @@ package org.apache.lucene.search;
* limitations under the License.
*/
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.ToStringUtils;
-
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Set;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.ToStringUtils;
+
/**
* A query that applies a filter to the results of another query.
@@ -119,7 +119,7 @@ public class FilteredQuery extends Query {
// return a filtering scorer
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
assert filter != null;
DocIdSet filterDocIdSet = filter.getDocIdSet(context, acceptDocs);
@@ -128,12 +128,12 @@ public class FilteredQuery extends Query {
return null;
}
- return strategy.filteredScorer(context, weight, filterDocIdSet);
+ return strategy.filteredScorer(context, weight, filterDocIdSet, flags);
}
// return a filtering top scorer
@Override
- public BulkScorer bulkScorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public BulkScorer bulkScorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
assert filter != null;
DocIdSet filterDocIdSet = filter.getDocIdSet(context, acceptDocs);
@@ -142,7 +142,8 @@ public class FilteredQuery extends Query {
return null;
}
- return strategy.filteredBulkScorer(context, weight, filterDocIdSet);
+ return strategy.filteredBulkScorer(context, weight, filterDocIdSet, flags);
+
}
};
}
@@ -153,13 +154,13 @@ public class FilteredQuery extends Query {
* than document scoring or if the filter has a linear running time to compute
* the next matching doc like exact geo distances.
*/
- private static final class QueryFirstScorer extends Scorer {
+ private static final class QueryFirstScorer extends FilterScorer {
private final Scorer scorer;
private int scorerDoc = -1;
private final Bits filterBits;
protected QueryFirstScorer(Weight weight, Bits filterBits, Scorer other) {
- super(weight);
+ super(other, weight);
this.scorer = other;
this.filterBits = filterBits;
}
@@ -184,29 +185,16 @@ public class FilteredQuery extends Query {
return scorerDoc = doc;
}
}
-
@Override
public int docID() {
return scorerDoc;
}
-
- @Override
- public float score() throws IOException {
- return scorer.score();
- }
-
- @Override
- public int freq() throws IOException { return scorer.freq(); }
-
+
@Override
public Collection getChildren() {
return Collections.singleton(new ChildScorer(scorer, "FILTERED"));
}
- @Override
- public long cost() {
- return scorer.cost();
- }
}
private static class QueryFirstBulkScorer extends BulkScorer {
@@ -249,7 +237,7 @@ public class FilteredQuery extends Query {
* jumping past the target document. When both land on the same document, it's
* collected.
*/
- private static final class LeapFrogScorer extends Scorer {
+ private static final class LeapFrogScorer extends FilterScorer {
private final DocIdSetIterator secondary;
private final DocIdSetIterator primary;
private final Scorer scorer;
@@ -257,7 +245,7 @@ public class FilteredQuery extends Query {
private int secondaryDoc = -1;
protected LeapFrogScorer(Weight weight, DocIdSetIterator primary, DocIdSetIterator secondary, Scorer scorer) {
- super(weight);
+ super(scorer, weight);
this.primary = primary;
this.secondary = secondary;
this.scorer = scorer;
@@ -297,17 +285,7 @@ public class FilteredQuery extends Query {
public final int docID() {
return secondaryDoc;
}
-
- @Override
- public final float score() throws IOException {
- return scorer.score();
- }
-
- @Override
- public final int freq() throws IOException {
- return scorer.freq();
- }
-
+
@Override
public final Collection getChildren() {
return Collections.singleton(new ChildScorer(scorer, "FILTERED"));
@@ -455,12 +433,13 @@ public class FilteredQuery extends Query {
* the {@link org.apache.lucene.index.LeafReaderContext} for which to return the {@link Scorer}.
* @param weight the {@link FilteredQuery} {@link Weight} to create the filtered scorer.
* @param docIdSet the filter {@link DocIdSet} to apply
+ * @param flags the low level Posting Features for this scorer.
* @return a filtered scorer
*
* @throws IOException if an {@link IOException} occurs
*/
public abstract Scorer filteredScorer(LeafReaderContext context,
- Weight weight, DocIdSet docIdSet) throws IOException;
+ Weight weight, DocIdSet docIdSet, int flags) throws IOException;
/**
* Returns a filtered {@link BulkScorer} based on this
@@ -475,8 +454,8 @@ public class FilteredQuery extends Query {
* @return a filtered top scorer
*/
public BulkScorer filteredBulkScorer(LeafReaderContext context,
- Weight weight, DocIdSet docIdSet) throws IOException {
- Scorer scorer = filteredScorer(context, weight, docIdSet);
+ Weight weight, DocIdSet docIdSet, int flags) throws IOException {
+ Scorer scorer = filteredScorer(context, weight, docIdSet, flags);
if (scorer == null) {
return null;
}
@@ -484,6 +463,7 @@ public class FilteredQuery extends Query {
// ignore scoreDocsInOrder:
return new Weight.DefaultBulkScorer(scorer);
}
+
}
/**
@@ -497,7 +477,7 @@ public class FilteredQuery extends Query {
public static class RandomAccessFilterStrategy extends FilterStrategy {
@Override
- public Scorer filteredScorer(LeafReaderContext context, Weight weight, DocIdSet docIdSet) throws IOException {
+ public Scorer filteredScorer(LeafReaderContext context, Weight weight, DocIdSet docIdSet, int flags) throws IOException {
final DocIdSetIterator filterIter = docIdSet.iterator();
if (filterIter == null) {
// this means the filter does not accept any documents.
@@ -509,11 +489,11 @@ public class FilteredQuery extends Query {
final boolean useRandomAccess = filterAcceptDocs != null && useRandomAccess(filterAcceptDocs, filterIter.cost());
if (useRandomAccess) {
// if we are using random access, we return the inner scorer, just with other acceptDocs
- return weight.scorer(context, filterAcceptDocs);
+ return weight.scorer(context, flags, filterAcceptDocs);
} else {
// we are gonna advance() this scorer, so we set inorder=true/toplevel=false
// we pass null as acceptDocs, as our filter has already respected acceptDocs, no need to do twice
- final Scorer scorer = weight.scorer(context, null);
+ final Scorer scorer = weight.scorer(context, flags, null);
return (scorer == null) ? null : new LeapFrogScorer(weight, filterIter, scorer, scorer);
}
}
@@ -546,14 +526,14 @@ public class FilteredQuery extends Query {
@Override
public Scorer filteredScorer(LeafReaderContext context,
- Weight weight, DocIdSet docIdSet) throws IOException {
+ Weight weight, DocIdSet docIdSet, int flags) throws IOException {
final DocIdSetIterator filterIter = docIdSet.iterator();
if (filterIter == null) {
// this means the filter does not accept any documents.
return null;
}
// we pass null as acceptDocs, as our filter has already respected acceptDocs, no need to do twice
- final Scorer scorer = weight.scorer(context, null);
+ final Scorer scorer = weight.scorer(context, flags, null);
if (scorer == null) {
return null;
}
@@ -583,29 +563,27 @@ public class FilteredQuery extends Query {
@Override
public Scorer filteredScorer(final LeafReaderContext context,
Weight weight,
- DocIdSet docIdSet) throws IOException {
+ DocIdSet docIdSet, int flags) throws IOException {
Bits filterAcceptDocs = docIdSet.bits();
if (filterAcceptDocs == null) {
// Filter does not provide random-access Bits; we
// must fallback to leapfrog:
- return LEAP_FROG_QUERY_FIRST_STRATEGY.filteredScorer(context, weight, docIdSet);
+ return LEAP_FROG_QUERY_FIRST_STRATEGY.filteredScorer(context, weight, docIdSet, flags);
}
- final Scorer scorer = weight.scorer(context, null);
- return scorer == null ? null : new QueryFirstScorer(weight,
- filterAcceptDocs, scorer);
+ final Scorer scorer = weight.scorer(context, flags, null);
+ return scorer == null ? null : new QueryFirstScorer(weight, filterAcceptDocs, scorer);
}
@Override
- public BulkScorer filteredBulkScorer(final LeafReaderContext context,
- Weight weight,
- DocIdSet docIdSet) throws IOException {
+ public BulkScorer filteredBulkScorer(final LeafReaderContext context, Weight weight,
+ DocIdSet docIdSet, int flags) throws IOException {
Bits filterAcceptDocs = docIdSet.bits();
if (filterAcceptDocs == null) {
// Filter does not provide random-access Bits; we
// must fallback to leapfrog:
- return LEAP_FROG_QUERY_FIRST_STRATEGY.filteredBulkScorer(context, weight, docIdSet);
+ return LEAP_FROG_QUERY_FIRST_STRATEGY.filteredBulkScorer(context, weight, docIdSet, flags);
}
- final Scorer scorer = weight.scorer(context, null);
+ final Scorer scorer = weight.scorer(context, flags, null);
return scorer == null ? null : new QueryFirstBulkScorer(scorer, filterAcceptDocs);
}
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/FuzzyTermsEnum.java b/lucene/core/src/java/org/apache/lucene/search/FuzzyTermsEnum.java
index 3199966..bde82aa 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FuzzyTermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FuzzyTermsEnum.java
@@ -22,7 +22,6 @@ import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.FilteredTermsEnum;
import org.apache.lucene.index.Term;
@@ -271,12 +270,6 @@ public class FuzzyTermsEnum extends TermsEnum {
}
@Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs,
- DocsAndPositionsEnum reuse, int flags) throws IOException {
- return actualEnum.docsAndPositions(liveDocs, reuse, flags);
- }
-
- @Override
public void seekExact(BytesRef term, TermState state) throws IOException {
actualEnum.seekExact(term, state);
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java b/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java
index 84df738..26ae7aa 100644
--- a/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java
+++ b/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java
@@ -27,10 +27,11 @@ import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
-import org.apache.lucene.index.DirectoryReader; // javadocs
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
-import org.apache.lucene.index.IndexWriter; // javadocs
+import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.ReaderUtil;
@@ -41,7 +42,7 @@ import org.apache.lucene.index.TermContext;
import org.apache.lucene.index.Terms;
import org.apache.lucene.search.similarities.DefaultSimilarity;
import org.apache.lucene.search.similarities.Similarity;
-import org.apache.lucene.store.NIOFSDirectory; // javadoc
+import org.apache.lucene.store.NIOFSDirectory;
import org.apache.lucene.util.ThreadInterruptedException;
/** Implements search over a single IndexReader.
@@ -586,7 +587,7 @@ public class IndexSearcher {
// continue with the following leaf
continue;
}
- BulkScorer scorer = weight.bulkScorer(ctx, ctx.reader().getLiveDocs());
+ BulkScorer scorer = weight.bulkScorer(ctx, DocsEnum.FLAG_FREQS, ctx.reader().getLiveDocs());
if (scorer != null) {
try {
scorer.score(leafCollector);
diff --git a/lucene/core/src/java/org/apache/lucene/search/MatchAllDocsQuery.java b/lucene/core/src/java/org/apache/lucene/search/MatchAllDocsQuery.java
index ed49b3c..19dcf98 100644
--- a/lucene/core/src/java/org/apache/lucene/search/MatchAllDocsQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/MatchAllDocsQuery.java
@@ -17,14 +17,15 @@ package org.apache.lucene.search;
* limitations under the License.
*/
-import org.apache.lucene.index.LeafReaderContext;
+import java.io.IOException;
+import java.util.Set;
+
import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
-import org.apache.lucene.util.ToStringUtils;
import org.apache.lucene.util.Bits;
-
-import java.util.Set;
-import java.io.IOException;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.ToStringUtils;
/**
* A query that matches all documents.
@@ -73,6 +74,36 @@ public class MatchAllDocsQuery extends Query {
}
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public int advance(int target) throws IOException {
doc = target-1;
return nextDoc();
@@ -114,7 +145,7 @@ public class MatchAllDocsQuery extends Query {
}
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
return new MatchAllScorer(context.reader(), acceptDocs, this, queryWeight);
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/MinShouldMatchSumScorer.java b/lucene/core/src/java/org/apache/lucene/search/MinShouldMatchSumScorer.java
index 3f31ace..f81ec4a 100644
--- a/lucene/core/src/java/org/apache/lucene/search/MinShouldMatchSumScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/MinShouldMatchSumScorer.java
@@ -24,6 +24,7 @@ import java.util.Comparator;
import java.util.List;
import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
/**
* A Scorer for OR like queries, counterpart of ConjunctionScorer.
@@ -110,6 +111,7 @@ class MinShouldMatchSumScorer extends Scorer {
this.coord = coord;
minheapHeapify();
assert minheapCheck();
+
}
@Override
@@ -145,6 +147,7 @@ class MinShouldMatchSumScorer extends Scorer {
break;
}
}
+
return doc;
}
@@ -231,6 +234,36 @@ class MinShouldMatchSumScorer extends Scorer {
return nrMatchers;
}
+ @Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
/**
* Advances to the first match beyond the current whose document number is
* greater than or equal to a given target.
diff --git a/lucene/core/src/java/org/apache/lucene/search/MultiCollector.java b/lucene/core/src/java/org/apache/lucene/search/MultiCollector.java
index 495fbf7..bee5e86 100644
--- a/lucene/core/src/java/org/apache/lucene/search/MultiCollector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/MultiCollector.java
@@ -101,7 +101,6 @@ public class MultiCollector implements Collector {
return new MultiLeafCollector(leafCollectors);
}
-
private static class MultiLeafCollector implements LeafCollector {
private final LeafCollector[] collectors;
diff --git a/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java b/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java
index 3d1fa5e..2769b39 100644
--- a/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java
@@ -21,7 +21,6 @@ import java.io.IOException;
import java.util.*;
import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.IndexReader;
@@ -179,7 +178,7 @@ public class MultiPhraseQuery extends Query {
}
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
assert !termArrays.isEmpty();
final LeafReader reader = context.reader();
final Bits liveDocs = acceptDocs;
@@ -197,7 +196,7 @@ public class MultiPhraseQuery extends Query {
for (int pos=0; pos 1) {
@@ -229,7 +228,7 @@ public class MultiPhraseQuery extends Query {
return null;
}
termsEnum.seekExact(term.bytes(), termState);
- postingsEnum = termsEnum.docsAndPositions(liveDocs, null, DocsEnum.FLAG_NONE);
+ postingsEnum = termsEnum.docs(liveDocs, null, flags | DocsEnum.FLAG_POSITIONS);
if (postingsEnum == null) {
// term does exist, but has no positions
@@ -257,7 +256,7 @@ public class MultiPhraseQuery extends Query {
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
- Scorer scorer = scorer(context, context.reader().getLiveDocs());
+ Scorer scorer = scorer(context, DocsEnum.FLAG_POSITIONS, context.reader().getLiveDocs());
if (scorer != null) {
int newDoc = scorer.advance(doc);
if (newDoc == doc) {
@@ -408,15 +407,15 @@ public class MultiPhraseQuery extends Query {
*/
// TODO: if ever we allow subclassing of the *PhraseScorer
-class UnionDocsAndPositionsEnum extends DocsAndPositionsEnum {
+class UnionDocsAndPositionsEnum extends DocsEnum {
- private static final class DocsQueue extends PriorityQueue {
- DocsQueue(List docsEnums) throws IOException {
+ private static final class DocsQueue extends PriorityQueue {
+ DocsQueue(List docsEnums) throws IOException {
super(docsEnums.size());
- Iterator i = docsEnums.iterator();
+ Iterator i = docsEnums.iterator();
while (i.hasNext()) {
- DocsAndPositionsEnum postings = i.next();
+ DocsEnum postings = i.next();
if (postings.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
add(postings);
}
@@ -424,7 +423,7 @@ class UnionDocsAndPositionsEnum extends DocsAndPositionsEnum {
}
@Override
- public final boolean lessThan(DocsAndPositionsEnum a, DocsAndPositionsEnum b) {
+ public final boolean lessThan(DocsEnum a, DocsEnum b) {
return a.docID() < b.docID();
}
}
@@ -446,6 +445,10 @@ class UnionDocsAndPositionsEnum extends DocsAndPositionsEnum {
return _array[_index++];
}
+ final int top() {
+ return _array[_index];
+ }
+
final void sort() {
Arrays.sort(_array, _index, _lastIndex);
}
@@ -474,7 +477,7 @@ class UnionDocsAndPositionsEnum extends DocsAndPositionsEnum {
private long cost;
public UnionDocsAndPositionsEnum(Bits liveDocs, LeafReaderContext context, Term[] terms, Map termContexts, TermsEnum termsEnum) throws IOException {
- List docsEnums = new LinkedList<>();
+ List docsEnums = new LinkedList<>();
for (int i = 0; i < terms.length; i++) {
final Term term = terms[i];
TermState termState = termContexts.get(term).get(context.ord);
@@ -483,7 +486,7 @@ class UnionDocsAndPositionsEnum extends DocsAndPositionsEnum {
continue;
}
termsEnum.seekExact(term.bytes(), termState);
- DocsAndPositionsEnum postings = termsEnum.docsAndPositions(liveDocs, null, DocsEnum.FLAG_NONE);
+ DocsEnum postings = termsEnum.docs(liveDocs, null, DocsEnum.FLAG_POSITIONS);
if (postings == null) {
// term does exist, but has no positions
throw new IllegalStateException("field \"" + term.field() + "\" was indexed without position data; cannot run PhraseQuery (term=" + term.text() + ")");
@@ -509,7 +512,7 @@ class UnionDocsAndPositionsEnum extends DocsAndPositionsEnum {
_doc = _queue.top().docID();
// merge sort all positions together
- DocsAndPositionsEnum postings;
+ DocsEnum postings;
do {
postings = _queue.top();
@@ -537,6 +540,16 @@ class UnionDocsAndPositionsEnum extends DocsAndPositionsEnum {
}
@Override
+ public int startPosition() throws IOException {
+ return _posList.top();
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return _posList.top();
+ }
+
+ @Override
public int startOffset() {
return -1;
}
@@ -554,7 +567,7 @@ class UnionDocsAndPositionsEnum extends DocsAndPositionsEnum {
@Override
public final int advance(int target) throws IOException {
while (_queue.top() != null && target > _queue.top().docID()) {
- DocsAndPositionsEnum postings = _queue.pop();
+ DocsEnum postings = _queue.pop();
if (postings.advance(target) != NO_MORE_DOCS) {
_queue.add(postings);
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/PhrasePositions.java b/lucene/core/src/java/org/apache/lucene/search/PhrasePositions.java
index c975b01..e604fa7 100644
--- a/lucene/core/src/java/org/apache/lucene/search/PhrasePositions.java
+++ b/lucene/core/src/java/org/apache/lucene/search/PhrasePositions.java
@@ -29,13 +29,13 @@ final class PhrasePositions {
int count; // remaining pos in this doc
int offset; // position in phrase
final int ord; // unique across all PhrasePositions instances
- final DocsAndPositionsEnum postings; // stream of docs & positions
+ final DocsEnum postings; // stream of docs & positions
PhrasePositions next; // used to make lists
int rptGroup = -1; // >=0 indicates that this is a repeating PP
int rptInd; // index in the rptGroup
final Term[] terms; // for repetitions initialization
- PhrasePositions(DocsAndPositionsEnum postings, int o, int ord, Term[] terms) {
+ PhrasePositions(DocsEnum postings, int o, int ord, Term[] terms) {
this.postings = postings;
offset = o;
this.ord = ord;
diff --git a/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java b/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java
index bf5a373..96c34e6 100644
--- a/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java
@@ -22,19 +22,18 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Set;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.IndexReaderContext;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.index.TermState;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.search.similarities.Similarity.SimScorer;
import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.search.similarities.Similarity.SimScorer;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.ToStringUtils;
@@ -138,13 +137,13 @@ public class PhraseQuery extends Query {
}
static class PostingsAndFreq implements Comparable {
- final DocsAndPositionsEnum postings;
+ final DocsEnum postings;
final int docFreq;
final int position;
final Term[] terms;
final int nTerms; // for faster comparisons
- public PostingsAndFreq(DocsAndPositionsEnum postings, int docFreq, int position, Term... terms) {
+ public PostingsAndFreq(DocsEnum postings, int docFreq, int position, Term... terms) {
this.postings = postings;
this.docFreq = docFreq;
this.position = position;
@@ -245,7 +244,7 @@ public class PhraseQuery extends Query {
}
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
assert !terms.isEmpty();
final LeafReader reader = context.reader();
final Bits liveDocs = acceptDocs;
@@ -267,7 +266,7 @@ public class PhraseQuery extends Query {
return null;
}
te.seekExact(t.bytes(), state);
- DocsAndPositionsEnum postingsEnum = te.docsAndPositions(liveDocs, null, DocsEnum.FLAG_NONE);
+ DocsEnum postingsEnum = te.docs(liveDocs, null, flags | DocsEnum.FLAG_POSITIONS);
// PhraseQuery on a field that did not index
// positions.
@@ -276,7 +275,7 @@ public class PhraseQuery extends Query {
// term does exist, but has no positions
throw new IllegalStateException("field \"" + t.field() + "\" was indexed without position data; cannot run PhraseQuery (term=" + t.text() + ")");
}
- postingsFreqs[i] = new PostingsAndFreq(postingsEnum, te.docFreq(), positions.get(i).intValue(), t);
+ postingsFreqs[i] = new PostingsAndFreq(postingsEnum, te.docFreq(), positions.get(i), t);
}
// sort by increasing docFreq order
@@ -298,7 +297,7 @@ public class PhraseQuery extends Query {
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
- Scorer scorer = scorer(context, context.reader().getLiveDocs());
+ Scorer scorer = scorer(context, DocsEnum.FLAG_POSITIONS, context.reader().getLiveDocs());
if (scorer != null) {
int newDoc = scorer.advance(doc);
if (newDoc == doc) {
diff --git a/lucene/core/src/java/org/apache/lucene/search/QueryRescorer.java b/lucene/core/src/java/org/apache/lucene/search/QueryRescorer.java
index 2f17145..8d0d573 100644
--- a/lucene/core/src/java/org/apache/lucene/search/QueryRescorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/QueryRescorer.java
@@ -17,13 +17,14 @@ package org.apache.lucene.search;
* limitations under the License.
*/
+import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.LeafReaderContext;
+
import java.io.IOException;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
-import org.apache.lucene.index.LeafReaderContext;
-
/** A {@link Rescorer} that uses a provided Query to assign
* scores to the first-pass hits.
*
@@ -82,7 +83,7 @@ public abstract class QueryRescorer extends Rescorer {
if (readerContext != null) {
// We advanced to another segment:
docBase = readerContext.docBase;
- scorer = weight.scorer(readerContext, null);
+ scorer = weight.scorer(readerContext, DocsEnum.FLAG_NONE, null);
}
if(scorer != null) {
diff --git a/lucene/core/src/java/org/apache/lucene/search/QueryWrapperFilter.java b/lucene/core/src/java/org/apache/lucene/search/QueryWrapperFilter.java
index 8d8a010..270e4cc 100644
--- a/lucene/core/src/java/org/apache/lucene/search/QueryWrapperFilter.java
+++ b/lucene/core/src/java/org/apache/lucene/search/QueryWrapperFilter.java
@@ -17,11 +17,12 @@ package org.apache.lucene.search;
* limitations under the License.
*/
-import java.io.IOException;
-
+import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.util.Bits;
+import java.io.IOException;
+
/**
* Constrains search results to only match those which also match a provided
* query.
@@ -56,7 +57,7 @@ public class QueryWrapperFilter extends Filter {
return new DocIdSet() {
@Override
public DocIdSetIterator iterator() throws IOException {
- return weight.scorer(privateContext, acceptDocs);
+ return weight.scorer(privateContext, DocsEnum.FLAG_FREQS, acceptDocs);
}
@Override
diff --git a/lucene/core/src/java/org/apache/lucene/search/ReqExclScorer.java b/lucene/core/src/java/org/apache/lucene/search/ReqExclScorer.java
index 4e2a5f1..cf6aec1 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ReqExclScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ReqExclScorer.java
@@ -27,7 +27,7 @@ import java.util.Collections;
* This Scorer implements {@link Scorer#advance(int)},
* and it uses the skipTo() on the given scorers.
*/
-class ReqExclScorer extends Scorer {
+class ReqExclScorer extends FilterScorer {
private Scorer reqScorer;
private DocIdSetIterator exclDisi;
private int doc = -1;
@@ -37,7 +37,7 @@ class ReqExclScorer extends Scorer {
* @param exclDisi indicates exclusion.
*/
public ReqExclScorer(Scorer reqScorer, DocIdSetIterator exclDisi) {
- super(reqScorer.weight);
+ super(reqScorer);
this.reqScorer = reqScorer;
this.exclDisi = exclDisi;
}
@@ -103,11 +103,6 @@ class ReqExclScorer extends Scorer {
public float score() throws IOException {
return reqScorer.score(); // reqScorer may be null when next() or skipTo() already return false
}
-
- @Override
- public int freq() throws IOException {
- return reqScorer.freq();
- }
@Override
public Collection getChildren() {
@@ -129,8 +124,4 @@ class ReqExclScorer extends Scorer {
return doc = toNonExcluded();
}
- @Override
- public long cost() {
- return reqScorer.cost();
- }
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/ReqOptSumScorer.java b/lucene/core/src/java/org/apache/lucene/search/ReqOptSumScorer.java
index d7b4d86..38a9c0e 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ReqOptSumScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ReqOptSumScorer.java
@@ -20,6 +20,8 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
+import org.apache.lucene.util.BytesRef;
+
/** A Scorer for queries with a required part and an optional part.
* Delays skipTo() on the optional part until a score() is needed.
*
@@ -49,12 +51,14 @@ class ReqOptSumScorer extends Scorer {
@Override
public int nextDoc() throws IOException {
- return reqScorer.nextDoc();
+ int doc = reqScorer.nextDoc();
+ return doc;
}
@Override
public int advance(int target) throws IOException {
- return reqScorer.advance(target);
+ int doc = reqScorer.advance(target);
+ return doc;
}
@Override
@@ -93,6 +97,36 @@ class ReqOptSumScorer extends Scorer {
}
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public Collection getChildren() {
ArrayList children = new ArrayList<>(2);
children.add(new ChildScorer(reqScorer, "MUST"));
diff --git a/lucene/core/src/java/org/apache/lucene/search/Scorer.java b/lucene/core/src/java/org/apache/lucene/search/Scorer.java
index 929d3b9..696325b 100644
--- a/lucene/core/src/java/org/apache/lucene/search/Scorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/Scorer.java
@@ -20,6 +20,7 @@ package org.apache.lucene.search;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
+import java.util.Locale;
import org.apache.lucene.index.DocsEnum;
@@ -60,6 +61,13 @@ public abstract class Scorer extends DocsEnum {
* {@link LeafCollector#collect}.
*/
public abstract float score() throws IOException;
+
+ /** Returns the score of the current interval spanned by this scorer.
+ * Initially invalid, until {@link #nextPosition()} is called
+ */
+ public float intervalScore() throws IOException {
+ return 1;
+ }
/** returns parent Weight
* @lucene.experimental
@@ -67,6 +75,15 @@ public abstract class Scorer extends DocsEnum {
public Weight getWeight() {
return weight;
}
+
+ @Override
+ public String toString() {
+ try {
+ return String.format(Locale.ROOT, "%d:%d(%d)->%d(%d)", docID(), startPosition(), startOffset(), endPosition(), endOffset());
+ } catch (IOException e) {
+ return super.toString();
+ }
+ }
/** Returns child sub-scorers
* @lucene.experimental */
diff --git a/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseScorer.java b/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseScorer.java
index 80a0270..cad273a 100644
--- a/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseScorer.java
@@ -27,6 +27,7 @@ import java.util.LinkedHashMap;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
final class SloppyPhraseScorer extends Scorer {
@@ -522,7 +523,37 @@ final class SloppyPhraseScorer extends Scorer {
public int freq() {
return numMatches;
}
-
+
+ @Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
float sloppyFreq() {
return sloppyFreq;
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/TermQuery.java b/lucene/core/src/java/org/apache/lucene/search/TermQuery.java
index 1bc2978..e4e46f5 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TermQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TermQuery.java
@@ -17,71 +17,75 @@ package org.apache.lucene.search;
* limitations under the License.
*/
-import java.io.IOException;
-import java.util.Set;
-
-import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.IndexReaderContext;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.index.TermState;
import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.search.similarities.Similarity.SimScorer;
import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.search.similarities.Similarity.SimScorer;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.ToStringUtils;
-/** A Query that matches documents containing a term.
- This may be combined with other terms with a {@link BooleanQuery}.
- */
+import java.io.IOException;
+import java.util.Set;
+
+/**
+ * A Query that matches documents containing a term. This may be combined with
+ * other terms with a {@link BooleanQuery}.
+ */
public class TermQuery extends Query {
private final Term term;
private final int docFreq;
private final TermContext perReaderTermState;
-
+
final class TermWeight extends Weight {
private final Similarity similarity;
private final Similarity.SimWeight stats;
private final TermContext termStates;
-
+
public TermWeight(IndexSearcher searcher, TermContext termStates)
- throws IOException {
+ throws IOException {
assert termStates != null : "TermContext must not be null";
this.termStates = termStates;
this.similarity = searcher.getSimilarity();
- this.stats = similarity.computeWeight(
- getBoost(),
- searcher.collectionStatistics(term.field()),
+ this.stats = similarity.computeWeight(getBoost(),
+ searcher.collectionStatistics(term.field()),
searcher.termStatistics(term, termStates));
}
-
+
@Override
- public String toString() { return "weight(" + TermQuery.this + ")"; }
-
+ public String toString() {
+ return "weight(" + TermQuery.this + ")";
+ }
+
@Override
- public Query getQuery() { return TermQuery.this; }
-
+ public Query getQuery() {
+ return TermQuery.this;
+ }
+
@Override
public float getValueForNormalization() {
return stats.getValueForNormalization();
}
-
+
@Override
public void normalize(float queryNorm, float topLevelBoost) {
stats.normalize(queryNorm, topLevelBoost);
}
-
+
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
assert termStates.topReaderContext == ReaderUtil.getTopLevelContext(context) : "The top-reader used to create Weight (" + termStates.topReaderContext + ") is not the same as the current reader's top-reader (" + ReaderUtil.getTopLevelContext(context);
final TermsEnum termsEnum = getTermsEnum(context);
if (termsEnum == null) {
return null;
}
- DocsEnum docs = termsEnum.docs(acceptDocs, null);
+ DocsEnum docs = termsEnum.docs(acceptDocs, null, flags);
assert docs != null;
return new TermScorer(this, docs, similarity.simScorer(stats, context));
}
@@ -96,90 +100,100 @@ public class TermQuery extends Query {
assert termNotInReader(context.reader(), term) : "no termstate found but term exists in reader term=" + term;
return null;
}
- //System.out.println("LD=" + reader.getLiveDocs() + " set?=" + (reader.getLiveDocs() != null ? reader.getLiveDocs().get(0) : "null"));
- final TermsEnum termsEnum = context.reader().terms(term.field()).iterator(null);
+ // System.out.println("LD=" + reader.getLiveDocs() + " set?=" +
+ // (reader.getLiveDocs() != null ? reader.getLiveDocs().get(0) : "null"));
+ final TermsEnum termsEnum = context.reader().terms(term.field())
+ .iterator(null);
termsEnum.seekExact(term.bytes(), state);
return termsEnum;
}
private boolean termNotInReader(LeafReader reader, Term term) throws IOException {
// only called from assert
- //System.out.println("TQ.termNotInReader reader=" + reader + " term=" + field + ":" + bytes.utf8ToString());
+ // System.out.println("TQ.termNotInReader reader=" + reader + " term=" +
+ // field + ":" + bytes.utf8ToString());
return reader.docFreq(term) == 0;
}
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
- Scorer scorer = scorer(context, context.reader().getLiveDocs());
+ Scorer scorer = scorer(context, DocsEnum.FLAG_FREQS, context.reader().getLiveDocs());
if (scorer != null) {
int newDoc = scorer.advance(doc);
if (newDoc == doc) {
float freq = scorer.freq();
SimScorer docScorer = similarity.simScorer(stats, context);
ComplexExplanation result = new ComplexExplanation();
- result.setDescription("weight("+getQuery()+" in "+doc+") [" + similarity.getClass().getSimpleName() + "], result of:");
- Explanation scoreExplanation = docScorer.explain(doc, new Explanation(freq, "termFreq=" + freq));
+ result.setDescription("weight(" + getQuery() + " in " + doc + ") ["
+ + similarity.getClass().getSimpleName() + "], result of:");
+ Explanation scoreExplanation = docScorer.explain(doc,
+ new Explanation(freq, "termFreq=" + freq));
result.addDetail(scoreExplanation);
result.setValue(scoreExplanation.getValue());
result.setMatch(true);
return result;
}
}
- return new ComplexExplanation(false, 0.0f, "no matching term");
+ return new ComplexExplanation(false, 0.0f, "no matching term");
}
}
-
+
/** Constructs a query for the term t. */
public TermQuery(Term t) {
this(t, -1);
}
-
- /** Expert: constructs a TermQuery that will use the
- * provided docFreq instead of looking up the docFreq
- * against the searcher. */
+
+ /**
+ * Expert: constructs a TermQuery that will use the provided docFreq instead
+ * of looking up the docFreq against the searcher.
+ */
public TermQuery(Term t, int docFreq) {
term = t;
this.docFreq = docFreq;
perReaderTermState = null;
}
- /** Expert: constructs a TermQuery that will use the
- * provided docFreq instead of looking up the docFreq
- * against the searcher. */
+ /**
+ * Expert: constructs a TermQuery that will use the provided docFreq instead
+ * of looking up the docFreq against the searcher.
+ */
public TermQuery(Term t, TermContext states) {
assert states != null;
term = t;
docFreq = states.docFreq();
perReaderTermState = states;
}
-
+
/** Returns the term of this query. */
- public Term getTerm() { return term; }
-
+ public Term getTerm() {
+ return term;
+ }
+
@Override
public Weight createWeight(IndexSearcher searcher) throws IOException {
final IndexReaderContext context = searcher.getTopReaderContext();
final TermContext termState;
- if (perReaderTermState == null || perReaderTermState.topReaderContext != context) {
- // make TermQuery single-pass if we don't have a PRTS or if the context differs!
+ if (perReaderTermState == null
+ || perReaderTermState.topReaderContext != context) {
+ // make TermQuery single-pass if we don't have a PRTS or if the context
+ // differs!
termState = TermContext.build(context, term);
} else {
- // PRTS was pre-build for this IS
- termState = this.perReaderTermState;
+ // PRTS was pre-build for this IS
+ termState = this.perReaderTermState;
}
-
+
// we must not ignore the given docFreq - if set use the given value (lie)
- if (docFreq != -1)
- termState.setDocFreq(docFreq);
+ if (docFreq != -1) termState.setDocFreq(docFreq);
return new TermWeight(searcher, termState);
}
-
+
@Override
public void extractTerms(Set terms) {
terms.add(getTerm());
}
-
+
/** Prints a user-readable version of this query. */
@Override
public String toString(String field) {
@@ -192,21 +206,20 @@ public class TermQuery extends Query {
buffer.append(ToStringUtils.boost(getBoost()));
return buffer.toString();
}
-
+
/** Returns true iff o is equal to this. */
@Override
public boolean equals(Object o) {
- if (!(o instanceof TermQuery))
- return false;
- TermQuery other = (TermQuery)o;
+ if (!(o instanceof TermQuery)) return false;
+ TermQuery other = (TermQuery) o;
return (this.getBoost() == other.getBoost())
- && this.term.equals(other.term);
+ && this.term.equals(other.term);
}
-
- /** Returns a hash code value for this object.*/
+
+ /** Returns a hash code value for this object. */
@Override
public int hashCode() {
return Float.floatToIntBits(getBoost()) ^ term.hashCode();
}
-
+
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/TermScorer.java b/lucene/core/src/java/org/apache/lucene/search/TermScorer.java
index 6697524..45ce808 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TermScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TermScorer.java
@@ -21,22 +21,23 @@ import java.io.IOException;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.util.BytesRef;
/** Expert: A Scorer for documents matching a Term.
*/
final class TermScorer extends Scorer {
private final DocsEnum docsEnum;
private final Similarity.SimScorer docScorer;
-
+
/**
* Construct a TermScorer.
- *
+ *
* @param weight
* The weight of the Term in the query.
* @param td
* An iterator over the documents matching the Term.
* @param docScorer
- * The Similarity.SimScorer implementation
+ * The Similarity.SimScorer implementation
* to be used for score computations.
*/
TermScorer(Weight weight, DocsEnum td, Similarity.SimScorer docScorer) {
@@ -55,27 +56,57 @@ final class TermScorer extends Scorer {
return docsEnum.freq();
}
+ @Override
+ public int nextPosition() throws IOException {
+ return docsEnum.nextPosition();
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return docsEnum.startPosition();
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return docsEnum.endPosition();
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return docsEnum.startOffset();
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return docsEnum.endOffset();
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return docsEnum.getPayload();
+ }
+
/**
* Advances to the next document matching the query.
- *
+ *
* @return the document matching the query or NO_MORE_DOCS if there are no more documents.
*/
@Override
public int nextDoc() throws IOException {
return docsEnum.nextDoc();
}
-
+
@Override
public float score() throws IOException {
assert docID() != NO_MORE_DOCS;
- return docScorer.score(docsEnum.docID(), docsEnum.freq());
+ return docScorer.score(docsEnum.docID(), docsEnum.freq());
}
/**
* Advances to the first match beyond the current whose document number is
* greater than or equal to a given target.
* The implementation uses {@link DocsEnum#advance(int)}.
- *
+ *
* @param target
* The target document number.
* @return the matching document or NO_MORE_DOCS if none exist.
@@ -84,7 +115,7 @@ final class TermScorer extends Scorer {
public int advance(int target) throws IOException {
return docsEnum.advance(target);
}
-
+
@Override
public long cost() {
return docsEnum.cost();
@@ -92,5 +123,5 @@ final class TermScorer extends Scorer {
/** Returns a string representation of this TermScorer. */
@Override
- public String toString() { return "scorer(" + weight + ")"; }
+ public String toString() { return "scorer(" + weight + ")[" + super.toString() + "]"; }
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/TimeLimitingCollector.java b/lucene/core/src/java/org/apache/lucene/search/TimeLimitingCollector.java
index e179089..9c72f2c 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TimeLimitingCollector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TimeLimitingCollector.java
@@ -17,12 +17,12 @@ package org.apache.lucene.search;
* limitations under the License.
*/
+import java.io.IOException;
+
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.util.Counter;
import org.apache.lucene.util.ThreadInterruptedException;
-import java.io.IOException;
-
/**
* The {@link TimeLimitingCollector} is used to timeout search requests that
* take longer than the maximum allowed search time limit. After this time is
@@ -156,7 +156,7 @@ public class TimeLimitingCollector implements Collector {
};
}
-
+
/**
* This is so the same timer can be used with a multi-phase search process such as grouping.
* We don't want to create a new TimeLimitingCollector for each phase because that would
diff --git a/lucene/core/src/java/org/apache/lucene/search/TotalHitCountCollector.java b/lucene/core/src/java/org/apache/lucene/search/TotalHitCountCollector.java
index fb06e0a..0776eb2 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TotalHitCountCollector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TotalHitCountCollector.java
@@ -17,6 +17,7 @@ package org.apache.lucene.search;
* limitations under the License.
*/
+import org.apache.lucene.index.DocsEnum;
/**
* Just counts the total number of hits.
diff --git a/lucene/core/src/java/org/apache/lucene/search/Weight.java b/lucene/core/src/java/org/apache/lucene/search/Weight.java
index 741dc88..70117fa 100644
--- a/lucene/core/src/java/org/apache/lucene/search/Weight.java
+++ b/lucene/core/src/java/org/apache/lucene/search/Weight.java
@@ -19,8 +19,8 @@ package org.apache.lucene.search;
import java.io.IOException;
+import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.IndexReaderContext; // javadocs
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.util.Bits;
@@ -34,7 +34,7 @@ import org.apache.lucene.util.Bits;
* {@link org.apache.lucene.index.LeafReader} dependent state should reside in the {@link Scorer}.
*
* Since {@link Weight} creates {@link Scorer} instances for a given
- * {@link org.apache.lucene.index.LeafReaderContext} ({@link #scorer(org.apache.lucene.index.LeafReaderContext, Bits)})
+ * {@link org.apache.lucene.index.LeafReaderContext} ({@link #scorer(org.apache.lucene.index.LeafReaderContext, int, Bits)})
* callers must maintain the relationship between the searcher's top-level
* {@link IndexReaderContext} and the context used to create a {@link Scorer}.
*
The query normalization factor is passed to {@link #normalize(float, float)}. At
* this point the weighting is complete.
*
A Scorer is constructed by
- * {@link #scorer(org.apache.lucene.index.LeafReaderContext, Bits)}.
+ * {@link #scorer(org.apache.lucene.index.LeafReaderContext, int, Bits)}.
*
*
* @since 2.9
@@ -91,7 +91,7 @@ public abstract class Weight {
* @return a {@link Scorer} which scores documents in/out-of order.
* @throws IOException if there is a low-level I/O error
*/
- public abstract Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException;
+ public abstract Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException;
/**
* Optional method, to return a {@link BulkScorer} to
@@ -111,9 +111,9 @@ public abstract class Weight {
* passes them to a collector.
* @throws IOException if there is a low-level I/O error
*/
- public BulkScorer bulkScorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public BulkScorer bulkScorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
- Scorer scorer = scorer(context, acceptDocs);
+ Scorer scorer = scorer(context, flags, acceptDocs);
if (scorer == null) {
// No docs match
return null;
@@ -178,4 +178,5 @@ public abstract class Weight {
}
}
}
+
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/package.html b/lucene/core/src/java/org/apache/lucene/search/package.html
index 1635797..c0be57a 100644
--- a/lucene/core/src/java/org/apache/lucene/search/package.html
+++ b/lucene/core/src/java/org/apache/lucene/search/package.html
@@ -436,14 +436,14 @@ on the built-in available scoring models and extending or changing Similarity.
that scores via a {@link org.apache.lucene.search.similarities.Similarity Similarity} will just defer to the Similarity's implementation:
{@link org.apache.lucene.search.similarities.Similarity.SimWeight#normalize SimWeight#normalize(float,float)}.
- {@link org.apache.lucene.search.Weight#scorer(org.apache.lucene.index.LeafReaderContext, org.apache.lucene.util.Bits)
- scorer(LeafReaderContext context, Bits acceptDocs)} —
+ {@link org.apache.lucene.search.Weight#scorer(org.apache.lucene.index.LeafReaderContext, int, org.apache.lucene.util.Bits)
+ scorer(LeafReaderContext context, int flags, Bits acceptDocs)} —
Construct a new {@link org.apache.lucene.search.Scorer Scorer} for this Weight. See The Scorer Class
below for help defining a Scorer. As the name implies, the Scorer is responsible for doing the actual scoring of documents
given the Query.
- {@link org.apache.lucene.search.Weight#bulkScorer(org.apache.lucene.index.LeafReaderContext, org.apache.lucene.util.Bits)
+ {@link org.apache.lucene.search.Weight#bulkScorer(org.apache.lucene.index.LeafReaderContext, int, org.apache.lucene.util.Bits)
-     scorer(LeafReaderContext context, Bits acceptDocs)} —
+     bulkScorer(LeafReaderContext context, int flags, Bits acceptDocs)} —
Construct a new {@link org.apache.lucene.search.BulkScorer BulkScorer} for this Weight. See The BulkScorer Class
below for help defining a BulkScorer. This is an optional method, and most queries do not implement it.
diff --git a/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java b/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
index 1be23e6..3f54a54 100644
--- a/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
@@ -17,11 +17,12 @@ package org.apache.lucene.search.payloads;
* limitations under the License.
*/
+import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.ComplexExplanation;
import org.apache.lucene.search.Explanation;
-import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.similarities.DefaultSimilarity;
import org.apache.lucene.search.similarities.Similarity;
@@ -148,14 +149,14 @@ public class PayloadNearQuery extends SpanNearQuery {
}
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
return new PayloadNearSpanScorer(query.getSpans(context, acceptDocs, termContexts), this,
similarity, similarity.simScorer(stats, context));
}
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
- PayloadNearSpanScorer scorer = (PayloadNearSpanScorer) scorer(context, context.reader().getLiveDocs());
+ PayloadNearSpanScorer scorer = (PayloadNearSpanScorer) scorer(context, DocsEnum.FLAG_PAYLOADS, context.reader().getLiveDocs());
if (scorer != null) {
int newDoc = scorer.advance(doc);
if (newDoc == doc) {
diff --git a/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java b/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java
index 6afbdf2..8d3168f 100644
--- a/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java
@@ -19,7 +19,7 @@ package org.apache.lucene.search.payloads;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
@@ -79,7 +79,7 @@ public class PayloadTermQuery extends SpanTermQuery {
}
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
return new PayloadTermSpanScorer((TermSpans) query.getSpans(context, acceptDocs, termContexts),
this, similarity.simScorer(stats, context));
}
@@ -120,7 +120,7 @@ public class PayloadTermQuery extends SpanTermQuery {
protected void processPayload(Similarity similarity) throws IOException {
if (termSpans.isPayloadAvailable()) {
- final DocsAndPositionsEnum postings = termSpans.getPostings();
+ final DocsEnum postings = termSpans.getPostings();
payload = postings.getPayload();
if (payload != null) {
payloadScore = function.currentScore(doc, term.field(),
@@ -176,7 +176,7 @@ public class PayloadTermQuery extends SpanTermQuery {
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
- PayloadTermSpanScorer scorer = (PayloadTermSpanScorer) scorer(context, context.reader().getLiveDocs());
+ PayloadTermSpanScorer scorer = (PayloadTermSpanScorer) scorer(context, DocsEnum.FLAG_POSITIONS, context.reader().getLiveDocs());
if (scorer != null) {
int newDoc = scorer.advance(doc);
if (newDoc == doc) {
diff --git a/lucene/core/src/java/org/apache/lucene/search/similarities/Similarity.java b/lucene/core/src/java/org/apache/lucene/search/similarities/Similarity.java
index ea45f69..beb7b90 100644
--- a/lucene/core/src/java/org/apache/lucene/search/similarities/Similarity.java
+++ b/lucene/core/src/java/org/apache/lucene/search/similarities/Similarity.java
@@ -17,10 +17,8 @@ package org.apache.lucene.search.similarities;
* limitations under the License.
*/
-import java.io.IOException;
-
-import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.FieldInvertState;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.CollectionStatistics;
import org.apache.lucene.search.Explanation;
@@ -28,9 +26,11 @@ import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TermStatistics;
-import org.apache.lucene.search.spans.SpanQuery; // javadoc
+import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.SmallFloat; // javadoc
+import org.apache.lucene.util.SmallFloat;
+
+import java.io.IOException;
/**
* Similarity defines the components of Lucene scoring.
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanScorer.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanScorer.java
index 74a098d..416ba5c 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanScorer.java
@@ -19,9 +19,10 @@ package org.apache.lucene.search.spans;
import java.io.IOException;
-import org.apache.lucene.search.Weight;
import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.util.BytesRef;
/**
* Public for extension only.
@@ -96,16 +97,47 @@ public class SpanScorer extends Scorer {
public int freq() throws IOException {
return numMatches;
}
-
+
+ @Override
+ public int nextPosition() throws IOException {
+ throw new UnsupportedOperationException("SpanQueries do not support nextPosition() iteration");
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
/** Returns the intermediate "sloppy freq" adjusted for edit distance
* @lucene.internal */
// only public so .payloads can see it.
public float sloppyFreq() throws IOException {
return freq;
}
-
+
@Override
public long cost() {
return spans.cost();
}
+
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanTermQuery.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanTermQuery.java
index c6dab4e..c3c07f9 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanTermQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanTermQuery.java
@@ -17,10 +17,13 @@ package org.apache.lucene.search.spans;
* limitations under the License.
*/
+import java.io.IOException;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.Fields;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.index.TermState;
import org.apache.lucene.index.Terms;
@@ -28,10 +31,6 @@ import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.ToStringUtils;
-import java.io.IOException;
-import java.util.Map;
-import java.util.Set;
-
/** Matches spans containing a term. */
public class SpanTermQuery extends SpanQuery {
protected Term term;
@@ -115,7 +114,7 @@ public class SpanTermQuery extends SpanQuery {
final TermsEnum termsEnum = context.reader().terms(term.field()).iterator(null);
termsEnum.seekExact(term.bytes(), state);
- final DocsAndPositionsEnum postings = termsEnum.docsAndPositions(acceptDocs, null, DocsAndPositionsEnum.FLAG_PAYLOADS);
+ final DocsEnum postings = termsEnum.docs(acceptDocs, null, DocsEnum.FLAG_PAYLOADS);
if (postings != null) {
return new TermSpans(postings, term);
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java
index 0e06343..6fb80ab 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java
@@ -17,11 +17,18 @@ package org.apache.lucene.search.spans;
* limitations under the License.
*/
-import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.IndexReaderContext;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
-import org.apache.lucene.search.*;
+import org.apache.lucene.search.ComplexExplanation;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.TermStatistics;
+import org.apache.lucene.search.Weight;
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.search.similarities.Similarity.SimScorer;
import org.apache.lucene.util.Bits;
@@ -81,7 +88,7 @@ public class SpanWeight extends Weight {
}
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
if (stats == null) {
return null;
} else {
@@ -91,7 +98,7 @@ public class SpanWeight extends Weight {
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
- SpanScorer scorer = (SpanScorer) scorer(context, context.reader().getLiveDocs());
+ SpanScorer scorer = (SpanScorer) scorer(context, DocsEnum.FLAG_POSITIONS, context.reader().getLiveDocs());
if (scorer != null) {
int newDoc = scorer.advance(doc);
if (newDoc == doc) {
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/TermSpans.java b/lucene/core/src/java/org/apache/lucene/search/spans/TermSpans.java
index d4974a5..39f72eb 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/TermSpans.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/TermSpans.java
@@ -17,7 +17,7 @@ package org.apache.lucene.search.spans;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.BytesRef;
@@ -30,7 +30,7 @@ import java.util.Collection;
* Public for extension only
*/
public class TermSpans extends Spans {
- protected final DocsAndPositionsEnum postings;
+ protected final DocsEnum postings;
protected final Term term;
protected int doc;
protected int freq;
@@ -38,7 +38,7 @@ public class TermSpans extends Spans {
protected int position;
protected boolean readPayload;
- public TermSpans(DocsAndPositionsEnum postings, Term term) {
+ public TermSpans(DocsEnum postings, Term term) {
this.postings = postings;
this.term = term;
doc = -1;
@@ -132,7 +132,7 @@ public class TermSpans extends Spans {
(doc == -1 ? "START" : (doc == Integer.MAX_VALUE) ? "END" : doc + "-" + position);
}
- public DocsAndPositionsEnum getPostings() {
+ public DocsEnum getPostings() {
return postings;
}
diff --git a/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java b/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java
index 4747557..35d7b0b 100644
--- a/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java
+++ b/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java
@@ -22,13 +22,32 @@ import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.Random;
-import org.apache.lucene.store.*;
-import org.apache.lucene.document.*;
-import org.apache.lucene.analysis.*;
-import org.apache.lucene.index.*;
-import org.apache.lucene.search.*;
+import com.carrotsearch.randomizedtesting.annotations.Seed;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.MergePolicy;
+import org.apache.lucene.index.StoredDocument;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
+@Seed("1614EBB06B1F4D76:B454400327B737DA")
public class TestSearchForDuplicates extends LuceneTestCase {
static final String PRIORITY_FIELD ="priority";
diff --git a/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java b/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
index a4310c4..240935b 100644
--- a/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
+++ b/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
@@ -25,9 +25,9 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.Directory;
@@ -84,7 +84,7 @@ public class TestCachingTokenFilter extends BaseTokenStreamTestCase {
writer.addDocument(doc);
IndexReader reader = writer.getReader();
- DocsAndPositionsEnum termPositions = MultiFields.getTermPositionsEnum(reader,
+ DocsEnum termPositions = MultiFields.getTermPositionsEnum(reader,
MultiFields.getLiveDocs(reader),
"preanalyzed",
new BytesRef("term1"));
diff --git a/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java b/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java
index 485ea3f..1ce5a5a 100644
--- a/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java
+++ b/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java
@@ -25,7 +25,7 @@ import java.util.Random;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.LeafReader;
@@ -321,7 +321,7 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase {
final Terms terms = fields.terms("f");
final TermsEnum te = terms.iterator(null);
assertEquals(new BytesRef("a"), te.next());
- final DocsAndPositionsEnum dpe = te.docsAndPositions(null, null);
+ final DocsEnum dpe = te.docs(null, null, DocsEnum.FLAG_ALL);
assertEquals(0, dpe.nextDoc());
assertEquals(2, dpe.freq());
assertEquals(0, dpe.nextPosition());
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat3.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat3.java
index 195746c..d9f6d35 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat3.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat3.java
@@ -33,7 +33,6 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexWriter;
@@ -284,8 +283,8 @@ public class TestBlockPostingsFormat3 extends LuceneTestCase {
public void assertTermsEnum(TermsEnum leftTermsEnum, TermsEnum rightTermsEnum, boolean deep) throws Exception {
BytesRef term;
Bits randomBits = new RandomBits(MAXDOC, random().nextDouble(), random());
- DocsAndPositionsEnum leftPositions = null;
- DocsAndPositionsEnum rightPositions = null;
+ DocsEnum leftPositions = null;
+ DocsEnum rightPositions = null;
DocsEnum leftDocs = null;
DocsEnum rightDocs = null;
@@ -294,55 +293,55 @@ public class TestBlockPostingsFormat3 extends LuceneTestCase {
assertTermStats(leftTermsEnum, rightTermsEnum);
if (deep) {
// with payloads + off
- assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions),
- rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions));
- assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions),
- rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions));
+ assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docs(null, leftPositions, DocsEnum.FLAG_ALL),
+ rightPositions = rightTermsEnum.docs(null, rightPositions, DocsEnum.FLAG_ALL));
+ assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docs(randomBits, leftPositions, DocsEnum.FLAG_ALL),
+ rightPositions = rightTermsEnum.docs(randomBits, rightPositions, DocsEnum.FLAG_ALL));
assertPositionsSkipping(leftTermsEnum.docFreq(),
- leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions),
- rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions));
+ leftPositions = leftTermsEnum.docs(null, leftPositions, DocsEnum.FLAG_ALL),
+ rightPositions = rightTermsEnum.docs(null, rightPositions, DocsEnum.FLAG_ALL));
assertPositionsSkipping(leftTermsEnum.docFreq(),
- leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions),
- rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions));
+ leftPositions = leftTermsEnum.docs(randomBits, leftPositions, DocsEnum.FLAG_ALL),
+ rightPositions = rightTermsEnum.docs(randomBits, rightPositions, DocsEnum.FLAG_ALL));
// with payloads only
- assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions, DocsAndPositionsEnum.FLAG_PAYLOADS),
- rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions, DocsAndPositionsEnum.FLAG_PAYLOADS));
- assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions, DocsAndPositionsEnum.FLAG_PAYLOADS),
- rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions, DocsAndPositionsEnum.FLAG_PAYLOADS));
+ assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docs(null, leftPositions, DocsEnum.FLAG_PAYLOADS),
+ rightPositions = rightTermsEnum.docs(null, rightPositions, DocsEnum.FLAG_PAYLOADS));
+ assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docs(randomBits, leftPositions, DocsEnum.FLAG_PAYLOADS),
+ rightPositions = rightTermsEnum.docs(randomBits, rightPositions, DocsEnum.FLAG_PAYLOADS));
assertPositionsSkipping(leftTermsEnum.docFreq(),
- leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions, DocsAndPositionsEnum.FLAG_PAYLOADS),
- rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions, DocsAndPositionsEnum.FLAG_PAYLOADS));
+ leftPositions = leftTermsEnum.docs(null, leftPositions, DocsEnum.FLAG_PAYLOADS),
+ rightPositions = rightTermsEnum.docs(null, rightPositions, DocsEnum.FLAG_PAYLOADS));
assertPositionsSkipping(leftTermsEnum.docFreq(),
- leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions, DocsAndPositionsEnum.FLAG_PAYLOADS),
- rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions, DocsAndPositionsEnum.FLAG_PAYLOADS));
+ leftPositions = leftTermsEnum.docs(randomBits, leftPositions, DocsEnum.FLAG_PAYLOADS),
+ rightPositions = rightTermsEnum.docs(randomBits, rightPositions, DocsEnum.FLAG_PAYLOADS));
// with offsets only
- assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions, DocsAndPositionsEnum.FLAG_OFFSETS),
- rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions, DocsAndPositionsEnum.FLAG_OFFSETS));
- assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions, DocsAndPositionsEnum.FLAG_OFFSETS),
- rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions, DocsAndPositionsEnum.FLAG_OFFSETS));
+ assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docs(null, leftPositions, DocsEnum.FLAG_OFFSETS),
+ rightPositions = rightTermsEnum.docs(null, rightPositions, DocsEnum.FLAG_OFFSETS));
+ assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docs(randomBits, leftPositions, DocsEnum.FLAG_OFFSETS),
+ rightPositions = rightTermsEnum.docs(randomBits, rightPositions, DocsEnum.FLAG_OFFSETS));
assertPositionsSkipping(leftTermsEnum.docFreq(),
- leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions, DocsAndPositionsEnum.FLAG_OFFSETS),
- rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions, DocsAndPositionsEnum.FLAG_OFFSETS));
+ leftPositions = leftTermsEnum.docs(null, leftPositions, DocsEnum.FLAG_OFFSETS),
+ rightPositions = rightTermsEnum.docs(null, rightPositions, DocsEnum.FLAG_OFFSETS));
assertPositionsSkipping(leftTermsEnum.docFreq(),
- leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions, DocsAndPositionsEnum.FLAG_OFFSETS),
- rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions, DocsAndPositionsEnum.FLAG_OFFSETS));
+ leftPositions = leftTermsEnum.docs(randomBits, leftPositions, DocsEnum.FLAG_OFFSETS),
+ rightPositions = rightTermsEnum.docs(randomBits, rightPositions, DocsEnum.FLAG_OFFSETS));
// with positions only
- assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions, DocsEnum.FLAG_NONE),
- rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions, DocsEnum.FLAG_NONE));
- assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions, DocsEnum.FLAG_NONE),
- rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions, DocsEnum.FLAG_NONE));
+ assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docs(null, leftPositions, DocsEnum.FLAG_POSITIONS),
+ rightPositions = rightTermsEnum.docs(null, rightPositions, DocsEnum.FLAG_POSITIONS));
+ assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docs(randomBits, leftPositions, DocsEnum.FLAG_POSITIONS),
+ rightPositions = rightTermsEnum.docs(randomBits, rightPositions, DocsEnum.FLAG_POSITIONS));
assertPositionsSkipping(leftTermsEnum.docFreq(),
- leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions, DocsEnum.FLAG_NONE),
- rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions, DocsEnum.FLAG_NONE));
+ leftPositions = leftTermsEnum.docs(null, leftPositions, DocsEnum.FLAG_POSITIONS),
+ rightPositions = rightTermsEnum.docs(null, rightPositions, DocsEnum.FLAG_POSITIONS));
assertPositionsSkipping(leftTermsEnum.docFreq(),
- leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions, DocsEnum.FLAG_NONE),
- rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions, DocsEnum.FLAG_NONE));
+ leftPositions = leftTermsEnum.docs(randomBits, leftPositions, DocsEnum.FLAG_POSITIONS),
+ rightPositions = rightTermsEnum.docs(randomBits, rightPositions, DocsEnum.FLAG_POSITIONS));
// with freqs:
assertDocsEnum(leftDocs = leftTermsEnum.docs(null, leftDocs),
@@ -389,7 +388,7 @@ public class TestBlockPostingsFormat3 extends LuceneTestCase {
/**
* checks docs + freqs + positions + payloads, sequentially
*/
- public void assertDocsAndPositionsEnum(DocsAndPositionsEnum leftDocs, DocsAndPositionsEnum rightDocs) throws Exception {
+ public void assertDocsAndPositionsEnum(DocsEnum leftDocs, DocsEnum rightDocs) throws Exception {
if (leftDocs == null || rightDocs == null) {
assertNull(leftDocs);
assertNull(rightDocs);
@@ -462,7 +461,7 @@ public class TestBlockPostingsFormat3 extends LuceneTestCase {
/**
* checks advancing docs + positions
*/
- public void assertPositionsSkipping(int docFreq, DocsAndPositionsEnum leftDocs, DocsAndPositionsEnum rightDocs) throws Exception {
+ public void assertPositionsSkipping(int docFreq, DocsEnum leftDocs, DocsEnum rightDocs) throws Exception {
if (leftDocs == null || rightDocs == null) {
assertNull(leftDocs);
assertNull(rightDocs);
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50DocValuesFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50DocValuesFormat.java
index e9b5620..87d88cf 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50DocValuesFormat.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50DocValuesFormat.java
@@ -30,11 +30,11 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.document.StringField;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.BaseCompressingDocValuesFormatTestCase;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.SerialMergeScheduler;
import org.apache.lucene.index.Term;
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java b/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java
index 3b19087..5be8c78 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java
@@ -16,6 +16,7 @@ package org.apache.lucene.codecs.perfield;
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
import java.io.IOException;
import org.apache.lucene.analysis.MockAnalyzer;
@@ -265,7 +266,7 @@ public class TestPerFieldPostingsFormat2 extends LuceneTestCase {
}
dir.close();
}
-
+
public void testSameCodecDifferentInstance() throws Exception {
Codec codec = new AssertingCodec() {
@Override
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java b/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
index 5ae4d89..f2cd826 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
@@ -343,7 +343,7 @@ public class TestCodecs extends LuceneTestCase {
assertTrue(doc != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(docs[i], doc);
if (doPos) {
- this.verifyPositions(positions[i], ((DocsAndPositionsEnum) docsEnum));
+ this.verifyPositions(positions[i], docsEnum);
}
}
assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsEnum.nextDoc());
@@ -351,7 +351,7 @@ public class TestCodecs extends LuceneTestCase {
byte[] data = new byte[10];
- private void verifyPositions(final PositionData[] positions, final DocsAndPositionsEnum posEnum) throws Throwable {
+ private void verifyPositions(final PositionData[] positions, final DocsEnum posEnum) throws Throwable {
      for(int i=0;i<positions.length;i++) {
[extraction gap: the remainder of this TestCodecs.java hunk and the diff header for lucene/core/src/test/org/apache/lucene/search/TestBooleanQuery.java are missing from this chunk; the next line belongs to a TestBooleanQuery.java hunk]
      final List<Integer> hits = new ArrayList<>();
@@ -252,7 +253,7 @@ public class TestBooleanQuery extends LuceneTestCase {
for(int iter2=0;iter2<10;iter2++) {
weight = s.createNormalizedWeight(q);
- scorer = weight.scorer(s.leafContexts.get(0), null);
+ scorer = weight.scorer(s.leafContexts.get(0), DocsEnum.FLAG_FREQS, null);
if (VERBOSE) {
System.out.println(" iter2=" + iter2);
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java
index 8b9e0bb..dcdefca 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java
@@ -30,15 +30,13 @@ import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
-import org.apache.lucene.search.BooleanQuery.BooleanWeight;
import org.apache.lucene.search.Scorer.ChildScorer;
-import org.apache.lucene.search.Weight.DefaultBulkScorer;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.LuceneTestCase;
@@ -279,8 +277,8 @@ public class TestBooleanQueryVisitSubscorers extends LuceneTestCase {
public Weight createWeight(IndexSearcher searcher) throws IOException {
return new BooleanWeight(searcher, false) {
@Override
- public BulkScorer bulkScorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
- Scorer scorer = scorer(context, acceptDocs);
+ public BulkScorer bulkScorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
+ Scorer scorer = scorer(context, flags, acceptDocs);
if (scorer == null) {
return null;
}
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java
index 58f4f38..436a0dc 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java
@@ -92,12 +92,12 @@ public class TestBooleanScorer extends LuceneTestCase {
}
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) {
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) {
throw new UnsupportedOperationException();
}
@Override
- public BulkScorer bulkScorer(LeafReaderContext context, Bits acceptDocs) {
+ public BulkScorer bulkScorer(LeafReaderContext context, int flags, Bits acceptDocs) {
return new BulkScorer() {
@Override
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java b/lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java
index 0b6e02a..2bf3d8e 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java
@@ -19,6 +19,7 @@ package org.apache.lucene.search;
import java.io.IOException;
+import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
public class TestCachingCollector extends LuceneTestCase {
@@ -38,6 +39,36 @@ public class TestCachingCollector extends LuceneTestCase {
public int freq() throws IOException { return 0; }
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public int docID() { return 0; }
@Override
@@ -45,7 +76,7 @@ public class TestCachingCollector extends LuceneTestCase {
@Override
public int advance(int target) throws IOException { return 0; }
-
+
@Override
public long cost() {
return 1;
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java
index 2cb8f52..17906cd 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java
@@ -17,6 +17,8 @@ package org.apache.lucene.search;
* limitations under the License.
*/
+import java.io.IOException;
+
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
@@ -26,8 +28,6 @@ import org.apache.lucene.search.similarities.DefaultSimilarity;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
-import java.io.IOException;
-
/** This class only tests some basic functionality in CSQ, the main parts are mostly
* tested by MultiTermQuery tests, explanations seems to be tested in TestExplanations! */
public class TestConstantScoreQuery extends LuceneTestCase {
@@ -56,9 +56,9 @@ public class TestConstantScoreQuery extends LuceneTestCase {
public void setScorer(Scorer scorer) {
this.scorer = scorer;
assertEquals("Scorer is implemented by wrong class", scorerClassName, scorer.getClass().getName());
- if (innerScorerClassName != null && scorer instanceof ConstantScoreQuery.ConstantScorer) {
- final ConstantScoreQuery.ConstantScorer innerScorer = (ConstantScoreQuery.ConstantScorer) scorer;
- assertEquals("inner Scorer is implemented by wrong class", innerScorerClassName, innerScorer.docIdSetIterator.getClass().getName());
+ if (innerScorerClassName != null && scorer instanceof ConstantScoreQuery.ConstantScoreScorer) {
+ final ConstantScoreQuery.ConstantScoreScorer innerScorer = (ConstantScoreQuery.ConstantScoreScorer) scorer;
+ assertEquals("inner Scorer is implemented by wrong class", innerScorerClassName, innerScorer.in.getClass().getName());
}
}
@@ -108,13 +108,13 @@ public class TestConstantScoreQuery extends LuceneTestCase {
final Query csqbq = new ConstantScoreQuery(bq);
csqbq.setBoost(17.0f);
- checkHits(searcher, csq1, csq1.getBoost(), ConstantScoreQuery.ConstantScorer.class.getName(), null);
- checkHits(searcher, csq2, csq2.getBoost(), ConstantScoreQuery.ConstantScorer.class.getName(), ConstantScoreQuery.ConstantScorer.class.getName());
+ checkHits(searcher, csq1, csq1.getBoost(), ConstantScoreQuery.ConstantScoreScorer.class.getName(), null);
+ checkHits(searcher, csq2, csq2.getBoost(), ConstantScoreQuery.ConstantScoreScorer.class.getName(), ConstantScoreQuery.ConstantScoreScorer.class.getName());
// for the combined BQ, the scorer should always be BooleanScorer's BucketScorer, because our scorer supports out-of order collection!
final String bucketScorerClass = FakeScorer.class.getName();
checkHits(searcher, bq, csq1.getBoost() + csq2.getBoost(), bucketScorerClass, null);
- checkHits(searcher, csqbq, csqbq.getBoost(), ConstantScoreQuery.ConstantScorer.class.getName(), bucketScorerClass);
+ checkHits(searcher, csqbq, csqbq.getBoost(), ConstantScoreQuery.ConstantScoreScorer.class.getName(), bucketScorerClass);
} finally {
if (reader != null) reader.close();
if (directory != null) directory.close();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
index 34923e3..03372ce 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
@@ -17,21 +17,21 @@ package org.apache.lucene.search;
* limitations under the License.
*/
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.FieldInvertState;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.SlowCompositeReaderWrapper;
-import org.apache.lucene.index.FieldInvertState;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.index.StoredDocument;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.similarities.DefaultSimilarity;
@@ -39,11 +39,12 @@ import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import java.io.IOException;
import java.text.DecimalFormat;
import java.text.DecimalFormatSymbols;
import java.util.Locale;
-import java.io.IOException;
/**
* Test of the DisjunctionMaxQuery.
@@ -180,7 +181,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase {
assertTrue(s.getTopReaderContext() instanceof LeafReaderContext);
final Weight dw = s.createNormalizedWeight(dq);
LeafReaderContext context = (LeafReaderContext)s.getTopReaderContext();
- final Scorer ds = dw.scorer(context, context.reader().getLiveDocs());
+ final Scorer ds = dw.scorer(context, DocsEnum.FLAG_FREQS, context.reader().getLiveDocs());
final boolean skipOk = ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS;
if (skipOk) {
fail("firsttime skipTo found a match? ... "
@@ -196,7 +197,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase {
QueryUtils.check(random(), dq, s);
final Weight dw = s.createNormalizedWeight(dq);
LeafReaderContext context = (LeafReaderContext)s.getTopReaderContext();
- final Scorer ds = dw.scorer(context, context.reader().getLiveDocs());
+ final Scorer ds = dw.scorer(context, DocsEnum.FLAG_FREQS, context.reader().getLiveDocs());
assertTrue("firsttime skipTo found no match",
ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals("found wrong docid", "d4", r.document(ds.docID()).get("id"));
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java b/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java
index 2216ed5..c89d318 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java
@@ -29,8 +29,9 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.document.StringField;
-import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.index.Term;
@@ -124,7 +125,7 @@ public class TestMinShouldMatch2 extends LuceneTestCase {
if (slow) {
return new SlowMinShouldMatchScorer(weight, reader, searcher);
} else {
- return weight.scorer(reader.getContext(), null);
+ return weight.scorer(reader.getContext(), DocsEnum.FLAG_FREQS, null);
}
}
@@ -315,6 +316,36 @@ public class TestMinShouldMatch2 extends LuceneTestCase {
}
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public int docID() {
return currentDoc;
}
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java
index 26cf76a..3de48e9 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java
@@ -17,19 +17,19 @@ package org.apache.lucene.search;
* limitations under the License.
*/
-import org.apache.lucene.document.Field;
-import org.apache.lucene.util.LuceneTestCase;
+import java.io.IOException;
+import java.util.LinkedList;
+
import org.apache.lucene.document.Document;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
import org.apache.lucene.index.MultiFields;
-import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.Directory;
-
-import java.io.IOException;
-import java.util.LinkedList;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
/**
* This class tests PhrasePrefixQuery class.
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java
index 68e958e..4ac6bff 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java
@@ -17,17 +17,27 @@ package org.apache.lucene.search;
* limitations under the License.
*/
-import java.io.*;
-import java.util.*;
-
-import org.apache.lucene.analysis.*;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenFilter;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-import org.apache.lucene.document.*;
-import org.apache.lucene.index.*;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
import org.apache.lucene.search.similarities.DefaultSimilarity;
import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.*;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -35,7 +45,7 @@ import org.junit.BeforeClass;
* Tests {@link PhraseQuery}.
*
* @see TestPositionIncrement
- */
+ */
public class TestPhraseQuery extends LuceneTestCase {
/** threshold for comparing floats */
@@ -184,7 +194,7 @@ public class TestPhraseQuery extends LuceneTestCase {
* slop is the total number of positional moves allowed
* to line up a phrase
*/
- public void testMulipleTerms() throws Exception {
+ public void testMultipleTerms() throws Exception {
query.setSlop(2);
query.add(new Term("field", "one"));
query.add(new Term("field", "three"));
@@ -670,7 +680,7 @@ public class TestPhraseQuery extends LuceneTestCase {
}
}
- assertTrue("phrase '" + sb + "' not found; start=" + start, found);
+ assertTrue("phrase '" + sb + "' not found; start=" + start + ", it=" + i + ", expected doc " + docID, found);
}
reader.close();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java b/lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java
index 6086ff6..e716f22 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java
@@ -31,7 +31,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.MultiFields;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
@@ -100,7 +100,7 @@ public class TestPositionIncrement extends LuceneTestCase {
IndexSearcher searcher = newSearcher(reader);
- DocsAndPositionsEnum pos = MultiFields.getTermPositionsEnum(searcher.getIndexReader(),
+ DocsEnum pos = MultiFields.getTermPositionsEnum(searcher.getIndexReader(),
MultiFields.getLiveDocs(searcher.getIndexReader()),
"field",
new BytesRef("1"));
@@ -212,7 +212,7 @@ public class TestPositionIncrement extends LuceneTestCase {
final IndexReader readerFromWriter = writer.getReader();
LeafReader r = SlowCompositeReaderWrapper.wrap(readerFromWriter);
- DocsAndPositionsEnum tp = r.termPositionsEnum(new Term("content", "a"));
+ DocsEnum tp = r.termDocsEnum(new Term("content", "a"), DocsEnum.FLAG_ALL);
int count = 0;
assertTrue(tp.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java b/lucene/core/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java
index f464ff9..a3fdfa4 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java
@@ -17,12 +17,15 @@ package org.apache.lucene.search;
* limitations under the License.
*/
+import java.io.IOException;
+
+import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.document.Document;
public class TestPositiveScoresOnlyCollector extends LuceneTestCase {
@@ -41,6 +44,36 @@ public class TestPositiveScoresOnlyCollector extends LuceneTestCase {
return 1;
}
+ @Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
@Override public int docID() { return idx; }
@Override public int nextDoc() {
@@ -51,7 +84,7 @@ public class TestPositiveScoresOnlyCollector extends LuceneTestCase {
idx = target;
return idx < scores.length ? idx : NO_MORE_DOCS;
}
-
+
@Override
public long cost() {
return scores.length;
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestQueryRescorer.java b/lucene/core/src/test/org/apache/lucene/search/TestQueryRescorer.java
index 20e337c..f60c34b 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestQueryRescorer.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestQueryRescorer.java
@@ -25,8 +25,8 @@ import java.util.Set;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
@@ -36,6 +36,7 @@ import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
@@ -443,7 +444,7 @@ public class TestQueryRescorer extends LuceneTestCase {
}
@Override
- public Scorer scorer(final LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public Scorer scorer(final LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
return new Scorer(null) {
int docID = -1;
@@ -459,6 +460,36 @@ public class TestQueryRescorer extends LuceneTestCase {
}
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public long cost() {
return 1;
}
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java b/lucene/core/src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java
index 3fd3c2b..b447b0b 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java
@@ -23,6 +23,7 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
public class TestScoreCachingWrappingScorer extends LuceneTestCase {
@@ -47,6 +48,36 @@ public class TestScoreCachingWrappingScorer extends LuceneTestCase {
return 1;
}
+ @Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
@Override public int docID() { return doc; }
@Override public int nextDoc() {
@@ -57,7 +88,7 @@ public class TestScoreCachingWrappingScorer extends LuceneTestCase {
doc = target;
return doc < scores.length ? doc : NO_MORE_DOCS;
}
-
+
@Override
public long cost() {
return scores.length;
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSearchWithThreads.java b/lucene/core/src/test/org/apache/lucene/search/TestSearchWithThreads.java
index cd5ac03..58de70d 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSearchWithThreads.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSearchWithThreads.java
@@ -20,16 +20,18 @@ package org.apache.lucene.search;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
+import com.carrotsearch.randomizedtesting.annotations.Seed;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
@SuppressCodecs({ "SimpleText", "Memory", "Direct" })
+@Seed("12017F5C55C9DD62")
public class TestSearchWithThreads extends LuceneTestCase {
int NUM_DOCS;
final int NUM_SEARCH_THREADS = 5;
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSimpleExplanations.java b/lucene/core/src/test/org/apache/lucene/search/TestSimpleExplanations.java
index afbabc2..ec9b4de 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSimpleExplanations.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSimpleExplanations.java
@@ -242,6 +242,13 @@ public class TestSimpleExplanations extends BaseExplanationTestCase {
}
/* MultiPhraseQuery */
+
+ /*
+ "w1 w2 w3 w4 w5",
+ "w1 w3 w2 w3 zz",
+ "w1 xx w2 yy w3",
+ "w1 w3 xx w2 yy w3 zz"
+ */
public void testMPQ1() throws Exception {
MultiPhraseQuery q = new MultiPhraseQuery();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java
index afd3030..4c3e6c3 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java
@@ -19,7 +19,7 @@ package org.apache.lucene.search;
import java.io.IOException;
-import org.apache.lucene.util.LuceneTestCase;
+import com.carrotsearch.randomizedtesting.annotations.Seed;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
@@ -32,7 +32,9 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+@Seed("2542F68A58928060")
public class TestSloppyPhraseQuery extends LuceneTestCase {
private static final String S_1 = "A A A";
@@ -144,7 +146,7 @@ public class TestSloppyPhraseQuery extends LuceneTestCase {
IndexReader reader = writer.getReader();
IndexSearcher searcher = newSearcher(reader);
- MaxFreqCollector c = new MaxFreqCollector();
+ MaxScoreCollector c = new MaxScoreCollector();
searcher.search(query, c);
assertEquals("slop: "+slop+" query: "+query+" doc: "+doc+" Wrong number of hits", expectedNumResults, c.totalHits);
@@ -176,7 +178,7 @@ public class TestSloppyPhraseQuery extends LuceneTestCase {
return query;
}
- static class MaxFreqCollector extends SimpleCollector {
+ static class MaxScoreCollector extends SimpleCollector {
float max;
int totalHits;
Scorer scorer;
@@ -189,7 +191,7 @@ public class TestSloppyPhraseQuery extends LuceneTestCase {
@Override
public void collect(int doc) throws IOException {
totalHits++;
- max = Math.max(max, scorer.freq());
+ max = Math.max(max, scorer.score());
}
}
@@ -205,7 +207,6 @@ public class TestSloppyPhraseQuery extends LuceneTestCase {
@Override
public void collect(int doc) throws IOException {
- assertFalse(Float.isInfinite(scorer.freq()));
assertFalse(Float.isInfinite(scorer.score()));
}
});
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java b/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java
index 30a3581..06b0058 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java
@@ -24,8 +24,9 @@ import java.util.List;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.index.Term;
@@ -78,7 +79,7 @@ public class TestTermScorer extends LuceneTestCase {
Weight weight = indexSearcher.createNormalizedWeight(termQuery);
assertTrue(indexSearcher.getTopReaderContext() instanceof LeafReaderContext);
LeafReaderContext context = (LeafReaderContext)indexSearcher.getTopReaderContext();
- BulkScorer ts = weight.bulkScorer(context, context.reader().getLiveDocs());
+ BulkScorer ts = weight.bulkScorer(context, DocsEnum.FLAG_FREQS, context.reader().getLiveDocs());
// we have 2 documents with the term all in them, one document for all the
// other values
      final List<TestHit> docs = new ArrayList<>();
@@ -135,7 +136,7 @@ public class TestTermScorer extends LuceneTestCase {
Weight weight = indexSearcher.createNormalizedWeight(termQuery);
assertTrue(indexSearcher.getTopReaderContext() instanceof LeafReaderContext);
LeafReaderContext context = (LeafReaderContext) indexSearcher.getTopReaderContext();
- Scorer ts = weight.scorer(context, context.reader().getLiveDocs());
+ Scorer ts = weight.scorer(context, DocsEnum.FLAG_FREQS, context.reader().getLiveDocs());
assertTrue("next did not return a doc",
ts.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue("score is not correct", ts.score() == 1.6931472f);
@@ -154,7 +155,7 @@ public class TestTermScorer extends LuceneTestCase {
Weight weight = indexSearcher.createNormalizedWeight(termQuery);
assertTrue(indexSearcher.getTopReaderContext() instanceof LeafReaderContext);
LeafReaderContext context = (LeafReaderContext) indexSearcher.getTopReaderContext();
- Scorer ts = weight.scorer(context, context.reader().getLiveDocs());
+ Scorer ts = weight.scorer(context, DocsEnum.FLAG_FREQS, context.reader().getLiveDocs());
assertTrue("Didn't skip", ts.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
// The next doc should be doc 5
assertTrue("doc should be number 5", ts.docID() == 5);
diff --git a/lucene/core/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java b/lucene/core/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
index 649f301..4c1db28 100644
--- a/lucene/core/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
+++ b/lucene/core/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
@@ -20,16 +20,17 @@ package org.apache.lucene.search.spans;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
+import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.IndexReaderContext;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.CheckHits;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.Weight;
import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
@@ -183,7 +184,7 @@ public class TestNearSpansOrdered extends LuceneTestCase {
Weight w = searcher.createNormalizedWeight(q);
IndexReaderContext topReaderContext = searcher.getTopReaderContext();
LeafReaderContext leave = topReaderContext.leaves().get(0);
- Scorer s = w.scorer(leave, leave.reader().getLiveDocs());
+ Scorer s = w.scorer(leave, DocsEnum.FLAG_POSITIONS, leave.reader().getLiveDocs());
assertEquals(1, s.advance(1));
}
diff --git a/lucene/core/src/test/org/apache/lucene/search/spans/TestSpans.java b/lucene/core/src/test/org/apache/lucene/search/spans/TestSpans.java
index 5a960f3..5517599 100644
--- a/lucene/core/src/test/org/apache/lucene/search/spans/TestSpans.java
+++ b/lucene/core/src/test/org/apache/lucene/search/spans/TestSpans.java
@@ -17,14 +17,12 @@ package org.apache.lucene.search.spans;
* limitations under the License.
*/
-import java.io.IOException;
-import java.util.List;
-
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.IndexWriter;
@@ -43,6 +41,9 @@ import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
+import java.io.IOException;
+import java.util.List;
+
public class TestSpans extends LuceneTestCase {
private IndexSearcher searcher;
private IndexReader reader;
@@ -429,7 +430,7 @@ public class TestSpans extends LuceneTestCase {
slop,
ordered);
- spanScorer = searcher.createNormalizedWeight(snq).scorer(ctx, ctx.reader().getLiveDocs());
+ spanScorer = searcher.createNormalizedWeight(snq).scorer(ctx, DocsEnum.FLAG_POSITIONS, ctx.reader().getLiveDocs());
} finally {
searcher.setSimilarity(oldSim);
}
diff --git a/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionRescorer.java b/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionRescorer.java
index 18ec02c..8f77e95 100644
--- a/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionRescorer.java
+++ b/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionRescorer.java
@@ -18,7 +18,6 @@ package org.apache.lucene.expressions;
*/
import java.io.IOException;
-import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -27,12 +26,11 @@ import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.FakeScorer;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Rescorer;
-import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortRescorer;
-import org.apache.lucene.search.Weight;
/**
* A {@link Rescorer} that uses an expression to re-score
@@ -58,56 +56,6 @@ class ExpressionRescorer extends SortRescorer {
this.bindings = bindings;
}
- private static class FakeScorer extends Scorer {
- float score;
- int doc = -1;
- int freq = 1;
-
- public FakeScorer() {
- super(null);
- }
-
- @Override
- public int advance(int target) {
- throw new UnsupportedOperationException("FakeScorer doesn't support advance(int)");
- }
-
- @Override
- public int docID() {
- return doc;
- }
-
- @Override
- public int freq() {
- return freq;
- }
-
- @Override
- public int nextDoc() {
- throw new UnsupportedOperationException("FakeScorer doesn't support nextDoc()");
- }
-
- @Override
- public float score() {
- return score;
- }
-
- @Override
- public long cost() {
- return 1;
- }
-
- @Override
- public Weight getWeight() {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public Collection getChildren() {
- throw new UnsupportedOperationException();
- }
- }
-
@Override
public Explanation explain(IndexSearcher searcher, Explanation firstPassExplanation, int docID) throws IOException {
Explanation result = super.explain(searcher, firstPassExplanation, docID);
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysQuery.java b/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysQuery.java
index 6fde4ae..648fa34 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysQuery.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysQuery.java
@@ -19,8 +19,9 @@ package org.apache.lucene.facet;
import java.io.IOException;
import java.util.Arrays;
-import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.BulkScorer;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
@@ -29,10 +30,8 @@ import org.apache.lucene.search.Filter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.BulkScorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;
-
/** Only purpose is to punch through and return a
* DrillSidewaysScorer */
@@ -111,17 +110,17 @@ class DrillSidewaysQuery extends Query {
}
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
// We can only run as a top scorer:
throw new UnsupportedOperationException();
}
@Override
- public BulkScorer bulkScorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public BulkScorer bulkScorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
// TODO: it could be better if we take acceptDocs
// into account instead of baseScorer?
- Scorer baseScorer = baseWeight.scorer(context, acceptDocs);
+ Scorer baseScorer = baseWeight.scorer(context, flags, acceptDocs);
DrillSidewaysScorer.DocsAndCost[] dims = new DrillSidewaysScorer.DocsAndCost[drillDowns.length];
int nullCount = 0;
@@ -166,7 +165,7 @@ class DrillSidewaysQuery extends Query {
dims[dim].disi = disi;
}
} else {
- DocIdSetIterator disi = ((Weight) drillDowns[dim]).scorer(context, null);
+ DocIdSetIterator disi = ((Weight) drillDowns[dim]).scorer(context, flags, null);
if (disi == null) {
nullCount++;
continue;
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysScorer.java b/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysScorer.java
index 7988cb2..5a88fa5 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysScorer.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysScorer.java
@@ -21,15 +21,16 @@ import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
-import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.search.LeafCollector;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.BulkScorer;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.BulkScorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
class DrillSidewaysScorer extends BulkScorer {
@@ -648,6 +649,36 @@ class DrillSidewaysScorer extends BulkScorer {
}
@Override
+ public int nextPosition() throws IOException {
+ throw new UnsupportedOperationException("FakeScorer doesn't support nextPosition()");
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
public int nextDoc() {
throw new UnsupportedOperationException("FakeScorer doesn't support nextDoc()");
}
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetSumValueSource.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetSumValueSource.java
index a35660d..974c6d0 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetSumValueSource.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetSumValueSource.java
@@ -18,21 +18,20 @@ package org.apache.lucene.facet.taxonomy;
*/
import java.io.IOException;
-import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import org.apache.lucene.facet.FacetsCollector.MatchingDocs;
import org.apache.lucene.facet.FacetsCollector;
+import org.apache.lucene.facet.FacetsCollector.MatchingDocs;
import org.apache.lucene.facet.FacetsConfig;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.docvalues.DoubleDocValues;
import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.FakeScorer;
import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.Weight;
import org.apache.lucene.util.IntsRef;
/** Aggregates sum of values from {@link
@@ -62,28 +61,6 @@ public class TaxonomyFacetSumValueSource extends FloatTaxonomyFacets {
sumValues(fc.getMatchingDocs(), fc.getKeepScores(), valueSource);
}
- private static final class FakeScorer extends Scorer {
- float score;
- int docID;
- FakeScorer() { super(null); }
- @Override public float score() throws IOException { return score; }
- @Override public int freq() throws IOException { throw new UnsupportedOperationException(); }
- @Override public int docID() { return docID; }
- @Override public int nextDoc() throws IOException { throw new UnsupportedOperationException(); }
- @Override public int advance(int target) throws IOException { throw new UnsupportedOperationException(); }
- @Override public long cost() { return 0; }
-
- @Override
- public Weight getWeight() {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public Collection getChildren() {
- throw new UnsupportedOperationException();
- }
- }
-
private final void sumValues(List matchingDocs, boolean keepScores, ValueSource valueSource) throws IOException {
final FakeScorer scorer = new FakeScorer();
Map context = new HashMap<>();
@@ -104,7 +81,7 @@ public class TaxonomyFacetSumValueSource extends FloatTaxonomyFacets {
while ((doc = docs.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
ords.get(doc, scratch);
if (keepScores) {
- scorer.docID = doc;
+ scorer.doc = doc;
scorer.score = scores[scoresIdx++];
}
float value = (float) functionValues.doubleVal(doc);
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/TaxonomyIndexArrays.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/TaxonomyIndexArrays.java
index 6a59db8..e28562d 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/TaxonomyIndexArrays.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/TaxonomyIndexArrays.java
@@ -1,16 +1,16 @@
package org.apache.lucene.facet.taxonomy.directory;
-import java.io.IOException;
-
import org.apache.lucene.facet.taxonomy.ParallelTaxonomyArrays;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.ArrayUtil;
+import java.io.IOException;
+
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@@ -129,9 +129,9 @@ class TaxonomyIndexArrays extends ParallelTaxonomyArrays {
// it's ok to use MultiFields because we only iterate on one posting list.
// breaking it to loop over the leaves() only complicates code for no
// apparent gain.
- DocsAndPositionsEnum positions = MultiFields.getTermPositionsEnum(reader, null,
+ DocsEnum positions = MultiFields.getTermPositionsEnum(reader, null,
Consts.FIELD_PAYLOADS, Consts.PAYLOAD_PARENT_BYTES_REF,
- DocsAndPositionsEnum.FLAG_PAYLOADS);
+ DocsEnum.FLAG_PAYLOADS);
// shouldn't really happen, if it does, something's wrong
if (positions == null || positions.advance(first) == DocIdSetIterator.NO_MORE_DOCS) {
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java
index 16cbf40..473d13f 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java
@@ -17,13 +17,24 @@ package org.apache.lucene.search.grouping;
* limitations under the License.
*/
-
import java.io.IOException;
-import java.util.Collection;
-import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.search.*;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.FakeScorer;
+import org.apache.lucene.search.FieldComparator;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.LeafCollector;
+import org.apache.lucene.search.LeafFieldComparator;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.SimpleCollector;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.TopDocsCollector;
+import org.apache.lucene.search.TopFieldCollector;
+import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.PriorityQueue;
@@ -86,56 +97,6 @@ public class BlockGroupingCollector extends SimpleCollector {
private final GroupQueue groupQueue;
private boolean groupCompetes;
- private final static class FakeScorer extends Scorer {
-
- float score;
- int doc;
-
- public FakeScorer() {
- super(null);
- }
-
- @Override
- public float score() {
- return score;
- }
-
- @Override
- public int freq() {
- throw new UnsupportedOperationException(); // TODO: wtf does this class do?
- }
-
- @Override
- public int docID() {
- return doc;
- }
-
- @Override
- public int advance(int target) {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public int nextDoc() {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public long cost() {
- return 1;
- }
-
- @Override
- public Weight getWeight() {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public Collection getChildren() {
- throw new UnsupportedOperationException();
- }
- }
-
private static final class OneGroup {
LeafReaderContext readerContext;
//int groupOrd;
diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
index ec3e829..943c15b 100644
--- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
+++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
@@ -18,10 +18,27 @@
package org.apache.lucene.search.grouping;
import java.io.IOException;
-import java.util.*;
-
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import com.carrotsearch.randomizedtesting.annotations.Seed;
import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.*;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.SortedDocValuesField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexReaderContext;
@@ -34,7 +51,20 @@ import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.valuesource.BytesRefFieldSource;
-import org.apache.lucene.search.*;
+import org.apache.lucene.search.CachingCollector;
+import org.apache.lucene.search.CachingWrapperFilter;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.FieldDoc;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MultiCollector;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryWrapperFilter;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.Weight;
import org.apache.lucene.search.grouping.function.FunctionAllGroupsCollector;
import org.apache.lucene.search.grouping.function.FunctionFirstPassGroupingCollector;
import org.apache.lucene.search.grouping.function.FunctionSecondPassGroupingCollector;
@@ -54,6 +84,7 @@ import org.apache.lucene.util.mutable.MutableValueStr;
// - test ties
// - test compound sort
+@Seed("3C4E441C6A8DA6A2:4E026113DBED10D1")
public class TestGrouping extends LuceneTestCase {
public void testBasic() throws Exception {
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermVector.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermVector.java
index 936ac2b..1b654fe 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermVector.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermVector.java
@@ -16,6 +16,7 @@ package org.apache.lucene.search.highlight;
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
import java.io.IOException;
import org.apache.lucene.analysis.TokenStream;
@@ -24,7 +25,7 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PackedTokenAttributeImpl;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.AttributeFactory;
@@ -122,7 +123,7 @@ public final class TokenStreamFromTermVector extends TokenStream {
final TermsEnum termsEnum = vector.iterator(null);
BytesRef termBytesRef;
- DocsAndPositionsEnum dpEnum = null;
+ DocsEnum dpEnum = null;
//int sumFreq = 0;
while ((termBytesRef = termsEnum.next()) != null) {
//Grab the term (in same way as BytesRef.utf8ToString() but we don't want a String obj)
@@ -130,7 +131,7 @@ public final class TokenStreamFromTermVector extends TokenStream {
final char[] termChars = new char[termBytesRef.length];
final int termCharsLen = UnicodeUtil.UTF8toUTF16(termBytesRef, termChars);
- dpEnum = termsEnum.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum.docs(null, dpEnum, DocsEnum.FLAG_POSITIONS);
assert dpEnum != null; // presumably checked by TokenSources.hasPositions earlier
dpEnum.nextDoc();
final int freq = dpEnum.freq();
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/MultiTermHighlighting.java b/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/MultiTermHighlighting.java
index 67cdf91..0a5e23e 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/MultiTermHighlighting.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/MultiTermHighlighting.java
@@ -26,7 +26,7 @@ import java.util.List;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.AutomatonQuery;
import org.apache.lucene.search.BooleanClause;
@@ -47,10 +47,10 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util.automaton.Automata;
-import org.apache.lucene.util.automaton.Operations;
+import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.CharacterRunAutomaton;
import org.apache.lucene.util.automaton.LevenshteinAutomata;
-import org.apache.lucene.util.automaton.Automaton;
+import org.apache.lucene.util.automaton.Operations;
/**
* Support for highlighting multiterm queries in PostingsHighlighter.
@@ -197,7 +197,7 @@ class MultiTermHighlighting {
*
* This is solely used internally by PostingsHighlighter: DO NOT USE THIS METHOD!
*/
- static DocsAndPositionsEnum getDocsEnum(final TokenStream ts, final CharacterRunAutomaton[] matchers) throws IOException {
+ static DocsEnum getDocsEnum(final TokenStream ts, final CharacterRunAutomaton[] matchers) throws IOException {
final CharTermAttribute charTermAtt = ts.addAttribute(CharTermAttribute.class);
final OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
ts.reset();
@@ -207,7 +207,7 @@ class MultiTermHighlighting {
// would only serve to make this method less bogus.
// instead, we always return freq() = Integer.MAX_VALUE and let PH terminate based on offset...
- return new DocsAndPositionsEnum() {
+ return new DocsEnum() {
int currentDoc = -1;
int currentMatch = -1;
int currentStartOffset = -1;
@@ -237,7 +237,19 @@ class MultiTermHighlighting {
currentStartOffset = currentEndOffset = Integer.MAX_VALUE;
return Integer.MAX_VALUE;
}
-
+
+ @Override
+ public int startPosition() throws IOException {
+ if (currentStartOffset < Integer.MAX_VALUE)
+ return 0;
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return startPosition();
+ }
+
@Override
public int freq() throws IOException {
return Integer.MAX_VALUE; // lie
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java b/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java
index 14f364b..db23b6a 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java
@@ -31,7 +31,7 @@ import java.util.SortedSet;
import java.util.TreeSet;
import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexReader;
@@ -455,7 +455,7 @@ public class PostingsHighlighter {
private Map highlightField(String field, String contents[], BreakIterator bi, BytesRef terms[], int[] docids, List leaves, int maxPassages, Query query) throws IOException {
Map highlights = new HashMap<>();
-
+
PassageFormatter fieldFormatter = getFormatter(field);
if (fieldFormatter == null) {
throw new NullPointerException("PassageFormatter cannot be null");
@@ -477,7 +477,7 @@ public class PostingsHighlighter {
// we are processing in increasing docid order, so we only need to reinitialize stuff on segment changes
// otherwise, we will just advance() existing enums to the new document in the same segment.
- DocsAndPositionsEnum postings[] = null;
+ DocsEnum postings[] = null;
TermsEnum termsEnum = null;
int lastLeaf = -1;
@@ -499,7 +499,7 @@ public class PostingsHighlighter {
Terms t = r.terms(field);
if (t != null) {
termsEnum = t.iterator(null);
- postings = new DocsAndPositionsEnum[terms.length];
+ postings = new DocsEnum[terms.length];
}
}
if (termsEnum == null) {
@@ -508,7 +508,7 @@ public class PostingsHighlighter {
// if there are multi-term matches, we have to initialize the "fake" enum for each document
if (automata.length > 0) {
- DocsAndPositionsEnum dp = MultiTermHighlighting.getDocsEnum(analyzer.tokenStream(field, content), automata);
+ DocsEnum dp = MultiTermHighlighting.getDocsEnum(analyzer.tokenStream(field, content), automata);
dp.advance(doc - subContext.docBase);
postings[terms.length-1] = dp; // last term is the multiterm matcher
}
@@ -534,7 +534,7 @@ public class PostingsHighlighter {
// we can intersect these with the postings lists via BreakIterator.preceding(offset),s
// score each sentence as norm(sentenceStartOffset) * sum(weight * tf(freq))
private Passage[] highlightDoc(String field, BytesRef terms[], int contentLength, BreakIterator bi, int doc,
- TermsEnum termsEnum, DocsAndPositionsEnum[] postings, int n) throws IOException {
+ TermsEnum termsEnum, DocsEnum[] postings, int n) throws IOException {
PassageScorer scorer = getScorer(field);
if (scorer == null) {
throw new NullPointerException("PassageScorer cannot be null");
@@ -543,7 +543,7 @@ public class PostingsHighlighter {
float weights[] = new float[terms.length];
// initialize postings
for (int i = 0; i < terms.length; i++) {
- DocsAndPositionsEnum de = postings[i];
+ DocsEnum de = postings[i];
int pDoc;
if (de == EMPTY) {
continue;
@@ -552,7 +552,7 @@ public class PostingsHighlighter {
if (!termsEnum.seekExact(terms[i])) {
continue; // term not found
}
- de = postings[i] = termsEnum.docsAndPositions(null, null, DocsAndPositionsEnum.FLAG_OFFSETS);
+ de = postings[i] = termsEnum.docs(null, null, DocsEnum.FLAG_OFFSETS);
if (de == null) {
// no positions available
throw new IllegalArgumentException("field '" + field + "' was indexed without offsets, cannot highlight");
@@ -590,7 +590,7 @@ public class PostingsHighlighter {
OffsetsEnum off;
while ((off = pq.poll()) != null) {
- final DocsAndPositionsEnum dp = off.dp;
+ final DocsEnum dp = off.dp;
int start = dp.startOffset();
if (start == -1) {
throw new IllegalArgumentException("field '" + field + "' was indexed without offsets, cannot highlight");
@@ -698,11 +698,11 @@ public class PostingsHighlighter {
}
private static class OffsetsEnum implements Comparable {
- DocsAndPositionsEnum dp;
+ DocsEnum dp;
int pos;
int id;
- OffsetsEnum(DocsAndPositionsEnum dp, int id) throws IOException {
+ OffsetsEnum(DocsEnum dp, int id) throws IOException {
this.dp = dp;
this.id = id;
this.pos = 1;
@@ -724,10 +724,20 @@ public class PostingsHighlighter {
}
}
- private static final DocsAndPositionsEnum EMPTY = new DocsAndPositionsEnum() {
+ private static final DocsEnum EMPTY = new DocsEnum() {
+
+ @Override
+ public int nextPosition() throws IOException { return -1; }
@Override
- public int nextPosition() throws IOException { return 0; }
+ public int startPosition() throws IOException {
+ assert false; return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
@Override
public int startOffset() throws IOException { return Integer.MAX_VALUE; }
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java
index 29c307a..4f36e64 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java
@@ -22,7 +22,7 @@ import java.util.Iterator;
import java.util.LinkedList;
import java.util.Set;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
@@ -93,7 +93,7 @@ public class FieldTermStack {
final CharsRefBuilder spare = new CharsRefBuilder();
final TermsEnum termsEnum = vector.iterator(null);
- DocsAndPositionsEnum dpEnum = null;
+ DocsEnum dpEnum = null;
BytesRef text;
int numDocs = reader.maxDoc();
@@ -104,7 +104,7 @@ public class FieldTermStack {
if (!termSet.contains(term)) {
continue;
}
- dpEnum = termsEnum.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum.docs(null, dpEnum, DocsEnum.FLAG_POSITIONS);
if (dpEnum == null) {
// null snippet
return;
diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/highlight/custom/HighlightCustomQueryTest.java b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/custom/HighlightCustomQueryTest.java
index 536259a..a52d9b7 100644
--- a/lucene/highlighter/src/test/org/apache/lucene/search/highlight/custom/HighlightCustomQueryTest.java
+++ b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/custom/HighlightCustomQueryTest.java
@@ -16,9 +16,6 @@ package org.apache.lucene.search.highlight.custom;
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-import java.io.IOException;
-import java.util.Map;
-
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenFilter;
import org.apache.lucene.analysis.MockTokenizer;
@@ -36,6 +33,9 @@ import org.apache.lucene.search.highlight.WeightedSpanTerm;
import org.apache.lucene.search.highlight.WeightedSpanTermExtractor;
import org.apache.lucene.util.LuceneTestCase;
+import java.io.IOException;
+import java.util.Map;
+
/**
* Tests the extensibility of {@link WeightedSpanTermExtractor} and
* {@link QueryScorer} in a user defined package
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/FakeScorer.java b/lucene/join/src/java/org/apache/lucene/search/join/FakeScorer.java
deleted file mode 100644
index cbd1ff8..0000000
--- a/lucene/join/src/java/org/apache/lucene/search/join/FakeScorer.java
+++ /dev/null
@@ -1,75 +0,0 @@
-package org.apache.lucene.search.join;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.util.Collection;
-
-import org.apache.lucene.search.LeafCollector;
-import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.Weight;
-
-/** Passed to {@link LeafCollector#setScorer} during join collection. */
-final class FakeScorer extends Scorer {
- float score;
- int doc = -1;
- int freq = 1;
-
- public FakeScorer() {
- super(null);
- }
-
- @Override
- public int advance(int target) {
- throw new UnsupportedOperationException("FakeScorer doesn't support advance(int)");
- }
-
- @Override
- public int docID() {
- return doc;
- }
-
- @Override
- public int freq() {
- throw new UnsupportedOperationException("FakeScorer doesn't support freq()");
- }
-
- @Override
- public int nextDoc() {
- throw new UnsupportedOperationException("FakeScorer doesn't support nextDoc()");
- }
-
- @Override
- public float score() {
- return score;
- }
-
- @Override
- public long cost() {
- return 1;
- }
-
- @Override
- public Weight getWeight() {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public Collection getChildren() {
- throw new UnsupportedOperationException();
- }
-}
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/TermsIncludingScoreQuery.java b/lucene/join/src/java/org/apache/lucene/search/join/TermsIncludingScoreQuery.java
index ce7f445..d7a3a62 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/TermsIncludingScoreQuery.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/TermsIncludingScoreQuery.java
@@ -27,12 +27,10 @@ import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.search.BulkScorer;
import org.apache.lucene.search.ComplexExplanation;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
@@ -167,7 +165,7 @@ class TermsIncludingScoreQuery extends Query {
}
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
Terms terms = context.reader().terms(field);
if (terms == null) {
return null;
@@ -183,9 +181,10 @@ class TermsIncludingScoreQuery extends Query {
return new SVInOrderScorer(this, acceptDocs, segmentTermsEnum, context.reader().maxDoc(), cost);
}
}
+
};
}
-
+
class SVInOrderScorer extends Scorer {
final DocIdSetIterator matchingDocsIterator;
@@ -231,6 +230,36 @@ class TermsIncludingScoreQuery extends Query {
}
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public int docID() {
return currentDoc;
}
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/ToChildBlockJoinQuery.java b/lucene/join/src/java/org/apache/lucene/search/join/ToChildBlockJoinQuery.java
index 5a39106..26790f0 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/ToChildBlockJoinQuery.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/ToChildBlockJoinQuery.java
@@ -25,7 +25,6 @@ import java.util.Set;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
-import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
@@ -34,6 +33,7 @@ import org.apache.lucene.search.Weight;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
/**
* Just like {@link ToParentBlockJoinQuery}, except this
@@ -128,9 +128,9 @@ public class ToChildBlockJoinQuery extends Query {
// NOTE: acceptDocs applies (and is checked) only in the
// child document space
@Override
- public Scorer scorer(LeafReaderContext readerContext, Bits acceptDocs) throws IOException {
+ public Scorer scorer(LeafReaderContext readerContext, int flags, Bits acceptDocs) throws IOException {
- final Scorer parentScorer = parentWeight.scorer(readerContext, null);
+ final Scorer parentScorer = parentWeight.scorer(readerContext, flags, null);
if (parentScorer == null) {
// No matches
@@ -274,6 +274,36 @@ public class ToChildBlockJoinQuery extends Query {
}
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public int advance(int childTarget) throws IOException {
//System.out.println("Q.advance childTarget=" + childTarget);
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinCollector.java b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinCollector.java
index 36fc37e..bd0e5ae 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinCollector.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinCollector.java
@@ -17,17 +17,34 @@ package org.apache.lucene.search.join;
* limitations under the License.
*/
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Queue;
+
+import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.IndexWriter; // javadocs
-import org.apache.lucene.search.*;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.FakeScorer;
+import org.apache.lucene.search.FieldComparator;
+import org.apache.lucene.search.FieldValueHitQueue;
+import org.apache.lucene.search.LeafCollector;
+import org.apache.lucene.search.LeafFieldComparator;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreCachingWrappingScorer;
+import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Scorer.ChildScorer;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.TopDocsCollector;
+import org.apache.lucene.search.TopFieldCollector;
+import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.search.grouping.GroupDocs;
import org.apache.lucene.search.grouping.TopGroups;
import org.apache.lucene.util.ArrayUtil;
-import java.io.IOException;
-import java.util.*;
-
/** Collects parent document hits for a Query containing one more more
* BlockJoinQuery clauses, sorted by the
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinIndexSearcher.java b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinIndexSearcher.java
index 31a0463..a277e91 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinIndexSearcher.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinIndexSearcher.java
@@ -21,6 +21,7 @@ import java.io.IOException;
import java.util.List;
import java.util.concurrent.ExecutorService;
+import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Collector;
@@ -55,7 +56,7 @@ public class ToParentBlockJoinIndexSearcher extends IndexSearcher {
// we force the use of Scorer (not BulkScorer) to make sure
// that the scorer passed to LeafCollector.setScorer supports
// Scorer.getChildren
- Scorer scorer = weight.scorer(ctx, ctx.reader().getLiveDocs());
+ Scorer scorer = weight.scorer(ctx, DocsEnum.FLAG_NONE, ctx.reader().getLiveDocs());
if (scorer != null) {
final LeafCollector leafCollector = collector.getLeafCollector(ctx);
leafCollector.setScorer(scorer);
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinQuery.java b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinQuery.java
index bba2225..987ae79 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinQuery.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinQuery.java
@@ -23,6 +23,7 @@ import java.util.Collections;
import java.util.Locale;
import java.util.Set;
+import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.LeafReaderContext;
@@ -39,6 +40,7 @@ import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
/**
* This query requires that you index
@@ -160,9 +162,9 @@ public class ToParentBlockJoinQuery extends Query {
// NOTE: acceptDocs applies (and is checked) only in the
// parent document space
@Override
- public Scorer scorer(LeafReaderContext readerContext, Bits acceptDocs) throws IOException {
+ public Scorer scorer(LeafReaderContext readerContext, int flags, Bits acceptDocs) throws IOException {
- final Scorer childScorer = childWeight.scorer(readerContext, readerContext.reader().getLiveDocs());
+ final Scorer childScorer = childWeight.scorer(readerContext, flags, readerContext.reader().getLiveDocs());
if (childScorer == null) {
// No matches
return null;
@@ -188,7 +190,7 @@ public class ToParentBlockJoinQuery extends Query {
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
- BlockJoinScorer scorer = (BlockJoinScorer) scorer(context, context.reader().getLiveDocs());
+ BlockJoinScorer scorer = (BlockJoinScorer) scorer(context, DocsEnum.FLAG_FREQS, context.reader().getLiveDocs());
if (scorer != null && scorer.advance(doc) == doc) {
return scorer.explain(context.docBase);
}
@@ -370,6 +372,36 @@ public class ToParentBlockJoinQuery extends Query {
}
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public int advance(int parentTarget) throws IOException {
//System.out.println("Q.advance parentTarget=" + parentTarget);
diff --git a/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java b/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java
index 63ba2bb..0130ec6 100644
--- a/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java
+++ b/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java
@@ -25,14 +25,55 @@ import java.util.List;
import java.util.Locale;
import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.*;
-import org.apache.lucene.index.*;
-import org.apache.lucene.search.*;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.SortedDocValuesField;
+import org.apache.lucene.document.StoredField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.LogDocMergePolicy;
+import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.NoMergePolicy;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.ReaderUtil;
+import org.apache.lucene.index.StoredDocument;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.FieldDoc;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.MultiTermQuery;
+import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryUtils;
+import org.apache.lucene.search.QueryWrapperFilter;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.Weight;
import org.apache.lucene.search.grouping.GroupDocs;
import org.apache.lucene.search.grouping.TopGroups;
import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.*;
+import org.apache.lucene.util.BitSet;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.NumericUtils;
+import org.apache.lucene.util.TestUtil;
public class TestBlockJoin extends LuceneTestCase {
@@ -1148,7 +1189,7 @@ public class TestBlockJoin extends LuceneTestCase {
ToParentBlockJoinQuery q = new ToParentBlockJoinQuery(tq, parentFilter, ScoreMode.Avg);
Weight weight = s.createNormalizedWeight(q);
- DocIdSetIterator disi = weight.scorer(s.getIndexReader().leaves().get(0), null);
+ DocIdSetIterator disi = weight.scorer(s.getIndexReader().leaves().get(0), DocsEnum.FLAG_FREQS, null);
assertEquals(1, disi.advance(1));
r.close();
dir.close();
@@ -1182,7 +1223,7 @@ public class TestBlockJoin extends LuceneTestCase {
ToParentBlockJoinQuery q = new ToParentBlockJoinQuery(tq, parentFilter, ScoreMode.Avg);
Weight weight = s.createNormalizedWeight(q);
- DocIdSetIterator disi = weight.scorer(s.getIndexReader().leaves().get(0), null);
+ DocIdSetIterator disi = weight.scorer(s.getIndexReader().leaves().get(0), DocsEnum.FLAG_FREQS, null);
assertEquals(2, disi.advance(0));
r.close();
dir.close();
diff --git a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
index a578102..51ef5d6 100644
--- a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
+++ b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
@@ -34,7 +34,6 @@ import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
@@ -983,16 +982,8 @@ public class MemoryIndex {
if (reuse == null || !(reuse instanceof MemoryDocsEnum)) {
reuse = new MemoryDocsEnum();
}
- return ((MemoryDocsEnum) reuse).reset(liveDocs, info.sliceArray.freq[info.sortedTerms[termUpto]]);
- }
-
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) {
- if (reuse == null || !(reuse instanceof MemoryDocsAndPositionsEnum)) {
- reuse = new MemoryDocsAndPositionsEnum();
- }
final int ord = info.sortedTerms[termUpto];
- return ((MemoryDocsAndPositionsEnum) reuse).reset(liveDocs, info.sliceArray.start[ord], info.sliceArray.end[ord], info.sliceArray.freq[ord]);
+ return ((MemoryDocsEnum) reuse).reset(liveDocs, info.sliceArray.start[ord], info.sliceArray.end[ord], info.sliceArray.freq[ord]);
}
@Override
@@ -1010,68 +1001,25 @@ public class MemoryIndex {
}
private class MemoryDocsEnum extends DocsEnum {
- private boolean hasNext;
- private Bits liveDocs;
- private int doc = -1;
- private int freq;
-
- public DocsEnum reset(Bits liveDocs, int freq) {
- this.liveDocs = liveDocs;
- hasNext = true;
- doc = -1;
- this.freq = freq;
- return this;
- }
-
- @Override
- public int docID() {
- return doc;
- }
-
- @Override
- public int nextDoc() {
- if (hasNext && (liveDocs == null || liveDocs.get(0))) {
- hasNext = false;
- return doc = 0;
- } else {
- return doc = NO_MORE_DOCS;
- }
- }
-
- @Override
- public int advance(int target) throws IOException {
- return slowAdvance(target);
- }
- @Override
- public int freq() throws IOException {
- return freq;
- }
-
- @Override
- public long cost() {
- return 1;
- }
- }
-
- private class MemoryDocsAndPositionsEnum extends DocsAndPositionsEnum {
private final SliceReader sliceReader;
private int posUpto; // for assert
private boolean hasNext;
private Bits liveDocs;
private int doc = -1;
private int freq;
+ private int pos;
private int startOffset;
private int endOffset;
private int payloadIndex;
private final BytesRefBuilder payloadBuilder;//only non-null when storePayloads
- public MemoryDocsAndPositionsEnum() {
+ public MemoryDocsEnum() {
this.sliceReader = new SliceReader(intBlockPool);
this.payloadBuilder = storePayloads ? new BytesRefBuilder() : null;
}
- public DocsAndPositionsEnum reset(Bits liveDocs, int start, int end, int freq) {
+ public DocsEnum reset(Bits liveDocs, int start, int end, int freq) {
this.liveDocs = liveDocs;
this.sliceReader.reset(start, end);
posUpto = 0; // for assert
@@ -1089,6 +1037,7 @@ public class MemoryIndex {
@Override
public int nextDoc() {
+ pos = -1;
if (hasNext && (liveDocs == null || liveDocs.get(0))) {
hasNext = false;
return doc = 0;
@@ -1109,10 +1058,12 @@ public class MemoryIndex {
@Override
public int nextPosition() {
- assert posUpto++ < freq;
+ posUpto++;
+ assert posUpto < freq;
assert !sliceReader.endOfSlice() : " stores offsets : " + startOffset;
int pos = sliceReader.readInt();
if (storeOffsets) {
+ //pos = sliceReader.readInt();
startOffset = sliceReader.readInt();
endOffset = sliceReader.readInt();
}
@@ -1123,6 +1074,16 @@ public class MemoryIndex {
}
@Override
+ public int startPosition() throws IOException {
+ return pos;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return pos;
+ }
+
+ @Override
public int startOffset() {
return startOffset;
}
diff --git a/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstRAMDir.java b/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstRAMDir.java
index a507552..71a64fd 100644
--- a/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstRAMDir.java
+++ b/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstRAMDir.java
@@ -42,7 +42,6 @@ import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.CompositeReader;
import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexOptions;
@@ -199,9 +198,9 @@ public class TestMemoryIndexAgainstRAMDir extends BaseTokenStreamTestCase {
while(iwTermsIter.next() != null) {
assertNotNull(memTermsIter.next());
assertEquals(iwTermsIter.term(), memTermsIter.term());
- DocsAndPositionsEnum iwDocsAndPos = iwTermsIter.docsAndPositions(null, null);
- DocsAndPositionsEnum memDocsAndPos = memTermsIter.docsAndPositions(null, null);
- while(iwDocsAndPos.nextDoc() != DocsAndPositionsEnum.NO_MORE_DOCS) {
+ DocsEnum iwDocsAndPos = iwTermsIter.docs(null, null, DocsEnum.FLAG_ALL);
+ DocsEnum memDocsAndPos = memTermsIter.docs(null, null, DocsEnum.FLAG_ALL);
+ while(iwDocsAndPos.nextDoc() != DocsEnum.NO_MORE_DOCS) {
assertEquals(iwDocsAndPos.docID(), memDocsAndPos.nextDoc());
assertEquals(iwDocsAndPos.freq(), memDocsAndPos.freq());
for (int i = 0; i < iwDocsAndPos.freq(); i++) {
@@ -224,7 +223,7 @@ public class TestMemoryIndexAgainstRAMDir extends BaseTokenStreamTestCase {
assertEquals(iwTermsIter.term(), memTermsIter.term());
DocsEnum iwDocsAndPos = iwTermsIter.docs(null, null);
DocsEnum memDocsAndPos = memTermsIter.docs(null, null);
- while(iwDocsAndPos.nextDoc() != DocsAndPositionsEnum.NO_MORE_DOCS) {
+ while(iwDocsAndPos.nextDoc() != DocsEnum.NO_MORE_DOCS) {
assertEquals(iwDocsAndPos.docID(), memDocsAndPos.nextDoc());
assertEquals(iwDocsAndPos.freq(), memDocsAndPos.freq());
}
@@ -354,7 +353,7 @@ public class TestMemoryIndexAgainstRAMDir extends BaseTokenStreamTestCase {
memory.addField("foo", "bar", analyzer);
LeafReader reader = (LeafReader) memory.createSearcher().getIndexReader();
assertEquals(1, reader.terms("foo").getSumTotalTermFreq());
- DocsAndPositionsEnum disi = reader.termPositionsEnum(new Term("foo", "bar"));
+ DocsEnum disi = reader.termDocsEnum(new Term("foo", "bar"), DocsEnum.FLAG_ALL);
int docid = disi.docID();
assertEquals(-1, docid);
assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -365,7 +364,7 @@ public class TestMemoryIndexAgainstRAMDir extends BaseTokenStreamTestCase {
// now reuse and check again
TermsEnum te = reader.terms("foo").iterator(null);
assertTrue(te.seekExact(new BytesRef("bar")));
- disi = te.docsAndPositions(null, disi);
+ disi = te.docs(null, disi);
docid = disi.docID();
assertEquals(-1, docid);
assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -426,7 +425,7 @@ public class TestMemoryIndexAgainstRAMDir extends BaseTokenStreamTestCase {
assertNull(reader.getNumericDocValues("not-in-index"));
assertNull(reader.getNormValues("not-in-index"));
assertNull(reader.termDocsEnum(new Term("not-in-index", "foo")));
- assertNull(reader.termPositionsEnum(new Term("not-in-index", "foo")));
+ assertNull(reader.termDocsEnum(new Term("not-in-index", "foo"), DocsEnum.FLAG_ALL));
assertNull(reader.terms("not-in-index"));
}
@@ -526,8 +525,8 @@ public class TestMemoryIndexAgainstRAMDir extends BaseTokenStreamTestCase {
assertNotNull(memTermEnum.next());
assertThat(termEnum.totalTermFreq(), equalTo(memTermEnum.totalTermFreq()));
- DocsAndPositionsEnum docsPosEnum = termEnum.docsAndPositions(null, null, 0);
- DocsAndPositionsEnum memDocsPosEnum = memTermEnum.docsAndPositions(null, null, 0);
+ DocsEnum docsPosEnum = termEnum.docs(null, null, DocsEnum.FLAG_POSITIONS);
+ DocsEnum memDocsPosEnum = memTermEnum.docs(null, null, DocsEnum.FLAG_POSITIONS);
String currentTerm = termEnum.term().utf8ToString();
assertThat("Token mismatch for field: " + field_name, currentTerm, equalTo(memTermEnum.term().utf8ToString()));
diff --git a/lucene/misc/src/java/org/apache/lucene/index/Sorter.java b/lucene/misc/src/java/org/apache/lucene/index/Sorter.java
index 22912bc..93b0eeb 100644
--- a/lucene/misc/src/java/org/apache/lucene/index/Sorter.java
+++ b/lucene/misc/src/java/org/apache/lucene/index/Sorter.java
@@ -20,6 +20,7 @@ package org.apache.lucene.index;
import java.io.IOException;
import java.util.Comparator;
+import org.apache.lucene.search.FakeScorer;
import org.apache.lucene.search.LeafFieldComparator;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Sort;
@@ -258,25 +259,6 @@ final class Sorter {
return getID();
}
- static final Scorer FAKESCORER = new Scorer(null) {
-
- @Override
- public float score() throws IOException { throw new UnsupportedOperationException(); }
-
- @Override
- public int freq() throws IOException { throw new UnsupportedOperationException(); }
-
- @Override
- public int docID() { throw new UnsupportedOperationException(); }
-
- @Override
- public int nextDoc() throws IOException { throw new UnsupportedOperationException(); }
-
- @Override
- public int advance(int target) throws IOException { throw new UnsupportedOperationException(); }
-
- @Override
- public long cost() { throw new UnsupportedOperationException(); }
- };
+ static final Scorer FAKESCORER = new FakeScorer();
}
diff --git a/lucene/misc/src/java/org/apache/lucene/index/SortingLeafReader.java b/lucene/misc/src/java/org/apache/lucene/index/SortingLeafReader.java
index ee6140b..4328900 100644
--- a/lucene/misc/src/java/org/apache/lucene/index/SortingLeafReader.java
+++ b/lucene/misc/src/java/org/apache/lucene/index/SortingLeafReader.java
@@ -20,21 +20,6 @@ package org.apache.lucene.index;
import java.io.IOException;
import java.util.Arrays;
-import org.apache.lucene.index.FilterLeafReader;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.BinaryDocValues;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.IndexOptions;
-import org.apache.lucene.index.FieldInfos;
-import org.apache.lucene.index.Fields;
-import org.apache.lucene.index.NumericDocValues;
-import org.apache.lucene.index.SortedDocValues;
-import org.apache.lucene.index.SortedNumericDocValues;
-import org.apache.lucene.index.SortedSetDocValues;
-import org.apache.lucene.index.StoredFieldVisitor;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.index.Sorter.DocMap;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Sort;
@@ -52,7 +37,7 @@ import org.apache.lucene.util.automaton.CompiledAutomaton;
/**
* An {@link org.apache.lucene.index.LeafReader} which supports sorting documents by a given
* {@link Sort}. You can use this class to sort an index as follows:
- *
+ *
*
* IndexWriter writer; // writer to which the sorted index will be added
* DirectoryReader reader; // reader on the input index
@@ -62,7 +47,7 @@ import org.apache.lucene.util.automaton.CompiledAutomaton;
* writer.close();
* reader.close();
*
- *
+ *
* @lucene.experimental
*/
public class SortingLeafReader extends FilterLeafReader {
@@ -94,7 +79,7 @@ public class SortingLeafReader extends FilterLeafReader {
private final Sorter.DocMap docMap;
private final IndexOptions indexOptions;
-
+
public SortingTerms(final Terms in, IndexOptions indexOptions, final Sorter.DocMap docMap) {
super(in);
this.docMap = docMap;
@@ -118,7 +103,7 @@ public class SortingLeafReader extends FilterLeafReader {
final Sorter.DocMap docMap; // pkg-protected to avoid synthetic accessor methods
private final IndexOptions indexOptions;
-
+
public SortingTermsEnum(final TermsEnum in, Sorter.DocMap docMap, IndexOptions indexOptions) {
super(in);
this.docMap = docMap;
@@ -146,6 +131,33 @@ public class SortingLeafReader extends FilterLeafReader {
@Override
public DocsEnum docs(Bits liveDocs, DocsEnum reuse, final int flags) throws IOException {
+
+ if (DocsEnum.requiresPositions(flags)) {
+ final DocsEnum inReuse;
+ final SortingDocsAndPositionsEnum wrapReuse;
+ if (reuse != null && reuse instanceof SortingDocsAndPositionsEnum) {
+ // if we're asked to reuse the given DocsEnum and it is Sorting, return
+ // the wrapped one, since some Codecs expect it.
+ wrapReuse = (SortingDocsAndPositionsEnum) reuse;
+ inReuse = wrapReuse.getWrapped();
+ } else {
+ wrapReuse = null;
+ inReuse = reuse;
+ }
+
+ final DocsEnum inDocsAndPositions = in.docs(newToOld(liveDocs), inReuse, flags);
+ if (inDocsAndPositions == null) {
+ return null;
+ }
+
+ // we ignore the fact that offsets may be stored but not asked for,
+ // since this code is expected to be used during addIndexes which will
+ // ask for everything. if that assumption changes in the future, we can
+ // factor in whether 'flags' says offsets are not required.
+ final boolean storeOffsets = indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
+ return new SortingDocsAndPositionsEnum(docMap.size(), wrapReuse, inDocsAndPositions, docMap, storeOffsets);
+ }
+
final DocsEnum inReuse;
final SortingDocsEnum wrapReuse;
if (reuse != null && reuse instanceof SortingDocsEnum) {
@@ -163,40 +175,13 @@ public class SortingLeafReader extends FilterLeafReader {
return new SortingDocsEnum(docMap.size(), wrapReuse, inDocs, withFreqs, docMap);
}
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, final int flags) throws IOException {
- final DocsAndPositionsEnum inReuse;
- final SortingDocsAndPositionsEnum wrapReuse;
- if (reuse != null && reuse instanceof SortingDocsAndPositionsEnum) {
- // if we're asked to reuse the given DocsEnum and it is Sorting, return
- // the wrapped one, since some Codecs expect it.
- wrapReuse = (SortingDocsAndPositionsEnum) reuse;
- inReuse = wrapReuse.getWrapped();
- } else {
- wrapReuse = null;
- inReuse = reuse;
- }
-
- final DocsAndPositionsEnum inDocsAndPositions = in.docsAndPositions(newToOld(liveDocs), inReuse, flags);
- if (inDocsAndPositions == null) {
- return null;
- }
-
- // we ignore the fact that offsets may be stored but not asked for,
- // since this code is expected to be used during addIndexes which will
- // ask for everything. if that assumption changes in the future, we can
- // factor in whether 'flags' says offsets are not required.
- final boolean storeOffsets = indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
- return new SortingDocsAndPositionsEnum(docMap.size(), wrapReuse, inDocsAndPositions, docMap, storeOffsets);
- }
-
}
private static class SortingBinaryDocValues extends BinaryDocValues {
-
+
private final BinaryDocValues in;
private final Sorter.DocMap docMap;
-
+
SortingBinaryDocValues(BinaryDocValues in, Sorter.DocMap docMap) {
this.in = in;
this.docMap = docMap;
@@ -207,7 +192,7 @@ public class SortingLeafReader extends FilterLeafReader {
return in.get(docMap.newToOld(docID));
}
}
-
+
private static class SortingNumericDocValues extends NumericDocValues {
private final NumericDocValues in;
@@ -223,33 +208,33 @@ public class SortingLeafReader extends FilterLeafReader {
return in.get(docMap.newToOld(docID));
}
}
-
+
private static class SortingSortedNumericDocValues extends SortedNumericDocValues {
-
+
private final SortedNumericDocValues in;
private final Sorter.DocMap docMap;
-
+
SortingSortedNumericDocValues(SortedNumericDocValues in, DocMap docMap) {
this.in = in;
this.docMap = docMap;
}
-
+
@Override
public int count() {
return in.count();
}
-
+
@Override
public void setDocument(int doc) {
in.setDocument(docMap.newToOld(doc));
}
-
+
@Override
public long valueAt(int index) {
return in.valueAt(index);
}
}
-
+
private static class SortingBits implements Bits {
private final Bits in;
@@ -270,12 +255,12 @@ public class SortingLeafReader extends FilterLeafReader {
return in.length();
}
}
-
+
private static class SortingSortedDocValues extends SortedDocValues {
-
+
private final SortedDocValues in;
private final Sorter.DocMap docMap;
-
+
SortingSortedDocValues(SortedDocValues in, Sorter.DocMap docMap) {
this.in = in;
this.docMap = docMap;
@@ -306,12 +291,12 @@ public class SortingLeafReader extends FilterLeafReader {
return in.lookupTerm(key);
}
}
-
+
private static class SortingSortedSetDocValues extends SortedSetDocValues {
-
+
private final SortedSetDocValues in;
private final Sorter.DocMap docMap;
-
+
SortingSortedSetDocValues(SortedSetDocValues in, Sorter.DocMap docMap) {
this.in = in;
this.docMap = docMap;
@@ -344,14 +329,14 @@ public class SortingLeafReader extends FilterLeafReader {
}
static class SortingDocsEnum extends FilterDocsEnum {
-
+
private static final class DocFreqSorter extends TimSorter {
-
+
private int[] docs;
private int[] freqs;
private final int[] tmpDocs;
private int[] tmpFreqs;
-
+
public DocFreqSorter(int maxDoc) {
super(maxDoc / 64);
this.tmpDocs = new int[maxDoc / 64];
@@ -369,13 +354,13 @@ public class SortingLeafReader extends FilterLeafReader {
protected int compare(int i, int j) {
return docs[i] - docs[j];
}
-
+
@Override
protected void swap(int i, int j) {
int tmpDoc = docs[i];
docs[i] = docs[j];
docs[j] = tmpDoc;
-
+
if (freqs != null) {
int tmpFreq = freqs[i];
freqs[i] = freqs[j];
@@ -483,43 +468,43 @@ public class SortingLeafReader extends FilterLeafReader {
// don't bother to implement efficiently for now.
return slowAdvance(target);
}
-
+
@Override
public int docID() {
return docIt < 0 ? -1 : docIt >= upto ? NO_MORE_DOCS : docs[docIt];
}
-
+
@Override
public int freq() throws IOException {
return withFreqs && docIt < upto ? freqs[docIt] : 1;
}
-
+
@Override
public int nextDoc() throws IOException {
if (++docIt >= upto) return NO_MORE_DOCS;
return docs[docIt];
}
-
+
/** Returns the wrapped {@link DocsEnum}. */
DocsEnum getWrapped() {
return in;
}
}
-
- static class SortingDocsAndPositionsEnum extends FilterDocsAndPositionsEnum {
-
+
+ static class SortingDocsAndPositionsEnum extends FilterDocsEnum {
+
/**
* A {@link TimSorter} which sorts two parallel arrays of doc IDs and
* offsets in one go. Everytime a doc ID is 'swapped', its corresponding offset
* is swapped too.
*/
private static final class DocOffsetSorter extends TimSorter {
-
+
private int[] docs;
private long[] offsets;
private final int[] tmpDocs;
private final long[] tmpOffsets;
-
+
public DocOffsetSorter(int maxDoc) {
super(maxDoc / 64);
this.tmpDocs = new int[maxDoc / 64];
@@ -535,13 +520,13 @@ public class SortingLeafReader extends FilterLeafReader {
protected int compare(int i, int j) {
return docs[i] - docs[j];
}
-
+
@Override
protected void swap(int i, int j) {
int tmpDoc = docs[i];
docs[i] = docs[j];
docs[j] = tmpDoc;
-
+
long tmpOffset = offsets[i];
offsets[i] = offsets[j];
offsets[j] = tmpOffset;
@@ -570,16 +555,16 @@ public class SortingLeafReader extends FilterLeafReader {
return tmpDocs[i] - docs[j];
}
}
-
+
private final int maxDoc;
private final DocOffsetSorter sorter;
private int[] docs;
private long[] offsets;
private final int upto;
-
+
private final IndexInput postingInput;
private final boolean storeOffsets;
-
+
private int docIt = -1;
private int pos;
private int startOffset = -1;
@@ -589,7 +574,7 @@ public class SortingLeafReader extends FilterLeafReader {
private final RAMFile file;
- SortingDocsAndPositionsEnum(int maxDoc, SortingDocsAndPositionsEnum reuse, final DocsAndPositionsEnum in, Sorter.DocMap docMap, boolean storeOffsets) throws IOException {
+ SortingDocsAndPositionsEnum(int maxDoc, SortingDocsAndPositionsEnum reuse, final DocsEnum in, Sorter.DocMap docMap, boolean storeOffsets) throws IOException {
super(in);
this.maxDoc = maxDoc;
this.storeOffsets = storeOffsets;
@@ -632,14 +617,14 @@ public class SortingLeafReader extends FilterLeafReader {
}
// for testing
- boolean reused(DocsAndPositionsEnum other) {
+ boolean reused(DocsEnum other) {
if (other == null || !(other instanceof SortingDocsAndPositionsEnum)) {
return false;
}
return docs == ((SortingDocsAndPositionsEnum) other).docs;
}
- private void addPositions(final DocsAndPositionsEnum in, final IndexOutput out) throws IOException {
+ private void addPositions(final DocsEnum in, final IndexOutput out) throws IOException {
int freq = in.freq();
out.writeVInt(freq);
int previousPosition = 0;
@@ -648,7 +633,7 @@ public class SortingLeafReader extends FilterLeafReader {
final int pos = in.nextPosition();
final BytesRef payload = in.getPayload();
// The low-order bit of token is set only if there is a payload, the
- // previous bits are the delta-encoded position.
+ // previous bits are the delta-encoded position.
final int token = (pos - previousPosition) << 1 | (payload == null ? 0 : 1);
out.writeVInt(token);
previousPosition = pos;
@@ -665,34 +650,34 @@ public class SortingLeafReader extends FilterLeafReader {
}
}
}
-
+
@Override
public int advance(final int target) throws IOException {
// need to support it for checkIndex, but in practice it won't be called, so
// don't bother to implement efficiently for now.
return slowAdvance(target);
}
-
+
@Override
public int docID() {
return docIt < 0 ? -1 : docIt >= upto ? NO_MORE_DOCS : docs[docIt];
}
-
+
@Override
public int endOffset() throws IOException {
return endOffset;
}
-
+
@Override
public int freq() throws IOException {
return currFreq;
}
-
+
@Override
public BytesRef getPayload() throws IOException {
return payload.length == 0 ? null : payload;
}
-
+
@Override
public int nextDoc() throws IOException {
if (++docIt >= upto) return DocIdSetIterator.NO_MORE_DOCS;
@@ -703,7 +688,7 @@ public class SortingLeafReader extends FilterLeafReader {
endOffset = 0;
return docs[docIt];
}
-
+
@Override
public int nextPosition() throws IOException {
final int token = postingInput.readVInt();
@@ -724,14 +709,14 @@ public class SortingLeafReader extends FilterLeafReader {
}
return pos;
}
-
+
@Override
public int startOffset() throws IOException {
return startOffset;
}
- /** Returns the wrapped {@link DocsAndPositionsEnum}. */
- DocsAndPositionsEnum getWrapped() {
+ /** Returns the wrapped {@link DocsEnum}. */
+ DocsEnum getWrapped() {
return in;
}
}
@@ -767,12 +752,12 @@ public class SortingLeafReader extends FilterLeafReader {
public void document(final int docID, final StoredFieldVisitor visitor) throws IOException {
in.document(docMap.newToOld(docID), visitor);
}
-
+
@Override
public Fields fields() throws IOException {
return new SortingFields(in.fields(), in.getFieldInfos(), docMap);
}
-
+
@Override
public BinaryDocValues getBinaryDocValues(String field) throws IOException {
BinaryDocValues oldDocValues = in.getBinaryDocValues(field);
@@ -782,7 +767,7 @@ public class SortingLeafReader extends FilterLeafReader {
return new SortingBinaryDocValues(oldDocValues, docMap);
}
}
-
+
@Override
public Bits getLiveDocs() {
final Bits inLiveDocs = in.getLiveDocs();
@@ -792,7 +777,7 @@ public class SortingLeafReader extends FilterLeafReader {
return new SortingBits(inLiveDocs, docMap);
}
}
-
+
@Override
public NumericDocValues getNormValues(String field) throws IOException {
final NumericDocValues norm = in.getNormValues(field);
@@ -809,7 +794,7 @@ public class SortingLeafReader extends FilterLeafReader {
if (oldDocValues == null) return null;
return new SortingNumericDocValues(oldDocValues, docMap);
}
-
+
@Override
public SortedNumericDocValues getSortedNumericDocValues(String field)
throws IOException {
@@ -830,7 +815,7 @@ public class SortingLeafReader extends FilterLeafReader {
return new SortingSortedDocValues(sortedDV, docMap);
}
}
-
+
@Override
public SortedSetDocValues getSortedSetDocValues(String field) throws IOException {
SortedSetDocValues sortedSetDV = in.getSortedSetDocValues(field);
@@ -838,7 +823,7 @@ public class SortingLeafReader extends FilterLeafReader {
return null;
} else {
return new SortingSortedSetDocValues(sortedSetDV, docMap);
- }
+ }
}
@Override
diff --git a/lucene/misc/src/java/org/apache/lucene/uninverting/DocTermOrds.java b/lucene/misc/src/java/org/apache/lucene/uninverting/DocTermOrds.java
index 6b6eb8f..0826deb 100644
--- a/lucene/misc/src/java/org/apache/lucene/uninverting/DocTermOrds.java
+++ b/lucene/misc/src/java/org/apache/lucene/uninverting/DocTermOrds.java
@@ -25,7 +25,6 @@ import java.util.List;
import org.apache.lucene.codecs.PostingsFormat; // javadocs
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.LeafReader;
@@ -617,11 +616,6 @@ public class DocTermOrds implements Accountable {
return termsEnum.docs(liveDocs, reuse, flags);
}
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- return termsEnum.docsAndPositions(liveDocs, reuse, flags);
- }
-
@Override
public BytesRef term() {
return term;
diff --git a/lucene/misc/src/test/org/apache/lucene/index/SorterTestBase.java b/lucene/misc/src/test/org/apache/lucene/index/SorterTestBase.java
index 30b0be7..a43a7ed 100644
--- a/lucene/misc/src/test/org/apache/lucene/index/SorterTestBase.java
+++ b/lucene/misc/src/test/org/apache/lucene/index/SorterTestBase.java
@@ -31,8 +31,8 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.document.BinaryDocValuesField;
import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;
@@ -40,27 +40,8 @@ import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.BinaryDocValues;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.FieldInvertState;
-import org.apache.lucene.index.IndexOptions;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.NumericDocValues;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.SlowCompositeReaderWrapper;
-import org.apache.lucene.index.SortedDocValues;
-import org.apache.lucene.index.SortedNumericDocValues;
-import org.apache.lucene.index.SortedSetDocValues;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum.SeekStatus;
-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.index.SortingLeafReader.SortingDocsAndPositionsEnum;
import org.apache.lucene.index.SortingLeafReader.SortingDocsEnum;
+import org.apache.lucene.index.TermsEnum.SeekStatus;
import org.apache.lucene.search.CollectionStatistics;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.TermStatistics;
@@ -254,7 +235,7 @@ public abstract class SorterTestBase extends LuceneTestCase {
public void testDocsAndPositionsEnum() throws Exception {
TermsEnum termsEnum = sortedReader.terms(DOC_POSITIONS_FIELD).iterator(null);
assertEquals(SeekStatus.FOUND, termsEnum.seekCeil(new BytesRef(DOC_POSITIONS_TERM)));
- DocsAndPositionsEnum sortedPositions = termsEnum.docsAndPositions(null, null);
+ DocsEnum sortedPositions = termsEnum.docs(null, null, DocsEnum.FLAG_ALL);
int doc;
// test nextDoc()
@@ -270,10 +251,10 @@ public abstract class SorterTestBase extends LuceneTestCase {
}
// test advance()
- final DocsAndPositionsEnum reuse = sortedPositions;
- sortedPositions = termsEnum.docsAndPositions(null, reuse);
- if (sortedPositions instanceof SortingDocsAndPositionsEnum) {
- assertTrue(((SortingDocsAndPositionsEnum) sortedPositions).reused(reuse)); // make sure reuse worked
+ final DocsEnum reuse = sortedPositions;
+ sortedPositions = termsEnum.docs(null, reuse, DocsEnum.FLAG_ALL);
+ if (sortedPositions instanceof SortingDocsEnum) {
+ assertTrue(((SortingDocsEnum) sortedPositions).reused(reuse)); // make sure reuse worked
}
doc = 0;
while ((doc = sortedPositions.advance(doc + TestUtil.nextInt(random(), 1, 5))) != DocIdSetIterator.NO_MORE_DOCS) {
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/CustomScoreQuery.java b/lucene/queries/src/java/org/apache/lucene/queries/CustomScoreQuery.java
index 3aae0cc..00cc97c 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/CustomScoreQuery.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/CustomScoreQuery.java
@@ -18,23 +18,24 @@ package org.apache.lucene.queries;
*/
import java.io.IOException;
+import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Set;
-import java.util.Arrays;
-import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.function.FunctionQuery;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.search.ComplexExplanation;
import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Weight;
import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.ToStringUtils;
/**
@@ -234,14 +235,14 @@ public class CustomScoreQuery extends Query {
}
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
- Scorer subQueryScorer = subQueryWeight.scorer(context, acceptDocs);
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
+ Scorer subQueryScorer = subQueryWeight.scorer(context, flags, acceptDocs);
if (subQueryScorer == null) {
return null;
}
Scorer[] valSrcScorers = new Scorer[valSrcWeights.length];
for(int i = 0; i < valSrcScorers.length; i++) {
- valSrcScorers[i] = valSrcWeights[i].scorer(context, acceptDocs);
+ valSrcScorers[i] = valSrcWeights[i].scorer(context, flags, acceptDocs);
}
return new CustomScorer(CustomScoreQuery.this.getCustomScoreProvider(context), this, queryWeight, subQueryScorer, valSrcScorers);
}
@@ -286,6 +287,8 @@ public class CustomScoreQuery extends Query {
private final CustomScoreProvider provider;
private final float[] vScores; // reused in score() to avoid allocating this array for each doc
+ // TODO : can we use FilterScorer here instead?
+
// constructor
private CustomScorer(CustomScoreProvider provider, CustomWeight w, float qWeight,
Scorer subQueryScorer, Scorer[] valSrcScorers) {
@@ -328,6 +331,36 @@ public class CustomScoreQuery extends Query {
}
@Override
+ public int nextPosition() throws IOException {
+ return subQueryScorer.nextPosition();
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return subQueryScorer.startPosition();
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return subQueryScorer.endPosition();
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return subQueryScorer.startOffset();
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return subQueryScorer.endOffset();
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return subQueryScorer.getPayload();
+ }
+
+ @Override
public Collection getChildren() {
return Collections.singleton(new ChildScorer(subQueryScorer, "CUSTOM"));
}
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/BoostedQuery.java b/lucene/queries/src/java/org/apache/lucene/queries/function/BoostedQuery.java
index 90bb2e0..cb46dec 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/BoostedQuery.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/BoostedQuery.java
@@ -17,18 +17,24 @@ package org.apache.lucene.queries.function;
* limitations under the License.
*/
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.search.*;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.ToStringUtils;
-
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
-import java.util.Set;
import java.util.Map;
+import java.util.Set;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.ComplexExplanation;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.FilterScorer;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.ToStringUtils;
/**
* Query that is boosted by a ValueSource
@@ -97,8 +103,8 @@ public class BoostedQuery extends Query {
}
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
- Scorer subQueryScorer = qWeight.scorer(context, acceptDocs);
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
+ Scorer subQueryScorer = qWeight.scorer(context, flags, acceptDocs);
if (subQueryScorer == null) {
return null;
}
@@ -122,41 +128,24 @@ public class BoostedQuery extends Query {
}
- private class CustomScorer extends Scorer {
+ private class CustomScorer extends FilterScorer {
private final BoostedQuery.BoostedWeight weight;
private final float qWeight;
- private final Scorer scorer;
private final FunctionValues vals;
private final LeafReaderContext readerContext;
private CustomScorer(LeafReaderContext readerContext, BoostedQuery.BoostedWeight w, float qWeight,
Scorer scorer, ValueSource vs) throws IOException {
- super(w);
+ super(scorer);
this.weight = w;
this.qWeight = qWeight;
- this.scorer = scorer;
this.readerContext = readerContext;
this.vals = vs.getValues(weight.fcontext, readerContext);
}
- @Override
- public int docID() {
- return scorer.docID();
- }
-
- @Override
- public int advance(int target) throws IOException {
- return scorer.advance(target);
- }
-
- @Override
- public int nextDoc() throws IOException {
- return scorer.nextDoc();
- }
-
@Override
public float score() throws IOException {
- float score = qWeight * scorer.score() * vals.floatVal(scorer.docID());
+ float score = qWeight * in.score() * vals.floatVal(in.docID());
// Current Lucene priority queues can't handle NaN and -Infinity, so
// map to -Float.MAX_VALUE. This conditional handles both -infinity
@@ -165,13 +154,8 @@ public class BoostedQuery extends Query {
}
@Override
- public int freq() throws IOException {
- return scorer.freq();
- }
-
- @Override
public Collection getChildren() {
- return Collections.singleton(new ChildScorer(scorer, "CUSTOM"));
+ return Collections.singleton(new ChildScorer(in, "CUSTOM"));
}
public Explanation explain(int doc) throws IOException {
@@ -187,10 +171,6 @@ public class BoostedQuery extends Query {
return res;
}
- @Override
- public long cost() {
- return scorer.cost();
- }
}
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/FunctionQuery.java b/lucene/queries/src/java/org/apache/lucene/queries/function/FunctionQuery.java
index 4abc312..d58431b 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/FunctionQuery.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/FunctionQuery.java
@@ -17,15 +17,22 @@ package org.apache.lucene.queries.function;
* limitations under the License.
*/
-import org.apache.lucene.index.LeafReaderContext;
+import java.io.IOException;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
-import org.apache.lucene.search.*;
+import org.apache.lucene.search.ComplexExplanation;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;
-
-import java.io.IOException;
-import java.util.Set;
-import java.util.Map;
+import org.apache.lucene.util.BytesRef;
/**
@@ -89,13 +96,13 @@ public class FunctionQuery extends Query {
}
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
return new AllScorer(context, acceptDocs, this, queryWeight);
}
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
- return ((AllScorer)scorer(context, context.reader().getLiveDocs())).explain(doc);
+ return ((AllScorer)scorer(context, DocsEnum.FLAG_FREQS, context.reader().getLiveDocs())).explain(doc);
}
}
@@ -166,6 +173,36 @@ public class FunctionQuery extends Query {
return 1;
}
+ @Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
public Explanation explain(int doc) throws IOException {
float sc = qWeight * vals.floatVal(doc);
@@ -177,6 +214,7 @@ public class FunctionQuery extends Query {
result.addDetail(new Explanation(weight.queryNorm,"queryNorm"));
return result;
}
+
}
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSourceScorer.java b/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSourceScorer.java
index ec8aced..4a54151 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSourceScorer.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSourceScorer.java
@@ -17,12 +17,13 @@ package org.apache.lucene.queries.function;
* limitations under the License.
*/
+import java.io.IOException;
+
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.util.Bits;
-
-import java.io.IOException;
+import org.apache.lucene.util.BytesRef;
/**
* {@link Scorer} which returns the result of {@link FunctionValues#floatVal(int)} as
@@ -93,6 +94,36 @@ public class ValueSourceScorer extends Scorer {
}
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public long cost() {
return maxDoc;
}
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/QueryValueSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/QueryValueSource.java
index 3d57315..957a8b7 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/QueryValueSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/QueryValueSource.java
@@ -17,12 +17,16 @@
package org.apache.lucene.queries.function.valuesource;
+import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.docvalues.FloatDocValues;
-import org.apache.lucene.search.*;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.mutable.MutableValue;
import org.apache.lucene.util.mutable.MutableValueFloat;
@@ -123,7 +127,7 @@ class QueryDocValues extends FloatDocValues {
try {
if (doc < lastDocRequested) {
if (noMatches) return defVal;
- scorer = weight.scorer(readerContext, acceptDocs);
+ scorer = weight.scorer(readerContext, DocsEnum.FLAG_FREQS, acceptDocs);
if (scorer==null) {
noMatches = true;
return defVal;
@@ -154,7 +158,7 @@ class QueryDocValues extends FloatDocValues {
try {
if (doc < lastDocRequested) {
if (noMatches) return false;
- scorer = weight.scorer(readerContext, acceptDocs);
+ scorer = weight.scorer(readerContext, DocsEnum.FLAG_FREQS, acceptDocs);
scorerDoc = -1;
if (scorer==null) {
noMatches = true;
@@ -212,7 +216,7 @@ class QueryDocValues extends FloatDocValues {
mval.exists = false;
return;
}
- scorer = weight.scorer(readerContext, acceptDocs);
+ scorer = weight.scorer(readerContext, DocsEnum.FLAG_FREQS, acceptDocs);
scorerDoc = -1;
if (scorer==null) {
noMatches = true;
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TFValueSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TFValueSource.java
index 4d73d55..872cc74 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TFValueSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TFValueSource.java
@@ -17,7 +17,14 @@ package org.apache.lucene.queries.function.valuesource;
* limitations under the License.
*/
-import org.apache.lucene.index.*;
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.docvalues.FloatDocValues;
import org.apache.lucene.search.DocIdSetIterator;
@@ -25,9 +32,6 @@ import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.similarities.TFIDFSimilarity;
import org.apache.lucene.util.BytesRef;
-import java.io.IOException;
-import java.util.Map;
-
/**
* Function that returns {@link TFIDFSimilarity#tf(float)}
* for every document.
@@ -84,6 +88,36 @@ public class TFValueSource extends TermFreqValueSource {
}
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public int docID() {
return DocIdSetIterator.NO_MORE_DOCS;
}
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TermFreqValueSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TermFreqValueSource.java
index b5e4bc2..e489918 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TermFreqValueSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TermFreqValueSource.java
@@ -17,15 +17,19 @@
package org.apache.lucene.queries.function.valuesource;
-import org.apache.lucene.index.*;
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.docvalues.IntDocValues;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.BytesRef;
-import java.io.IOException;
-import java.util.Map;
-
/**
* Function that returns {@link DocsEnum#freq()} for the
* supplied term in every document.
@@ -77,6 +81,36 @@ public class TermFreqValueSource extends DocFreqValueSource {
}
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
public int docID() {
return DocIdSetIterator.NO_MORE_DOCS;
}
diff --git a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionPostingsReader.java b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionPostingsReader.java
index b716acc..dc8bdff 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionPostingsReader.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionPostingsReader.java
@@ -22,7 +22,6 @@ import java.io.IOException;
import org.apache.lucene.codecs.BlockTermState;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.codecs.PostingsReaderBase;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.SegmentReadState;
@@ -67,6 +66,19 @@ final class IDVersionPostingsReader extends PostingsReaderBase {
public DocsEnum docs(FieldInfo fieldInfo, BlockTermState termState, Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
SingleDocsEnum docsEnum;
+ if (DocsEnum.requiresPositions(flags)) {
+ SingleDocsAndPositionsEnum posEnum;
+
+ if (reuse instanceof SingleDocsAndPositionsEnum) {
+ posEnum = (SingleDocsAndPositionsEnum) reuse;
+ } else {
+ posEnum = new SingleDocsAndPositionsEnum();
+ }
+ IDVersionTermState _termState = (IDVersionTermState) termState;
+ posEnum.reset(_termState.docID, _termState.idVersion, liveDocs);
+ return posEnum;
+ }
+
if (reuse instanceof SingleDocsEnum) {
docsEnum = (SingleDocsEnum) reuse;
} else {
@@ -78,21 +90,6 @@ final class IDVersionPostingsReader extends PostingsReaderBase {
}
@Override
- public DocsAndPositionsEnum docsAndPositions(FieldInfo fieldInfo, BlockTermState _termState, Bits liveDocs,
- DocsAndPositionsEnum reuse, int flags) {
- SingleDocsAndPositionsEnum posEnum;
-
- if (reuse instanceof SingleDocsAndPositionsEnum) {
- posEnum = (SingleDocsAndPositionsEnum) reuse;
- } else {
- posEnum = new SingleDocsAndPositionsEnum();
- }
- IDVersionTermState termState = (IDVersionTermState) _termState;
- posEnum.reset(termState.docID, termState.idVersion, liveDocs);
- return posEnum;
- }
-
- @Override
public long ramBytesUsed() {
return 0;
}
diff --git a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionSegmentTermsEnum.java b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionSegmentTermsEnum.java
index 481e74d..4491d5a 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionSegmentTermsEnum.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionSegmentTermsEnum.java
@@ -21,9 +21,7 @@ import java.io.IOException;
import java.io.PrintStream;
import org.apache.lucene.codecs.BlockTermState;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.TermState;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.ByteArrayDataInput;
@@ -1010,18 +1008,6 @@ public final class IDVersionSegmentTermsEnum extends TermsEnum {
}
@Override
- public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- if (fr.fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
- // Positions were not indexed:
- return null;
- }
-
- assert !eof;
- currentFrame.decodeMetaData();
- return fr.parent.postingsReader.docsAndPositions(fr.fieldInfo, currentFrame.state, skipDocs, reuse, flags);
- }
-
- @Override
public void seekExact(BytesRef target, TermState otherState) {
// if (DEBUG) {
// System.out.println("BTTR.seekExact termState seg=" + segment + " target=" + target.utf8ToString() + " " + target + " state=" + otherState);
diff --git a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/SingleDocsAndPositionsEnum.java b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/SingleDocsAndPositionsEnum.java
index eecc700..d34104b 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/SingleDocsAndPositionsEnum.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/SingleDocsAndPositionsEnum.java
@@ -17,11 +17,13 @@ package org.apache.lucene.codecs.idversion;
* limitations under the License.
*/
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import java.io.IOException;
+
+import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
-class SingleDocsAndPositionsEnum extends DocsAndPositionsEnum {
+class SingleDocsAndPositionsEnum extends DocsEnum {
private int doc;
private int pos;
private int singleDocID;
@@ -89,6 +91,16 @@ class SingleDocsAndPositionsEnum extends DocsAndPositionsEnum {
}
@Override
+ public int startPosition() throws IOException {
+ return pos;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return pos;
+ }
+
+ @Override
public BytesRef getPayload() {
return payload;
}
diff --git a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/SingleDocsEnum.java b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/SingleDocsEnum.java
index b29619c..1031aee 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/SingleDocsEnum.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/SingleDocsEnum.java
@@ -17,8 +17,11 @@ package org.apache.lucene.codecs.idversion;
* limitations under the License.
*/
+import java.io.IOException;
+
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
class SingleDocsEnum extends DocsEnum {
@@ -68,4 +71,34 @@ class SingleDocsEnum extends DocsEnum {
public int freq() {
return 1;
}
+
+ @Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ throw new UnsupportedOperationException();
+ }
}
diff --git a/lucene/sandbox/src/java/org/apache/lucene/search/TermAutomatonQuery.java b/lucene/sandbox/src/java/org/apache/lucene/search/TermAutomatonQuery.java
index 7120dc2..a11233c 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/search/TermAutomatonQuery.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/search/TermAutomatonQuery.java
@@ -24,7 +24,7 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
@@ -325,7 +325,7 @@ public class TermAutomatonQuery extends Query {
static class EnumAndScorer {
public final int termID;
- public final DocsAndPositionsEnum posEnum;
+ public final DocsEnum posEnum;
// How many positions left in the current document:
public int posLeft;
@@ -333,7 +333,7 @@ public class TermAutomatonQuery extends Query {
// Current position
public int pos;
- public EnumAndScorer(int termID, DocsAndPositionsEnum posEnum) {
+ public EnumAndScorer(int termID, DocsEnum posEnum) {
this.termID = termID;
this.posEnum = posEnum;
}
@@ -385,7 +385,7 @@ public class TermAutomatonQuery extends Query {
}
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
// Initialize the enums; null for a given slot means that term didn't appear in this reader
EnumAndScorer[] enums = new EnumAndScorer[idToTerm.size()];
@@ -399,8 +399,7 @@ public class TermAutomatonQuery extends Query {
TermsEnum termsEnum = context.reader().terms(field).iterator(null);
termsEnum.seekExact(term, state);
- enums[ent.getKey()] = new EnumAndScorer(ent.getKey(),
- termsEnum.docsAndPositions(acceptDocs, null, 0));
+ enums[ent.getKey()] = new EnumAndScorer(ent.getKey(), termsEnum.docs(acceptDocs, null, DocsEnum.FLAG_POSITIONS));
}
}
diff --git a/lucene/sandbox/src/java/org/apache/lucene/search/TermAutomatonScorer.java b/lucene/sandbox/src/java/org/apache/lucene/search/TermAutomatonScorer.java
index 106c307..e30efd7 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/search/TermAutomatonScorer.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/search/TermAutomatonScorer.java
@@ -326,6 +326,36 @@ class TermAutomatonScorer extends Scorer {
}
@Override
+ public int nextPosition() throws IOException {
+ return -1; // TODO can we get positional information out of this Scorer?
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return 0;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return 0;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return 0;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return 0;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public int docID() {
return docID;
}
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java
index 7c05665..8886c15 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java
@@ -29,7 +29,7 @@ import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.BinaryDocValues;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.MultiDocValues;
import org.apache.lucene.index.Terms;
@@ -263,7 +263,7 @@ public class BlendedInfixSuggester extends AnalyzingInfixSuggester {
if (matchedTokens.contains(docTerm) || docTerm.startsWith(prefixToken)) {
- DocsAndPositionsEnum docPosEnum = it.docsAndPositions(null, null, DocsAndPositionsEnum.FLAG_OFFSETS);
+ DocsEnum docPosEnum = it.docs(null, null, DocsEnum.FLAG_OFFSETS);
docPosEnum.nextDoc();
// use the first occurrence of the term
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java
index d08a72f..8a4d24c 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java
@@ -25,7 +25,6 @@ import org.apache.lucene.codecs.FieldsConsumer;
import org.apache.lucene.codecs.FieldsProducer;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.index.AssertingLeafReader;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.Fields;
@@ -161,7 +160,6 @@ public final class AssertingPostingsFormat extends PostingsFormat {
termsEnum = terms.iterator(termsEnum);
BytesRefBuilder lastTerm = null;
DocsEnum docsEnum = null;
- DocsAndPositionsEnum posEnum = null;
boolean hasFreqs = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) >= 0;
boolean hasPositions = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
@@ -191,14 +189,14 @@ public final class AssertingPostingsFormat extends PostingsFormat {
}
docsEnum = termsEnum.docs(null, docsEnum, flags);
} else {
+ flags = DocsEnum.FLAG_POSITIONS;
if (hasPayloads) {
- flags |= DocsAndPositionsEnum.FLAG_PAYLOADS;
+ flags |= DocsEnum.FLAG_PAYLOADS;
}
if (hasOffsets) {
- flags = flags | DocsAndPositionsEnum.FLAG_OFFSETS;
+ flags = flags | DocsEnum.FLAG_OFFSETS;
}
- posEnum = termsEnum.docsAndPositions(null, posEnum, flags);
- docsEnum = posEnum;
+ docsEnum = termsEnum.docs(null, docsEnum, flags);
}
assert docsEnum != null : "termsEnum=" + termsEnum + " hasPositions=" + hasPositions;
@@ -220,13 +218,13 @@ public final class AssertingPostingsFormat extends PostingsFormat {
int lastPos = -1;
int lastStartOffset = -1;
for(int i=0;i= lastPos: "pos=" + pos + " vs lastPos=" + lastPos + " i=" + i + " freq=" + freq;
lastPos = pos;
if (hasOffsets) {
- int startOffset = posEnum.startOffset();
- int endOffset = posEnum.endOffset();
+ int startOffset = docsEnum.startOffset();
+ int endOffset = docsEnum.endOffset();
assert endOffset >= startOffset;
assert startOffset >= lastStartOffset;
lastStartOffset = startOffset;
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java
index aeb3521..44f4362 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java
@@ -34,7 +34,6 @@ import org.apache.lucene.codecs.FieldsConsumer;
import org.apache.lucene.codecs.FieldsProducer;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.codecs.TermStats;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.Fields;
@@ -253,7 +252,6 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat {
long sumTotalTermFreq = 0;
long sumDocFreq = 0;
DocsEnum docsEnum = null;
- DocsAndPositionsEnum posEnum = null;
int enumFlags;
IndexOptions indexOptions = fieldInfo.getIndexOptions();
@@ -268,15 +266,15 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat {
enumFlags = DocsEnum.FLAG_FREQS;
} else if (writeOffsets == false) {
if (writePayloads) {
- enumFlags = DocsAndPositionsEnum.FLAG_PAYLOADS;
+ enumFlags = DocsEnum.FLAG_PAYLOADS;
} else {
enumFlags = 0;
}
} else {
if (writePayloads) {
- enumFlags = DocsAndPositionsEnum.FLAG_PAYLOADS | DocsAndPositionsEnum.FLAG_OFFSETS;
+ enumFlags = DocsEnum.FLAG_PAYLOADS | DocsEnum.FLAG_OFFSETS;
} else {
- enumFlags = DocsAndPositionsEnum.FLAG_OFFSETS;
+ enumFlags = DocsEnum.FLAG_OFFSETS;
}
}
@@ -286,14 +284,7 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat {
break;
}
RAMPostingsWriterImpl postingsWriter = termsConsumer.startTerm(term);
-
- if (writePositions) {
- posEnum = termsEnum.docsAndPositions(null, posEnum, enumFlags);
- docsEnum = posEnum;
- } else {
- docsEnum = termsEnum.docs(null, docsEnum, enumFlags);
- posEnum = null;
- }
+ docsEnum = termsEnum.docs(null, docsEnum, enumFlags);
int docFreq = 0;
long totalTermFreq = 0;
@@ -316,13 +307,13 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat {
postingsWriter.startDoc(docID, freq);
if (writePositions) {
for (int i=0;i doc : "backwards nextDoc from " + doc + " to " + nextDoc + " " + in;
if (nextDoc == DocIdSetIterator.NO_MORE_DOCS) {
state = DocsEnumState.FINISHED;
- } else {
- state = DocsEnumState.ITERATING;
- }
- assert super.docID() == nextDoc;
- return doc = nextDoc;
- }
-
- @Override
- public int advance(int target) throws IOException {
- assertThread("Docs enums", creationThread);
- assert state != DocsEnumState.FINISHED : "advance() called after NO_MORE_DOCS";
- assert target > doc : "target must be > docID(), got " + target + " <= " + doc;
- int advanced = super.advance(target);
- assert advanced >= target : "backwards advance from: " + target + " to: " + advanced;
- if (advanced == DocIdSetIterator.NO_MORE_DOCS) {
- state = DocsEnumState.FINISHED;
- } else {
- state = DocsEnumState.ITERATING;
- }
- assert super.docID() == advanced;
- return doc = advanced;
- }
-
- @Override
- public int docID() {
- assertThread("Docs enums", creationThread);
- assert doc == super.docID() : " invalid docID() in " + in.getClass() + " " + super.docID() + " instead of " + doc;
- return doc;
- }
-
- @Override
- public int freq() throws IOException {
- assertThread("Docs enums", creationThread);
- assert state != DocsEnumState.START : "freq() called before nextDoc()/advance()";
- assert state != DocsEnumState.FINISHED : "freq() called after NO_MORE_DOCS";
- int freq = super.freq();
- assert freq > 0;
- return freq;
- }
- }
-
- static class AssertingDocsAndPositionsEnum extends FilterDocsAndPositionsEnum {
- private final Thread creationThread = Thread.currentThread();
- private DocsEnumState state = DocsEnumState.START;
- private int positionMax = 0;
- private int positionCount = 0;
- private int doc;
-
- public AssertingDocsAndPositionsEnum(DocsAndPositionsEnum in) {
- super(in);
- int docid = in.docID();
- assert docid == -1 : "invalid initial doc id: " + docid;
- doc = -1;
- }
-
- @Override
- public int nextDoc() throws IOException {
- assertThread("Docs enums", creationThread);
- assert state != DocsEnumState.FINISHED : "nextDoc() called after NO_MORE_DOCS";
- int nextDoc = super.nextDoc();
- assert nextDoc > doc : "backwards nextDoc from " + doc + " to " + nextDoc;
- positionCount = 0;
- if (nextDoc == DocIdSetIterator.NO_MORE_DOCS) {
- state = DocsEnumState.FINISHED;
positionMax = 0;
} else {
state = DocsEnumState.ITERATING;
positionMax = super.freq();
}
+ positionCount = 0;
assert super.docID() == nextDoc;
return doc = nextDoc;
}
@@ -372,7 +297,6 @@ public class AssertingLeafReader extends FilterLeafReader {
assert target > doc : "target must be > docID(), got " + target + " <= " + doc;
int advanced = super.advance(target);
assert advanced >= target : "backwards advance from: " + target + " to: " + advanced;
- positionCount = 0;
if (advanced == DocIdSetIterator.NO_MORE_DOCS) {
state = DocsEnumState.FINISHED;
positionMax = 0;
@@ -380,6 +304,7 @@ public class AssertingLeafReader extends FilterLeafReader {
state = DocsEnumState.ITERATING;
positionMax = super.freq();
}
+ positionCount = 0;
assert super.docID() == advanced;
return doc = advanced;
}
@@ -403,7 +328,6 @@ public class AssertingLeafReader extends FilterLeafReader {
@Override
public int nextPosition() throws IOException {
- assertThread("Docs enums", creationThread);
assert state != DocsEnumState.START : "nextPosition() called before nextDoc()/advance()";
assert state != DocsEnumState.FINISHED : "nextPosition() called after NO_MORE_DOCS";
assert positionCount < positionMax : "nextPosition() called more than freq() times!";
@@ -415,7 +339,6 @@ public class AssertingLeafReader extends FilterLeafReader {
@Override
public int startOffset() throws IOException {
- assertThread("Docs enums", creationThread);
assert state != DocsEnumState.START : "startOffset() called before nextDoc()/advance()";
assert state != DocsEnumState.FINISHED : "startOffset() called after NO_MORE_DOCS";
assert positionCount > 0 : "startOffset() called before nextPosition()!";
@@ -424,7 +347,6 @@ public class AssertingLeafReader extends FilterLeafReader {
@Override
public int endOffset() throws IOException {
- assertThread("Docs enums", creationThread);
assert state != DocsEnumState.START : "endOffset() called before nextDoc()/advance()";
assert state != DocsEnumState.FINISHED : "endOffset() called after NO_MORE_DOCS";
assert positionCount > 0 : "endOffset() called before nextPosition()!";
@@ -432,17 +354,32 @@ public class AssertingLeafReader extends FilterLeafReader {
}
@Override
+ public int startPosition() throws IOException {
+ assert state != DocsEnumState.START : "startPosition() called before nextDoc()/advance()";
+ assert state != DocsEnumState.FINISHED : "startPosition() called after NO_MORE_DOCS";
+ assert positionCount > 0 : "startPosition() called before nextPosition()!";
+ return super.startPosition();
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ assert state != DocsEnumState.START : "endPosition() called before nextDoc()/advance()";
+ assert state != DocsEnumState.FINISHED : "endPosition() called after NO_MORE_DOCS";
+ assert positionCount > 0 : "endPosition() called before nextPosition()!";
+ return super.endPosition();
+ }
+
+ @Override
public BytesRef getPayload() throws IOException {
- assertThread("Docs enums", creationThread);
assert state != DocsEnumState.START : "getPayload() called before nextDoc()/advance()";
assert state != DocsEnumState.FINISHED : "getPayload() called after NO_MORE_DOCS";
assert positionCount > 0 : "getPayload() called before nextPosition()!";
BytesRef payload = super.getPayload();
- assert payload == null || payload.isValid() && payload.length > 0 : "getPayload() returned payload with invalid length!";
+ assert payload == null || payload.length > 0 : "getPayload() returned payload with invalid length!";
return payload;
}
}
-
+
/** Wraps a NumericDocValues but with additional asserts */
public static class AssertingNumericDocValues extends NumericDocValues {
private final Thread creationThread = Thread.currentThread();
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java
index 16845f9..dcf127a 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java
@@ -17,8 +17,6 @@ package org.apache.lucene.index;
* limitations under the License.
*/
-import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS;
-
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
@@ -61,6 +59,8 @@ import org.apache.lucene.util.BytesRefHash;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.TestUtil;
+import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS;
+
/**
* Abstract class to do basic tests for a docvalues format.
* NOTE: This test focuses on the docvalues impl, nothing else.
@@ -2083,7 +2083,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
);
}
}
-
+
public void testSortedNumericsMultipleValuesVsStoredFields() throws Exception {
assumeTrue("Codec does not support SORTED_NUMERIC", codecSupportsSortedNumeric());
int numIterations = atLeast(1);
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java
index 6024c7e..68db0eb 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java
@@ -121,7 +121,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
/** Given the same random seed this always enumerates the
* same random postings */
- private static class SeedPostings extends DocsAndPositionsEnum {
+ private static class SeedPostings extends DocsEnum {
// Used only to generate docIDs; this way if you pull w/
// or w/o positions you get the same docID sequence:
private final Random docRandom;
@@ -234,7 +234,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
return 0;
}
assert posUpto < freq;
-
+
if (posUpto == 0 && random.nextBoolean()) {
// Sometimes index pos = 0
} else if (posSpacing == 1) {
@@ -270,7 +270,17 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
posUpto++;
return pos;
}
-
+
+ @Override
+ public int startPosition() throws IOException {
+ return pos;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return pos;
+ }
+
@Override
public int startOffset() {
return startOffset;
@@ -643,28 +653,23 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
if (liveDocs != null) {
throw new IllegalArgumentException("liveDocs must be null");
}
+ if (DocsEnum.requiresPositions(flags)) {
+ if (maxAllowed.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
+ return null;
+ }
+ if ((flags & DocsEnum.FLAG_OFFSETS) == DocsEnum.FLAG_OFFSETS && maxAllowed.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) < 0) {
+ return null;
+ }
+ if ((flags & DocsEnum.FLAG_PAYLOADS) == DocsEnum.FLAG_PAYLOADS && allowPayloads == false) {
+ return null;
+ }
+ }
if ((flags & DocsEnum.FLAG_FREQS) != 0 && maxAllowed.compareTo(IndexOptions.DOCS_AND_FREQS) < 0) {
return null;
}
return getSeedPostings(current.getKey().utf8ToString(), current.getValue().seed, false, maxAllowed, allowPayloads);
}
- @Override
- public final DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- if (liveDocs != null) {
- throw new IllegalArgumentException("liveDocs must be null");
- }
- if (maxAllowed.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
- return null;
- }
- if ((flags & DocsAndPositionsEnum.FLAG_OFFSETS) != 0 && maxAllowed.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) < 0) {
- return null;
- }
- if ((flags & DocsAndPositionsEnum.FLAG_PAYLOADS) != 0 && allowPayloads == false) {
- return null;
- }
- return getSeedPostings(current.getKey().utf8ToString(), current.getValue().seed, false, maxAllowed, allowPayloads);
- }
}
// TODO maybe instead of @BeforeClass just make a single test run: build postings & index & test it?
@@ -746,7 +751,6 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
private static class ThreadState {
// Only used with REUSE option:
public DocsEnum reuseDocsEnum;
- public DocsAndPositionsEnum reuseDocsAndPositionsEnum;
}
private void verifyEnum(ThreadState threadState,
@@ -812,31 +816,29 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
DocsEnum prevDocsEnum = null;
DocsEnum docsEnum;
- DocsAndPositionsEnum docsAndPositionsEnum;
if (!doCheckPositions) {
if (allowPositions && random().nextInt(10) == 7) {
// 10% of the time, even though we will not check positions, pull a DocsAndPositions enum
if (options.contains(Option.REUSE_ENUMS) && random().nextInt(10) < 9) {
- prevDocsEnum = threadState.reuseDocsAndPositionsEnum;
+ prevDocsEnum = threadState.reuseDocsEnum;
}
- int flags = 0;
+ int flags = DocsEnum.FLAG_POSITIONS;
if (alwaysTestMax || random().nextBoolean()) {
- flags |= DocsAndPositionsEnum.FLAG_OFFSETS;
+ flags |= DocsEnum.FLAG_OFFSETS;
}
if (alwaysTestMax || random().nextBoolean()) {
- flags |= DocsAndPositionsEnum.FLAG_PAYLOADS;
+ flags |= DocsEnum.FLAG_PAYLOADS;
}
if (VERBOSE) {
- System.out.println(" get DocsAndPositionsEnum (but we won't check positions) flags=" + flags);
+ System.out.println(" get DocsEnum (but we won't check positions) flags=" + flags);
}
- threadState.reuseDocsAndPositionsEnum = termsEnum.docsAndPositions(liveDocs, (DocsAndPositionsEnum) prevDocsEnum, flags);
- docsEnum = threadState.reuseDocsAndPositionsEnum;
- docsAndPositionsEnum = threadState.reuseDocsAndPositionsEnum;
+ threadState.reuseDocsEnum = termsEnum.docs(liveDocs, prevDocsEnum, flags);
+ docsEnum = threadState.reuseDocsEnum;
} else {
if (VERBOSE) {
System.out.println(" get DocsEnum");
@@ -846,28 +848,26 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
}
threadState.reuseDocsEnum = termsEnum.docs(liveDocs, prevDocsEnum, doCheckFreqs ? DocsEnum.FLAG_FREQS : DocsEnum.FLAG_NONE);
docsEnum = threadState.reuseDocsEnum;
- docsAndPositionsEnum = null;
}
} else {
if (options.contains(Option.REUSE_ENUMS) && random().nextInt(10) < 9) {
- prevDocsEnum = threadState.reuseDocsAndPositionsEnum;
+ prevDocsEnum = threadState.reuseDocsEnum;
}
- int flags = 0;
+ int flags = DocsEnum.FLAG_POSITIONS;
if (alwaysTestMax || doCheckOffsets || random().nextInt(3) == 1) {
- flags |= DocsAndPositionsEnum.FLAG_OFFSETS;
+ flags |= DocsEnum.FLAG_OFFSETS;
}
if (alwaysTestMax || doCheckPayloads|| random().nextInt(3) == 1) {
- flags |= DocsAndPositionsEnum.FLAG_PAYLOADS;
+ flags |= DocsEnum.FLAG_PAYLOADS;
}
if (VERBOSE) {
- System.out.println(" get DocsAndPositionsEnum flags=" + flags);
+ System.out.println(" get DocsEnum flags=" + flags);
}
- threadState.reuseDocsAndPositionsEnum = termsEnum.docsAndPositions(liveDocs, (DocsAndPositionsEnum) prevDocsEnum, flags);
- docsEnum = threadState.reuseDocsAndPositionsEnum;
- docsAndPositionsEnum = threadState.reuseDocsAndPositionsEnum;
+ threadState.reuseDocsEnum = termsEnum.docs(liveDocs, prevDocsEnum, flags);
+ docsEnum = threadState.reuseDocsEnum;
}
assertNotNull("null DocsEnum", docsEnum);
@@ -1009,7 +1009,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
if (VERBOSE) {
System.out.println(" now nextPosition to " + pos);
}
- assertEquals("position is wrong", pos, docsAndPositionsEnum.nextPosition());
+ assertEquals("position is wrong", pos, docsEnum.nextPosition());
if (doCheckPayloads) {
BytesRef expectedPayload = expected.getPayload();
@@ -1018,9 +1018,9 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
System.out.println(" now check expectedPayload length=" + (expectedPayload == null ? 0 : expectedPayload.length));
}
if (expectedPayload == null || expectedPayload.length == 0) {
- assertNull("should not have payload", docsAndPositionsEnum.getPayload());
+ assertNull("should not have payload", docsEnum.getPayload());
} else {
- BytesRef payload = docsAndPositionsEnum.getPayload();
+ BytesRef payload = docsEnum.getPayload();
assertNotNull("should have payload but doesn't", payload);
assertEquals("payload length is wrong", expectedPayload.length, payload.length);
@@ -1032,7 +1032,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
// make a deep copy
payload = BytesRef.deepCopyOf(payload);
- assertEquals("2nd call to getPayload returns something different!", payload, docsAndPositionsEnum.getPayload());
+ assertEquals("2nd call to getPayload returns something different!", payload, docsEnum.getPayload());
}
} else {
if (VERBOSE) {
@@ -1046,8 +1046,8 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
if (VERBOSE) {
System.out.println(" now check offsets: startOff=" + expected.startOffset() + " endOffset=" + expected.endOffset());
}
- assertEquals("startOffset is wrong", expected.startOffset(), docsAndPositionsEnum.startOffset());
- assertEquals("endOffset is wrong", expected.endOffset(), docsAndPositionsEnum.endOffset());
+ assertEquals("startOffset is wrong", expected.startOffset(), docsEnum.startOffset());
+ assertEquals("endOffset is wrong", expected.endOffset(), docsEnum.endOffset());
} else {
if (VERBOSE) {
System.out.println(" skip check offsets");
@@ -1057,8 +1057,8 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
if (VERBOSE) {
System.out.println(" now check offsets are -1");
}
- assertEquals("startOffset isn't -1", -1, docsAndPositionsEnum.startOffset());
- assertEquals("endOffset isn't -1", -1, docsAndPositionsEnum.endOffset());
+ assertEquals("startOffset isn't -1", -1, docsEnum.startOffset());
+ assertEquals("endOffset isn't -1", -1, docsEnum.endOffset());
}
}
}
@@ -1575,22 +1575,17 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
if (random().nextBoolean()) {
docs = termsEnum.docs(null, docs, DocsEnum.FLAG_FREQS);
- } else if (docs instanceof DocsAndPositionsEnum) {
- docs = termsEnum.docsAndPositions(null, (DocsAndPositionsEnum) docs, 0);
} else {
- docs = termsEnum.docsAndPositions(null, null, 0);
+ docs = termsEnum.docs(null, null, DocsEnum.FLAG_POSITIONS);
}
int docFreq = 0;
long totalTermFreq = 0;
while (docs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
docFreq++;
totalTermFreq += docs.freq();
- if (docs instanceof DocsAndPositionsEnum) {
- DocsAndPositionsEnum posEnum = (DocsAndPositionsEnum) docs;
- int limit = TestUtil.nextInt(random(), 1, docs.freq());
- for(int i=0;i termsEnum = new ThreadLocal<>();
private final ThreadLocal docsEnum = new ThreadLocal<>();
- private final ThreadLocal docsAndPositionsEnum = new ThreadLocal<>();
+ private final ThreadLocal docsAndPositionsEnum = new ThreadLocal<>();
protected void assertEquals(RandomTokenStream tk, FieldType ft, Terms terms) throws IOException {
assertEquals(1, terms.getDocCount());
@@ -453,14 +452,14 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
this.docsEnum.set(docsEnum);
bits.clear(0);
- DocsAndPositionsEnum docsAndPositionsEnum = termsEnum.docsAndPositions(bits, random().nextBoolean() ? null : this.docsAndPositionsEnum.get());
+ DocsEnum docsAndPositionsEnum = termsEnum.docs(bits, random().nextBoolean() ? null : this.docsEnum.get(), DocsEnum.FLAG_POSITIONS);
assertEquals(ft.storeTermVectorOffsets() || ft.storeTermVectorPositions(), docsAndPositionsEnum != null);
if (docsAndPositionsEnum != null) {
assertEquals(DocsEnum.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc());
}
bits.set(0);
- docsAndPositionsEnum = termsEnum.docsAndPositions(random().nextBoolean() ? bits : null, random().nextBoolean() ? null : docsAndPositionsEnum);
+ docsAndPositionsEnum = termsEnum.docs(random().nextBoolean() ? bits : null, random().nextBoolean() ? null : docsAndPositionsEnum, DocsEnum.FLAG_POSITIONS);
assertEquals(ft.storeTermVectorOffsets() || ft.storeTermVectorPositions(), docsAndPositionsEnum != null);
if (terms.hasPositions() || terms.hasOffsets()) {
assertEquals(0, docsAndPositionsEnum.nextDoc());
@@ -517,7 +516,7 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
}
assertEquals(DocsEnum.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc());
}
- this.docsAndPositionsEnum.set(docsAndPositionsEnum);
+ this.docsEnum.set(docsAndPositionsEnum);
}
assertNull(termsEnum.next());
for (int i = 0; i < 5; ++i) {
diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingIndexSearcher.java b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingIndexSearcher.java
index 0889d26..7c79363 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingIndexSearcher.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingIndexSearcher.java
@@ -17,16 +17,17 @@ package org.apache.lucene.search;
* limitations under the License.
*/
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReaderContext;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.TestUtil;
+
import java.io.IOException;
import java.util.List;
import java.util.Random;
import java.util.concurrent.ExecutorService;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReaderContext;
-import org.apache.lucene.util.TestUtil;
-
/**
* Helper class that adds some extra checks to ensure correct
* usage of {@code IndexSearcher} and {@code Weight}.
@@ -65,6 +66,21 @@ public class AssertingIndexSearcher extends IndexSearcher {
}
@Override
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
+ Scorer scorer = w.scorer(context, flags, acceptDocs);
+ if (scorer != null) {
+ // check that scorer obeys disi contract for docID() before next()/advance
+ try {
+ int docid = scorer.docID();
+ assert docid == -1 || docid == DocIdSetIterator.NO_MORE_DOCS;
+ } catch (UnsupportedOperationException ignored) {
+ // from a top-level BS1
+ }
+ }
+ return scorer;
+ }
+
+ @Override
public float getValueForNormalization() {
throw new IllegalStateException("Weight already normalized.");
}
diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingScorer.java b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingScorer.java
index 1abda77..bd760ae 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingScorer.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingScorer.java
@@ -26,6 +26,7 @@ import java.util.Random;
import java.util.WeakHashMap;
import org.apache.lucene.index.AssertingLeafReader;
+import org.apache.lucene.util.BytesRef;
/** Wraps a Scorer with additional checks */
public class AssertingScorer extends Scorer {
@@ -111,18 +112,58 @@ public class AssertingScorer extends Scorer {
}
@Override
+ public int nextPosition() throws IOException {
+ assert iterating();
+ return in.nextPosition();
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ assert iterating();
+ return in.startPosition();
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ assert iterating();
+ return in.endPosition();
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ assert iterating();
+ return in.startOffset();
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ assert iterating();
+ return in.endOffset();
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ assert iterating();
+ return in.getPayload();
+ }
+
+ @Override
public int docID() {
return in.docID();
}
@Override
public int nextDoc() throws IOException {
- return docsEnumIn.nextDoc();
+ int doc = docsEnumIn.nextDoc();
+ assert in.startPosition() == -1;
+ return doc;
}
@Override
public int advance(int target) throws IOException {
- return docsEnumIn.advance(target);
+ int doc = docsEnumIn.advance(target);
+ assert in.startPosition() == -1;
+ return doc;
}
@Override
diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingWeight.java b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingWeight.java
index 4d881a6..389929e 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/search/AssertingWeight.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/search/AssertingWeight.java
@@ -58,15 +58,19 @@ class AssertingWeight extends Weight {
}
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
- final Scorer inScorer = in.scorer(context, acceptDocs);
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
+    // delegate to the wrapped weight, passing the postings flags
+    // (freqs/positions/offsets/payloads) through unchanged.
+ final Scorer inScorer = in.scorer(context, flags, acceptDocs);
assert inScorer == null || inScorer.docID() == -1;
return AssertingScorer.wrap(new Random(random.nextLong()), inScorer);
}
@Override
- public BulkScorer bulkScorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
- BulkScorer inScorer = in.bulkScorer(context, acceptDocs);
+ public BulkScorer bulkScorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
+    // delegate to the wrapped weight, passing the postings flags
+    // (freqs/positions/offsets/payloads) through unchanged.
+ BulkScorer inScorer = in.bulkScorer(context, flags, acceptDocs);
if (inScorer == null) {
return null;
}
diff --git a/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java b/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java
index c1b02ae..5215a17 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/search/QueryUtils.java
@@ -22,16 +22,16 @@ import java.util.List;
import java.util.Random;
import junit.framework.Assert;
-
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.AllDeletedFilterReader;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockDirectoryWrapper;
@@ -264,7 +264,7 @@ public class QueryUtils {
if (scorer == null) {
Weight w = s.createNormalizedWeight(q);
LeafReaderContext context = readerContextArray.get(leafPtr);
- scorer = w.scorer(context, context.reader().getLiveDocs());
+ scorer = w.scorer(context, DocsEnum.FLAG_FREQS, context.reader().getLiveDocs());
}
int op = order[(opidx[0]++) % order.length];
@@ -311,7 +311,7 @@ public class QueryUtils {
indexSearcher.setSimilarity(s.getSimilarity());
Weight w = indexSearcher.createNormalizedWeight(q);
LeafReaderContext ctx = (LeafReaderContext)indexSearcher.getTopReaderContext();
- Scorer scorer = w.scorer(ctx, ctx.reader().getLiveDocs());
+ Scorer scorer = w.scorer(ctx, DocsEnum.FLAG_FREQS, ctx.reader().getLiveDocs());
if (scorer != null) {
boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
Assert.assertFalse("query's last doc was "+ lastDoc[0] +" but skipTo("+(lastDoc[0]+1)+") got to "+scorer.docID(),more);
@@ -333,7 +333,7 @@ public class QueryUtils {
indexSearcher.setSimilarity(s.getSimilarity());
Weight w = indexSearcher.createNormalizedWeight(q);
LeafReaderContext ctx = previousReader.getContext();
- Scorer scorer = w.scorer(ctx, ctx.reader().getLiveDocs());
+ Scorer scorer = w.scorer(ctx, DocsEnum.FLAG_FREQS, ctx.reader().getLiveDocs());
if (scorer != null) {
boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
Assert.assertFalse("query's last doc was "+ lastDoc[0] +" but skipTo("+(lastDoc[0]+1)+") got to "+scorer.docID(),more);
@@ -364,7 +364,7 @@ public class QueryUtils {
long startMS = System.currentTimeMillis();
for (int i=lastDoc[0]+1; i<=doc; i++) {
Weight w = s.createNormalizedWeight(q);
- Scorer scorer = w.scorer(context.get(leafPtr), liveDocs);
+ Scorer scorer = w.scorer(context.get(leafPtr), DocsEnum.FLAG_FREQS, liveDocs);
Assert.assertTrue("query collected "+doc+" but skipTo("+i+") says no more docs!",scorer.advance(i) != DocIdSetIterator.NO_MORE_DOCS);
Assert.assertEquals("query collected "+doc+" but skipTo("+i+") got to "+scorer.docID(),doc,scorer.docID());
float skipToScore = scorer.score();
@@ -392,7 +392,7 @@ public class QueryUtils {
IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader);
indexSearcher.setSimilarity(s.getSimilarity());
Weight w = indexSearcher.createNormalizedWeight(q);
- Scorer scorer = w.scorer((LeafReaderContext)indexSearcher.getTopReaderContext(), previousReader.getLiveDocs());
+ Scorer scorer = w.scorer((LeafReaderContext)indexSearcher.getTopReaderContext(), DocsEnum.FLAG_FREQS, previousReader.getLiveDocs());
if (scorer != null) {
boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
Assert.assertFalse("query's last doc was "+ lastDoc[0] +" but skipTo("+(lastDoc[0]+1)+") got to "+scorer.docID(),more);
@@ -413,7 +413,7 @@ public class QueryUtils {
IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader);
indexSearcher.setSimilarity(s.getSimilarity());
Weight w = indexSearcher.createNormalizedWeight(q);
- Scorer scorer = w.scorer((LeafReaderContext)indexSearcher.getTopReaderContext(), previousReader.getLiveDocs());
+ Scorer scorer = w.scorer((LeafReaderContext)indexSearcher.getTopReaderContext(), DocsEnum.FLAG_FREQS, previousReader.getLiveDocs());
if (scorer != null) {
boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
Assert.assertFalse("query's last doc was "+ lastDoc[0] +" but skipTo("+(lastDoc[0]+1)+") got to "+scorer.docID(),more);
@@ -425,8 +425,8 @@ public class QueryUtils {
public static void checkBulkScorerSkipTo(Random r, Query query, IndexSearcher searcher) throws IOException {
Weight weight = searcher.createNormalizedWeight(query);
for (LeafReaderContext context : searcher.getIndexReader().leaves()) {
- final Scorer scorer = weight.scorer(context, context.reader().getLiveDocs());
- final BulkScorer bulkScorer = weight.bulkScorer(context, context.reader().getLiveDocs());
+ final Scorer scorer = weight.scorer(context, DocsEnum.FLAG_NONE, context.reader().getLiveDocs());
+ final BulkScorer bulkScorer = weight.bulkScorer(context, DocsEnum.FLAG_NONE, context.reader().getLiveDocs());
if (scorer == null && bulkScorer == null) {
continue;
}
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
index dd6b303..9956009 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
@@ -55,28 +55,40 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.logging.Logger;
+import com.carrotsearch.randomizedtesting.JUnit4MethodProvider;
+import com.carrotsearch.randomizedtesting.LifecycleScope;
+import com.carrotsearch.randomizedtesting.MixWithSuiteName;
+import com.carrotsearch.randomizedtesting.RandomizedContext;
+import com.carrotsearch.randomizedtesting.RandomizedRunner;
+import com.carrotsearch.randomizedtesting.RandomizedTest;
+import com.carrotsearch.randomizedtesting.annotations.Listeners;
+import com.carrotsearch.randomizedtesting.annotations.SeedDecorators;
+import com.carrotsearch.randomizedtesting.annotations.TestGroup;
+import com.carrotsearch.randomizedtesting.annotations.TestMethodProviders;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakAction;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakAction.Action;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakGroup;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakGroup.Group;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies.Consequence;
+import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import com.carrotsearch.randomizedtesting.rules.NoClassHooksShadowingRule;
+import com.carrotsearch.randomizedtesting.rules.NoInstanceHooksOverridesRule;
+import com.carrotsearch.randomizedtesting.rules.StaticFieldsInvariantRule;
+import com.carrotsearch.randomizedtesting.rules.SystemPropertiesInvariantRule;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.AlcoholicMergePolicy;
-import org.apache.lucene.index.AssertingDirectoryReader;
-import org.apache.lucene.index.AssertingLeafReader;
-import org.apache.lucene.index.BinaryDocValues;
-import org.apache.lucene.index.CompositeReader;
-import org.apache.lucene.index.ConcurrentMergeScheduler;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.FieldFilterLeafReader;
-import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.FieldInfos;
-import org.apache.lucene.index.Fields;
-import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.*;
import org.apache.lucene.index.IndexReader.ReaderClosedListener;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
@@ -109,8 +121,6 @@ import org.apache.lucene.index.StorableField;
import org.apache.lucene.index.StoredDocument;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum.SeekStatus;
-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.index.TieredMergePolicy;
import org.apache.lucene.search.AssertingIndexSearcher;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
@@ -124,10 +134,11 @@ import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.FSLockFactory;
import org.apache.lucene.store.FlushInfo;
import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IOContext.Context;
import org.apache.lucene.store.LockFactory;
import org.apache.lucene.store.MergeInfo;
-import org.apache.lucene.store.MockDirectoryWrapper.Throttling;
import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.MockDirectoryWrapper.Throttling;
import org.apache.lucene.store.NRTCachingDirectory;
import org.apache.lucene.util.automaton.AutomatonTestUtil;
import org.apache.lucene.util.automaton.CompiledAutomaton;
@@ -468,9 +479,9 @@ public abstract class LuceneTestCase extends Assert {
static {
CORE_DIRECTORIES = new ArrayList<>(FS_DIRECTORIES);
CORE_DIRECTORIES.add("RAMDirectory");
- };
+ }
- /** A {@link FilterCachingPolicy} that randomly caches. */
+ /** A {@link org.apache.lucene.search.FilterCachingPolicy} that randomly caches. */
public static final FilterCachingPolicy MAYBE_CACHE_POLICY = new FilterCachingPolicy() {
@Override
@@ -482,7 +493,7 @@ public abstract class LuceneTestCase extends Assert {
}
};
-
+
// -----------------------------------------------------------------
// Fields initialized in class or instance rules.
// -----------------------------------------------------------------
@@ -1305,10 +1316,6 @@ public abstract class LuceneTestCase extends Assert {
String fsdirClass = TEST_DIRECTORY;
if (fsdirClass.equals("random")) {
fsdirClass = RandomPicks.randomFrom(random(), FS_DIRECTORIES);
- if (fsdirClass.equals("SimpleFSDirectory")) {
- // pick again
- fsdirClass = RandomPicks.randomFrom(random(), FS_DIRECTORIES);
- }
}
Class extends FSDirectory> clazz;
@@ -1347,7 +1354,7 @@ public abstract class LuceneTestCase extends Assert {
if (rarely(random) && !bare) {
directory = new NRTCachingDirectory(directory, random.nextDouble(), random.nextDouble());
}
-
+
if (bare) {
BaseDirectoryWrapper base = new BaseDirectoryWrapper(directory);
closeAfterSuite(new CloseableDirectory(base, suiteFailureMarker));
@@ -1468,7 +1475,7 @@ public abstract class LuceneTestCase extends Assert {
/**
* Return a random Locale from the available locales on the system.
- * @see LUCENE-4020
+ * @see "https://issues.apache.org/jira/browse/LUCENE-4020"
*/
public static Locale randomLocale(Random random) {
Locale locales[] = Locale.getAvailableLocales();
@@ -1477,7 +1484,7 @@ public abstract class LuceneTestCase extends Assert {
/**
* Return a random TimeZone from the available timezones on the system
- * @see LUCENE-4020
+ * @see "https://issues.apache.org/jira/browse/LUCENE-4020"
*/
public static TimeZone randomTimeZone(Random random) {
String tzIds[] = TimeZone.getAvailableIDs();
@@ -1514,10 +1521,6 @@ public abstract class LuceneTestCase extends Assert {
if (clazzName.equals("random")) {
if (rarely(random)) {
clazzName = RandomPicks.randomFrom(random, CORE_DIRECTORIES);
- if (clazzName.equals("SimpleFSDirectory")) {
- // pick again
- clazzName = RandomPicks.randomFrom(random, CORE_DIRECTORIES);
- }
} else {
clazzName = "RAMDirectory";
}
@@ -1937,8 +1940,8 @@ public abstract class LuceneTestCase extends Assert {
public void assertTermsEnumEquals(String info, IndexReader leftReader, TermsEnum leftTermsEnum, TermsEnum rightTermsEnum, boolean deep) throws IOException {
BytesRef term;
Bits randomBits = new RandomBits(leftReader.maxDoc(), random().nextDouble(), random());
- DocsAndPositionsEnum leftPositions = null;
- DocsAndPositionsEnum rightPositions = null;
+ DocsEnum leftPositions = null;
+ DocsEnum rightPositions = null;
DocsEnum leftDocs = null;
DocsEnum rightDocs = null;
@@ -1946,17 +1949,17 @@ public abstract class LuceneTestCase extends Assert {
assertEquals(info, term, rightTermsEnum.next());
assertTermStatsEquals(info, leftTermsEnum, rightTermsEnum);
if (deep) {
- assertDocsAndPositionsEnumEquals(info, leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions),
- rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions));
- assertDocsAndPositionsEnumEquals(info, leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions),
- rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions));
+ assertDocsAndPositionsEnumEquals(info, leftPositions = leftTermsEnum.docs(null, leftPositions, DocsEnum.FLAG_ALL),
+ rightPositions = rightTermsEnum.docs(null, rightPositions, DocsEnum.FLAG_ALL));
+ assertDocsAndPositionsEnumEquals(info, leftPositions = leftTermsEnum.docs(randomBits, leftPositions, DocsEnum.FLAG_ALL),
+ rightPositions = rightTermsEnum.docs(randomBits, rightPositions, DocsEnum.FLAG_ALL));
assertPositionsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(),
- leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions),
- rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions));
+ leftPositions = leftTermsEnum.docs(null, leftPositions, DocsEnum.FLAG_ALL),
+ rightPositions = rightTermsEnum.docs(null, rightPositions, DocsEnum.FLAG_ALL));
assertPositionsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(),
- leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions),
- rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions));
+ leftPositions = leftTermsEnum.docs(randomBits, leftPositions, DocsEnum.FLAG_ALL),
+ rightPositions = rightTermsEnum.docs(randomBits, rightPositions, DocsEnum.FLAG_ALL));
// with freqs:
assertDocsEnumEquals(info, leftDocs = leftTermsEnum.docs(null, leftDocs),
@@ -2002,7 +2005,7 @@ public abstract class LuceneTestCase extends Assert {
/**
* checks docs + freqs + positions + payloads, sequentially
*/
- public void assertDocsAndPositionsEnumEquals(String info, DocsAndPositionsEnum leftDocs, DocsAndPositionsEnum rightDocs) throws IOException {
+ public void assertDocsAndPositionsEnumEquals(String info, DocsEnum leftDocs, DocsEnum rightDocs) throws IOException {
if (leftDocs == null || rightDocs == null) {
assertNull(leftDocs);
assertNull(rightDocs);
@@ -2081,7 +2084,7 @@ public abstract class LuceneTestCase extends Assert {
/**
* checks advancing docs + positions
*/
- public void assertPositionsSkippingEquals(String info, IndexReader leftReader, int docFreq, DocsAndPositionsEnum leftDocs, DocsAndPositionsEnum rightDocs) throws IOException {
+ public void assertPositionsSkippingEquals(String info, IndexReader leftReader, int docFreq, DocsEnum leftDocs, DocsEnum rightDocs) throws IOException {
if (leftDocs == null || rightDocs == null) {
assertNull(leftDocs);
assertNull(rightDocs);
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java b/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java
index aa1f18a..08a9a4f 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java
@@ -46,6 +46,8 @@ import java.util.regex.PatternSyntaxException;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.DocValuesFormat;
import org.apache.lucene.codecs.PostingsFormat;
@@ -71,7 +73,6 @@ import org.apache.lucene.index.CheckIndex;
import org.apache.lucene.index.ConcurrentMergeScheduler;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.FilterLeafReader;
import org.apache.lucene.index.IndexReader;
@@ -92,8 +93,8 @@ import org.apache.lucene.index.TieredMergePolicy;
import org.apache.lucene.mockfile.FilterFileSystem;
import org.apache.lucene.mockfile.WindowsFS;
import org.apache.lucene.search.FieldDoc;
-import org.apache.lucene.search.FilteredQuery.FilterStrategy;
import org.apache.lucene.search.FilteredQuery;
+import org.apache.lucene.search.FilteredQuery.FilterStrategy;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
@@ -102,8 +103,6 @@ import org.apache.lucene.store.FilterDirectory;
import org.apache.lucene.store.NoLockFactory;
import org.junit.Assert;
-import com.carrotsearch.randomizedtesting.generators.RandomInts;
-import com.carrotsearch.randomizedtesting.generators.RandomPicks;
/**
* General utility methods for Lucene unit tests.
@@ -1016,13 +1015,12 @@ public final class TestUtil {
if (random.nextBoolean()) {
final int posFlags;
switch (random.nextInt(4)) {
- case 0: posFlags = 0; break;
- case 1: posFlags = DocsAndPositionsEnum.FLAG_OFFSETS; break;
- case 2: posFlags = DocsAndPositionsEnum.FLAG_PAYLOADS; break;
- default: posFlags = DocsAndPositionsEnum.FLAG_OFFSETS | DocsAndPositionsEnum.FLAG_PAYLOADS; break;
+ case 0: posFlags = DocsEnum.FLAG_POSITIONS; break;
+ case 1: posFlags = DocsEnum.FLAG_OFFSETS; break;
+ case 2: posFlags = DocsEnum.FLAG_PAYLOADS; break;
+ default: posFlags = DocsEnum.FLAG_OFFSETS | DocsEnum.FLAG_PAYLOADS; break;
}
- // TODO: cast to DocsAndPositionsEnum?
- DocsAndPositionsEnum docsAndPositions = termsEnum.docsAndPositions(liveDocs, null, posFlags);
+ DocsEnum docsAndPositions = termsEnum.docs(liveDocs, null, posFlags);
if (docsAndPositions != null) {
return docsAndPositions;
}
diff --git a/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java b/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
index 1a60660..89b9db7 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
@@ -21,21 +21,21 @@ import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.HashMap;
+import java.util.Iterator;
import java.util.List;
import java.util.Map;
-import java.util.Iterator;
-import java.util.Arrays;
import com.carrotsearch.hppc.IntObjectOpenHashMap;
-import com.carrotsearch.hppc.LongOpenHashSet;
+import com.carrotsearch.hppc.IntOpenHashSet;
+import com.carrotsearch.hppc.LongObjectMap;
import com.carrotsearch.hppc.LongObjectOpenHashMap;
+import com.carrotsearch.hppc.LongOpenHashSet;
import com.carrotsearch.hppc.cursors.IntObjectCursor;
import com.carrotsearch.hppc.cursors.LongCursor;
import com.carrotsearch.hppc.cursors.LongObjectCursor;
-import com.carrotsearch.hppc.IntOpenHashSet;
import com.carrotsearch.hppc.cursors.ObjectCursor;
-import com.carrotsearch.hppc.LongObjectMap;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.FieldInfo;
@@ -73,11 +73,11 @@ import org.apache.solr.core.PluginInfo;
import org.apache.solr.core.SolrCore;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.schema.FieldType;
+import org.apache.solr.schema.StrField;
+import org.apache.solr.schema.TrieDoubleField;
import org.apache.solr.schema.TrieFloatField;
import org.apache.solr.schema.TrieIntField;
import org.apache.solr.schema.TrieLongField;
-import org.apache.solr.schema.TrieDoubleField;
-import org.apache.solr.schema.StrField;
import org.apache.solr.search.CollapsingQParserPlugin;
import org.apache.solr.search.DocIterator;
import org.apache.solr.search.DocList;
diff --git a/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java b/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
index a3520ea..95b4f02 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
@@ -17,6 +17,21 @@
package org.apache.solr.handler.component;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
@@ -94,20 +109,6 @@ import org.apache.solr.util.SolrPluginUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.io.StringWriter;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
/**
* TODO!
@@ -1349,6 +1350,36 @@ public class QueryComponent extends SearchComponent
}
@Override
+ public int nextPosition() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
public int nextDoc() throws IOException {
throw new UnsupportedOperationException();
}
diff --git a/solr/core/src/java/org/apache/solr/handler/component/TermVectorComponent.java b/solr/core/src/java/org/apache/solr/handler/component/TermVectorComponent.java
index e807daa..9e49f14 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/TermVectorComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/TermVectorComponent.java
@@ -1,19 +1,17 @@
package org.apache.solr.handler.component;
import java.io.IOException;
-import java.util.Arrays;
import java.util.ArrayList;
-import java.util.Collection;
+import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
-import java.util.LinkedHashSet;
-import java.util.Set;
import java.util.Iterator;
+import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
-import java.util.Map.Entry;
+import java.util.Set;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
@@ -24,17 +22,15 @@ import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.params.TermVectorParams;
import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.StrUtils;
import org.apache.solr.core.SolrCore;
import org.apache.solr.schema.IndexSchema;
import org.apache.solr.schema.SchemaField;
-import org.apache.solr.search.ReturnFields;
import org.apache.solr.search.DocList;
import org.apache.solr.search.DocListAndSet;
+import org.apache.solr.search.ReturnFields;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.search.SolrReturnFields;
import org.apache.solr.util.SolrPluginUtils;
@@ -335,7 +331,7 @@ public class TermVectorComponent extends SearchComponent implements SolrCoreAwar
docNL.add(field, fieldNL);
BytesRef text;
- DocsAndPositionsEnum dpEnum = null;
+ DocsEnum dpEnum = null;
while((text = termsEnum.next()) != null) {
String term = text.utf8ToString();
NamedList