diff --git a/dev-tools/idea/lucene/highlighter/highlighter.iml b/dev-tools/idea/lucene/highlighter/highlighter.iml
index 0787fb8..8a45e23 100644
--- a/dev-tools/idea/lucene/highlighter/highlighter.iml
+++ b/dev-tools/idea/lucene/highlighter/highlighter.iml
@@ -12,6 +12,7 @@
+
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java
index 7381e11..e3f8880 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java
@@ -27,7 +27,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
@@ -96,7 +96,7 @@ public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
writer.close();
IndexReader reader = DirectoryReader.open(dir);
- DocsEnum td = TestUtil.docs(random(),
+ PostingsEnum td = TestUtil.docs(random(),
reader,
"partnum",
new BytesRef("Q36"),
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java
index 9c63bb0..f5a4d35 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java
@@ -31,7 +31,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Terms;
@@ -111,7 +111,7 @@ public class TestTeeSinkTokenFilter extends BaseTokenStreamTestCase {
TermsEnum termsEnum = vector.iterator(null);
termsEnum.next();
assertEquals(2, termsEnum.totalTermFreq());
- DocsAndPositionsEnum positions = termsEnum.docsAndPositions(null, null);
+ PostingsEnum positions = termsEnum.postings(null, null, PostingsEnum.FLAG_ALL);
assertTrue(positions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(2, positions.freq());
positions.nextPosition();
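
For context, a minimal caller-side sketch of the migration this test illustrates, assuming the unified API introduced by this patch (postings() and PostingsEnum.FLAG_ALL are taken from the hunks above; DocIdSetIterator supplies the existing sentinel):

    // Old API: a distinct enum type was required to read positions:
    //   DocsAndPositionsEnum positions = termsEnum.docsAndPositions(null, null);
    // New API: one enum type; the flags argument selects the level of detail.
    TermsEnum termsEnum = vector.iterator(null);
    termsEnum.next();
    PostingsEnum positions = termsEnum.postings(null, null, PostingsEnum.FLAG_ALL);
    while (positions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
      int freq = positions.freq();          // available because freqs were requested
      for (int i = 0; i < freq; i++) {
        positions.nextPosition();           // valid only because positions were requested
      }
    }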
diff --git a/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestClassicAnalyzer.java b/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestClassicAnalyzer.java
index 3b3706f..67cab60 100644
--- a/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestClassicAnalyzer.java
+++ b/lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestClassicAnalyzer.java
@@ -2,12 +2,11 @@ package org.apache.lucene.analysis.standard;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.analysis.standard.ClassicAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
@@ -281,7 +280,7 @@ public class TestClassicAnalyzer extends BaseTokenStreamTestCase {
// Make sure position is still incremented when
// massive term is skipped:
- DocsAndPositionsEnum tps = MultiFields.getTermPositionsEnum(reader,
+ PostingsEnum tps = MultiFields.getTermPositionsEnum(reader,
MultiFields.getLiveDocs(reader),
"content",
new BytesRef("another"));
diff --git a/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java b/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
index a73bcc4..09c6f31 100644
--- a/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
+++ b/lucene/backward-codecs/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
@@ -932,7 +932,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
writer.addDocument(doc);
}
- private int countDocs(DocsEnum docs) throws IOException {
+ private int countDocs(PostingsEnum docs) throws IOException {
int count = 0;
while((docs.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
count ++;
@@ -958,7 +958,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
// should be found exactly
assertEquals(TermsEnum.SeekStatus.FOUND,
terms.seekCeil(aaaTerm));
- assertEquals(35, countDocs(TestUtil.docs(random(), terms, null, null, DocsEnum.FLAG_NONE)));
+ assertEquals(35, countDocs(TestUtil.docs(random(), terms, null, null, PostingsEnum.FLAG_NONE)));
assertNull(terms.next());
// should hit end of field
@@ -970,12 +970,12 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
assertEquals(TermsEnum.SeekStatus.NOT_FOUND,
terms.seekCeil(new BytesRef("a")));
assertTrue(terms.term().bytesEquals(aaaTerm));
- assertEquals(35, countDocs(TestUtil.docs(random(), terms, null, null, DocsEnum.FLAG_NONE)));
+ assertEquals(35, countDocs(TestUtil.docs(random(), terms, null, null, PostingsEnum.FLAG_NONE)));
assertNull(terms.next());
assertEquals(TermsEnum.SeekStatus.FOUND,
terms.seekCeil(aaaTerm));
- assertEquals(35, countDocs(TestUtil.docs(random(), terms, null, null, DocsEnum.FLAG_NONE)));
+ assertEquals(35, countDocs(TestUtil.docs(random(), terms, null, null, PostingsEnum.FLAG_NONE)));
assertNull(terms.next());
r.close();
diff --git a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
index d89a670..ca748c2 100644
--- a/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
+++ b/lucene/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
@@ -40,7 +40,7 @@ import org.apache.lucene.benchmark.byTask.tasks.WriteLineDocTask;
import org.apache.lucene.collation.CollationKeyAnalyzer;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
@@ -497,9 +497,9 @@ public class TestPerfTasksLogic extends BenchmarkTestCase {
continue;
}
TermsEnum termsEnum = terms.iterator(null);
- DocsEnum docs = null;
+ PostingsEnum docs = null;
while(termsEnum.next() != null) {
- docs = TestUtil.docs(random(), termsEnum, MultiFields.getLiveDocs(reader), docs, DocsEnum.FLAG_FREQS);
+ docs = TestUtil.docs(random(), termsEnum, MultiFields.getLiveDocs(reader), docs, PostingsEnum.FLAG_FREQS);
while(docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
totalTokenCount2 += docs.freq();
}
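
The loop above also preserves the reuse idiom: the previous enum is passed back into the lookup so the codec can recycle it. A condensed sketch of that pattern, using only calls that appear in this patch:

    PostingsEnum docs = null;  // nothing to reuse yet
    long totalTokenCount = 0;
    while (termsEnum.next() != null) {
      // Pass the old enum back in; the codec may return the same instance, reset.
      docs = TestUtil.docs(random(), termsEnum, MultiFields.getLiveDocs(reader), docs,
                           PostingsEnum.FLAG_FREQS);
      while (docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
        totalTokenCount += docs.freq();
      }
    }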
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsReader.java
index 80540e9..5b3f405 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsReader.java
@@ -30,8 +30,7 @@ import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.codecs.FieldsProducer;
import org.apache.lucene.codecs.PostingsReaderBase;
import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.IndexOptions;
@@ -652,22 +651,19 @@ public class BlockTermsReader extends FieldsProducer {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
- //System.out.println("BTR.docs this=" + this);
- decodeMetaData();
- //System.out.println("BTR.docs: state.docFreq=" + state.docFreq);
- return postingsReader.docs(fieldInfo, state, liveDocs, reuse, flags);
- }
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- if (fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
- // Positions were not indexed:
- return null;
+ if (PostingsEnum.requiresPositions(flags)) {
+ if (fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
+ // Positions were not indexed:
+ return null;
+ }
}
+ //System.out.println("BTR.docs this=" + this);
decodeMetaData();
- return postingsReader.docsAndPositions(fieldInfo, state, liveDocs, reuse, flags);
+ //System.out.println("BTR.docs: state.docFreq=" + state.docFreq);
+ return postingsReader.docs(fieldInfo, state, liveDocs, reuse, flags);
}
@Override
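
The guard added here establishes the contract the rest of the patch repeats: postings() returns null when positions are requested for a field that was indexed without them, replacing the old two-method split. A hedged caller-side sketch of handling that contract (flag names as defined elsewhere in this patch):

    // Ask for positions, falling back to a docs-only enum if the field lacks them.
    PostingsEnum pe = termsEnum.postings(liveDocs, null, PostingsEnum.FLAG_POSITIONS);
    if (pe == null) {
      // Positions were not indexed for this field (see the guard above).
      pe = termsEnum.postings(liveDocs, null, PostingsEnum.FLAG_NONE);
    }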
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsIntersectTermsEnum.java b/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsIntersectTermsEnum.java
index 0eb9709..313b45a 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsIntersectTermsEnum.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsIntersectTermsEnum.java
@@ -20,8 +20,7 @@ package org.apache.lucene.codecs.blocktreeords;
import java.io.IOException;
import org.apache.lucene.codecs.blocktreeords.FSTOrdsOutputs.Output;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.TermState;
import org.apache.lucene.index.TermsEnum;
@@ -203,20 +202,17 @@ final class OrdsIntersectTermsEnum extends TermsEnum {
}
@Override
- public DocsEnum docs(Bits skipDocs, DocsEnum reuse, int flags) throws IOException {
- currentFrame.decodeMetaData();
- return fr.parent.postingsReader.docs(fr.fieldInfo, currentFrame.termState, skipDocs, reuse, flags);
- }
+ public PostingsEnum postings(Bits skipDocs, PostingsEnum reuse, int flags) throws IOException {
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- if (fr.fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
- // Positions were not indexed:
- return null;
+ if (PostingsEnum.requiresPositions(flags)) {
+ if (fr.fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
+ // Positions were not indexed:
+ return null;
+ }
}
currentFrame.decodeMetaData();
- return fr.parent.postingsReader.docsAndPositions(fr.fieldInfo, currentFrame.termState, skipDocs, reuse, flags);
+ return fr.parent.postingsReader.docs(fr.fieldInfo, currentFrame.termState, skipDocs, reuse, flags);
}
private int getState() {
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsSegmentTermsEnum.java b/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsSegmentTermsEnum.java
index 8bdd248..626d91e 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsSegmentTermsEnum.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsSegmentTermsEnum.java
@@ -25,8 +25,7 @@ import java.io.PrintStream;
import org.apache.lucene.codecs.BlockTermState;
import org.apache.lucene.codecs.blocktreeords.FSTOrdsOutputs.Output;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.TermState;
import org.apache.lucene.index.TermsEnum;
@@ -924,7 +923,15 @@ public final class OrdsSegmentTermsEnum extends TermsEnum {
}
@Override
- public DocsEnum docs(Bits skipDocs, DocsEnum reuse, int flags) throws IOException {
+ public PostingsEnum postings(Bits skipDocs, PostingsEnum reuse, int flags) throws IOException {
+
+ if (PostingsEnum.requiresPositions(flags)) {
+ if (fr.fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
+ // Positions were not indexed:
+ return null;
+ }
+ }
+
assert !eof;
//if (DEBUG) {
//System.out.println("BTTR.docs seg=" + segment);
@@ -937,18 +944,6 @@ public final class OrdsSegmentTermsEnum extends TermsEnum {
}
@Override
- public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- if (fr.fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
- // Positions were not indexed:
- return null;
- }
-
- assert !eof;
- currentFrame.decodeMetaData();
- return fr.parent.postingsReader.docsAndPositions(fr.fieldInfo, currentFrame.state, skipDocs, reuse, flags);
- }
-
- @Override
public void seekExact(BytesRef target, TermState otherState) {
// if (DEBUG) {
// System.out.println("BTTR.seekExact termState seg=" + segment + " target=" + target.utf8ToString() + " " + target + " state=" + otherState);
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java
index 09ca6e0..ec8f5e5 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java
@@ -32,8 +32,7 @@ import org.apache.lucene.codecs.FieldsConsumer;
import org.apache.lucene.codecs.FieldsProducer;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.codecs.bloom.FuzzySet.ContainsResult;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexFileNames;
@@ -382,19 +381,13 @@ public final class BloomFilteringPostingsFormat extends PostingsFormat {
public long totalTermFreq() throws IOException {
return delegate().totalTermFreq();
}
-
@Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs,
- DocsAndPositionsEnum reuse, int flags) throws IOException {
- return delegate().docsAndPositions(liveDocs, reuse, flags);
- }
-
- @Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags)
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags)
throws IOException {
- return delegate().docs(liveDocs, reuse, flags);
+ return delegate().postings(liveDocs, reuse, flags);
}
+
}
@Override
@@ -460,7 +453,7 @@ public final class BloomFilteringPostingsFormat extends PostingsFormat {
FuzzySet bloomFilter = null;
- DocsEnum docsEnum = null;
+ PostingsEnum postingsEnum = null;
while (true) {
BytesRef term = termsEnum.next();
if (term == null) {
@@ -476,8 +469,8 @@ public final class BloomFilteringPostingsFormat extends PostingsFormat {
bloomFilters.put(fieldInfo, bloomFilter);
}
// Make sure there's at least one doc for this term:
- docsEnum = termsEnum.docs(null, docsEnum, 0);
- if (docsEnum.nextDoc() != DocsEnum.NO_MORE_DOCS) {
+ postingsEnum = termsEnum.postings(null, postingsEnum, 0);
+ if (postingsEnum.nextDoc() != PostingsEnum.NO_MORE_DOCS) {
bloomFilter.addValue(term);
}
}
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java
index 562c9dc..41857c7 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/DirectPostingsFormat.java
@@ -27,13 +27,13 @@ import java.util.TreeMap;
import org.apache.lucene.codecs.FieldsConsumer;
import org.apache.lucene.codecs.FieldsProducer;
import org.apache.lucene.codecs.PostingsFormat;
-import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat; // javadocs
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.OrdTermState;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.index.SegmentWriteState;
import org.apache.lucene.index.TermState;
@@ -51,7 +51,7 @@ import org.apache.lucene.util.automaton.CompiledAutomaton;
import org.apache.lucene.util.automaton.RunAutomaton;
import org.apache.lucene.util.automaton.Transition;
-// TODO:
+// TODO:
// - build depth-N prefix hash?
// - or: longer dense skip lists than just next byte?
@@ -62,7 +62,7 @@ import org.apache.lucene.util.automaton.Transition;
*
 * <p><b>WARNING</b>: This is
* exceptionally RAM intensive: it makes no effort to
* compress the postings data, storing terms as separate
- * byte[] and postings as separate int[], but as a result it
+ * byte[] and postings as separate int[], but as a result it
* gives substantial increase in search performance.
*
 * <p>This postings format supports {@link TermsEnum#ord}
@@ -89,7 +89,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
public DirectPostingsFormat() {
this(DEFAULT_MIN_SKIP_COUNT, DEFAULT_LOW_FREQ_CUTOFF);
}
-
+
/** minSkipCount is how many terms in a row must have the
* same prefix before we put a skip pointer down. Terms
* with docFreq <= lowFreqCutoff will use a single int[]
@@ -100,7 +100,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
this.minSkipCount = minSkipCount;
this.lowFreqCutoff = lowFreqCutoff;
}
-
+
@Override
public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
return PostingsFormat.forName("Lucene50").fieldsConsumer(state);
@@ -161,7 +161,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
return sizeInBytes;
}
-
+
@Override
  public Collection<Accountable> getChildResources() {
return Accountables.namedAccountables("field", fields);
@@ -206,9 +206,10 @@ public final class DirectPostingsFormat extends PostingsFormat {
@Override
public long ramBytesUsed() {
return BASE_RAM_BYTES_USED +
- ((postings!=null) ? RamUsageEstimator.sizeOf(postings) : 0) +
+ ((postings!=null) ? RamUsageEstimator.sizeOf(postings) : 0) +
((payloads!=null) ? RamUsageEstimator.sizeOf(payloads) : 0);
}
+
}
// TODO: maybe specialize into prx/no-prx/no-frq cases?
@@ -232,31 +233,32 @@ public final class DirectPostingsFormat extends PostingsFormat {
@Override
public long ramBytesUsed() {
- long sizeInBytes = BASE_RAM_BYTES_USED;
- sizeInBytes += (docIDs!=null)? RamUsageEstimator.sizeOf(docIDs) : 0;
- sizeInBytes += (freqs!=null)? RamUsageEstimator.sizeOf(freqs) : 0;
-
- if(positions != null) {
- sizeInBytes += RamUsageEstimator.shallowSizeOf(positions);
- for(int[] position : positions) {
- sizeInBytes += (position!=null) ? RamUsageEstimator.sizeOf(position) : 0;
- }
- }
-
- if (payloads != null) {
- sizeInBytes += RamUsageEstimator.shallowSizeOf(payloads);
- for(byte[][] payload : payloads) {
- if(payload != null) {
- sizeInBytes += RamUsageEstimator.shallowSizeOf(payload);
- for(byte[] pload : payload) {
- sizeInBytes += (pload!=null) ? RamUsageEstimator.sizeOf(pload) : 0;
- }
- }
- }
- }
-
- return sizeInBytes;
+ long sizeInBytes = BASE_RAM_BYTES_USED;
+ sizeInBytes += (docIDs!=null)? RamUsageEstimator.sizeOf(docIDs) : 0;
+ sizeInBytes += (freqs!=null)? RamUsageEstimator.sizeOf(freqs) : 0;
+
+ if(positions != null) {
+ sizeInBytes += RamUsageEstimator.shallowSizeOf(positions);
+ for(int[] position : positions) {
+ sizeInBytes += (position!=null) ? RamUsageEstimator.sizeOf(position) : 0;
+ }
+ }
+
+ if (payloads != null) {
+ sizeInBytes += RamUsageEstimator.shallowSizeOf(payloads);
+ for(byte[][] payload : payloads) {
+ if(payload != null) {
+ sizeInBytes += RamUsageEstimator.shallowSizeOf(payload);
+ for(byte[] pload : payload) {
+ sizeInBytes += (pload!=null) ? RamUsageEstimator.sizeOf(pload) : 0;
+ }
+ }
+ }
+ }
+
+ return sizeInBytes;
}
+
}
private final byte[] termBytes;
@@ -313,7 +315,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
terms = new TermAndSkip[numTerms];
termOffsets = new int[1+numTerms];
-
+
byte[] termBytes = new byte[1024];
this.minSkipCount = minSkipCount;
@@ -324,8 +326,8 @@ public final class DirectPostingsFormat extends PostingsFormat {
hasPayloads = fieldInfo.hasPayloads();
BytesRef term;
- DocsEnum docsEnum = null;
- DocsAndPositionsEnum docsAndPositionsEnum = null;
+ PostingsEnum postingsEnum = null;
+ PostingsEnum docsAndPositionsEnum = null;
final TermsEnum termsEnum = termsIn.iterator(null);
int termOffset = 0;
@@ -356,18 +358,18 @@ public final class DirectPostingsFormat extends PostingsFormat {
termOffsets[count+1] = termOffset;
if (hasPos) {
- docsAndPositionsEnum = termsEnum.docsAndPositions(null, docsAndPositionsEnum);
+ docsAndPositionsEnum = termsEnum.postings(null, docsAndPositionsEnum, PostingsEnum.FLAG_ALL);
} else {
- docsEnum = termsEnum.docs(null, docsEnum);
+ postingsEnum = termsEnum.postings(null, postingsEnum);
}
final TermAndSkip ent;
- final DocsEnum docsEnum2;
+ final PostingsEnum postingsEnum2;
if (hasPos) {
- docsEnum2 = docsAndPositionsEnum;
+ postingsEnum2 = docsAndPositionsEnum;
} else {
- docsEnum2 = docsEnum;
+ postingsEnum2 = postingsEnum;
}
int docID;
@@ -377,10 +379,10 @@ public final class DirectPostingsFormat extends PostingsFormat {
ros.reset();
// Pack postings for low-freq terms into a single int[]:
- while ((docID = docsEnum2.nextDoc()) != DocsEnum.NO_MORE_DOCS) {
+ while ((docID = postingsEnum2.nextDoc()) != PostingsEnum.NO_MORE_DOCS) {
scratch.add(docID);
if (hasFreq) {
- final int freq = docsEnum2.freq();
+ final int freq = postingsEnum2.freq();
scratch.add(freq);
if (hasPos) {
                for(int pos=0;pos<freq;pos++) {
@@ ... @@
+    if ((flags & PostingsEnum.FLAG_POSITIONS) >= PostingsEnum.FLAG_POSITIONS) {
+ if (!hasPos) {
+ return null;
+ }
+
+ if (terms[termOrd] instanceof LowFreqTerm) {
+ final LowFreqTerm term = ((LowFreqTerm) terms[termOrd]);
+ final int[] postings = term.postings;
+ final byte[] payloads = term.payloads;
+ return new LowFreqPostingsEnum(liveDocs, hasOffsets, hasPayloads).reset(postings, payloads);
+ } else {
+ final HighFreqTerm term = (HighFreqTerm) terms[termOrd];
+ return new HighFreqPostingsEnum(liveDocs, hasOffsets).reset(term.docIDs, term.freqs, term.positions, term.payloads);
+ }
+ }
+
if (terms[termOrd] instanceof LowFreqTerm) {
final int[] postings = ((LowFreqTerm) terms[termOrd]).postings;
if (hasFreq) {
@@ -927,25 +945,6 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
}
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) {
- if (!hasPos) {
- return null;
- }
-
- // TODO: implement reuse
- // it's hairy!
-
- if (terms[termOrd] instanceof LowFreqTerm) {
- final LowFreqTerm term = ((LowFreqTerm) terms[termOrd]);
- final int[] postings = term.postings;
- final byte[] payloads = term.payloads;
- return new LowFreqDocsAndPositionsEnum(liveDocs, hasOffsets, hasPayloads).reset(postings, payloads);
- } else {
- final HighFreqTerm term = (HighFreqTerm) terms[termOrd];
- return new HighFreqDocsAndPositionsEnum(liveDocs, hasOffsets).reset(term.docIDs, term.freqs, term.positions, term.payloads);
- }
- }
}
private final class DirectIntersectTermsEnum extends TermsEnum {
@@ -1203,7 +1202,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
// if (DEBUG) {
// System.out.println(" term=" + new BytesRef(termBytes, termOffset, termLength).utf8ToString() + " skips=" + Arrays.toString(skips));
// }
-
+
assert termOrd < state.changeOrd;
assert stateUpto <= termLength: "term.length=" + termLength + "; stateUpto=" + stateUpto;
@@ -1336,7 +1335,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
compiledAutomaton.automaton.initTransition(nextState, states[stateUpto].transition);
states[stateUpto].transitionUpto = -1;
states[stateUpto].transitionMax = -1;
-
+
if (stateUpto == termLength) {
// if (DEBUG) {
// System.out.println(" term ends after push");
@@ -1453,9 +1452,23 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) {
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) {
// TODO: implement reuse
// it's hairy!
+ if ((flags & PostingsEnum.FLAG_POSITIONS) >= PostingsEnum.FLAG_POSITIONS) {
+ if (!hasPos) {
+ return null;
+ }
+ if (terms[termOrd] instanceof LowFreqTerm) {
+ final LowFreqTerm term = ((LowFreqTerm) terms[termOrd]);
+ final int[] postings = term.postings;
+ final byte[] payloads = term.payloads;
+ return new LowFreqPostingsEnum(liveDocs, hasOffsets, hasPayloads).reset(postings, payloads);
+ } else {
+ final HighFreqTerm term = (HighFreqTerm) terms[termOrd];
+ return new HighFreqPostingsEnum(liveDocs, hasOffsets).reset(term.docIDs, term.freqs, term.positions, term.payloads);
+ }
+ }
if (terms[termOrd] instanceof LowFreqTerm) {
final int[] postings = ((LowFreqTerm) terms[termOrd]).postings;
@@ -1485,26 +1498,6 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
@Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) {
- if (!hasPos) {
- return null;
- }
-
- // TODO: implement reuse
- // it's hairy!
-
- if (terms[termOrd] instanceof LowFreqTerm) {
- final LowFreqTerm term = ((LowFreqTerm) terms[termOrd]);
- final int[] postings = term.postings;
- final byte[] payloads = term.payloads;
- return new LowFreqDocsAndPositionsEnum(liveDocs, hasOffsets, hasPayloads).reset(postings, payloads);
- } else {
- final HighFreqTerm term = (HighFreqTerm) terms[termOrd];
- return new HighFreqDocsAndPositionsEnum(liveDocs, hasOffsets).reset(term.docIDs, term.freqs, term.positions, term.payloads);
- }
- }
-
- @Override
public SeekStatus seekCeil(BytesRef term) {
throw new UnsupportedOperationException();
}
@@ -1530,7 +1523,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
return liveDocs == this.liveDocs;
}
- public DocsEnum reset(int[] postings) {
+ public PostingsEnum reset(int[] postings) {
this.postings = postings;
upto = -1;
return this;
@@ -1573,12 +1566,18 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
@Override
+ public int nextPosition() throws IOException {
+ assert false : "Shouldn't be calling nextPosition() if you haven't asked for positions";
+ return -1;
+ }
+
+ @Override
public int advance(int target) throws IOException {
// Linear scan, but this is low-freq term so it won't
// be costly:
return slowAdvance(target);
}
-
+
@Override
public long cost() {
return postings.length;
@@ -1599,7 +1598,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
return liveDocs == this.liveDocs;
}
- public DocsEnum reset(int[] postings) {
+ public PostingsEnum reset(int[] postings) {
this.postings = postings;
upto = -2;
return this;
@@ -1641,12 +1640,18 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
@Override
+ public int nextPosition() throws IOException {
+ assert false : "Shouldn't be calling nextPosition() if you haven't asked for positions";
+ return -1;
+ }
+
+ @Override
public int advance(int target) throws IOException {
// Linear scan, but this is low-freq term so it won't
// be costly:
return slowAdvance(target);
}
-
+
@Override
public long cost() {
return postings.length / 2;
@@ -1673,7 +1678,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
return liveDocs == this.liveDocs && posMult == this.posMult;
}
- public DocsEnum reset(int[] postings) {
+ public PostingsEnum reset(int[] postings) {
this.postings = postings;
upto = -2;
freq = 0;
@@ -1688,7 +1693,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
// System.out.println(" nextDoc freq=" + freq + " upto=" + upto + " vs " + postings.length);
// }
if (liveDocs == null) {
- if (upto < postings.length) {
+ if (upto < postings.length) {
freq = postings[upto+1];
assert freq > 0;
return postings[upto];
@@ -1725,12 +1730,18 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
@Override
+ public int nextPosition() throws IOException {
+ assert false : "should be using LowFreqDocsAndPositionsEnum";
+ return -1;
+ }
+
+ @Override
public int advance(int target) throws IOException {
// Linear scan, but this is low-freq term so it won't
// be costly:
return slowAdvance(target);
}
-
+
@Override
public long cost() {
// TODO: could do a better estimate
@@ -1738,7 +1749,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
}
- private final static class LowFreqDocsAndPositionsEnum extends DocsAndPositionsEnum {
+ private final static class LowFreqPostingsEnum extends PostingsEnum {
private int[] postings;
private final Bits liveDocs;
private final int posMult;
@@ -1749,6 +1760,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
private int docID;
private int freq;
private int skipPositions;
+ private int pos;
private int startOffset;
private int endOffset;
private int lastPayloadOffset;
@@ -1756,7 +1768,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
private int payloadLength;
private byte[] payloadBytes;
- public LowFreqDocsAndPositionsEnum(Bits liveDocs, boolean hasOffsets, boolean hasPayloads) {
+ public LowFreqPostingsEnum(Bits liveDocs, boolean hasOffsets, boolean hasPayloads) {
this.liveDocs = liveDocs;
this.hasOffsets = hasOffsets;
this.hasPayloads = hasPayloads;
@@ -1773,10 +1785,11 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
}
- public DocsAndPositionsEnum reset(int[] postings, byte[] payloadBytes) {
+ public PostingsEnum reset(int[] postings, byte[] payloadBytes) {
this.postings = postings;
upto = 0;
skipPositions = 0;
+ pos = -1;
startOffset = -1;
endOffset = -1;
docID = -1;
@@ -1787,6 +1800,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
@Override
public int nextDoc() {
+ pos = -1;
if (hasPayloads) {
      for(int i=0;i<skipPositions;i++) {
@@ ... @@
      assert skipPositions > 0;
skipPositions--;
- final int pos = postings[upto++];
+ pos = postings[upto++];
if (hasOffsets) {
startOffset = postings[upto++];
endOffset = postings[upto++];
@@ -1859,6 +1873,16 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
@Override
+ public int startPosition() throws IOException {
+ return pos;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return pos;
+ }
+
+ @Override
public int startOffset() {
return startOffset;
}
@@ -1884,7 +1908,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
return null;
}
}
-
+
@Override
public long cost() {
// TODO: could do a better estimate
@@ -1916,7 +1940,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
return freqs;
}
- public DocsEnum reset(int[] docIDs, int[] freqs) {
+ public PostingsEnum reset(int[] docIDs, int[] freqs) {
this.docIDs = docIDs;
this.freqs = freqs;
docID = upto = -1;
@@ -2063,7 +2087,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
return docID = docIDs[upto];
}
}
-
+
@Override
public long cost() {
return docIDs.length;
@@ -2071,7 +2095,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
}
// TODO: specialize offsets and not
- private final static class HighFreqDocsAndPositionsEnum extends DocsAndPositionsEnum {
+ private final static class HighFreqPostingsEnum extends PostingsEnum {
private int[] docIDs;
private int[] freqs;
private int[][] positions;
@@ -2084,7 +2108,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
private int posUpto;
private int[] curPositions;
- public HighFreqDocsAndPositionsEnum(Bits liveDocs, boolean hasOffsets) {
+ public HighFreqPostingsEnum(Bits liveDocs, boolean hasOffsets) {
this.liveDocs = liveDocs;
this.hasOffsets = hasOffsets;
posJump = hasOffsets ? 3 : 1;
@@ -2106,7 +2130,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
return liveDocs;
}
- public DocsAndPositionsEnum reset(int[] docIDs, int[] freqs, int[][] positions, byte[][][] payloads) {
+ public PostingsEnum reset(int[] docIDs, int[] freqs, int[][] positions, byte[][][] payloads) {
this.docIDs = docIDs;
this.freqs = freqs;
this.positions = positions;
@@ -2120,7 +2144,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
upto++;
if (liveDocs == null) {
if (upto < docIDs.length) {
- posUpto = -posJump;
+ posUpto = -posJump;
curPositions = positions[upto];
return docID = docIDs[upto];
}
@@ -2151,10 +2175,21 @@ public final class DirectPostingsFormat extends PostingsFormat {
@Override
public int nextPosition() {
posUpto += posJump;
+ assert posUpto < curPositions.length;
return curPositions[posUpto];
}
@Override
+ public int startPosition() throws IOException {
+ return curPositions[posUpto];
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return startPosition();
+ }
+
+ @Override
public int startOffset() {
if (hasOffsets) {
return curPositions[posUpto+1];
@@ -2301,7 +2336,7 @@ public final class DirectPostingsFormat extends PostingsFormat {
return payload;
}
}
-
+
@Override
public long cost() {
return docIDs.length;
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java
index b5030ce..2ee6083 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java
@@ -32,8 +32,7 @@ import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.codecs.FieldsProducer;
import org.apache.lucene.codecs.PostingsReaderBase;
import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.IndexFileNames;
@@ -428,20 +427,11 @@ public class FSTOrdTermsReader extends FieldsProducer {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
decodeMetaData();
return postingsReader.docs(fieldInfo, state, liveDocs, reuse, flags);
}
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- if (!hasPositions()) {
- return null;
- }
- decodeMetaData();
- return postingsReader.docsAndPositions(fieldInfo, state, liveDocs, reuse, flags);
- }
-
// TODO: this can be achieved by making use of Util.getByOutput()
// and should have related tests
@Override
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java
index 23065b2..9d32d21 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java
@@ -31,8 +31,7 @@ import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.codecs.FieldsProducer;
import org.apache.lucene.codecs.PostingsReaderBase;
import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.IndexFileNames;
@@ -291,21 +290,12 @@ public class FSTTermsReader extends FieldsProducer {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
decodeMetaData();
return postingsReader.docs(fieldInfo, state, liveDocs, reuse, flags);
}
@Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- if (!hasPositions()) {
- return null;
- }
- decodeMetaData();
- return postingsReader.docsAndPositions(fieldInfo, state, liveDocs, reuse, flags);
- }
-
- @Override
public void seekExact(long ord) throws IOException {
throw new UnsupportedOperationException();
}
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesProducer.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesProducer.java
index f653606..ce47e4b 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesProducer.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryDocValuesProducer.java
@@ -31,8 +31,7 @@ import org.apache.lucene.codecs.DocValuesProducer;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.DocValues;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.IndexFileNames;
@@ -893,13 +892,9 @@ class MemoryDocValuesProducer extends DocValuesProducer {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
throw new UnsupportedOperationException();
}
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- throw new UnsupportedOperationException();
- }
}
}
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
index c7ce7e1..e2866a3 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
@@ -31,8 +31,8 @@ import org.apache.lucene.codecs.FieldsProducer;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.codecs.TermStats;
import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.Fields;
@@ -317,8 +317,8 @@ public final class MemoryPostingsFormat extends PostingsFormat {
FixedBitSet docsSeen = new FixedBitSet(state.segmentInfo.getDocCount());
long sumTotalTermFreq = 0;
long sumDocFreq = 0;
- DocsEnum docsEnum = null;
- DocsAndPositionsEnum posEnum = null;
+ PostingsEnum postingsEnum = null;
+ PostingsEnum posEnum = null;
int enumFlags;
IndexOptions indexOptions = fieldInfo.getIndexOptions();
@@ -330,18 +330,19 @@ public final class MemoryPostingsFormat extends PostingsFormat {
if (writeFreqs == false) {
enumFlags = 0;
} else if (writePositions == false) {
- enumFlags = DocsEnum.FLAG_FREQS;
+ enumFlags = PostingsEnum.FLAG_FREQS;
} else if (writeOffsets == false) {
if (writePayloads) {
- enumFlags = DocsAndPositionsEnum.FLAG_PAYLOADS;
- } else {
- enumFlags = 0;
+ enumFlags = PostingsEnum.FLAG_PAYLOADS;
+ }
+ else {
+ enumFlags = PostingsEnum.FLAG_POSITIONS;
}
} else {
if (writePayloads) {
- enumFlags = DocsAndPositionsEnum.FLAG_PAYLOADS | DocsAndPositionsEnum.FLAG_OFFSETS;
+ enumFlags = PostingsEnum.FLAG_PAYLOADS | PostingsEnum.FLAG_OFFSETS;
} else {
- enumFlags = DocsAndPositionsEnum.FLAG_OFFSETS;
+ enumFlags = PostingsEnum.FLAG_OFFSETS;
}
}
@@ -353,18 +354,18 @@ public final class MemoryPostingsFormat extends PostingsFormat {
termsWriter.postingsWriter.reset();
if (writePositions) {
- posEnum = termsEnum.docsAndPositions(null, posEnum, enumFlags);
- docsEnum = posEnum;
+ posEnum = termsEnum.postings(null, posEnum, enumFlags);
+ postingsEnum = posEnum;
} else {
- docsEnum = termsEnum.docs(null, docsEnum, enumFlags);
+ postingsEnum = termsEnum.postings(null, postingsEnum, enumFlags);
posEnum = null;
}
int docFreq = 0;
long totalTermFreq = 0;
while (true) {
- int docID = docsEnum.nextDoc();
- if (docID == DocsEnum.NO_MORE_DOCS) {
+ int docID = postingsEnum.nextDoc();
+ if (docID == PostingsEnum.NO_MORE_DOCS) {
break;
}
docsSeen.set(docID);
@@ -372,7 +373,7 @@ public final class MemoryPostingsFormat extends PostingsFormat {
int freq;
if (writeFreqs) {
- freq = docsEnum.freq();
+ freq = postingsEnum.freq();
totalTermFreq += freq;
} else {
freq = -1;
@@ -545,14 +546,14 @@ public final class MemoryPostingsFormat extends PostingsFormat {
public int freq() {
return freq;
}
-
+
@Override
public long cost() {
return numDocs;
}
}
- private final static class FSTDocsAndPositionsEnum extends DocsAndPositionsEnum {
+ private final static class FSTPostingsEnum extends PostingsEnum {
private final boolean storePayloads;
private byte[] buffer = new byte[16];
private final ByteArrayDataInput in = new ByteArrayDataInput(buffer);
@@ -572,7 +573,7 @@ public final class MemoryPostingsFormat extends PostingsFormat {
private int pos;
private final BytesRef payload = new BytesRef();
- public FSTDocsAndPositionsEnum(boolean storePayloads, boolean storeOffsets) {
+ public FSTPostingsEnum(boolean storePayloads, boolean storeOffsets) {
this.storePayloads = storePayloads;
this.storeOffsets = storeOffsets;
}
@@ -581,7 +582,7 @@ public final class MemoryPostingsFormat extends PostingsFormat {
return storePayloads == this.storePayloads && storeOffsets == this.storeOffsets;
}
- public FSTDocsAndPositionsEnum reset(BytesRef bufferIn, Bits liveDocs, int numDocs) {
+ public FSTPostingsEnum reset(BytesRef bufferIn, Bits liveDocs, int numDocs) {
assert numDocs > 0;
// System.out.println("D&P reset bytes this=" + this);
@@ -701,6 +702,16 @@ public final class MemoryPostingsFormat extends PostingsFormat {
}
@Override
+ public int startPosition() throws IOException {
+ return pos;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return pos;
+ }
+
+ @Override
public int startOffset() {
return startOffset;
}
@@ -807,7 +818,27 @@ public final class MemoryPostingsFormat extends PostingsFormat {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) {
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) {
+
+ if ((flags & PostingsEnum.FLAG_POSITIONS) >= PostingsEnum.FLAG_POSITIONS) {
+ if (field.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
+ return null;
+ }
+ boolean hasOffsets = field.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
+ decodeMetaData();
+ FSTPostingsEnum docsAndPositionsEnum;
+ if (reuse == null || !(reuse instanceof FSTPostingsEnum)) {
+ docsAndPositionsEnum = new FSTPostingsEnum(field.hasPayloads(), hasOffsets);
+ } else {
+ docsAndPositionsEnum = (FSTPostingsEnum) reuse;
+ if (!docsAndPositionsEnum.canReuse(field.hasPayloads(), hasOffsets)) {
+ docsAndPositionsEnum = new FSTPostingsEnum(field.hasPayloads(), hasOffsets);
+ }
+ }
+ //System.out.println("D&P reset this=" + this);
+ return docsAndPositionsEnum.reset(postingsSpare, liveDocs, docFreq);
+ }
+
decodeMetaData();
FSTDocsEnum docsEnum;
@@ -823,27 +854,6 @@ public final class MemoryPostingsFormat extends PostingsFormat {
}
@Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) {
-
- boolean hasOffsets = field.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
- if (field.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
- return null;
- }
- decodeMetaData();
- FSTDocsAndPositionsEnum docsAndPositionsEnum;
- if (reuse == null || !(reuse instanceof FSTDocsAndPositionsEnum)) {
- docsAndPositionsEnum = new FSTDocsAndPositionsEnum(field.hasPayloads(), hasOffsets);
- } else {
- docsAndPositionsEnum = (FSTDocsAndPositionsEnum) reuse;
- if (!docsAndPositionsEnum.canReuse(field.hasPayloads(), hasOffsets)) {
- docsAndPositionsEnum = new FSTDocsAndPositionsEnum(field.hasPayloads(), hasOffsets);
- }
- }
- //System.out.println("D&P reset this=" + this);
- return docsAndPositionsEnum.reset(postingsSpare, liveDocs, docFreq);
- }
-
- @Override
public BytesRef term() {
return current.input;
}
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsReader.java
index 3a7591c..888d0f8 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsReader.java
@@ -17,16 +17,6 @@ package org.apache.lucene.codecs.simpletext;
* limitations under the License.
*/
-import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.DOC;
-import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.END;
-import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.END_OFFSET;
-import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.FIELD;
-import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.FREQ;
-import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.PAYLOAD;
-import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.POS;
-import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.START_OFFSET;
-import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.TERM;
-
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Collection;
@@ -37,11 +27,11 @@ import java.util.Map;
import java.util.TreeMap;
import org.apache.lucene.codecs.FieldsProducer;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
@@ -68,11 +58,21 @@ import org.apache.lucene.util.fst.PairOutputs;
import org.apache.lucene.util.fst.PositiveIntOutputs;
import org.apache.lucene.util.fst.Util;
+import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.DOC;
+import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.END;
+import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.END_OFFSET;
+import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.FIELD;
+import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.FREQ;
+import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.PAYLOAD;
+import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.POS;
+import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.START_OFFSET;
+import static org.apache.lucene.codecs.simpletext.SimpleTextFieldsWriter.TERM;
+
class SimpleTextFieldsReader extends FieldsProducer {
private static final long BASE_RAM_BYTES_USED =
- RamUsageEstimator.shallowSizeOfInstance(SimpleTextFieldsReader.class)
- + RamUsageEstimator.shallowSizeOfInstance(TreeMap.class);
+ RamUsageEstimator.shallowSizeOfInstance(SimpleTextFieldsReader.class)
+ + RamUsageEstimator.shallowSizeOfInstance(TreeMap.class);
  private final TreeMap<String,Long> fields;
private final IndexInput in;
@@ -93,12 +93,12 @@ class SimpleTextFieldsReader extends FieldsProducer {
}
}
}
-
+
  private TreeMap<String,Long> readFields(IndexInput in) throws IOException {
ChecksumIndexInput input = new BufferedChecksumIndexInput(in);
BytesRefBuilder scratch = new BytesRefBuilder();
    TreeMap<String,Long> fields = new TreeMap<>();
-
+
while (true) {
SimpleTextUtil.readLine(input, scratch);
if (scratch.get().equals(END)) {
@@ -206,9 +206,26 @@ class SimpleTextFieldsReader extends FieldsProducer {
public long totalTermFreq() {
return indexOptions == IndexOptions.DOCS ? -1 : totalTermFreq;
}
-
+
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
+
+ if (PostingsEnum.requiresPositions(flags)) {
+ if (indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
+ // Positions were not indexed
+ return null;
+ }
+
+ SimpleTextPostingsEnum docsAndPositionsEnum;
+ if (reuse != null && reuse instanceof SimpleTextPostingsEnum && ((SimpleTextPostingsEnum) reuse).canReuse(SimpleTextFieldsReader.this.in)) {
+ docsAndPositionsEnum = (SimpleTextPostingsEnum) reuse;
+ } else {
+ docsAndPositionsEnum = new SimpleTextPostingsEnum();
+ }
+ return docsAndPositionsEnum.reset(docsStart, liveDocs, indexOptions, docFreq);
+
+ }
+
SimpleTextDocsEnum docsEnum;
if (reuse != null && reuse instanceof SimpleTextDocsEnum && ((SimpleTextDocsEnum) reuse).canReuse(SimpleTextFieldsReader.this.in)) {
docsEnum = (SimpleTextDocsEnum) reuse;
@@ -218,22 +235,6 @@ class SimpleTextFieldsReader extends FieldsProducer {
return docsEnum.reset(docsStart, liveDocs, indexOptions == IndexOptions.DOCS, docFreq);
}
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
-
- if (indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
- // Positions were not indexed
- return null;
- }
-
- SimpleTextDocsAndPositionsEnum docsAndPositionsEnum;
- if (reuse != null && reuse instanceof SimpleTextDocsAndPositionsEnum && ((SimpleTextDocsAndPositionsEnum) reuse).canReuse(SimpleTextFieldsReader.this.in)) {
- docsAndPositionsEnum = (SimpleTextDocsAndPositionsEnum) reuse;
- } else {
- docsAndPositionsEnum = new SimpleTextDocsAndPositionsEnum();
- }
- return docsAndPositionsEnum.reset(docsStart, liveDocs, indexOptions, docFreq);
- }
}
private class SimpleTextDocsEnum extends DocsEnum {
@@ -246,7 +247,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
private final BytesRefBuilder scratch = new BytesRefBuilder();
private final CharsRefBuilder scratchUTF16 = new CharsRefBuilder();
private int cost;
-
+
public SimpleTextDocsEnum() {
this.inStart = SimpleTextFieldsReader.this.in;
this.in = this.inStart.clone();
@@ -277,6 +278,12 @@ class SimpleTextFieldsReader extends FieldsProducer {
}
@Override
+ public int nextPosition() throws IOException {
+ assert false : "Shouldn't be calling nextPosition() if you haven't asked for positions";
+ return -1;
+ }
+
+ @Override
public int nextDoc() throws IOException {
if (docID == NO_MORE_DOCS) {
return docID;
@@ -328,14 +335,14 @@ class SimpleTextFieldsReader extends FieldsProducer {
// Naive -- better to index skip data
return slowAdvance(target);
}
-
+
@Override
public long cost() {
return cost;
}
}
- private class SimpleTextDocsAndPositionsEnum extends DocsAndPositionsEnum {
+ private class SimpleTextPostingsEnum extends PostingsEnum {
private final IndexInput inStart;
private final IndexInput in;
private int docID = -1;
@@ -345,6 +352,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
private final BytesRefBuilder scratch2 = new BytesRefBuilder();
private final CharsRefBuilder scratchUTF16 = new CharsRefBuilder();
private final CharsRefBuilder scratchUTF16_2 = new CharsRefBuilder();
+ private int pos;
private BytesRef payload;
private long nextDocStart;
private boolean readOffsets;
@@ -353,7 +361,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
private int endOffset;
private int cost;
- public SimpleTextDocsAndPositionsEnum() {
+ public SimpleTextPostingsEnum() {
this.inStart = SimpleTextFieldsReader.this.in;
this.in = inStart.clone();
}
@@ -362,7 +370,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
return in == inStart;
}
- public SimpleTextDocsAndPositionsEnum reset(long fp, Bits liveDocs, IndexOptions indexOptions, int docFreq) {
+ public SimpleTextPostingsEnum reset(long fp, Bits liveDocs, IndexOptions indexOptions, int docFreq) {
this.liveDocs = liveDocs;
nextDocStart = fp;
docID = -1;
@@ -437,7 +445,6 @@ class SimpleTextFieldsReader extends FieldsProducer {
@Override
public int nextPosition() throws IOException {
- final int pos;
if (readPositions) {
SimpleTextUtil.readLine(in, scratch);
assert StringHelper.startsWith(scratch.get(), POS): "got line=" + scratch.get().utf8ToString();
@@ -475,6 +482,16 @@ class SimpleTextFieldsReader extends FieldsProducer {
}
@Override
+ public int startPosition() throws IOException {
+ return pos;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return pos;
+ }
+
+ @Override
public int startOffset() throws IOException {
return startOffset;
}
@@ -488,7 +505,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
public BytesRef getPayload() {
return payload;
}
-
+
@Override
public long cost() {
return cost;
@@ -506,9 +523,9 @@ class SimpleTextFieldsReader extends FieldsProducer {
}
private static final long TERMS_BASE_RAM_BYTES_USED =
- RamUsageEstimator.shallowSizeOfInstance(SimpleTextTerms.class)
- + RamUsageEstimator.shallowSizeOfInstance(BytesRef.class)
- + RamUsageEstimator.shallowSizeOfInstance(CharsRef.class);
+ RamUsageEstimator.shallowSizeOfInstance(SimpleTextTerms.class)
+ + RamUsageEstimator.shallowSizeOfInstance(BytesRef.class)
+ + RamUsageEstimator.shallowSizeOfInstance(CharsRef.class);
private class SimpleTextTerms extends Terms implements Accountable {
private final long termsStart;
private final FieldInfo fieldInfo;
@@ -533,7 +550,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
      final Builder<PairOutputs.Pair<Long,PairOutputs.Pair<Long,Long>>> b;
      final PairOutputs<Long,Long> outputsInner = new PairOutputs<>(posIntOutputs, posIntOutputs);
      final PairOutputs<Long,PairOutputs.Pair<Long,Long>> outputs = new PairOutputs<>(posIntOutputs,
- outputsInner);
+ outputsInner);
b = new Builder<>(FST.INPUT_TYPE.BYTE1, outputs);
IndexInput in = SimpleTextFieldsReader.this.in.clone();
in.seek(termsStart);
@@ -548,8 +565,8 @@ class SimpleTextFieldsReader extends FieldsProducer {
if (scratch.get().equals(END) || StringHelper.startsWith(scratch.get(), FIELD)) {
if (lastDocsStart != -1) {
b.add(Util.toIntsRef(lastTerm.get(), scratchIntsRef),
- outputs.newPair(lastDocsStart,
- outputsInner.newPair((long) docFreq, totalTermFreq)));
+ outputs.newPair(lastDocsStart,
+ outputsInner.newPair((long) docFreq, totalTermFreq)));
sumTotalTermFreq += totalTermFreq;
}
break;
@@ -565,7 +582,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
} else if (StringHelper.startsWith(scratch.get(), TERM)) {
if (lastDocsStart != -1) {
b.add(Util.toIntsRef(lastTerm.get(), scratchIntsRef), outputs.newPair(lastDocsStart,
- outputsInner.newPair((long) docFreq, totalTermFreq)));
+ outputsInner.newPair((long) docFreq, totalTermFreq)));
}
lastDocsStart = in.getFilePointer();
final int len = scratch.length() - TERM.length;
@@ -652,7 +669,7 @@ class SimpleTextFieldsReader extends FieldsProducer {
public boolean hasPositions() {
return fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
}
-
+
@Override
public boolean hasPayloads() {
return fieldInfo.hasPayloads();
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsWriter.java
index 656713d..436a204 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsWriter.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsWriter.java
@@ -20,8 +20,7 @@ package org.apache.lucene.codecs.simpletext;
import java.io.IOException;
import org.apache.lucene.codecs.FieldsConsumer;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.Fields;
@@ -33,7 +32,7 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
class SimpleTextFieldsWriter extends FieldsConsumer {
-
+
private IndexOutput out;
private final BytesRefBuilder scratch = new BytesRefBuilder();
private final SegmentWriteState writeState;
@@ -79,22 +78,21 @@ class SimpleTextFieldsWriter extends FieldsConsumer {
int flags = 0;
if (hasPositions) {
-
+ flags = PostingsEnum.FLAG_POSITIONS;
if (hasPayloads) {
- flags = flags | DocsAndPositionsEnum.FLAG_PAYLOADS;
+ flags = flags | PostingsEnum.FLAG_PAYLOADS;
}
if (hasOffsets) {
- flags = flags | DocsAndPositionsEnum.FLAG_OFFSETS;
+ flags = flags | PostingsEnum.FLAG_OFFSETS;
}
} else {
if (hasFreqs) {
- flags = flags | DocsEnum.FLAG_FREQS;
+ flags = flags | PostingsEnum.FLAG_FREQS;
}
}
TermsEnum termsEnum = terms.iterator(null);
- DocsAndPositionsEnum posEnum = null;
- DocsEnum docsEnum = null;
+ PostingsEnum postingsEnum = null;
// for each term in field
while(true) {
@@ -103,20 +101,16 @@ class SimpleTextFieldsWriter extends FieldsConsumer {
break;
}
- if (hasPositions) {
- posEnum = termsEnum.docsAndPositions(null, posEnum, flags);
- docsEnum = posEnum;
- } else {
- docsEnum = termsEnum.docs(null, docsEnum, flags);
- }
- assert docsEnum != null: "termsEnum=" + termsEnum + " hasPos=" + hasPositions + " flags=" + flags;
+ postingsEnum = termsEnum.postings(null, postingsEnum, flags);
+
+ assert postingsEnum != null: "termsEnum=" + termsEnum + " hasPos=" + hasPositions + " flags=" + flags;
boolean wroteTerm = false;
// for each doc in field+term
while(true) {
- int doc = docsEnum.nextDoc();
- if (doc == DocsEnum.NO_MORE_DOCS) {
+ int doc = postingsEnum.nextDoc();
+ if (doc == PostingsEnum.NO_MORE_DOCS) {
break;
}
@@ -143,7 +137,7 @@ class SimpleTextFieldsWriter extends FieldsConsumer {
write(Integer.toString(doc));
newline();
if (hasFreqs) {
- int freq = docsEnum.freq();
+ int freq = postingsEnum.freq();
write(FREQ);
write(Integer.toString(freq));
newline();
@@ -154,15 +148,15 @@ class SimpleTextFieldsWriter extends FieldsConsumer {
// for each pos in field+term+doc
          for(int i=0;i<freq;i++) {
-            int position = posEnum.nextPosition();
+            int position = postingsEnum.nextPosition();

            write(POS);
            write(Integer.toString(position));
            newline();

            if (hasOffsets) {
-              int startOffset = posEnum.startOffset();
-              int endOffset = posEnum.endOffset();
+              int startOffset = postingsEnum.startOffset();
+              int endOffset = postingsEnum.endOffset();
              assert endOffset >= startOffset;
assert startOffset >= lastStartOffset: "startOffset=" + startOffset + " lastStartOffset=" + lastStartOffset;
lastStartOffset = startOffset;
@@ -174,7 +168,7 @@ class SimpleTextFieldsWriter extends FieldsConsumer {
newline();
}
- BytesRef payload = posEnum.getPayload();
+ BytesRef payload = postingsEnum.getPayload();
if (payload != null && payload.length > 0) {
assert payload.length != 0;
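The writer now folds every postings feature into one flags word instead of choosing between two enum types. A sketch restating that flag arithmetic as a standalone function, not part of the patch; the boolean parameters stand in for the Terms.hasFreqs()/hasPositions()/hasOffsets()/hasPayloads() checks above, and the constants are this patch's:

    import org.apache.lucene.index.PostingsEnum;

    class PostingsFlags {
      // Position-level features imply FLAG_POSITIONS; a doc-level index only
      // ever asks for freqs.
      static int flagsFor(boolean hasFreqs, boolean hasPositions, boolean hasOffsets, boolean hasPayloads) {
        int flags = 0;
        if (hasPositions) {
          flags = PostingsEnum.FLAG_POSITIONS;
          if (hasPayloads) {
            flags |= PostingsEnum.FLAG_PAYLOADS;
          }
          if (hasOffsets) {
            flags |= PostingsEnum.FLAG_OFFSETS;
          }
        } else if (hasFreqs) {
          flags = PostingsEnum.FLAG_FREQS;
        }
        return flags;
      }
    }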
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java
index 641ff6c..f87df0b 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextTermVectorsReader.java
@@ -25,8 +25,8 @@ import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.lucene.codecs.TermVectorsReader;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.SegmentInfo;
@@ -59,15 +59,15 @@ import static org.apache.lucene.codecs.simpletext.SimpleTextTermVectorsWriter.*;
public class SimpleTextTermVectorsReader extends TermVectorsReader {
private static final long BASE_RAM_BYTES_USED =
- RamUsageEstimator.shallowSizeOfInstance(SimpleTextTermVectorsReader.class)
- + RamUsageEstimator.shallowSizeOfInstance(BytesRef.class)
- + RamUsageEstimator.shallowSizeOfInstance(CharsRef.class);
+ RamUsageEstimator.shallowSizeOfInstance(SimpleTextTermVectorsReader.class)
+ + RamUsageEstimator.shallowSizeOfInstance(BytesRef.class)
+ + RamUsageEstimator.shallowSizeOfInstance(CharsRef.class);
private long offsets[]; /* docid -> offset in .vec file */
private IndexInput in;
private BytesRefBuilder scratch = new BytesRefBuilder();
private CharsRefBuilder scratchUTF16 = new CharsRefBuilder();
-
+
public SimpleTextTermVectorsReader(Directory directory, SegmentInfo si, IOContext context) throws IOException {
boolean success = false;
try {
@@ -82,15 +82,15 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
}
readIndex(si.getDocCount());
}
-
+
// used by clone
SimpleTextTermVectorsReader(long offsets[], IndexInput in) {
this.offsets = offsets;
this.in = in;
}
-
- // we don't actually write a .tvx-like index, instead we read the
- // vectors file in entirety up-front and save the offsets
+
+ // we don't actually write a .tvx-like index, instead we read the
+ // vectors file in entirety up-front and save the offsets
// so we can seek to the data later.
private void readIndex(int maxDoc) throws IOException {
ChecksumIndexInput input = new BufferedChecksumIndexInput(in);
@@ -106,7 +106,7 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
SimpleTextUtil.checkFooter(input);
assert upto == offsets.length;
}
-
+
@Override
public Fields get(int doc) throws IOException {
SortedMap<String,SimpleTVTerms> fields = new TreeMap<>();
@@ -122,30 +122,30 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
assert StringHelper.startsWith(scratch.get(), FIELD);
// skip fieldNumber:
parseIntAt(FIELD.length);
-
+
readLine();
assert StringHelper.startsWith(scratch.get(), FIELDNAME);
String fieldName = readString(FIELDNAME.length, scratch);
-
+
readLine();
assert StringHelper.startsWith(scratch.get(), FIELDPOSITIONS);
boolean positions = Boolean.parseBoolean(readString(FIELDPOSITIONS.length, scratch));
-
+
readLine();
assert StringHelper.startsWith(scratch.get(), FIELDOFFSETS);
boolean offsets = Boolean.parseBoolean(readString(FIELDOFFSETS.length, scratch));
-
+
readLine();
assert StringHelper.startsWith(scratch.get(), FIELDPAYLOADS);
boolean payloads = Boolean.parseBoolean(readString(FIELDPAYLOADS.length, scratch));
-
+
readLine();
assert StringHelper.startsWith(scratch.get(), FIELDTERMCOUNT);
int termCount = parseIntAt(FIELDTERMCOUNT.length);
-
+
SimpleTVTerms terms = new SimpleTVTerms(offsets, positions, payloads);
fields.put(fieldName, terms);
-
+
BytesRefBuilder term = new BytesRefBuilder();
for (int j = 0; j < termCount; j++) {
readLine();
@@ -154,14 +154,14 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
term.grow(termLength);
term.setLength(termLength);
System.arraycopy(scratch.bytes(), TERMTEXT.length, term.bytes(), 0, termLength);
-
+
SimpleTVPostings postings = new SimpleTVPostings();
terms.terms.put(term.toBytesRef(), postings);
-
+
readLine();
assert StringHelper.startsWith(scratch.get(), TERMFREQ);
postings.freq = parseIntAt(TERMFREQ.length);
-
+
if (positions || offsets) {
if (positions) {
postings.positions = new int[postings.freq];
@@ -169,12 +169,12 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
postings.payloads = new BytesRef[postings.freq];
}
}
-
+
if (offsets) {
postings.startOffsets = new int[postings.freq];
postings.endOffsets = new int[postings.freq];
}
-
+
for (int k = 0; k < postings.freq; k++) {
if (positions) {
readLine();
@@ -192,12 +192,12 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
}
}
}
-
+
if (offsets) {
readLine();
assert StringHelper.startsWith(scratch.get(), STARTOFFSET);
postings.startOffsets[k] = parseIntAt(STARTOFFSET.length);
-
+
readLine();
assert StringHelper.startsWith(scratch.get(), ENDOFFSET);
postings.endOffsets[k] = parseIntAt(ENDOFFSET.length);
@@ -216,11 +216,11 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
}
return new SimpleTextTermVectorsReader(offsets, in.clone());
}
-
+
@Override
public void close() throws IOException {
try {
- IOUtils.close(in);
+ IOUtils.close(in);
} finally {
in = null;
offsets = null;
@@ -230,20 +230,20 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
private void readLine() throws IOException {
SimpleTextUtil.readLine(in, scratch);
}
-
+
private int parseIntAt(int offset) {
scratchUTF16.copyUTF8Bytes(scratch.bytes(), offset, scratch.length()-offset);
return ArrayUtil.parseInt(scratchUTF16.chars(), 0, scratchUTF16.length());
}
-
+
private String readString(int offset, BytesRefBuilder scratch) {
scratchUTF16.copyUTF8Bytes(scratch.bytes(), offset, scratch.length()-offset);
return scratchUTF16.toString();
}
-
+
private class SimpleTVFields extends Fields {
private final SortedMap<String,SimpleTVTerms> fields;
-
+
SimpleTVFields(SortedMap<String,SimpleTVTerms> fields) {
this.fields = fields;
}
@@ -263,20 +263,20 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
return fields.size();
}
}
-
+
private static class SimpleTVTerms extends Terms {
final SortedMap<BytesRef,SimpleTVPostings> terms;
final boolean hasOffsets;
final boolean hasPositions;
final boolean hasPayloads;
-
+
SimpleTVTerms(boolean hasOffsets, boolean hasPositions, boolean hasPayloads) {
this.hasOffsets = hasOffsets;
this.hasPositions = hasPositions;
this.hasPayloads = hasPayloads;
terms = new TreeMap<>();
}
-
+
@Override
public TermsEnum iterator(TermsEnum reuse) throws IOException {
// TODO: reuse
@@ -317,13 +317,13 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
public boolean hasPositions() {
return hasPositions;
}
-
+
@Override
public boolean hasPayloads() {
return hasPayloads;
}
}
-
+
private static class SimpleTVPostings {
private int freq;
private int positions[];
@@ -331,17 +331,17 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
private int endOffsets[];
private BytesRef payloads[];
}
-
+
private static class SimpleTVTermsEnum extends TermsEnum {
SortedMap<BytesRef,SimpleTVPostings> terms;
Iterator<Map.Entry<BytesRef,SimpleTVPostings>> iterator;
Map.Entry<BytesRef,SimpleTVPostings> current;
-
+
SimpleTVTermsEnum(SortedMap<BytesRef,SimpleTVPostings> terms) {
this.terms = terms;
this.iterator = terms.entrySet().iterator();
}
-
+
@Override
public SeekStatus seekCeil(BytesRef text) throws IOException {
iterator = terms.tailMap(text).entrySet().iterator();
@@ -388,26 +388,27 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
- // TODO: reuse
- SimpleTVDocsEnum e = new SimpleTVDocsEnum();
- e.reset(liveDocs, (flags & DocsEnum.FLAG_FREQS) == 0 ? 1 : current.getValue().freq);
- return e;
- }
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- SimpleTVPostings postings = current.getValue();
- if (postings.positions == null && postings.startOffsets == null) {
- return null;
+ if (PostingsEnum.requiresPositions(flags)) {
+ SimpleTVPostings postings = current.getValue();
+ if (postings.positions == null && postings.startOffsets == null) {
+ return null;
+ }
+ // TODO: reuse
+ SimpleTVPostingsEnum e = new SimpleTVPostingsEnum();
+ e.reset(liveDocs, postings.positions, postings.startOffsets, postings.endOffsets, postings.payloads);
+ return e;
}
+
// TODO: reuse
- SimpleTVDocsAndPositionsEnum e = new SimpleTVDocsAndPositionsEnum();
- e.reset(liveDocs, postings.positions, postings.startOffsets, postings.endOffsets, postings.payloads);
+ SimpleTVDocsEnum e = new SimpleTVDocsEnum();
+ e.reset(liveDocs, (flags & PostingsEnum.FLAG_FREQS) == 0 ? 1 : current.getValue().freq);
return e;
}
+
}
-
+
// note: these two enum classes are exactly like the Default impl...
private static class SimpleTVDocsEnum extends DocsEnum {
private boolean didNext;
@@ -422,6 +423,12 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
}
@Override
+ public int nextPosition() throws IOException {
+ assert false;
+ return -1;
+ }
+
+ @Override
public int docID() {
return doc;
}
@@ -447,14 +454,14 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
this.doc = -1;
didNext = false;
}
-
+
@Override
public long cost() {
return 1;
}
}
-
- private static class SimpleTVDocsAndPositionsEnum extends DocsAndPositionsEnum {
+
+ private static class SimpleTVPostingsEnum extends PostingsEnum {
private boolean didNext;
private int doc = -1;
private int nextPos;
@@ -512,17 +519,27 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
@Override
public int nextPosition() {
- assert (positions != null && nextPos < positions.length) ||
- startOffsets != null && nextPos < startOffsets.length;
if (positions != null) {
+ assert nextPos < positions.length;
return positions[nextPos++];
} else {
+ assert nextPos < startOffsets.length;
nextPos++;
return -1;
}
}
@Override
+ public int startPosition() throws IOException {
+ return positions[nextPos-1];
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return positions[nextPos-1];
+ }
+
+ @Override
public int startOffset() {
if (startOffsets == null) {
return -1;
@@ -539,7 +556,7 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
return endOffsets[nextPos-1];
}
}
-
+
@Override
public long cost() {
return 1;
@@ -550,7 +567,7 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
public long ramBytesUsed() {
return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(offsets);
}
-
+
@Override
public String toString() {
return getClass().getSimpleName();
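Under the new contract, a term-vector TermsEnum answers a position-level postings() request with null when neither positions nor offsets were recorded, exactly as the reader above does. A usage sketch under that contract, not part of the patch; the Terms would come from a stored term vector, and all other names are illustrative:

    import java.io.IOException;

    import org.apache.lucene.index.PostingsEnum;
    import org.apache.lucene.index.Terms;
    import org.apache.lucene.index.TermsEnum;

    class TermVectorSketch {
      /** Prints term/position pairs for one field's term vector. */
      static void dumpVector(Terms vector) throws IOException {
        TermsEnum te = vector.iterator(null);
        while (te.next() != null) {
          PostingsEnum pe = te.postings(null, null, PostingsEnum.FLAG_POSITIONS);
          if (pe == null) {
            continue; // neither positions nor offsets were stored
          }
          pe.nextDoc(); // a term vector exposes a single pseudo-document
          int freq = pe.freq();
          for (int i = 0; i < freq; i++) {
            System.out.println(te.term().utf8ToString() + " @ " + pe.nextPosition());
          }
        }
      }
    }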
diff --git a/lucene/codecs/src/test/org/apache/lucene/codecs/memory/TestDirectPostingsFormat.java b/lucene/codecs/src/test/org/apache/lucene/codecs/memory/TestDirectPostingsFormat.java
index 3240903..afd261c 100644
--- a/lucene/codecs/src/test/org/apache/lucene/codecs/memory/TestDirectPostingsFormat.java
+++ b/lucene/codecs/src/test/org/apache/lucene/codecs/memory/TestDirectPostingsFormat.java
@@ -29,6 +29,11 @@ public class TestDirectPostingsFormat extends BasePostingsFormatTestCase {
private final Codec codec = TestUtil.alwaysPostingsFormat(new DirectPostingsFormat());
@Override
+ protected boolean isPostingsEnumReuseImplemented() {
+ return false;
+ }
+
+ @Override
protected Codec getCodec() {
return codec;
}
diff --git a/lucene/common-build.xml b/lucene/common-build.xml
index 478ead5..c170c1e 100644
--- a/lucene/common-build.xml
+++ b/lucene/common-build.xml
@@ -165,7 +165,7 @@
-
+
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/Token.java b/lucene/core/src/java/org/apache/lucene/analysis/Token.java
index f0a66f5..cdb8482 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/Token.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/Token.java
@@ -20,7 +20,7 @@ package org.apache.lucene.analysis;
import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
import org.apache.lucene.analysis.tokenattributes.PackedTokenAttributeImpl;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
-import org.apache.lucene.index.DocsAndPositionsEnum; // for javadoc
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.util.Attribute;
import org.apache.lucene.util.AttributeFactory;
import org.apache.lucene.util.AttributeImpl;
@@ -43,7 +43,7 @@ import org.apache.lucene.util.BytesRef;
with type "eos". The default token type is "word".
A Token can optionally have metadata (a.k.a. payload) in the form of a variable
- length byte array. Use {@link DocsAndPositionsEnum#getPayload()} to retrieve the
+ length byte array. Use {@link org.apache.lucene.index.PostingsEnum#getPayload()} to retrieve the
payloads from the index.
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttribute.java b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttribute.java
index daf6d00..f4d09a6 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttribute.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PayloadAttribute.java
@@ -17,7 +17,6 @@ package org.apache.lucene.analysis.tokenattributes;
* limitations under the License.
*/
-import org.apache.lucene.index.DocsAndPositionsEnum; // javadocs
import org.apache.lucene.util.Attribute;
import org.apache.lucene.util.BytesRef;
@@ -33,7 +32,7 @@ import org.apache.lucene.util.BytesRef;
* best to use the minimum number of bytes necessary. Some codec implementations
* may optimize payload storage when all payloads have the same length.
*
- * @see DocsAndPositionsEnum
+ * @see org.apache.lucene.index.PostingsEnum
*/
public interface PayloadAttribute extends Attribute {
/**
diff --git a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttribute.java b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttribute.java
index 9afd2f9..807987b 100644
--- a/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttribute.java
+++ b/lucene/core/src/java/org/apache/lucene/analysis/tokenattributes/PositionIncrementAttribute.java
@@ -43,7 +43,7 @@ import org.apache.lucene.util.Attribute;
 *
 * </ul>
 *
- * @see org.apache.lucene.index.DocsAndPositionsEnum
+ * @see org.apache.lucene.index.PostingsEnum
*/
public interface PositionIncrementAttribute extends Attribute {
/** Set the position increment. The default value is one.
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/BlockTermState.java b/lucene/core/src/java/org/apache/lucene/codecs/BlockTermState.java
index e6d7a92..0f133e1 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/BlockTermState.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/BlockTermState.java
@@ -16,13 +16,12 @@ package org.apache.lucene.codecs;
* limitations under the License.
*/
-import org.apache.lucene.index.DocsEnum; // javadocs
import org.apache.lucene.index.OrdTermState;
import org.apache.lucene.index.TermState;
/**
* Holds all state required for {@link PostingsReaderBase}
- * to produce a {@link DocsEnum} without re-seeking the
+ * to produce a {@link org.apache.lucene.index.PostingsEnum} without re-seeking the
* terms dict.
*/
public class BlockTermState extends OrdTermState {
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/PostingsReaderBase.java b/lucene/core/src/java/org/apache/lucene/codecs/PostingsReaderBase.java
index 5681c19..6a58ce2 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/PostingsReaderBase.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/PostingsReaderBase.java
@@ -20,8 +20,7 @@ package org.apache.lucene.codecs;
import java.io.Closeable;
import java.io.IOException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.store.DataInput;
@@ -31,8 +30,8 @@ import org.apache.lucene.util.Bits;
/** The core terms dictionaries (BlockTermsReader,
* BlockTreeTermsReader) interact with a single instance
- * of this class to manage creation of {@link DocsEnum} and
- * {@link DocsAndPositionsEnum} instances. It provides an
+ * of this class to manage creation of {@link org.apache.lucene.index.PostingsEnum}
+ * instances. It provides an
* IndexInput (termsIn) where this class may read any
* previously stored data that it had written in its
* corresponding {@link PostingsWriterBase} at indexing
@@ -66,12 +65,7 @@ public abstract class PostingsReaderBase implements Closeable, Accountable {
/** Must fully consume state, since after this call that
* TermState may be reused. */
- public abstract DocsEnum docs(FieldInfo fieldInfo, BlockTermState state, Bits skipDocs, DocsEnum reuse, int flags) throws IOException;
-
- /** Must fully consume state, since after this call that
- * TermState may be reused. */
- public abstract DocsAndPositionsEnum docsAndPositions(FieldInfo fieldInfo, BlockTermState state, Bits skipDocs, DocsAndPositionsEnum reuse,
- int flags) throws IOException;
+ public abstract PostingsEnum docs(FieldInfo fieldInfo, BlockTermState state, Bits skipDocs, PostingsEnum reuse, int flags) throws IOException;
/**
* Checks consistency of this reader.
@@ -81,7 +75,7 @@ public abstract class PostingsReaderBase implements Closeable, Accountable {
* @lucene.internal
*/
public abstract void checkIntegrity() throws IOException;
-
+
@Override
public abstract void close() throws IOException;
}
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/PostingsWriterBase.java b/lucene/core/src/java/org/apache/lucene/codecs/PostingsWriterBase.java
index 0dc7bb5..4df8f4e 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/PostingsWriterBase.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/PostingsWriterBase.java
@@ -17,12 +17,7 @@ package org.apache.lucene.codecs;
* limitations under the License.
*/
-import java.io.Closeable;
-import java.io.IOException;
-
import org.apache.lucene.codecs.blocktree.BlockTreeTermsWriter;
-import org.apache.lucene.index.DocsAndPositionsEnum; // javadocs
-import org.apache.lucene.index.DocsEnum; // javadocs
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.SegmentWriteState;
import org.apache.lucene.index.TermsEnum;
@@ -31,6 +26,9 @@ import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
+import java.io.Closeable;
+import java.io.IOException;
+
/**
* Class that plugs into term dictionaries, such as {@link
* BlockTreeTermsWriter}, and handles writing postings.
@@ -54,8 +52,8 @@ public abstract class PostingsWriterBase implements Closeable {
public abstract void init(IndexOutput termsOut, SegmentWriteState state) throws IOException;
/** Write all postings for one term; use the provided
- * {@link TermsEnum} to pull a {@link DocsEnum} or {@link
- * DocsAndPositionsEnum}. This method should not
+ * {@link TermsEnum} to pull a {@link org.apache.lucene.index.PostingsEnum}.
+ * This method should not
* re-position the {@code TermsEnum}! It is already
* positioned on the term that should be written. This
* method must set the bit in the provided {@link
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/PushPostingsWriterBase.java b/lucene/core/src/java/org/apache/lucene/codecs/PushPostingsWriterBase.java
index 35ebba1..10f9032 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/PushPostingsWriterBase.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/PushPostingsWriterBase.java
@@ -19,8 +19,7 @@ package org.apache.lucene.codecs;
import java.io.IOException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.TermsEnum;
@@ -42,8 +41,7 @@ import org.apache.lucene.util.FixedBitSet;
public abstract class PushPostingsWriterBase extends PostingsWriterBase {
// Reused in writeTerm
- private DocsEnum docsEnum;
- private DocsAndPositionsEnum posEnum;
+ private PostingsEnum postingsEnum;
private int enumFlags;
/** {@link FieldInfo} of current field being written. */
@@ -100,18 +98,18 @@ public abstract class PushPostingsWriterBase extends PostingsWriterBase {
if (writeFreqs == false) {
enumFlags = 0;
} else if (writePositions == false) {
- enumFlags = DocsEnum.FLAG_FREQS;
+ enumFlags = PostingsEnum.FLAG_FREQS;
} else if (writeOffsets == false) {
if (writePayloads) {
- enumFlags = DocsAndPositionsEnum.FLAG_PAYLOADS;
+ enumFlags = PostingsEnum.FLAG_PAYLOADS;
} else {
- enumFlags = 0;
+ enumFlags = PostingsEnum.FLAG_POSITIONS;
}
} else {
if (writePayloads) {
- enumFlags = DocsAndPositionsEnum.FLAG_PAYLOADS | DocsAndPositionsEnum.FLAG_OFFSETS;
+ enumFlags = PostingsEnum.FLAG_PAYLOADS | PostingsEnum.FLAG_OFFSETS;
} else {
- enumFlags = DocsAndPositionsEnum.FLAG_OFFSETS;
+ enumFlags = PostingsEnum.FLAG_OFFSETS;
}
}
@@ -121,26 +119,21 @@ public abstract class PushPostingsWriterBase extends PostingsWriterBase {
@Override
public final BlockTermState writeTerm(BytesRef term, TermsEnum termsEnum, FixedBitSet docsSeen) throws IOException {
startTerm();
- if (writePositions == false) {
- docsEnum = termsEnum.docs(null, docsEnum, enumFlags);
- } else {
- posEnum = termsEnum.docsAndPositions(null, posEnum, enumFlags);
- docsEnum = posEnum;
- }
- assert docsEnum != null;
+ postingsEnum = termsEnum.postings(null, postingsEnum, enumFlags);
+ assert postingsEnum != null;
int docFreq = 0;
long totalTermFreq = 0;
while (true) {
- int docID = docsEnum.nextDoc();
- if (docID == DocsEnum.NO_MORE_DOCS) {
+ int docID = postingsEnum.nextDoc();
+ if (docID == PostingsEnum.NO_MORE_DOCS) {
break;
}
docFreq++;
docsSeen.set(docID);
int freq;
if (writeFreqs) {
- freq = docsEnum.freq();
+ freq = postingsEnum.freq();
totalTermFreq += freq;
} else {
freq = -1;
@@ -149,13 +142,13 @@ public abstract class PushPostingsWriterBase extends PostingsWriterBase {
if (writePositions) {
          for(int i=0;i<freq;i++) {
-            int pos = posEnum.nextPosition();
-            BytesRef payload = writePayloads ? posEnum.getPayload() : null;
+            int pos = postingsEnum.nextPosition();
+            assert !hasPositions || pos >= 0 ;
+            BytesRef payload = writePayloads ? postingsEnum.getPayload() : null;
            int startOffset;
            int endOffset;
            if (writeOffsets) {
-              startOffset = posEnum.startOffset();
-              endOffset = posEnum.endOffset();
+              startOffset = postingsEnum.startOffset();
+              endOffset = postingsEnum.endOffset();
            } else {
              startOffset = -1;
              endOffset = -1;
            }
addPosition(pos, startOffset, endOffset, payload);
}
}
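A sketch restating the enumFlags selection above as a function of the index options, not part of the patch; it assumes, as the code above implies for this branch, that FLAG_PAYLOADS requests positions implicitly and that FLAG_POSITIONS must be asked for explicitly when neither payloads nor offsets are wanted. The writePayloads parameter would come from FieldInfo.hasPayloads() in the real writer:

    import org.apache.lucene.index.IndexOptions;
    import org.apache.lucene.index.PostingsEnum;

    class EnumFlagsSketch {
      static int enumFlags(IndexOptions options, boolean writePayloads) {
        boolean writeFreqs = options.compareTo(IndexOptions.DOCS_AND_FREQS) >= 0;
        boolean writePositions = options.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
        boolean writeOffsets = options.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
        if (!writeFreqs) {
          return 0;
        } else if (!writePositions) {
          return PostingsEnum.FLAG_FREQS;
        } else if (!writeOffsets) {
          return writePayloads ? PostingsEnum.FLAG_PAYLOADS : PostingsEnum.FLAG_POSITIONS;
        } else {
          return writePayloads ? (PostingsEnum.FLAG_PAYLOADS | PostingsEnum.FLAG_OFFSETS)
                               : PostingsEnum.FLAG_OFFSETS;
        }
      }
    }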
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnum.java b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnum.java
index 952d226..3130cac 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnum.java
@@ -19,9 +19,7 @@ package org.apache.lucene.codecs.blocktree;
import java.io.IOException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.TermState;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.IndexInput;
@@ -203,22 +201,11 @@ final class IntersectTermsEnum extends TermsEnum {
}
@Override
- public DocsEnum docs(Bits skipDocs, DocsEnum reuse, int flags) throws IOException {
+ public PostingsEnum postings(Bits skipDocs, PostingsEnum reuse, int flags) throws IOException {
currentFrame.decodeMetaData();
return fr.parent.postingsReader.docs(fr.fieldInfo, currentFrame.termState, skipDocs, reuse, flags);
}
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- if (fr.fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
- // Positions were not indexed:
- return null;
- }
-
- currentFrame.decodeMetaData();
- return fr.parent.postingsReader.docsAndPositions(fr.fieldInfo, currentFrame.termState, skipDocs, reuse, flags);
- }
-
private int getState() {
int state = currentFrame.state;
    for(int idx=0;idx<currentFrame.suffix;idx++) {
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocsEnum.java b/lucene/core/src/java/org/apache/lucene/index/DocsEnum.java
--- a/lucene/core/src/java/org/apache/lucene/index/DocsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocsEnum.java
@@ ... @@
-  /**
-   * Returns term frequency in the current document, or 1 if the field was
-   * indexed with {@link IndexOptions#DOCS}. Do not call this before
-   * {@link #nextDoc} is first called, nor after {@link #nextDoc} returns
-   * {@link DocIdSetIterator#NO_MORE_DOCS}.
-   *
-   * <p>
-   * <b>NOTE:</b> if the {@link DocsEnum} was obtain with {@link #FLAG_NONE},
- * the result of this method is undefined.
- */
- public abstract int freq() throws IOException;
-
- /** Returns the related attributes. */
- public AttributeSource attributes() {
- if (atts == null) atts = new AttributeSource();
- return atts;
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
}
}
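With freq() and attributes() hoisted out of DocsEnum, a docs-only enum is now just a PostingsEnum whose position-oriented accessors report absence, as the getPayload() stub above does. A sketch of that shape, not part of the patch; the class name is hypothetical and the overridden signatures assume the startPosition()/endPosition() methods this patch adds elsewhere:

    import java.io.IOException;

    import org.apache.lucene.index.PostingsEnum;
    import org.apache.lucene.util.BytesRef;

    abstract class DocOnlyPostingsEnum extends PostingsEnum {
      // Positions, offsets and payloads are unavailable below the
      // DOCS_AND_FREQS_AND_POSITIONS level, so report the "absent" values.
      @Override public int nextPosition() throws IOException { return -1; }
      @Override public int startPosition() throws IOException { return -1; }
      @Override public int endPosition() throws IOException { return -1; }
      @Override public int startOffset() throws IOException { return -1; }
      @Override public int endOffset() throws IOException { return -1; }
      @Override public BytesRef getPayload() throws IOException { return null; }
    }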
diff --git a/lucene/core/src/java/org/apache/lucene/index/FilterLeafReader.java b/lucene/core/src/java/org/apache/lucene/index/FilterLeafReader.java
index 47422a9..a8996db 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FilterLeafReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FilterLeafReader.java
@@ -17,14 +17,14 @@ package org.apache.lucene.index;
* limitations under the License.
*/
-import java.io.IOException;
-import java.util.Iterator;
-
import org.apache.lucene.search.CachingWrapperFilter;
import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
+import java.io.IOException;
+import java.util.Iterator;
+
/** A FilterLeafReader contains another LeafReader, which it
* uses as its basic source of data, possibly transforming the data along the
* way or providing additional functionality. The class
@@ -215,26 +215,22 @@ public class FilterLeafReader extends LeafReader {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
- return in.docs(liveDocs, reuse, flags);
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
+ return in.postings(liveDocs, reuse, flags);
}
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- return in.docsAndPositions(liveDocs, reuse, flags);
- }
}
- /** Base class for filtering {@link DocsEnum} implementations. */
- public static class FilterDocsEnum extends DocsEnum {
+ /** Base class for filtering {@link PostingsEnum} implementations. */
+ public static class FilterDocsEnum extends PostingsEnum {
/** The underlying DocsEnum instance. */
- protected final DocsEnum in;
+ protected final PostingsEnum in;
/**
* Create a new FilterDocsEnum
* @param in the underlying DocsEnum instance.
*/
- public FilterDocsEnum(DocsEnum in) {
+ public FilterDocsEnum(PostingsEnum in) {
if (in == null) {
throw new NullPointerException("incoming DocsEnum cannot be null");
}
@@ -267,55 +263,18 @@ public class FilterLeafReader extends LeafReader {
}
@Override
- public long cost() {
- return in.cost();
- }
- }
-
- /** Base class for filtering {@link DocsAndPositionsEnum} implementations. */
- public static class FilterDocsAndPositionsEnum extends DocsAndPositionsEnum {
- /** The underlying DocsAndPositionsEnum instance. */
- protected final DocsAndPositionsEnum in;
-
- /**
- * Create a new FilterDocsAndPositionsEnum
- * @param in the underlying DocsAndPositionsEnum instance.
- */
- public FilterDocsAndPositionsEnum(DocsAndPositionsEnum in) {
- if (in == null) {
- throw new NullPointerException("incoming DocsAndPositionsEnum cannot be null");
- }
- this.in = in;
- }
-
- @Override
- public AttributeSource attributes() {
- return in.attributes();
- }
-
- @Override
- public int docID() {
- return in.docID();
- }
-
- @Override
- public int freq() throws IOException {
- return in.freq();
- }
-
- @Override
- public int nextDoc() throws IOException {
- return in.nextDoc();
+ public int nextPosition() throws IOException {
+ return in.nextPosition();
}
@Override
- public int advance(int target) throws IOException {
- return in.advance(target);
+ public int startPosition() throws IOException {
+ return in.startPosition();
}
@Override
- public int nextPosition() throws IOException {
- return in.nextPosition();
+ public int endPosition() throws IOException {
+ return in.endPosition();
}
@Override
@@ -332,7 +291,7 @@ public class FilterLeafReader extends LeafReader {
public BytesRef getPayload() throws IOException {
return in.getPayload();
}
-
+
@Override
public long cost() {
return in.cost();
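Since FilterDocsEnum now spans the whole postings surface, a decorator overrides only what it changes and inherits delegation for the rest. A sketch of a hypothetical counting subclass, not part of the patch; it assumes FilterDocsEnum keeps its delegating nextDoc(), which this hunk does not remove:

    import java.io.IOException;

    import org.apache.lucene.index.FilterLeafReader;
    import org.apache.lucene.index.PostingsEnum;

    class CountingPostingsEnum extends FilterLeafReader.FilterDocsEnum {
      long docsSeen; // number of docs iterated so far

      CountingPostingsEnum(PostingsEnum in) {
        super(in);
      }

      @Override
      public int nextDoc() throws IOException {
        int doc = super.nextDoc();
        if (doc != NO_MORE_DOCS) {
          docsSeen++;
        }
        return doc;
      }
    }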
diff --git a/lucene/core/src/java/org/apache/lucene/index/FilteredTermsEnum.java b/lucene/core/src/java/org/apache/lucene/index/FilteredTermsEnum.java
index b6bfcc4..85bde39 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FilteredTermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FilteredTermsEnum.java
@@ -179,13 +179,8 @@ public abstract class FilteredTermsEnum extends TermsEnum {
}
@Override
- public DocsEnum docs(Bits bits, DocsEnum reuse, int flags) throws IOException {
- return tenum.docs(bits, reuse, flags);
- }
-
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits bits, DocsAndPositionsEnum reuse, int flags) throws IOException {
- return tenum.docsAndPositions(bits, reuse, flags);
+ public PostingsEnum postings(Bits bits, PostingsEnum reuse, int flags) throws IOException {
+ return tenum.postings(bits, reuse, flags);
}
/** This enum does not support seeking!
diff --git a/lucene/core/src/java/org/apache/lucene/index/FreqProxFields.java b/lucene/core/src/java/org/apache/lucene/index/FreqProxFields.java
index fe5d31f..b721d32 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FreqProxFields.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FreqProxFields.java
@@ -24,7 +24,7 @@ import java.util.List;
import java.util.Map;
import org.apache.lucene.index.FreqProxTermsWriterPerField.FreqProxPostingsArray;
-import org.apache.lucene.util.AttributeSource; // javadocs
+import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
@@ -230,14 +230,41 @@ class FreqProxFields extends Fields {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) {
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) {
if (liveDocs != null) {
throw new IllegalArgumentException("liveDocs must be null");
}
+ if ((flags & PostingsEnum.FLAG_POSITIONS) >= PostingsEnum.FLAG_POSITIONS) {
+ FreqProxPostingsEnum posEnum;
+
+ if (!terms.hasProx) {
+ // Caller wants positions but we didn't index them;
+ // don't lie:
+ throw new IllegalArgumentException("did not index positions");
+ }
+
+ if (!terms.hasOffsets && (flags & PostingsEnum.FLAG_OFFSETS) == PostingsEnum.FLAG_OFFSETS) {
+ // Caller wants offsets but we didn't index them;
+ // don't lie:
+ throw new IllegalArgumentException("did not index offsets");
+ }
+
+ if (reuse instanceof FreqProxPostingsEnum) {
+ posEnum = (FreqProxPostingsEnum) reuse;
+ if (posEnum.postingsArray != postingsArray) {
+ posEnum = new FreqProxPostingsEnum(terms, postingsArray);
+ }
+ } else {
+ posEnum = new FreqProxPostingsEnum(terms, postingsArray);
+ }
+ posEnum.reset(sortedTermIDs[ord]);
+ return posEnum;
+ }
+
FreqProxDocsEnum docsEnum;
- if (!terms.hasFreq && (flags & DocsEnum.FLAG_FREQS) != 0) {
+ if (!terms.hasFreq && (flags & PostingsEnum.FLAG_FREQS) != 0) {
// Caller wants freqs but we didn't index them;
// don't lie:
throw new IllegalArgumentException("did not index freq");
@@ -255,37 +282,6 @@ class FreqProxFields extends Fields {
return docsEnum;
}
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) {
- if (liveDocs != null) {
- throw new IllegalArgumentException("liveDocs must be null");
- }
- FreqProxDocsAndPositionsEnum posEnum;
-
- if (!terms.hasProx) {
- // Caller wants positions but we didn't index them;
- // don't lie:
- throw new IllegalArgumentException("did not index positions");
- }
-
- if (!terms.hasOffsets && (flags & DocsAndPositionsEnum.FLAG_OFFSETS) != 0) {
- // Caller wants offsets but we didn't index them;
- // don't lie:
- throw new IllegalArgumentException("did not index offsets");
- }
-
- if (reuse instanceof FreqProxDocsAndPositionsEnum) {
- posEnum = (FreqProxDocsAndPositionsEnum) reuse;
- if (posEnum.postingsArray != postingsArray) {
- posEnum = new FreqProxDocsAndPositionsEnum(terms, postingsArray);
- }
- } else {
- posEnum = new FreqProxDocsAndPositionsEnum(terms, postingsArray);
- }
- posEnum.reset(sortedTermIDs[ord]);
- return posEnum;
- }
-
/**
* Expert: Returns the TermsEnums internal state to position the TermsEnum
* without re-seeking the term dictionary.
@@ -348,6 +344,12 @@ class FreqProxFields extends Fields {
}
@Override
+ public int nextPosition() throws IOException {
+ assert false : "Shouldn't be calling nextPosition on DocsEnum";
+ return -1;
+ }
+
+ @Override
public int nextDoc() throws IOException {
if (reader.eof()) {
if (ended) {
@@ -389,7 +391,7 @@ class FreqProxFields extends Fields {
}
}
- private static class FreqProxDocsAndPositionsEnum extends DocsAndPositionsEnum {
+ private static class FreqProxPostingsEnum extends PostingsEnum {
final FreqProxTermsWriterPerField terms;
final FreqProxPostingsArray postingsArray;
@@ -407,7 +409,7 @@ class FreqProxFields extends Fields {
boolean hasPayload;
BytesRefBuilder payload = new BytesRefBuilder();
- public FreqProxDocsAndPositionsEnum(FreqProxTermsWriterPerField terms, FreqProxPostingsArray postingsArray) {
+ public FreqProxPostingsEnum(FreqProxTermsWriterPerField terms, FreqProxPostingsArray postingsArray) {
this.terms = terms;
this.postingsArray = postingsArray;
this.readOffsets = terms.hasOffsets;
@@ -501,6 +503,16 @@ class FreqProxFields extends Fields {
}
@Override
+ public int startPosition() throws IOException {
+ return pos;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return pos;
+ }
+
+ @Override
public int startOffset() {
if (!readOffsets) {
throw new IllegalStateException("offsets were not indexed");
diff --git a/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriter.java b/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriter.java
index 8e98fbd..44d20d0 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriter.java
@@ -43,7 +43,7 @@ final class FreqProxTermsWriter extends TermsHash {
Collections.sort(deleteTerms);
String lastField = null;
TermsEnum termsEnum = null;
- DocsEnum docsEnum = null;
+ PostingsEnum postingsEnum = null;
for(Term deleteTerm : deleteTerms) {
if (deleteTerm.field().equals(lastField) == false) {
lastField = deleteTerm.field();
@@ -56,11 +56,11 @@ final class FreqProxTermsWriter extends TermsHash {
}
if (termsEnum != null && termsEnum.seekExact(deleteTerm.bytes())) {
- docsEnum = termsEnum.docs(null, docsEnum, 0);
+ postingsEnum = termsEnum.postings(null, postingsEnum, 0);
int delDocLimit = segDeletes.get(deleteTerm);
- assert delDocLimit < DocsEnum.NO_MORE_DOCS;
+ assert delDocLimit < PostingsEnum.NO_MORE_DOCS;
while (true) {
- int doc = docsEnum.nextDoc();
+ int doc = postingsEnum.nextDoc();
if (doc < delDocLimit) {
if (state.liveDocs == null) {
state.liveDocs = state.segmentInfo.getCodec().liveDocsFormat().newLiveDocs(state.segmentInfo.getDocCount());
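A sketch restating the delete-application loop above, not part of the patch: a flags value of 0 requests a docs-only enum, and every matching doc below the per-term limit is marked deleted. FixedBitSet stands in here for the live-docs structure that the real writer allocates lazily, so the bit polarity is illustrative:

    import java.io.IOException;

    import org.apache.lucene.index.PostingsEnum;
    import org.apache.lucene.index.TermsEnum;
    import org.apache.lucene.util.BytesRef;
    import org.apache.lucene.util.FixedBitSet;

    class ApplyDeletesSketch {
      static void applyDelete(TermsEnum termsEnum, BytesRef term, int delDocLimit,
                              FixedBitSet liveDocs) throws IOException {
        if (termsEnum.seekExact(term) == false) {
          return;
        }
        PostingsEnum postings = termsEnum.postings(null, null, 0);
        int doc;
        // Docs at or beyond delDocLimit arrived after the delete and survive.
        while ((doc = postings.nextDoc()) != PostingsEnum.NO_MORE_DOCS && doc < delDocLimit) {
          liveDocs.clear(doc);
        }
      }
    }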
diff --git a/lucene/core/src/java/org/apache/lucene/index/LeafReader.java b/lucene/core/src/java/org/apache/lucene/index/LeafReader.java
index 2f9c604..4b8902f 100644
--- a/lucene/core/src/java/org/apache/lucene/index/LeafReader.java
+++ b/lucene/core/src/java/org/apache/lucene/index/LeafReader.java
@@ -19,7 +19,6 @@ package org.apache.lucene.index;
import java.io.IOException;
-import org.apache.lucene.index.IndexReader.ReaderClosedListener;
import org.apache.lucene.util.Bits;
/** {@code LeafReader} is an abstract class, providing an interface for accessing an
@@ -205,38 +204,25 @@ public abstract class LeafReader extends IndexReader {
return fields().terms(field);
}
- /** Returns {@link DocsEnum} for the specified term.
+ /** Returns {@link PostingsEnum} for the specified term.
* This will return null if either the field or
* term does not exist.
- * @see TermsEnum#docs(Bits, DocsEnum) */
- public final DocsEnum termDocsEnum(Term term) throws IOException {
+ * @see TermsEnum#postings(Bits, PostingsEnum) */
+ public final PostingsEnum termDocsEnum(Term term, int flags) throws IOException {
assert term.field() != null;
assert term.bytes() != null;
final Terms terms = terms(term.field());
if (terms != null) {
final TermsEnum termsEnum = terms.iterator(null);
if (termsEnum.seekExact(term.bytes())) {
- return termsEnum.docs(getLiveDocs(), null);
+ return termsEnum.postings(getLiveDocs(), null, flags);
}
}
return null;
}
- /** Returns {@link DocsAndPositionsEnum} for the specified
- * term. This will return null if the
- * field or term does not exist or positions weren't indexed.
- * @see TermsEnum#docsAndPositions(Bits, DocsAndPositionsEnum) */
- public final DocsAndPositionsEnum termPositionsEnum(Term term) throws IOException {
- assert term.field() != null;
- assert term.bytes() != null;
- final Terms terms = terms(term.field());
- if (terms != null) {
- final TermsEnum termsEnum = terms.iterator(null);
- if (termsEnum.seekExact(term.bytes())) {
- return termsEnum.docsAndPositions(getLiveDocs(), null);
- }
- }
- return null;
+ public final PostingsEnum termDocsEnum(Term term) throws IOException {
+ return termDocsEnum(term, PostingsEnum.FLAG_FREQS);
}
/** Returns {@link NumericDocValues} for this field, or
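The retained one-argument termDocsEnum delegates with FLAG_FREQS, so freq() is safe to call on its result. A small usage sketch, not part of the patch; only the Lucene calls shown in this diff are assumed, and the remaining names are illustrative:

    import java.io.IOException;

    import org.apache.lucene.index.LeafReader;
    import org.apache.lucene.index.PostingsEnum;
    import org.apache.lucene.index.Term;

    class TermStatsSketch {
      /** Sums freq() over all live docs containing the term. */
      static long totalOccurrences(LeafReader reader, Term term) throws IOException {
        PostingsEnum postings = reader.termDocsEnum(term); // defaults to FLAG_FREQS
        long total = 0;
        if (postings != null) {
          while (postings.nextDoc() != PostingsEnum.NO_MORE_DOCS) {
            total += postings.freq();
          }
        }
        return total;
      }
    }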
diff --git a/lucene/core/src/java/org/apache/lucene/index/MappedMultiFields.java b/lucene/core/src/java/org/apache/lucene/index/MappedMultiFields.java
index fad0eed..85fc614 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MappedMultiFields.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MappedMultiFields.java
@@ -103,10 +103,23 @@ public class MappedMultiFields extends FilterFields {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
if (liveDocs != null) {
throw new IllegalArgumentException("liveDocs must be null");
}
+
+ MappingMultiPostingsEnum mappingDocsAndPositionsEnum;
+ if (reuse instanceof MappingMultiPostingsEnum) {
+ mappingDocsAndPositionsEnum = (MappingMultiPostingsEnum) reuse;
+ } else {
+ mappingDocsAndPositionsEnum = new MappingMultiPostingsEnum(mergeState);
+ }
+
+ MultiPostingsEnum docsAndPositionsEnum = (MultiPostingsEnum) in.postings(liveDocs, mappingDocsAndPositionsEnum.multiDocsAndPositionsEnum, flags);
+ mappingDocsAndPositionsEnum.reset(docsAndPositionsEnum);
+ return mappingDocsAndPositionsEnum;
+
+/*
MappingMultiDocsEnum mappingDocsEnum;
if (reuse instanceof MappingMultiDocsEnum) {
mappingDocsEnum = (MappingMultiDocsEnum) reuse;
@@ -116,24 +129,7 @@ public class MappedMultiFields extends FilterFields {
MultiDocsEnum docsEnum = (MultiDocsEnum) in.docs(liveDocs, mappingDocsEnum.multiDocsEnum, flags);
mappingDocsEnum.reset(docsEnum);
- return mappingDocsEnum;
- }
-
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- if (liveDocs != null) {
- throw new IllegalArgumentException("liveDocs must be null");
- }
- MappingMultiDocsAndPositionsEnum mappingDocsAndPositionsEnum;
- if (reuse instanceof MappingMultiDocsAndPositionsEnum) {
- mappingDocsAndPositionsEnum = (MappingMultiDocsAndPositionsEnum) reuse;
- } else {
- mappingDocsAndPositionsEnum = new MappingMultiDocsAndPositionsEnum(mergeState);
- }
-
- MultiDocsAndPositionsEnum docsAndPositionsEnum = (MultiDocsAndPositionsEnum) in.docsAndPositions(liveDocs, mappingDocsAndPositionsEnum.multiDocsAndPositionsEnum, flags);
- mappingDocsAndPositionsEnum.reset(docsAndPositionsEnum);
- return mappingDocsAndPositionsEnum;
+ return mappingDocsEnum;*/
}
}
}
diff --git a/lucene/core/src/java/org/apache/lucene/index/MappingMultiDocsAndPositionsEnum.java b/lucene/core/src/java/org/apache/lucene/index/MappingMultiDocsAndPositionsEnum.java
deleted file mode 100644
index 8fd316a..0000000
--- a/lucene/core/src/java/org/apache/lucene/index/MappingMultiDocsAndPositionsEnum.java
+++ /dev/null
@@ -1,141 +0,0 @@
-package org.apache.lucene.index;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.index.MultiDocsAndPositionsEnum.EnumWithSlice;
-import org.apache.lucene.util.BytesRef;
-
-import java.io.IOException;
-
-/**
- * Exposes flex API, merged from flex API of sub-segments,
- * remapping docIDs (this is used for segment merging).
- *
- * @lucene.experimental
- */
-
-final class MappingMultiDocsAndPositionsEnum extends DocsAndPositionsEnum {
- private MultiDocsAndPositionsEnum.EnumWithSlice[] subs;
- int numSubs;
- int upto;
- MergeState.DocMap currentMap;
- DocsAndPositionsEnum current;
- int currentBase;
- int doc = -1;
- private MergeState mergeState;
- MultiDocsAndPositionsEnum multiDocsAndPositionsEnum;
-
- /** Sole constructor. */
- public MappingMultiDocsAndPositionsEnum(MergeState mergeState) {
- this.mergeState = mergeState;
- }
-
- MappingMultiDocsAndPositionsEnum reset(MultiDocsAndPositionsEnum postingsEnum) {
- this.numSubs = postingsEnum.getNumSubs();
- this.subs = postingsEnum.getSubs();
- upto = -1;
- current = null;
- this.multiDocsAndPositionsEnum = postingsEnum;
- return this;
- }
-
- /** How many sub-readers we are merging.
- * @see #getSubs */
- public int getNumSubs() {
- return numSubs;
- }
-
- /** Returns sub-readers we are merging. */
- public EnumWithSlice[] getSubs() {
- return subs;
- }
-
- @Override
- public int freq() throws IOException {
- return current.freq();
- }
-
- @Override
- public int docID() {
- return doc;
- }
-
- @Override
- public int advance(int target) {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public int nextDoc() throws IOException {
- while(true) {
- if (current == null) {
- if (upto == numSubs-1) {
- return this.doc = NO_MORE_DOCS;
- } else {
- upto++;
- final int reader = subs[upto].slice.readerIndex;
- current = subs[upto].docsAndPositionsEnum;
- currentBase = mergeState.docBase[reader];
- currentMap = mergeState.docMaps[reader];
- }
- }
-
- int doc = current.nextDoc();
- if (doc != NO_MORE_DOCS) {
- // compact deletions
- doc = currentMap.get(doc);
- if (doc == -1) {
- continue;
- }
- return this.doc = currentBase + doc;
- } else {
- current = null;
- }
- }
- }
-
- @Override
- public int nextPosition() throws IOException {
- return current.nextPosition();
- }
-
- @Override
- public int startOffset() throws IOException {
- return current.startOffset();
- }
-
- @Override
- public int endOffset() throws IOException {
- return current.endOffset();
- }
-
- @Override
- public BytesRef getPayload() throws IOException {
- return current.getPayload();
- }
-
- @Override
- public long cost() {
- long cost = 0;
- for (EnumWithSlice enumWithSlice : subs) {
- cost += enumWithSlice.docsAndPositionsEnum.cost();
- }
- return cost;
- }
-}
-
diff --git a/lucene/core/src/java/org/apache/lucene/index/MappingMultiDocsEnum.java b/lucene/core/src/java/org/apache/lucene/index/MappingMultiDocsEnum.java
deleted file mode 100644
index 2aa9e5f..0000000
--- a/lucene/core/src/java/org/apache/lucene/index/MappingMultiDocsEnum.java
+++ /dev/null
@@ -1,121 +0,0 @@
-package org.apache.lucene.index;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.index.MultiDocsEnum.EnumWithSlice;
-
-import java.io.IOException;
-
-/**
- * Exposes flex API, merged from flex API of sub-segments,
- * remapping docIDs (this is used for segment merging).
- *
- * @lucene.experimental
- */
-
-final class MappingMultiDocsEnum extends DocsEnum {
- private MultiDocsEnum.EnumWithSlice[] subs;
- int numSubs;
- int upto;
- MergeState.DocMap currentMap;
- DocsEnum current;
- int currentBase;
- int doc = -1;
- private final MergeState mergeState;
- MultiDocsEnum multiDocsEnum;
-
- /** Sole constructor. */
- public MappingMultiDocsEnum(MergeState mergeState) {
- this.mergeState = mergeState;
- }
-
- MappingMultiDocsEnum reset(MultiDocsEnum docsEnum) {
- this.numSubs = docsEnum.getNumSubs();
- this.subs = docsEnum.getSubs();
- this.multiDocsEnum = docsEnum;
- upto = -1;
- current = null;
- return this;
- }
-
- /** How many sub-readers we are merging.
- * @see #getSubs */
- public int getNumSubs() {
- return numSubs;
- }
-
- /** Returns sub-readers we are merging. */
- public EnumWithSlice[] getSubs() {
- return subs;
- }
-
- @Override
- public int freq() throws IOException {
- return current.freq();
- }
-
- @Override
- public int docID() {
- return doc;
- }
-
- @Override
- public int advance(int target) {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public int nextDoc() throws IOException {
- while(true) {
- if (current == null) {
- if (upto == numSubs-1) {
- return this.doc = NO_MORE_DOCS;
- } else {
- upto++;
- final int reader = subs[upto].slice.readerIndex;
- current = subs[upto].docsEnum;
- currentBase = mergeState.docBase[reader];
- currentMap = mergeState.docMaps[reader];
- assert currentMap.maxDoc() == subs[upto].slice.length: "readerIndex=" + reader + " subs.len=" + subs.length + " len1=" + currentMap.maxDoc() + " vs " + subs[upto].slice.length;
- }
- }
-
- int doc = current.nextDoc();
- if (doc != NO_MORE_DOCS) {
- // compact deletions
- doc = currentMap.get(doc);
- if (doc == -1) {
- continue;
- }
- return this.doc = currentBase + doc;
- } else {
- current = null;
- }
- }
- }
-
- @Override
- public long cost() {
- long cost = 0;
- for (EnumWithSlice enumWithSlice : subs) {
- cost += enumWithSlice.docsEnum.cost();
- }
- return cost;
- }
-}
-
diff --git a/lucene/core/src/java/org/apache/lucene/index/MappingMultiPostingsEnum.java b/lucene/core/src/java/org/apache/lucene/index/MappingMultiPostingsEnum.java
new file mode 100644
index 0000000..7263136
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/index/MappingMultiPostingsEnum.java
@@ -0,0 +1,151 @@
+package org.apache.lucene.index;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.index.MultiPostingsEnum.EnumWithSlice;
+import org.apache.lucene.util.BytesRef;
+
+/**
+ * Exposes flex API, merged from flex API of sub-segments,
+ * remapping docIDs (this is used for segment merging).
+ *
+ * @lucene.experimental
+ */
+
+final class MappingMultiPostingsEnum extends PostingsEnum {
+ private MultiPostingsEnum.EnumWithSlice[] subs;
+ int numSubs;
+ int upto;
+ MergeState.DocMap currentMap;
+ PostingsEnum current;
+ int currentBase;
+ int doc = -1;
+ private MergeState mergeState;
+ MultiPostingsEnum multiDocsAndPositionsEnum;
+
+ /** Sole constructor. */
+ public MappingMultiPostingsEnum(MergeState mergeState) {
+ this.mergeState = mergeState;
+ }
+
+ MappingMultiPostingsEnum reset(MultiPostingsEnum postingsEnum) {
+ this.numSubs = postingsEnum.getNumSubs();
+ this.subs = postingsEnum.getSubs();
+ upto = -1;
+ current = null;
+ this.multiDocsAndPositionsEnum = postingsEnum;
+ return this;
+ }
+
+ /** How many sub-readers we are merging.
+ * @see #getSubs */
+ public int getNumSubs() {
+ return numSubs;
+ }
+
+ /** Returns sub-readers we are merging. */
+ public EnumWithSlice[] getSubs() {
+ return subs;
+ }
+
+ @Override
+ public int freq() throws IOException {
+ return current.freq();
+ }
+
+ @Override
+ public int docID() {
+ return doc;
+ }
+
+ @Override
+ public int advance(int target) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int nextDoc() throws IOException {
+ while(true) {
+ if (current == null) {
+ if (upto == numSubs-1) {
+ return this.doc = NO_MORE_DOCS;
+ } else {
+ upto++;
+ final int reader = subs[upto].slice.readerIndex;
+ current = subs[upto].docsAndPositionsEnum;
+ currentBase = mergeState.docBase[reader];
+ currentMap = mergeState.docMaps[reader];
+ }
+ }
+
+ int doc = current.nextDoc();
+ if (doc != NO_MORE_DOCS) {
+ // compact deletions
+ doc = currentMap.get(doc);
+ if (doc == -1) {
+ continue;
+ }
+ return this.doc = currentBase + doc;
+ } else {
+ current = null;
+ }
+ }
+ }
+
+ @Override
+ public int nextPosition() throws IOException {
+ return current.nextPosition();
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return current.startPosition();
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return current.endPosition();
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return current.startOffset();
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return current.endOffset();
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return current.getPayload();
+ }
+
+ @Override
+ public long cost() {
+ long cost = 0;
+ for (EnumWithSlice enumWithSlice : subs) {
+ cost += enumWithSlice.docsAndPositionsEnum.cost();
+ }
+ return cost;
+ }
+}
+
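A sketch restating the docID remapping inside nextDoc() above, not part of the patch: a sub-reader's doc survives the merge only if the MergeState.DocMap keeps it, and the merged ID is the sub-reader's docBase plus the compacted ID:

    import org.apache.lucene.index.MergeState;

    class DocRemapSketch {
      /** Returns the merged docID, or -1 if the sub-reader doc was deleted. */
      static int remap(MergeState.DocMap docMap, int docBase, int subDoc) {
        int mapped = docMap.get(subDoc); // -1 means the doc did not survive deletes
        return mapped == -1 ? -1 : docBase + mapped;
      }
    }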
diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiDocsAndPositionsEnum.java b/lucene/core/src/java/org/apache/lucene/index/MultiDocsAndPositionsEnum.java
deleted file mode 100644
index 33e2127..0000000
--- a/lucene/core/src/java/org/apache/lucene/index/MultiDocsAndPositionsEnum.java
+++ /dev/null
@@ -1,195 +0,0 @@
-package org.apache.lucene.index;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.util.BytesRef;
-
-import java.io.IOException;
-import java.util.Arrays;
-
-/**
- * Exposes flex API, merged from flex API of sub-segments.
- *
- * @lucene.experimental
- */
-
-public final class MultiDocsAndPositionsEnum extends DocsAndPositionsEnum {
- private final MultiTermsEnum parent;
- final DocsAndPositionsEnum[] subDocsAndPositionsEnum;
- private final EnumWithSlice[] subs;
- int numSubs;
- int upto;
- DocsAndPositionsEnum current;
- int currentBase;
- int doc = -1;
-
- /** Sole constructor. */
- public MultiDocsAndPositionsEnum(MultiTermsEnum parent, int subReaderCount) {
- this.parent = parent;
- subDocsAndPositionsEnum = new DocsAndPositionsEnum[subReaderCount];
- this.subs = new EnumWithSlice[subReaderCount];
- for (int i = 0; i < subs.length; i++) {
- subs[i] = new EnumWithSlice();
- }
- }
-
- /** Returns {@code true} if this instance can be reused by
- * the provided {@link MultiTermsEnum}. */
- public boolean canReuse(MultiTermsEnum parent) {
- return this.parent == parent;
- }
-
- /** Rre-use and reset this instance on the provided slices. */
- public MultiDocsAndPositionsEnum reset(final EnumWithSlice[] subs, final int numSubs) {
- this.numSubs = numSubs;
-    for(int i=0;i<numSubs;i++) {
-      this.subs[i].docsAndPositionsEnum = subs[i].docsAndPositionsEnum;
-      this.subs[i].slice = subs[i].slice;
-    }
-    upto = -1;
-    doc = -1;
-    current = null;
-    return this;
-  }
-
-  /** How many sub-readers we are merging.
-   *  @see #getSubs */
-  public int getNumSubs() {
-    return numSubs;
-  }
-
-  /** Returns sub-readers we are merging. */
-  public EnumWithSlice[] getSubs() {
-    return subs;
-  }
-
-  @Override
-  public int freq() throws IOException {
-    return current.freq();
-  }
-
-  @Override
-  public int docID() {
-    return doc;
-  }
-
-  @Override
-  public int advance(int target) throws IOException {
-    assert target > doc;
- while(true) {
- if (current != null) {
- final int doc;
- if (target < currentBase) {
- // target was in the previous slice but there was no matching doc after it
- doc = current.nextDoc();
- } else {
- doc = current.advance(target-currentBase);
- }
- if (doc == NO_MORE_DOCS) {
- current = null;
- } else {
- return this.doc = doc + currentBase;
- }
- } else if (upto == numSubs-1) {
- return this.doc = NO_MORE_DOCS;
- } else {
- upto++;
- current = subs[upto].docsAndPositionsEnum;
- currentBase = subs[upto].slice.start;
- }
- }
- }
-
- @Override
- public int nextDoc() throws IOException {
- while(true) {
- if (current == null) {
- if (upto == numSubs-1) {
- return this.doc = NO_MORE_DOCS;
- } else {
- upto++;
- current = subs[upto].docsAndPositionsEnum;
- currentBase = subs[upto].slice.start;
- }
- }
-
- final int doc = current.nextDoc();
- if (doc != NO_MORE_DOCS) {
- return this.doc = currentBase + doc;
- } else {
- current = null;
- }
- }
- }
-
- @Override
- public int nextPosition() throws IOException {
- return current.nextPosition();
- }
-
- @Override
- public int startOffset() throws IOException {
- return current.startOffset();
- }
-
- @Override
- public int endOffset() throws IOException {
- return current.endOffset();
- }
-
- @Override
- public BytesRef getPayload() throws IOException {
- return current.getPayload();
- }
-
- // TODO: implement bulk read more efficiently than super
- /** Holds a {@link DocsAndPositionsEnum} along with the
- * corresponding {@link ReaderSlice}. */
- public final static class EnumWithSlice {
- EnumWithSlice() {
- }
-
- /** {@link DocsAndPositionsEnum} for this sub-reader. */
- public DocsAndPositionsEnum docsAndPositionsEnum;
-
- /** {@link ReaderSlice} describing how this sub-reader
- * fits into the composite reader. */
- public ReaderSlice slice;
-
- @Override
- public String toString() {
- return slice.toString()+":"+docsAndPositionsEnum;
- }
- }
-
- @Override
- public long cost() {
- long cost = 0;
- for (int i = 0; i < numSubs; i++) {
- cost += subs[i].docsAndPositionsEnum.cost();
- }
- return cost;
- }
-
- @Override
- public String toString() {
- return "MultiDocsAndPositionsEnum(" + Arrays.toString(getSubs()) + ")";
- }
-}
-
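The advance() implementation removed above is the heart of the class: a composite-level target is rebased into the current slice by subtracting currentBase, and slice-local docs are rebased back by adding it. A standalone toy model of that bookkeeping (Sub is an illustrative stand-in, not a Lucene type):

    // Toy model of the slice rebasing in MultiDocsAndPositionsEnum; not the Lucene API.
    public class SliceMergeSketch {
      static final int NO_MORE_DOCS = Integer.MAX_VALUE;

      static class Sub {
        final int[] docs;  // segment-local doc IDs, ascending
        final int base;    // slice start within the composite reader
        int upto = -1;
        Sub(int base, int... docs) { this.base = base; this.docs = docs; }
        int nextDoc() { return ++upto < docs.length ? docs[upto] : NO_MORE_DOCS; }
      }

      public static void main(String[] args) {
        Sub[] subs = { new Sub(0, 0, 3), new Sub(5, 1, 2) };
        for (Sub current : subs) {                // plays the role of upto/current
          for (int d; (d = current.nextDoc()) != NO_MORE_DOCS; ) {
            System.out.println(d + current.base); // prints 0, 3, 6, 7
          }
        }
      }
    }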
diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiDocsEnum.java b/lucene/core/src/java/org/apache/lucene/index/MultiDocsEnum.java
index 082d266..3b461a5 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MultiDocsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MultiDocsEnum.java
@@ -21,20 +21,22 @@ package org.apache.lucene.index;
import java.io.IOException;
import java.util.Arrays;
+import org.apache.lucene.util.BytesRef;
+
/**
- * Exposes {@link DocsEnum}, merged from {@link DocsEnum}
+ * Exposes {@link PostingsEnum}, merged from {@link PostingsEnum}
* API of sub-segments.
*
* @lucene.experimental
*/
-public final class MultiDocsEnum extends DocsEnum {
+public final class MultiDocsEnum extends PostingsEnum {
private final MultiTermsEnum parent;
- final DocsEnum[] subDocsEnum;
+ final PostingsEnum[] subPostingsEnum;
private final EnumWithSlice[] subs;
int numSubs;
int upto;
- DocsEnum current;
+ PostingsEnum current;
int currentBase;
int doc = -1;
@@ -43,7 +45,7 @@ public final class MultiDocsEnum extends DocsEnum {
* @param subReaderCount How many sub-readers are being merged. */
public MultiDocsEnum(MultiTermsEnum parent, int subReaderCount) {
this.parent = parent;
- subDocsEnum = new DocsEnum[subReaderCount];
+ subPostingsEnum = new PostingsEnum[subReaderCount];
this.subs = new EnumWithSlice[subReaderCount];
for (int i = 0; i < subs.length; i++) {
subs[i] = new EnumWithSlice();
@@ -54,7 +56,7 @@ public final class MultiDocsEnum extends DocsEnum {
this.numSubs = numSubs;
     for(int i=0;i<numSubs;i++) {
-      this.subs[i].docsEnum = subs[i].docsEnum;
+      this.subs[i].postingsEnum = subs[i].postingsEnum;
       this.subs[i].slice = subs[i].slice;
     }
diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiPostingsEnum.java b/lucene/core/src/java/org/apache/lucene/index/MultiPostingsEnum.java
new file mode 100644
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/index/MultiPostingsEnum.java
+package org.apache.lucene.index;
+
+import java.io.IOException;
+
+import org.apache.lucene.util.BytesRef;
+
+/**
+ * Exposes {@link PostingsEnum}, merged from {@link PostingsEnum}
+ * API of sub-segments.
+ *
+ * @lucene.experimental
+ */
+public final class MultiPostingsEnum extends PostingsEnum {
+  private final MultiTermsEnum parent;
+  final PostingsEnum[] subPostingsEnum;
+  private final EnumWithSlice[] subs;
+  int numSubs;
+  int upto;
+  PostingsEnum current;
+  int currentBase;
+  int doc = -1;
+
+  /** Sole constructor. */
+  public MultiPostingsEnum(MultiTermsEnum parent, int subReaderCount) {
+    this.parent = parent;
+    subPostingsEnum = new PostingsEnum[subReaderCount];
+    this.subs = new EnumWithSlice[subReaderCount];
+    for (int i = 0; i < subs.length; i++) {
+      subs[i] = new EnumWithSlice();
+    }
+  }
+
+  /** Returns {@code true} if this instance can be reused by
+   *  the provided {@link MultiTermsEnum}. */
+  public boolean canReuse(MultiTermsEnum parent) {
+    return this.parent == parent;
+  }
+
+  /** Re-use and reset this instance on the provided slices. */
+  public MultiPostingsEnum reset(final EnumWithSlice[] subs, final int numSubs) {
+    this.numSubs = numSubs;
+    for(int i=0;i<numSubs;i++) {
+      this.subs[i].docsAndPositionsEnum = subs[i].docsAndPositionsEnum;
+      this.subs[i].slice = subs[i].slice;
+    }
+    upto = -1;
+    doc = -1;
+    current = null;
+    return this;
+  }
+
+  /** How many sub-readers we are merging.
+   *  @see #getSubs */
+  public int getNumSubs() {
+    return numSubs;
+  }
+
+  /** Returns sub-readers we are merging. */
+  public EnumWithSlice[] getSubs() {
+    return subs;
+  }
+
+  @Override
+  public int freq() throws IOException {
+    return current.freq();
+  }
+
+  @Override
+  public int docID() {
+    return doc;
+  }
+
+  @Override
+  public int advance(int target) throws IOException {
+    assert target > doc;
+ while(true) {
+ if (current != null) {
+ final int doc;
+ if (target < currentBase) {
+ // target was in the previous slice but there was no matching doc after it
+ doc = current.nextDoc();
+ } else {
+ doc = current.advance(target-currentBase);
+ }
+ if (doc == NO_MORE_DOCS) {
+ current = null;
+ } else {
+ return this.doc = doc + currentBase;
+ }
+ } else if (upto == numSubs-1) {
+ return this.doc = NO_MORE_DOCS;
+ } else {
+ upto++;
+ current = subs[upto].docsAndPositionsEnum;
+ currentBase = subs[upto].slice.start;
+ }
+ }
+ }
+
+ @Override
+ public int nextDoc() throws IOException {
+ while(true) {
+ if (current == null) {
+ if (upto == numSubs-1) {
+ return this.doc = NO_MORE_DOCS;
+ } else {
+ upto++;
+ current = subs[upto].docsAndPositionsEnum;
+ currentBase = subs[upto].slice.start;
+ }
+ }
+
+ final int doc = current.nextDoc();
+ if (doc != NO_MORE_DOCS) {
+ return this.doc = currentBase + doc;
+ } else {
+ current = null;
+ }
+ }
+ }
+
+ @Override
+ public int nextPosition() throws IOException {
+ return current.nextPosition();
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return current.startPosition();
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return current.endPosition();
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return current.startOffset();
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return current.endOffset();
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return current.getPayload();
+ }
+
+ // TODO: implement bulk read more efficiently than super
+ /** Holds a {@link PostingsEnum} along with the
+ * corresponding {@link ReaderSlice}. */
+ public final static class EnumWithSlice {
+ EnumWithSlice() {
+ }
+
+ /** {@link PostingsEnum} for this sub-reader. */
+ public PostingsEnum docsAndPositionsEnum;
+
+ /** {@link ReaderSlice} describing how this sub-reader
+ * fits into the composite reader. */
+ public ReaderSlice slice;
+
+ @Override
+ public String toString() {
+ return slice.toString()+":"+docsAndPositionsEnum;
+ }
+ }
+
+ @Override
+ public long cost() {
+ long cost = 0;
+ for (int i = 0; i < numSubs; i++) {
+ cost += subs[i].docsAndPositionsEnum.cost();
+ }
+ return cost;
+ }
+
+ @Override
+ public String toString() {
+ return "MultiDocsAndPositionsEnum(" + Arrays.toString(getSubs()) + ")";
+ }
+}
+
diff --git a/lucene/core/src/java/org/apache/lucene/index/MultiTermsEnum.java b/lucene/core/src/java/org/apache/lucene/index/MultiTermsEnum.java
index 6ae2c7c..913ca3a 100644
--- a/lucene/core/src/java/org/apache/lucene/index/MultiTermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/index/MultiTermsEnum.java
@@ -17,14 +17,14 @@ package org.apache.lucene.index;
* limitations under the License.
*/
-import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.PriorityQueue;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.Bits;
-
import java.io.IOException;
import java.util.Arrays;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.PriorityQueue;
+
/**
* Exposes {@link TermsEnum} API, merged from {@link TermsEnum} API of sub-segments.
* This does a merge sort, by term text, of the sub-readers.
@@ -37,8 +37,7 @@ public final class MultiTermsEnum extends TermsEnum {
private final TermsEnumWithSlice[] subs; // all of our subs (one per sub-reader)
private final TermsEnumWithSlice[] currentSubs; // current subs that have at least one term for this field
private final TermsEnumWithSlice[] top;
- private final MultiDocsEnum.EnumWithSlice[] subDocs;
- private final MultiDocsAndPositionsEnum.EnumWithSlice[] subDocsAndPositions;
+ private final MultiPostingsEnum.EnumWithSlice[] subDocs;
private BytesRef lastSeek;
private boolean lastSeekExact;
@@ -77,14 +76,11 @@ public final class MultiTermsEnum extends TermsEnum {
queue = new TermMergeQueue(slices.length);
top = new TermsEnumWithSlice[slices.length];
subs = new TermsEnumWithSlice[slices.length];
- subDocs = new MultiDocsEnum.EnumWithSlice[slices.length];
- subDocsAndPositions = new MultiDocsAndPositionsEnum.EnumWithSlice[slices.length];
+ subDocs = new MultiPostingsEnum.EnumWithSlice[slices.length];
     for(int i=0;i<slices.length;i++) {
       subs[i] = new TermsEnumWithSlice(i, slices[i]);
     }
     currentSubs = new TermsEnumWithSlice[slices.length];
   }
diff --git a/lucene/core/src/java/org/apache/lucene/index/PostingsEnum.java b/lucene/core/src/java/org/apache/lucene/index/PostingsEnum.java
new file mode 100644
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/index/PostingsEnum.java
+package org.apache.lucene.index;
+
+import java.io.IOException;
+
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.util.AttributeSource;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+
+/**
+ * Iterates through the postings (documents, term frequencies, positions,
+ * offsets and payloads) for a term.
+ *
+ * @lucene.experimental
+ */
+public abstract class PostingsEnum extends DocIdSetIterator {
+
+  /** Flag to pass to {@link TermsEnum#postings(Bits, PostingsEnum, int)} if you don't
+   *  require per-document postings in the returned enum. */
+  public static final int FLAG_NONE = 0x0;
+
+  /** Flag to pass to {@link TermsEnum#postings(Bits, PostingsEnum, int)}
+   *  if you require term frequencies in the returned enum. */
+  public static final int FLAG_FREQS = 0x1;
+
+  /** Flag to pass to {@link TermsEnum#postings(Bits, PostingsEnum, int)}
+   *  if you require term positions in the returned enum. */
+  public static final int FLAG_POSITIONS = 0x3;
+
+  /** Flag to pass to {@link TermsEnum#postings(Bits, PostingsEnum, int)}
+   *  if you require offsets in the returned enum. */
+  public static final int FLAG_OFFSETS = 0x7;
+
+  /** Flag to pass to {@link TermsEnum#postings(Bits, PostingsEnum, int)}
+   *  if you require payloads in the returned enum. */
+  public static final int FLAG_PAYLOADS = 0xB;
+
+  /** Flag to pass to {@link TermsEnum#postings(Bits, PostingsEnum, int)}
+   *  to get positions, payloads and offsets in the returned enum. */
+  public static final int FLAG_ALL = FLAG_POSITIONS | FLAG_OFFSETS | FLAG_PAYLOADS;
+
+  /** Returns true if the given flags require positions to be indexed. */
+  public static boolean requiresPositions(int flags) {
+    return ((flags & FLAG_POSITIONS) >= FLAG_POSITIONS);
+ }
+
+ private AttributeSource atts = null;
+
+ /** Sole constructor. (For invocation by subclass
+ * constructors, typically implicit.) */
+ protected PostingsEnum() {
+ }
+
+ /**
+ * Returns term frequency in the current document, or 1 if the field was
+ * indexed with {@link IndexOptions#DOCS}. Do not call this before
+ * {@link #nextDoc} is first called, nor after {@link #nextDoc} returns
+ * {@link DocIdSetIterator#NO_MORE_DOCS}.
+ *
+ *
+ * NOTE: if the {@link PostingsEnum} was obtained with {@link #FLAG_NONE},
+ * the result of this method is undefined.
+ */
+ public abstract int freq() throws IOException;
+
+ /** Returns the related attributes. */
+ public AttributeSource attributes() {
+ if (atts == null) atts = new AttributeSource();
+ return atts;
+ }
+
+ /**
+ * Returns the next position. If there are no more
+ * positions, or the iterator does not support positions,
+ * this will return DocsEnum.NO_MORE_POSITIONS */
+ public abstract int nextPosition() throws IOException;
+
+ /** Returns current starting position, or NO_MORE_POSITIONS if not supported */
+ public abstract int startPosition() throws IOException;
+
+ /** Returns current ending position, or NO_MORE_POSITIONS if not supported */
+ public abstract int endPosition() throws IOException;
+
+ /** Returns start offset for the current position, or -1
+ * if offsets were not indexed. */
+ public abstract int startOffset() throws IOException;
+
+ /** Returns end offset for the current position, or -1 if
+ * offsets were not indexed. */
+ public abstract int endOffset() throws IOException;
+
+ /** Returns the payload at this position, or null if no
+ * payload was indexed. You should not modify anything
+ * (neither members of the returned BytesRef nor bytes
+ * in the byte[]). */
+ public abstract BytesRef getPayload() throws IOException;
+
+}
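For orientation, a consumption sketch of the class just added, assuming a TermsEnum already positioned on a term; postings() and FLAG_ALL are the signatures this patch introduces:

    import java.io.IOException;

    import org.apache.lucene.index.PostingsEnum;
    import org.apache.lucene.index.TermsEnum;
    import org.apache.lucene.search.DocIdSetIterator;
    import org.apache.lucene.util.BytesRef;

    class PostingsWalkSketch {
      // Walks every doc, position and payload for the term the enum is on.
      static void walk(TermsEnum termsEnum) throws IOException {
        PostingsEnum postings = termsEnum.postings(null, null, PostingsEnum.FLAG_ALL);
        if (postings == null) {
          return; // per the new contract: positions were not indexed
        }
        while (postings.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
          int freq = postings.freq();                 // defined, since FLAG_ALL implies freqs
          for (int i = 0; i < freq; i++) {
            int pos = postings.nextPosition();        // NO_MORE_POSITIONS if unsupported
            BytesRef payload = postings.getPayload(); // null if payloads were not indexed
          }
        }
      }
    }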
diff --git a/lucene/core/src/java/org/apache/lucene/index/SortedDocValuesTermsEnum.java b/lucene/core/src/java/org/apache/lucene/index/SortedDocValuesTermsEnum.java
index 16427cc..224104c 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SortedDocValuesTermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SortedDocValuesTermsEnum.java
@@ -109,12 +109,7 @@ class SortedDocValuesTermsEnum extends TermsEnum {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
throw new UnsupportedOperationException();
}
diff --git a/lucene/core/src/java/org/apache/lucene/index/SortedSetDocValuesTermsEnum.java b/lucene/core/src/java/org/apache/lucene/index/SortedSetDocValuesTermsEnum.java
index 64dba95..6298774 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SortedSetDocValuesTermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SortedSetDocValuesTermsEnum.java
@@ -17,12 +17,12 @@ package org.apache.lucene.index;
* limitations under the License.
*/
-import java.io.IOException;
-
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
+import java.io.IOException;
+
/** Implements a {@link TermsEnum} wrapping a provided
* {@link SortedSetDocValues}. */
@@ -109,12 +109,7 @@ class SortedSetDocValuesTermsEnum extends TermsEnum {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
throw new UnsupportedOperationException();
}
diff --git a/lucene/core/src/java/org/apache/lucene/index/TermContext.java b/lucene/core/src/java/org/apache/lucene/index/TermContext.java
index ada4fc1..e44f0e6 100644
--- a/lucene/core/src/java/org/apache/lucene/index/TermContext.java
+++ b/lucene/core/src/java/org/apache/lucene/index/TermContext.java
@@ -17,11 +17,11 @@ package org.apache.lucene.index;
* limitations under the License.
*/
+import org.apache.lucene.util.BytesRef;
+
import java.io.IOException;
import java.util.Arrays;
-import org.apache.lucene.util.BytesRef;
-
/**
* Maintains a {@link IndexReader} {@link TermState} view over
* {@link IndexReader} instances containing a single term. The
diff --git a/lucene/core/src/java/org/apache/lucene/index/Terms.java b/lucene/core/src/java/org/apache/lucene/index/Terms.java
index a3109af..955197c 100644
--- a/lucene/core/src/java/org/apache/lucene/index/Terms.java
+++ b/lucene/core/src/java/org/apache/lucene/index/Terms.java
@@ -111,7 +111,7 @@ public abstract class Terms {
public abstract int getDocCount() throws IOException;
/** Returns true if documents in this field store
- * per-document term frequency ({@link DocsEnum#freq}). */
+ * per-document term frequency ({@link PostingsEnum#freq}). */
public abstract boolean hasFreqs();
/** Returns true if documents in this field store offsets. */
diff --git a/lucene/core/src/java/org/apache/lucene/index/TermsEnum.java b/lucene/core/src/java/org/apache/lucene/index/TermsEnum.java
index 895018b..8dfc1e2 100644
--- a/lucene/core/src/java/org/apache/lucene/index/TermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/index/TermsEnum.java
@@ -27,9 +27,9 @@ import org.apache.lucene.util.BytesRefIterator;
/** Iterator to seek ({@link #seekCeil(BytesRef)}, {@link
* #seekExact(BytesRef)}) or step through ({@link
* #next} terms to obtain frequency information ({@link
- * #docFreq}), {@link DocsEnum} or {@link
- * DocsAndPositionsEnum} for the current term ({@link
- * #docs}.
+ * #docFreq}) or {@link PostingsEnum} for the
+ * current term ({@link #postings}).
*
*
 * <p>Term enumerations are always ordered by
* BytesRef.compareTo, which is Unicode sort
@@ -138,57 +138,30 @@ public abstract class TermsEnum implements BytesRefIterator {
* deleted documents into account. */
public abstract long totalTermFreq() throws IOException;
- /** Get {@link DocsEnum} for the current term. Do not
+ /** Get {@link PostingsEnum} for the current term. Do not
* call this when the enum is unpositioned. This method
* will not return null.
*
* @param liveDocs unset bits are documents that should not
* be returned
- * @param reuse pass a prior DocsEnum for possible reuse */
- public final DocsEnum docs(Bits liveDocs, DocsEnum reuse) throws IOException {
- return docs(liveDocs, reuse, DocsEnum.FLAG_FREQS);
+ * @param reuse pass a prior PostingsEnum for possible reuse */
+ public final PostingsEnum postings(Bits liveDocs, PostingsEnum reuse) throws IOException {
+ return postings(liveDocs, reuse, PostingsEnum.FLAG_FREQS);
}
- /** Get {@link DocsEnum} for the current term, with
- * control over whether freqs are required. Do not
- * call this when the enum is unpositioned. This method
- * will not return null.
+ /** Get {@link PostingsEnum} for the current term, with
+ * control over whether freqs, positions, offsets or payloads
+ * are required. Do not call this when the enum is
+ * unpositioned. This method may return null if the postings
+ * information required is not available from the index.
*
* @param liveDocs unset bits are documents that should not
* be returned
- * @param reuse pass a prior DocsEnum for possible reuse
+ * @param reuse pass a prior PostingsEnum for possible reuse
* @param flags specifies which optional per-document values
- * you require; see {@link DocsEnum#FLAG_FREQS}
- * @see #docs(Bits, DocsEnum, int) */
- public abstract DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException;
-
- /** Get {@link DocsAndPositionsEnum} for the current term.
- * Do not call this when the enum is unpositioned. This
- * method will return null if positions were not
- * indexed.
- *
- * @param liveDocs unset bits are documents that should not
- * be returned
- * @param reuse pass a prior DocsAndPositionsEnum for possible reuse
- * @see #docsAndPositions(Bits, DocsAndPositionsEnum, int) */
- public final DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse) throws IOException {
- return docsAndPositions(liveDocs, reuse, DocsAndPositionsEnum.FLAG_OFFSETS | DocsAndPositionsEnum.FLAG_PAYLOADS);
- }
-
- /** Get {@link DocsAndPositionsEnum} for the current term,
- * with control over whether offsets and payloads are
- * required. Some codecs may be able to optimize their
- * implementation when offsets and/or payloads are not required.
- * Do not call this when the enum is unpositioned. This
- * will return null if positions were not indexed.
-
- * @param liveDocs unset bits are documents that should not
- * be returned
- * @param reuse pass a prior DocsAndPositionsEnum for possible reuse
- * @param flags specifies which optional per-position values you
- * require; see {@link DocsAndPositionsEnum#FLAG_OFFSETS} and
- * {@link DocsAndPositionsEnum#FLAG_PAYLOADS}. */
- public abstract DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException;
+ * you require; see {@link PostingsEnum#FLAG_FREQS}
+ * @see #postings(Bits, PostingsEnum, int) */
+ public abstract PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException;
/**
* Expert: Returns the TermsEnums internal state to position the TermsEnum
@@ -245,12 +218,7 @@ public abstract class TermsEnum implements BytesRefIterator {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) {
- throw new IllegalStateException("this method should never be called");
- }
-
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) {
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) {
throw new IllegalStateException("this method should never be called");
}
@@ -273,5 +241,6 @@ public abstract class TermsEnum implements BytesRefIterator {
public void seekExact(BytesRef term, TermState state) {
throw new IllegalStateException("this method should never be called");
}
+
};
}
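Caller-side, the migration is mechanical. A hedged before/after sketch (liveDocs may be null; the wrapper class is illustrative):

    import java.io.IOException;

    import org.apache.lucene.index.PostingsEnum;
    import org.apache.lucene.index.TermsEnum;
    import org.apache.lucene.util.Bits;

    class PostingsMigrationSketch {
      // Before: termsEnum.docs(liveDocs, null) for docs+freqs, and
      //         termsEnum.docsAndPositions(liveDocs, null) for positions.
      // After:  a single accessor; the flags select the detail level.
      static PostingsEnum freqs(TermsEnum te, Bits liveDocs) throws IOException {
        return te.postings(liveDocs, null, PostingsEnum.FLAG_FREQS); // old docs() default
      }
      static PostingsEnum positions(TermsEnum te, Bits liveDocs) throws IOException {
        // May be null now: the field might not have positions indexed.
        return te.postings(liveDocs, null, PostingsEnum.FLAG_ALL);
      }
    }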
diff --git a/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java b/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java
index bef19ca..06b3832 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java
@@ -17,6 +17,7 @@ package org.apache.lucene.search;
* limitations under the License.
*/
+import org.apache.lucene.index.PostingsEnum;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
@@ -242,7 +243,7 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
      for (Iterator<Weight> wIter = weights.iterator(); wIter.hasNext();) {
Weight w = wIter.next();
BooleanClause c = cIter.next();
- if (w.scorer(context, context.reader().getLiveDocs()) == null) {
+ if (w.scorer(context, PostingsEnum.FLAG_FREQS, context.reader().getLiveDocs()) == null) {
if (c.isRequired()) {
fail = true;
Explanation r = new Explanation(0.0f, "no match on required clause (" + c.getQuery().toString() + ")");
@@ -305,19 +306,19 @@ public class BooleanQuery extends Query implements Iterable {
}
@Override
- public BulkScorer bulkScorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public BulkScorer bulkScorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
if (minNrShouldMatch > 1) {
// TODO: (LUCENE-4872) in some cases BooleanScorer may be faster for minNrShouldMatch
// but the same is even true of pure conjunctions...
- return super.bulkScorer(context, acceptDocs);
+ return super.bulkScorer(context, flags, acceptDocs);
}
      List<BulkScorer> optional = new ArrayList<BulkScorer>();
      Iterator<BooleanClause> cIter = clauses.iterator();
for (Weight w : weights) {
BooleanClause c = cIter.next();
- BulkScorer subScorer = w.bulkScorer(context, acceptDocs);
+ BulkScorer subScorer = w.bulkScorer(context, flags, acceptDocs);
if (subScorer == null) {
if (c.isRequired()) {
return null;
@@ -326,10 +327,10 @@ public class BooleanQuery extends Query implements Iterable {
// TODO: there are some cases where BooleanScorer
// would handle conjunctions faster than
// BooleanScorer2...
- return super.bulkScorer(context, acceptDocs);
+ return super.bulkScorer(context, flags, acceptDocs);
} else if (c.isProhibited()) {
// TODO: there are some cases where BooleanScorer could do this faster
- return super.bulkScorer(context, acceptDocs);
+ return super.bulkScorer(context, flags, acceptDocs);
} else {
optional.add(subScorer);
}
@@ -343,7 +344,7 @@ public class BooleanQuery extends Query implements Iterable {
}
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs)
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs)
throws IOException {
// initially the user provided value,
// but if minNrShouldMatch == optional.size(),
@@ -356,7 +357,7 @@ public class BooleanQuery extends Query implements Iterable {
      Iterator<BooleanClause> cIter = clauses.iterator();
for (Weight w : weights) {
BooleanClause c = cIter.next();
- Scorer subScorer = w.scorer(context, acceptDocs);
+ Scorer subScorer = w.scorer(context, flags, acceptDocs);
if (subScorer == null) {
if (c.isRequired()) {
return null;
@@ -433,6 +434,15 @@ public class BooleanQuery extends Query implements Iterable {
}
}
}
+
+ @Override
+ public String toString() {
+      StringBuilder sb = new StringBuilder("BooleanWeight[");
+ for (Weight weight : weights) {
+ sb.append(weight.toString()).append(",");
+ }
+ return sb.append("]").toString();
+ }
  private Scorer req(List<Scorer> required, boolean disableCoord) {
if (required.size() == 1) {
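On the caller side of these Weight changes, every scorer(...) and bulkScorer(...) call now declares which postings features it needs; a minimal sketch mirroring the explain() call above:

    import java.io.IOException;

    import org.apache.lucene.index.LeafReaderContext;
    import org.apache.lucene.index.PostingsEnum;
    import org.apache.lucene.search.Scorer;
    import org.apache.lucene.search.Weight;

    class ScorerAccessSketch {
      // Plain match/score checks only need frequencies, hence FLAG_FREQS.
      static boolean matches(Weight weight, LeafReaderContext context, int doc) throws IOException {
        Scorer s = weight.scorer(context, PostingsEnum.FLAG_FREQS, context.reader().getLiveDocs());
        return s != null && s.advance(doc) == doc;
      }
    }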
diff --git a/lucene/core/src/java/org/apache/lucene/search/BooleanTopLevelScorers.java b/lucene/core/src/java/org/apache/lucene/search/BooleanTopLevelScorers.java
index 2c49ec7..721ade2 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BooleanTopLevelScorers.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BooleanTopLevelScorers.java
@@ -21,8 +21,6 @@ import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
-import org.apache.lucene.search.Scorer.ChildScorer;
-
/** Internal document-at-a-time scorers used to deal with stupid coord() computation */
class BooleanTopLevelScorers {
@@ -61,7 +59,7 @@ class BooleanTopLevelScorers {
private final Scorer req;
private final Scorer opt;
- CoordinatingConjunctionScorer(Weight weight, float coords[], Scorer req, int reqCount, Scorer opt) {
+ CoordinatingConjunctionScorer(Weight weight, float coords[], Scorer req, int reqCount, Scorer opt) throws IOException {
super(weight, new Scorer[] { req, opt });
this.coords = coords;
this.req = req;
diff --git a/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java b/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java
index d50bec3..b912b8e 100644
--- a/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/CachingCollector.java
@@ -17,15 +17,16 @@ package org.apache.lucene.search;
* limitations under the License.
*/
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.util.ArrayUtil;
-import org.apache.lucene.util.RamUsageEstimator;
-
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.RamUsageEstimator;
+
/**
* Caches all docs, and optionally also scores, coming from
* a search, and is then able to replay them to another
@@ -74,10 +75,41 @@ public abstract class CachingCollector extends FilterCollector {
public final int freq() { throw new UnsupportedOperationException(); }
@Override
+ public int nextPosition() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
public final int nextDoc() { throw new UnsupportedOperationException(); }
@Override
public long cost() { return 1; }
+
}
private static class NoScoreCachingCollector extends CachingCollector {
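Usage of CachingCollector is unchanged by this patch; for context, the cache-then-replay pattern it exists for (the 64MB budget and names are illustrative):

    import java.io.IOException;

    import org.apache.lucene.search.CachingCollector;
    import org.apache.lucene.search.Collector;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;

    class CacheAndReplaySketch {
      static void run(IndexSearcher searcher, Query query,
                      Collector first, Collector second) throws IOException {
        // First pass: collect into `first` while caching docs and scores.
        CachingCollector cache = CachingCollector.create(first, true, 64.0);
        searcher.search(query, cache);
        // Second pass: replay the cached hits without re-executing the query.
        if (cache.isCached()) {
          cache.replay(second);
        }
      }
    }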
diff --git a/lucene/core/src/java/org/apache/lucene/search/Collector.java b/lucene/core/src/java/org/apache/lucene/search/Collector.java
index 0ac853f..448c08c 100644
--- a/lucene/core/src/java/org/apache/lucene/search/Collector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/Collector.java
@@ -72,5 +72,5 @@ public interface Collector {
* next atomic reader context
*/
LeafCollector getLeafCollector(LeafReaderContext context) throws IOException;
-
+
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java b/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java
index 3e81187..4b6dabc 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java
@@ -23,18 +23,21 @@ import java.util.Collection;
import java.util.Comparator;
import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
/** Scorer for conjunctions, sets of queries, all of which are required. */
class ConjunctionScorer extends Scorer {
+
protected int lastDoc = -1;
protected final DocsAndFreqs[] docsAndFreqs;
private final DocsAndFreqs lead;
private final float coord;
+ private final Scorer[] scorers; // to preserve order for positional queries
ConjunctionScorer(Weight weight, Scorer[] scorers) {
this(weight, scorers, 1f);
}
-
+
ConjunctionScorer(Weight weight, Scorer[] scorers, float coord) {
super(weight);
this.coord = coord;
@@ -52,6 +55,7 @@ class ConjunctionScorer extends Scorer {
});
lead = docsAndFreqs[0]; // least frequent DocsEnum leads the intersection
+ this.scorers = scorers;
}
private int doNext(int doc) throws IOException {
@@ -109,22 +113,52 @@ class ConjunctionScorer extends Scorer {
}
return sum * coord;
}
-
+
@Override
public int freq() {
return docsAndFreqs.length;
}
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public long cost() {
return lead.scorer.cost();
}
@Override
  public Collection<ChildScorer> getChildren() {
-    ArrayList<ChildScorer> children = new ArrayList<>(docsAndFreqs.length);
-    for (DocsAndFreqs docs : docsAndFreqs) {
-      children.add(new ChildScorer(docs.scorer, "MUST"));
+    ArrayList<ChildScorer> children = new ArrayList<>(scorers.length);
+ for (Scorer scorer : scorers) {
+ children.add(new ChildScorer(scorer, "MUST"));
}
return children;
}
@@ -133,7 +167,7 @@ class ConjunctionScorer extends Scorer {
final long cost;
final Scorer scorer;
int doc = -1;
-
+
DocsAndFreqs(Scorer scorer) {
this.scorer = scorer;
this.cost = scorer.cost();
diff --git a/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java b/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java
index e6c7a03..52a1052 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java
@@ -17,17 +17,19 @@ package org.apache.lucene.search;
* limitations under the License.
*/
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.ToStringUtils;
-
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Set;
+import org.apache.lucene.index.PostingsEnum;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.ToStringUtils;
+
/**
* A query that wraps another query or a filter and simply returns a constant score equal to the
* query boost for every document that matches the filter or query.
@@ -134,14 +136,13 @@ public class ConstantScoreQuery extends Query {
}
@Override
- public BulkScorer bulkScorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
- final DocIdSetIterator disi;
+ public BulkScorer bulkScorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
if (filter != null) {
assert query == null;
- return super.bulkScorer(context, acceptDocs);
+ return super.bulkScorer(context, flags, acceptDocs);
} else {
assert query != null && innerWeight != null;
- BulkScorer bulkScorer = innerWeight.bulkScorer(context, acceptDocs);
+ BulkScorer bulkScorer = innerWeight.bulkScorer(context, flags, acceptDocs);
if (bulkScorer == null) {
return null;
}
@@ -150,29 +151,32 @@ public class ConstantScoreQuery extends Query {
}
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
- final DocIdSetIterator disi;
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
if (filter != null) {
assert query == null;
final DocIdSet dis = filter.getDocIdSet(context, acceptDocs);
if (dis == null) {
return null;
}
- disi = dis.iterator();
+ final DocIdSetIterator disi = dis.iterator();
+ if (disi == null)
+ return null;
+ return new ConstantDocIdSetIteratorScorer(disi, this, queryWeight);
} else {
assert query != null && innerWeight != null;
- disi = innerWeight.scorer(context, acceptDocs);
+ Scorer scorer = innerWeight.scorer(context, flags, acceptDocs);
+ if (scorer == null) {
+ return null;
+ }
+ return new ConstantScoreScorer(scorer, queryWeight);
}
- if (disi == null) {
- return null;
- }
- return new ConstantScorer(disi, this, queryWeight);
+
}
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
- final Scorer cs = scorer(context, context.reader().getLiveDocs());
+ final Scorer cs = scorer(context, PostingsEnum.FLAG_FREQS, context.reader().getLiveDocs());
final boolean exists = (cs != null && cs.advance(doc) == doc);
final ComplexExplanation result = new ComplexExplanation();
@@ -216,17 +220,46 @@ public class ConstantScoreQuery extends Query {
@Override
public void setScorer(Scorer scorer) throws IOException {
// we must wrap again here, but using the scorer passed in as parameter:
- in.setScorer(new ConstantScorer(scorer, weight, theScore));
+ in.setScorer(new ConstantScoreScorer(scorer, theScore));
}
};
}
}
- protected class ConstantScorer extends Scorer {
+ protected class ConstantScoreScorer extends FilterScorer {
+
+ private final float score;
+
+ public ConstantScoreScorer(Scorer wrapped, float score) {
+ super(wrapped);
+ this.score = score;
+ }
+
+ @Override
+ public int freq() throws IOException {
+ return 1;
+ }
+
+ @Override
+ public float score() throws IOException {
+ return score;
+ }
+
+ @Override
+    public Collection<ChildScorer> getChildren() {
+ if (query != null) {
+ return Collections.singletonList(new ChildScorer(in, "constant"));
+ } else {
+ return Collections.emptyList();
+ }
+ }
+ }
+
+ protected class ConstantDocIdSetIteratorScorer extends Scorer {
final DocIdSetIterator docIdSetIterator;
final float theScore;
- public ConstantScorer(DocIdSetIterator docIdSetIterator, Weight w, float theScore) {
+ public ConstantDocIdSetIteratorScorer(DocIdSetIterator docIdSetIterator, Weight w, float theScore) {
super(w);
this.theScore = theScore;
this.docIdSetIterator = docIdSetIterator;
@@ -254,10 +287,40 @@ public class ConstantScoreQuery extends Query {
}
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public int advance(int target) throws IOException {
return docIdSetIterator.advance(target);
}
-
+
@Override
public long cost() {
return docIdSetIterator.cost();
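The scorer path above now builds on FilterScorer; the essence of constant scoring is small enough to sketch as a toy subclass (not the patch's ConstantScoreScorer itself):

    import java.io.IOException;

    import org.apache.lucene.search.FilterScorer;
    import org.apache.lucene.search.Scorer;

    // Iteration (and, via FilterScorer, positions) delegates to the wrapped
    // scorer; only score() is pinned to a constant.
    class ConstantScoreSketch extends FilterScorer {
      private final float constant;
      ConstantScoreSketch(Scorer in, float constant) {
        super(in);
        this.constant = constant;
      }
      @Override
      public float score() throws IOException {
        return constant;
      }
    }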
diff --git a/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java b/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java
index e27063a..5ab87a3 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java
@@ -16,6 +16,11 @@ package org.apache.lucene.search;
* limitations under the License.
*/
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.util.Bits;
+
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
@@ -23,11 +28,6 @@ import java.util.Iterator;
import java.util.List;
import java.util.Set;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.util.Bits;
-
/**
* A query that generates the union of documents produced by its subqueries, and that scores each document with the maximum
* score for that document as produced by any subquery, plus a tie breaking increment for any additional matching subqueries.
@@ -153,11 +153,11 @@ public class DisjunctionMaxQuery extends Query implements Iterable<Query> {
/** Create the scorer used to score our associated DisjunctionMaxQuery */
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
    List<Scorer> scorers = new ArrayList<>();
for (Weight w : weights) {
// we will advance() subscorers
- Scorer subScorer = w.scorer(context, acceptDocs);
+ Scorer subScorer = w.scorer(context, flags, acceptDocs);
if (subScorer != null) {
scorers.add(subScorer);
}
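The combination rule this query delegates to DisjunctionMaxScorer (see getFinal() below): the best clause score wins, and every other matching clause contributes a tieBreakerMultiplier fraction. Standalone:

    class DisMaxSketch {
      // max + tieBreaker * (sum of the other matching clauses)
      static float disMax(float[] subScores, float tieBreakerMultiplier) {
        float max = 0f, sum = 0f;
        for (float s : subScores) {
          sum += s;
          max = Math.max(max, s);
        }
        return max + (sum - max) * tieBreakerMultiplier;
      }
    }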
diff --git a/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxScorer.java b/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxScorer.java
index b5d0a0d..e80242e 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxScorer.java
@@ -46,6 +46,7 @@ final class DisjunctionMaxScorer extends DisjunctionScorer {
DisjunctionMaxScorer(Weight weight, float tieBreakerMultiplier, Scorer[] subScorers) {
super(weight, subScorers);
this.tieBreakerMultiplier = tieBreakerMultiplier;
+
}
@Override
@@ -66,4 +67,5 @@ final class DisjunctionMaxScorer extends DisjunctionScorer {
protected float getFinal() {
return scoreMax + (scoreSum - scoreMax) * tieBreakerMultiplier;
}
+
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/DisjunctionScorer.java b/lucene/core/src/java/org/apache/lucene/search/DisjunctionScorer.java
index 5b7e2ff..2840c2b 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DisjunctionScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DisjunctionScorer.java
@@ -21,18 +21,20 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
+import org.apache.lucene.util.BytesRef;
+
/**
* Base class for Scorers that score disjunctions.
*/
abstract class DisjunctionScorer extends Scorer {
- private final Scorer subScorers[];
- private int numScorers;
+ protected final Scorer subScorers[];
/** The document number of the current match. */
protected int doc = -1;
+ protected int numScorers;
/** Number of matching scorers for the current match. */
protected int freq = -1;
-
+
protected DisjunctionScorer(Weight weight, Scorer subScorers[]) {
super(weight);
this.subScorers = subScorers;
@@ -115,6 +117,36 @@ abstract class DisjunctionScorer extends Scorer {
}
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public final long cost() {
long sum = 0;
for (int i = 0; i < numScorers; i++) {
@@ -167,7 +199,7 @@ abstract class DisjunctionScorer extends Scorer {
}
}
}
-
+
// if we haven't already computed freq + score, do so
private void visitScorers() throws IOException {
reset();
@@ -209,4 +241,5 @@ abstract class DisjunctionScorer extends Scorer {
/** Return final score */
protected abstract float getFinal();
+
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/DisjunctionSumScorer.java b/lucene/core/src/java/org/apache/lucene/search/DisjunctionSumScorer.java
index f291695..f775ad6 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DisjunctionSumScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DisjunctionSumScorer.java
@@ -19,13 +19,14 @@ package org.apache.lucene.search;
import java.io.IOException;
+
/** A Scorer for OR like queries, counterpart of ConjunctionScorer.
* This Scorer implements {@link Scorer#advance(int)} and uses advance() on the given Scorers.
*/
final class DisjunctionSumScorer extends DisjunctionScorer {
private double score;
private final float[] coord;
-
+
/** Construct a DisjunctionScorer.
* @param weight The weight to be used.
* @param subScorers Array of at least two subscorers.
@@ -50,4 +51,5 @@ final class DisjunctionSumScorer extends DisjunctionScorer {
protected float getFinal() {
return (float)score * coord[freq];
}
+
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/DocValuesTermsFilter.java b/lucene/core/src/java/org/apache/lucene/search/DocValuesTermsFilter.java
index 464e5b2..f4bc6e1 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DocValuesTermsFilter.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DocValuesTermsFilter.java
@@ -21,7 +21,6 @@ import java.io.IOException;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.DocValues;
-import org.apache.lucene.index.DocsEnum; // javadoc @link
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.util.Bits;
@@ -63,7 +62,7 @@ import org.apache.lucene.util.FixedBitSet;
*
* In contrast, TermsFilter builds up an {@link FixedBitSet},
* keyed by docID, every time it's created, by enumerating
- * through all matching docs using {@link DocsEnum} to seek
+ * through all matching docs using {@link org.apache.lucene.index.PostingsEnum} to seek
* and scan through each term's docID list. While there is
* no linear scan of all docIDs, besides the allocation of
* the underlying array in the {@link FixedBitSet}, this
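A sketch of the TermsFilter-style scan contrasted in the javadoc above, restated with this patch's postings() call (names are illustrative; FLAG_NONE asks for bare doc IDs):

    import java.io.IOException;

    import org.apache.lucene.index.PostingsEnum;
    import org.apache.lucene.index.Terms;
    import org.apache.lucene.index.TermsEnum;
    import org.apache.lucene.search.DocIdSetIterator;
    import org.apache.lucene.util.BytesRef;
    import org.apache.lucene.util.FixedBitSet;

    class TermsBitSetSketch {
      // Seek each query term, then set the bit of every doc that contains it.
      static FixedBitSet bitsFor(Terms terms, int maxDoc, BytesRef... queryTerms) throws IOException {
        FixedBitSet bits = new FixedBitSet(maxDoc);
        TermsEnum te = terms.iterator(null);
        PostingsEnum pe = null;
        for (BytesRef term : queryTerms) {
          if (te.seekExact(term)) {
            pe = te.postings(null, pe, PostingsEnum.FLAG_NONE); // reuse across terms
            for (int doc = pe.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = pe.nextDoc()) {
              bits.set(doc);
            }
          }
        }
        return bits;
      }
    }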
diff --git a/lucene/core/src/java/org/apache/lucene/search/ExactPhraseScorer.java b/lucene/core/src/java/org/apache/lucene/search/ExactPhraseScorer.java
index e73b241..232b5cd 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ExactPhraseScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ExactPhraseScorer.java
@@ -22,6 +22,7 @@ import java.util.Arrays;
import org.apache.lucene.index.*;
import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.util.BytesRef;
final class ExactPhraseScorer extends Scorer {
private final int endMinus1;
@@ -35,21 +36,21 @@ final class ExactPhraseScorer extends Scorer {
private final long cost;
private final static class ChunkState {
- final DocsAndPositionsEnum posEnum;
+ final PostingsEnum posEnum;
final int offset;
int posUpto;
int posLimit;
int pos;
int lastPos;
- public ChunkState(DocsAndPositionsEnum posEnum, int offset) {
+ public ChunkState(PostingsEnum posEnum, int offset) {
this.posEnum = posEnum;
this.offset = offset;
}
}
private final ChunkState[] chunkStates;
- private final DocsAndPositionsEnum lead;
+ private final PostingsEnum lead;
private int docID = -1;
private int freq;
@@ -79,7 +80,7 @@ final class ExactPhraseScorer extends Scorer {
// TODO: don't dup this logic from conjunctionscorer :)
advanceHead: for(;;) {
for (int i = 1; i < chunkStates.length; i++) {
- final DocsAndPositionsEnum de = chunkStates[i].posEnum;
+ final PostingsEnum de = chunkStates[i].posEnum;
if (de.docID() < doc) {
int d = de.advance(doc);
@@ -125,6 +126,36 @@ final class ExactPhraseScorer extends Scorer {
}
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public int docID() {
return docID;
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/FakeScorer.java b/lucene/core/src/java/org/apache/lucene/search/FakeScorer.java
index e2a50c8..4b0fbef 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FakeScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FakeScorer.java
@@ -17,14 +17,17 @@ package org.apache.lucene.search;
* limitations under the License.
*/
+import java.io.IOException;
import java.util.Collection;
+import org.apache.lucene.util.BytesRef;
+
/** Used by {@link BulkScorer}s that need to pass a {@link
* Scorer} to {@link LeafCollector#setScorer}. */
-final class FakeScorer extends Scorer {
- float score;
- int doc = -1;
- int freq = 1;
+public final class FakeScorer extends Scorer {
+ public float score;
+ public int doc = -1;
+ public int freq = 1;
public FakeScorer() {
super(null);
@@ -46,6 +49,36 @@ final class FakeScorer extends Scorer {
}
@Override
+ public int nextPosition() throws IOException {
+ throw new UnsupportedOperationException("FakeScorer doesn't support nextPosition()");
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ throw new UnsupportedOperationException("FakeScorer doesn't support startPosition()");
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ throw new UnsupportedOperationException("FakeScorer doesn't support endPosition()");
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ throw new UnsupportedOperationException("FakeScorer doesn't support startOffset()");
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ throw new UnsupportedOperationException("FakeScorer doesn't support endOffset()");
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ throw new UnsupportedOperationException("FakeScorer doesn't support getPayload()");
+ }
+
+ @Override
public int nextDoc() {
throw new UnsupportedOperationException("FakeScorer doesn't support nextDoc()");
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/FilterScorer.java b/lucene/core/src/java/org/apache/lucene/search/FilterScorer.java
index 88881bd..8a1ecfc 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FilterScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FilterScorer.java
@@ -18,9 +18,9 @@ package org.apache.lucene.search;
*/
import java.io.IOException;
-import java.util.Collection;
import org.apache.lucene.util.AttributeSource;
+import org.apache.lucene.util.BytesRef;
/**
* A {@code FilterScorer} contains another {@code Scorer}, which it
@@ -32,13 +32,18 @@ import org.apache.lucene.util.AttributeSource;
* further override some of these methods and may also provide additional
* methods and fields.
*/
-abstract class FilterScorer extends Scorer {
+public abstract class FilterScorer extends Scorer {
protected final Scorer in;
public FilterScorer(Scorer in) {
super(in.weight);
this.in = in;
}
+
+ public FilterScorer(Scorer in, Weight weight) {
+ super(weight);
+ this.in = in;
+ }
@Override
public float score() throws IOException {
@@ -61,6 +66,11 @@ abstract class FilterScorer extends Scorer {
}
@Override
+ public int nextPosition() throws IOException {
+ return in.nextPosition();
+ }
+
+ @Override
public int advance(int target) throws IOException {
return in.advance(target);
}
@@ -71,6 +81,31 @@ abstract class FilterScorer extends Scorer {
}
@Override
+ public int startPosition() throws IOException {
+ return in.startPosition();
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return in.endPosition();
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return in.startOffset();
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return in.endOffset();
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return in.getPayload();
+ }
+
+ @Override
public AttributeSource attributes() {
return in.attributes();
}
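With the delegation above in place, a FilterScorer subclass overrides only what it actually changes; everything else, including the new positions surface, flows through. A minimal pass-through sketch:

    import java.io.IOException;

    import org.apache.lucene.search.FilterScorer;
    import org.apache.lucene.search.Scorer;

    // Counts matches as they stream by; positions, offsets and payloads
    // still come straight from the wrapped scorer.
    class CountingScorerSketch extends FilterScorer {
      int matchCount = 0;
      CountingScorerSketch(Scorer in) {
        super(in);
      }
      @Override
      public int nextDoc() throws IOException {
        int doc = in.nextDoc();
        if (doc != NO_MORE_DOCS) {
          matchCount++;
        }
        return doc;
      }
    }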
diff --git a/lucene/core/src/java/org/apache/lucene/search/FilteredQuery.java b/lucene/core/src/java/org/apache/lucene/search/FilteredQuery.java
index c95f05b..ce77a68 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FilteredQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FilteredQuery.java
@@ -17,17 +17,17 @@ package org.apache.lucene.search;
* limitations under the License.
*/
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.ToStringUtils;
-
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Set;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.ToStringUtils;
+
/**
* A query that applies a filter to the results of another query.
@@ -119,7 +119,7 @@ public class FilteredQuery extends Query {
// return a filtering scorer
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
assert filter != null;
DocIdSet filterDocIdSet = filter.getDocIdSet(context, acceptDocs);
@@ -128,12 +128,12 @@ public class FilteredQuery extends Query {
return null;
}
- return strategy.filteredScorer(context, weight, filterDocIdSet);
+ return strategy.filteredScorer(context, weight, filterDocIdSet, flags);
}
// return a filtering top scorer
@Override
- public BulkScorer bulkScorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public BulkScorer bulkScorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
assert filter != null;
DocIdSet filterDocIdSet = filter.getDocIdSet(context, acceptDocs);
@@ -142,7 +142,8 @@ public class FilteredQuery extends Query {
return null;
}
- return strategy.filteredBulkScorer(context, weight, filterDocIdSet);
+ return strategy.filteredBulkScorer(context, weight, filterDocIdSet, flags);
+
}
};
}
@@ -153,13 +154,13 @@ public class FilteredQuery extends Query {
* than document scoring or if the filter has a linear running time to compute
* the next matching doc like exact geo distances.
*/
- private static final class QueryFirstScorer extends Scorer {
+ private static final class QueryFirstScorer extends FilterScorer {
private final Scorer scorer;
private int scorerDoc = -1;
private final Bits filterBits;
protected QueryFirstScorer(Weight weight, Bits filterBits, Scorer other) {
- super(weight);
+ super(other, weight);
this.scorer = other;
this.filterBits = filterBits;
}
@@ -184,29 +185,16 @@ public class FilteredQuery extends Query {
return scorerDoc = doc;
}
}
-
@Override
public int docID() {
return scorerDoc;
}
-
- @Override
- public float score() throws IOException {
- return scorer.score();
- }
-
- @Override
- public int freq() throws IOException { return scorer.freq(); }
-
+
@Override
    public Collection<ChildScorer> getChildren() {
return Collections.singleton(new ChildScorer(scorer, "FILTERED"));
}
- @Override
- public long cost() {
- return scorer.cost();
- }
}
private static class QueryFirstBulkScorer extends BulkScorer {
@@ -249,7 +237,7 @@ public class FilteredQuery extends Query {
* jumping past the target document. When both land on the same document, it's
* collected.
*/
- private static final class LeapFrogScorer extends Scorer {
+ private static final class LeapFrogScorer extends FilterScorer {
private final DocIdSetIterator secondary;
private final DocIdSetIterator primary;
private final Scorer scorer;
@@ -257,7 +245,7 @@ public class FilteredQuery extends Query {
private int secondaryDoc = -1;
protected LeapFrogScorer(Weight weight, DocIdSetIterator primary, DocIdSetIterator secondary, Scorer scorer) {
- super(weight);
+ super(scorer, weight);
this.primary = primary;
this.secondary = secondary;
this.scorer = scorer;
@@ -297,17 +285,7 @@ public class FilteredQuery extends Query {
public final int docID() {
return secondaryDoc;
}
-
- @Override
- public final float score() throws IOException {
- return scorer.score();
- }
-
- @Override
- public final int freq() throws IOException {
- return scorer.freq();
- }
-
+
@Override
    public final Collection<ChildScorer> getChildren() {
return Collections.singleton(new ChildScorer(scorer, "FILTERED"));
@@ -455,12 +433,13 @@ public class FilteredQuery extends Query {
* the {@link org.apache.lucene.index.LeafReaderContext} for which to return the {@link Scorer}.
* @param weight the {@link FilteredQuery} {@link Weight} to create the filtered scorer.
* @param docIdSet the filter {@link DocIdSet} to apply
+   * @param flags the low-level posting features requested for this scorer.
* @return a filtered scorer
*
* @throws IOException if an {@link IOException} occurs
*/
public abstract Scorer filteredScorer(LeafReaderContext context,
- Weight weight, DocIdSet docIdSet) throws IOException;
+ Weight weight, DocIdSet docIdSet, int flags) throws IOException;
/**
* Returns a filtered {@link BulkScorer} based on this
@@ -475,8 +454,8 @@ public class FilteredQuery extends Query {
* @return a filtered top scorer
*/
public BulkScorer filteredBulkScorer(LeafReaderContext context,
- Weight weight, DocIdSet docIdSet) throws IOException {
- Scorer scorer = filteredScorer(context, weight, docIdSet);
+ Weight weight, DocIdSet docIdSet, int flags) throws IOException {
+ Scorer scorer = filteredScorer(context, weight, docIdSet, flags);
if (scorer == null) {
return null;
}
@@ -484,6 +463,7 @@ public class FilteredQuery extends Query {
// ignore scoreDocsInOrder:
return new Weight.DefaultBulkScorer(scorer);
}
+
}
/**
@@ -497,7 +477,7 @@ public class FilteredQuery extends Query {
public static class RandomAccessFilterStrategy extends FilterStrategy {
@Override
- public Scorer filteredScorer(LeafReaderContext context, Weight weight, DocIdSet docIdSet) throws IOException {
+ public Scorer filteredScorer(LeafReaderContext context, Weight weight, DocIdSet docIdSet, int flags) throws IOException {
final DocIdSetIterator filterIter = docIdSet.iterator();
if (filterIter == null) {
// this means the filter does not accept any documents.
@@ -509,11 +489,11 @@ public class FilteredQuery extends Query {
final boolean useRandomAccess = filterAcceptDocs != null && useRandomAccess(filterAcceptDocs, filterIter.cost());
if (useRandomAccess) {
// if we are using random access, we return the inner scorer, just with other acceptDocs
- return weight.scorer(context, filterAcceptDocs);
+ return weight.scorer(context, flags, filterAcceptDocs);
} else {
// we are gonna advance() this scorer, so we set inorder=true/toplevel=false
// we pass null as acceptDocs, as our filter has already respected acceptDocs, no need to do twice
- final Scorer scorer = weight.scorer(context, null);
+ final Scorer scorer = weight.scorer(context, flags, null);
return (scorer == null) ? null : new LeapFrogScorer(weight, filterIter, scorer, scorer);
}
}
@@ -546,14 +526,14 @@ public class FilteredQuery extends Query {
@Override
public Scorer filteredScorer(LeafReaderContext context,
- Weight weight, DocIdSet docIdSet) throws IOException {
+ Weight weight, DocIdSet docIdSet, int flags) throws IOException {
final DocIdSetIterator filterIter = docIdSet.iterator();
if (filterIter == null) {
// this means the filter does not accept any documents.
return null;
}
// we pass null as acceptDocs, as our filter has already respected acceptDocs, no need to do twice
- final Scorer scorer = weight.scorer(context, null);
+ final Scorer scorer = weight.scorer(context, flags, null);
if (scorer == null) {
return null;
}
@@ -583,29 +563,27 @@ public class FilteredQuery extends Query {
@Override
public Scorer filteredScorer(final LeafReaderContext context,
Weight weight,
- DocIdSet docIdSet) throws IOException {
+ DocIdSet docIdSet, int flags) throws IOException {
Bits filterAcceptDocs = docIdSet.bits();
if (filterAcceptDocs == null) {
// Filter does not provide random-access Bits; we
// must fallback to leapfrog:
- return LEAP_FROG_QUERY_FIRST_STRATEGY.filteredScorer(context, weight, docIdSet);
+ return LEAP_FROG_QUERY_FIRST_STRATEGY.filteredScorer(context, weight, docIdSet, flags);
}
- final Scorer scorer = weight.scorer(context, null);
- return scorer == null ? null : new QueryFirstScorer(weight,
- filterAcceptDocs, scorer);
+ final Scorer scorer = weight.scorer(context, flags, null);
+ return scorer == null ? null : new QueryFirstScorer(weight, filterAcceptDocs, scorer);
}
@Override
- public BulkScorer filteredBulkScorer(final LeafReaderContext context,
- Weight weight,
- DocIdSet docIdSet) throws IOException {
+ public BulkScorer filteredBulkScorer(final LeafReaderContext context, Weight weight,
+ DocIdSet docIdSet, int flags) throws IOException {
Bits filterAcceptDocs = docIdSet.bits();
if (filterAcceptDocs == null) {
// Filter does not provide random-access Bits; we
// must fallback to leapfrog:
- return LEAP_FROG_QUERY_FIRST_STRATEGY.filteredBulkScorer(context, weight, docIdSet);
+ return LEAP_FROG_QUERY_FIRST_STRATEGY.filteredBulkScorer(context, weight, docIdSet, flags);
}
- final Scorer scorer = weight.scorer(context, null);
+ final Scorer scorer = weight.scorer(context, flags, null);
return scorer == null ? null : new QueryFirstBulkScorer(scorer, filterAcceptDocs);
}
}
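All of the leapfrog strategies above reduce to the classic zig-zag join. Isolated, and assuming both iterators honor the DocIdSetIterator.advance() contract (advance(NO_MORE_DOCS) returns NO_MORE_DOCS, so the loop terminates):

    import java.io.IOException;

    import org.apache.lucene.search.DocIdSetIterator;

    class LeapFrogSketch {
      // Advance whichever side is behind until both sit on the same doc.
      static int nextCommonDoc(DocIdSetIterator primary, DocIdSetIterator secondary) throws IOException {
        int p = primary.nextDoc();
        int s = secondary.advance(p);
        while (p != s) {
          if (p < s) {
            p = primary.advance(s);
          } else {
            s = secondary.advance(p);
          }
        }
        return p; // NO_MORE_DOCS once either side is exhausted
      }
    }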
diff --git a/lucene/core/src/java/org/apache/lucene/search/FuzzyTermsEnum.java b/lucene/core/src/java/org/apache/lucene/search/FuzzyTermsEnum.java
index 3199966..6062118 100644
--- a/lucene/core/src/java/org/apache/lucene/search/FuzzyTermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/search/FuzzyTermsEnum.java
@@ -22,8 +22,7 @@ import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FilteredTermsEnum;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermState;
@@ -266,14 +265,8 @@ public class FuzzyTermsEnum extends TermsEnum {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
- return actualEnum.docs(liveDocs, reuse, flags);
- }
-
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs,
- DocsAndPositionsEnum reuse, int flags) throws IOException {
- return actualEnum.docsAndPositions(liveDocs, reuse, flags);
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
+ return actualEnum.postings(liveDocs, reuse, flags);
}
@Override
diff --git a/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java b/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java
index 84df738..e701a16 100644
--- a/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java
+++ b/lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java
@@ -27,10 +27,11 @@ import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
-import org.apache.lucene.index.DirectoryReader; // javadocs
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
-import org.apache.lucene.index.IndexWriter; // javadocs
+import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.ReaderUtil;
@@ -41,7 +42,7 @@ import org.apache.lucene.index.TermContext;
import org.apache.lucene.index.Terms;
import org.apache.lucene.search.similarities.DefaultSimilarity;
import org.apache.lucene.search.similarities.Similarity;
-import org.apache.lucene.store.NIOFSDirectory; // javadoc
+import org.apache.lucene.store.NIOFSDirectory;
import org.apache.lucene.util.ThreadInterruptedException;
/** Implements search over a single IndexReader.
@@ -586,7 +587,7 @@ public class IndexSearcher {
// continue with the following leaf
continue;
}
- BulkScorer scorer = weight.bulkScorer(ctx, ctx.reader().getLiveDocs());
+ BulkScorer scorer = weight.bulkScorer(ctx, PostingsEnum.FLAG_FREQS, ctx.reader().getLiveDocs());
if (scorer != null) {
try {
scorer.score(leafCollector);
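
With this change the searcher must declare, up front, how much postings data its scorers may pull; FLAG_FREQS is the ordinary scoring level. A sketch of driving one leaf by hand under the new contract (illustrative only; scoreLeaf and the class name are invented):

import java.io.IOException;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.search.BulkScorer;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.Weight;

class LeafSearchSketch {
  // Mirrors the loop body above: ask the weight for a bulk scorer at the
  // FLAG_FREQS level and feed the whole leaf to the collector.
  static void scoreLeaf(Weight weight, LeafReaderContext ctx, LeafCollector collector) throws IOException {
    BulkScorer scorer = weight.bulkScorer(ctx, PostingsEnum.FLAG_FREQS, ctx.reader().getLiveDocs());
    if (scorer != null) { // null means nothing in this segment matches
      scorer.score(collector);
    }
  }
}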
diff --git a/lucene/core/src/java/org/apache/lucene/search/MatchAllDocsQuery.java b/lucene/core/src/java/org/apache/lucene/search/MatchAllDocsQuery.java
index ed49b3c..19dcf98 100644
--- a/lucene/core/src/java/org/apache/lucene/search/MatchAllDocsQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/MatchAllDocsQuery.java
@@ -17,14 +17,15 @@ package org.apache.lucene.search;
* limitations under the License.
*/
-import org.apache.lucene.index.LeafReaderContext;
+import java.io.IOException;
+import java.util.Set;
+
import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
-import org.apache.lucene.util.ToStringUtils;
import org.apache.lucene.util.Bits;
-
-import java.util.Set;
-import java.io.IOException;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.ToStringUtils;
/**
* A query that matches all documents.
@@ -73,6 +74,36 @@ public class MatchAllDocsQuery extends Query {
}
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public int advance(int target) throws IOException {
doc = target-1;
return nextDoc();
@@ -114,7 +145,7 @@ public class MatchAllDocsQuery extends Query {
}
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
return new MatchAllScorer(context.reader(), acceptDocs, this, queryWeight);
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/MinShouldMatchSumScorer.java b/lucene/core/src/java/org/apache/lucene/search/MinShouldMatchSumScorer.java
index 3f31ace..f81ec4a 100644
--- a/lucene/core/src/java/org/apache/lucene/search/MinShouldMatchSumScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/MinShouldMatchSumScorer.java
@@ -24,6 +24,7 @@ import java.util.Comparator;
import java.util.List;
import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
/**
* A Scorer for OR like queries, counterpart of ConjunctionScorer.
@@ -110,6 +111,7 @@ class MinShouldMatchSumScorer extends Scorer {
this.coord = coord;
minheapHeapify();
assert minheapCheck();
+
}
@Override
@@ -145,6 +147,7 @@ class MinShouldMatchSumScorer extends Scorer {
break;
}
}
+
return doc;
}
@@ -231,6 +234,36 @@ class MinShouldMatchSumScorer extends Scorer {
return nrMatchers;
}
+ @Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
/**
* Advances to the first match beyond the current whose document number is
* greater than or equal to a given target.
diff --git a/lucene/core/src/java/org/apache/lucene/search/MultiCollector.java b/lucene/core/src/java/org/apache/lucene/search/MultiCollector.java
index 495fbf7..bee5e86 100644
--- a/lucene/core/src/java/org/apache/lucene/search/MultiCollector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/MultiCollector.java
@@ -101,7 +101,6 @@ public class MultiCollector implements Collector {
return new MultiLeafCollector(leafCollectors);
}
-
private static class MultiLeafCollector implements LeafCollector {
private final LeafCollector[] collectors;
diff --git a/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java b/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java
index 3d1fa5e..1640bab 100644
--- a/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/MultiPhraseQuery.java
@@ -21,9 +21,8 @@ import java.io.IOException;
import java.util.*;
import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.Term;
@@ -179,7 +178,7 @@ public class MultiPhraseQuery extends Query {
}
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
assert !termArrays.isEmpty();
final LeafReader reader = context.reader();
final Bits liveDocs = acceptDocs;
@@ -197,11 +196,11 @@ public class MultiPhraseQuery extends Query {
for (int pos=0; pos<postingsFreqs.length; pos++) {
Term[] terms = termArrays.get(pos);
- final DocsAndPositionsEnum postingsEnum;
+ final PostingsEnum postingsEnum;
int docFreq;
if (terms.length > 1) {
- postingsEnum = new UnionDocsAndPositionsEnum(liveDocs, context, terms, termContexts, termsEnum);
+ postingsEnum = new UnionPostingsEnum(liveDocs, context, terms, termContexts, termsEnum);
// coarse -- this overcounts since a given doc can
// have more than one term:
@@ -229,11 +228,11 @@ public class MultiPhraseQuery extends Query {
return null;
}
termsEnum.seekExact(term.bytes(), termState);
- postingsEnum = termsEnum.docsAndPositions(liveDocs, null, DocsEnum.FLAG_NONE);
+ postingsEnum = termsEnum.postings(liveDocs, null, flags | PostingsEnum.FLAG_POSITIONS);
if (postingsEnum == null) {
// term does exist, but has no positions
- assert termsEnum.docs(liveDocs, null, DocsEnum.FLAG_NONE) != null: "termstate found but no term exists in reader";
+ assert termsEnum.postings(liveDocs, null, PostingsEnum.FLAG_NONE) != null: "termstate found but no term exists in reader";
throw new IllegalStateException("field \"" + term.field() + "\" was indexed without position data; cannot run PhraseQuery (term=" + term.text() + ")");
}
@@ -257,7 +256,7 @@ public class MultiPhraseQuery extends Query {
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
- Scorer scorer = scorer(context, context.reader().getLiveDocs());
+ Scorer scorer = scorer(context, PostingsEnum.FLAG_POSITIONS, context.reader().getLiveDocs());
if (scorer != null) {
int newDoc = scorer.advance(doc);
if (newDoc == doc) {
@@ -408,15 +407,15 @@ public class MultiPhraseQuery extends Query {
*/
// TODO: if ever we allow subclassing of the *PhraseScorer
-class UnionDocsAndPositionsEnum extends DocsAndPositionsEnum {
+class UnionPostingsEnum extends PostingsEnum {
- private static final class DocsQueue extends PriorityQueue<DocsAndPositionsEnum> {
- DocsQueue(List<DocsAndPositionsEnum> docsEnums) throws IOException {
- super(docsEnums.size());
+ private static final class DocsQueue extends PriorityQueue<PostingsEnum> {
+ DocsQueue(List<PostingsEnum> postingsEnums) throws IOException {
+ super(postingsEnums.size());
- Iterator<DocsAndPositionsEnum> i = docsEnums.iterator();
+ Iterator<PostingsEnum> i = postingsEnums.iterator();
while (i.hasNext()) {
- DocsAndPositionsEnum postings = i.next();
+ PostingsEnum postings = i.next();
if (postings.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
add(postings);
}
@@ -424,7 +423,7 @@ class UnionDocsAndPositionsEnum extends DocsAndPositionsEnum {
}
@Override
- public final boolean lessThan(DocsAndPositionsEnum a, DocsAndPositionsEnum b) {
+ public final boolean lessThan(PostingsEnum a, PostingsEnum b) {
return a.docID() < b.docID();
}
}
@@ -446,6 +445,10 @@ class UnionDocsAndPositionsEnum extends DocsAndPositionsEnum {
return _array[_index++];
}
+ final int top() {
+ return _array[_index];
+ }
+
final void sort() {
Arrays.sort(_array, _index, _lastIndex);
}
@@ -473,8 +476,8 @@ class UnionDocsAndPositionsEnum extends DocsAndPositionsEnum {
private IntQueue _posList;
private long cost;
- public UnionDocsAndPositionsEnum(Bits liveDocs, LeafReaderContext context, Term[] terms, Map<Term,TermContext> termContexts, TermsEnum termsEnum) throws IOException {
- List<DocsAndPositionsEnum> docsEnums = new LinkedList<>();
+ public UnionPostingsEnum(Bits liveDocs, LeafReaderContext context, Term[] terms, Map<Term,TermContext> termContexts, TermsEnum termsEnum) throws IOException {
+ List<PostingsEnum> postingsEnums = new LinkedList<>();
for (int i = 0; i < terms.length; i++) {
final Term term = terms[i];
TermState termState = termContexts.get(term).get(context.ord);
@@ -483,16 +486,16 @@ class UnionDocsAndPositionsEnum extends DocsAndPositionsEnum {
continue;
}
termsEnum.seekExact(term.bytes(), termState);
- DocsAndPositionsEnum postings = termsEnum.docsAndPositions(liveDocs, null, DocsEnum.FLAG_NONE);
+ PostingsEnum postings = termsEnum.postings(liveDocs, null, PostingsEnum.FLAG_POSITIONS);
if (postings == null) {
// term does exist, but has no positions
throw new IllegalStateException("field \"" + term.field() + "\" was indexed without position data; cannot run PhraseQuery (term=" + term.text() + ")");
}
cost += postings.cost();
- docsEnums.add(postings);
+ postingsEnums.add(postings);
}
- _queue = new DocsQueue(docsEnums);
+ _queue = new DocsQueue(postingsEnums);
_posList = new IntQueue();
}
@@ -509,7 +512,7 @@ class UnionDocsAndPositionsEnum extends DocsAndPositionsEnum {
_doc = _queue.top().docID();
// merge sort all positions together
- DocsAndPositionsEnum postings;
+ PostingsEnum postings;
do {
postings = _queue.top();
@@ -537,6 +540,16 @@ class UnionDocsAndPositionsEnum extends DocsAndPositionsEnum {
}
@Override
+ public int startPosition() throws IOException {
+ return _posList.top();
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return _posList.top();
+ }
+
+ @Override
public int startOffset() {
return -1;
}
@@ -554,7 +567,7 @@ class UnionDocsAndPositionsEnum extends DocsAndPositionsEnum {
@Override
public final int advance(int target) throws IOException {
while (_queue.top() != null && target > _queue.top().docID()) {
- DocsAndPositionsEnum postings = _queue.pop();
+ PostingsEnum postings = _queue.pop();
if (postings.advance(target) != NO_MORE_DOCS) {
_queue.add(postings);
}
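
Note how the phrase machinery above always ORs FLAG_POSITIONS into whatever the caller asked for, and treats a null return from postings() as "term exists but the field carries no positions". A sketch of that consumption pattern (dumpPositions and the class name are invented for illustration):

import java.io.IOException;

import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;

class PositionsSketch {
  static void dumpPositions(TermsEnum te) throws IOException {
    PostingsEnum postings = te.postings(null, null, PostingsEnum.FLAG_POSITIONS);
    if (postings == null) {
      // Same situation the scorers above guard against: the term exists,
      // but the field was indexed without position data.
      throw new IllegalStateException("field was indexed without position data");
    }
    while (postings.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
      for (int i = 0; i < postings.freq(); i++) {
        System.out.println(postings.docID() + ":" + postings.nextPosition());
      }
    }
  }
}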
diff --git a/lucene/core/src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java b/lucene/core/src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java
index f2301b2..8a7ef23 100644
--- a/lucene/core/src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java
+++ b/lucene/core/src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java
@@ -19,9 +19,7 @@ package org.apache.lucene.search;
import java.io.IOException;
-import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.Fields;
-import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
@@ -94,9 +92,9 @@ public class MultiTermQueryWrapperFilter<Q extends MultiTermQuery> extends Filter {
assert termsEnum != null;
BitDocIdSet.Builder builder = new BitDocIdSet.Builder(context.reader().maxDoc());
- DocsEnum docs = null;
+ PostingsEnum docs = null;
while (termsEnum.next() != null) {
- docs = termsEnum.docs(acceptDocs, docs, DocsEnum.FLAG_NONE);
+ docs = termsEnum.postings(acceptDocs, docs, PostingsEnum.FLAG_NONE);
builder.or(docs);
}
return builder.build();
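
The filter above shows the cheapest end of the flags spectrum: FLAG_NONE requests bare document IDs and reuses one enum across all terms. A standalone sketch of the same pattern (collectDocs and the class name are invented):

import java.io.IOException;

import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.FixedBitSet;

class MatchingDocsSketch {
  static FixedBitSet collectDocs(TermsEnum termsEnum, Bits acceptDocs, int maxDoc) throws IOException {
    FixedBitSet bits = new FixedBitSet(maxDoc);
    PostingsEnum docs = null; // reused across terms, as the filter above does
    while (termsEnum.next() != null) {
      docs = termsEnum.postings(acceptDocs, docs, PostingsEnum.FLAG_NONE);
      int doc;
      while ((doc = docs.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
        bits.set(doc);
      }
    }
    return bits;
  }
}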
diff --git a/lucene/core/src/java/org/apache/lucene/search/PhrasePositions.java b/lucene/core/src/java/org/apache/lucene/search/PhrasePositions.java
index c975b01..bfc692c 100644
--- a/lucene/core/src/java/org/apache/lucene/search/PhrasePositions.java
+++ b/lucene/core/src/java/org/apache/lucene/search/PhrasePositions.java
@@ -29,13 +29,13 @@ final class PhrasePositions {
int count; // remaining pos in this doc
int offset; // position in phrase
final int ord; // unique across all PhrasePositions instances
- final DocsAndPositionsEnum postings; // stream of docs & positions
+ final PostingsEnum postings; // stream of docs & positions
PhrasePositions next; // used to make lists
int rptGroup = -1; // >=0 indicates that this is a repeating PP
int rptInd; // index in the rptGroup
final Term[] terms; // for repetitions initialization
- PhrasePositions(DocsAndPositionsEnum postings, int o, int ord, Term[] terms) {
+ PhrasePositions(PostingsEnum postings, int o, int ord, Term[] terms) {
this.postings = postings;
offset = o;
this.ord = ord;
diff --git a/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java b/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java
index bf5a373..b4c479c 100644
--- a/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/PhraseQuery.java
@@ -22,19 +22,18 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Set;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.IndexReaderContext;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.index.TermState;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.search.similarities.Similarity.SimScorer;
import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.search.similarities.Similarity.SimScorer;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.ToStringUtils;
@@ -138,13 +137,13 @@ public class PhraseQuery extends Query {
}
static class PostingsAndFreq implements Comparable<PostingsAndFreq> {
- final DocsAndPositionsEnum postings;
+ final PostingsEnum postings;
final int docFreq;
final int position;
final Term[] terms;
final int nTerms; // for faster comparisons
- public PostingsAndFreq(DocsAndPositionsEnum postings, int docFreq, int position, Term... terms) {
+ public PostingsAndFreq(PostingsEnum postings, int docFreq, int position, Term... terms) {
this.postings = postings;
this.docFreq = docFreq;
this.position = position;
@@ -245,7 +244,7 @@ public class PhraseQuery extends Query {
}
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
assert !terms.isEmpty();
final LeafReader reader = context.reader();
final Bits liveDocs = acceptDocs;
@@ -267,7 +266,7 @@ public class PhraseQuery extends Query {
return null;
}
te.seekExact(t.bytes(), state);
- DocsAndPositionsEnum postingsEnum = te.docsAndPositions(liveDocs, null, DocsEnum.FLAG_NONE);
+ PostingsEnum postingsEnum = te.postings(liveDocs, null, flags | PostingsEnum.FLAG_POSITIONS);
// PhraseQuery on a field that did not index
// positions.
@@ -276,7 +275,7 @@ public class PhraseQuery extends Query {
// term does exist, but has no positions
throw new IllegalStateException("field \"" + t.field() + "\" was indexed without position data; cannot run PhraseQuery (term=" + t.text() + ")");
}
- postingsFreqs[i] = new PostingsAndFreq(postingsEnum, te.docFreq(), positions.get(i).intValue(), t);
+ postingsFreqs[i] = new PostingsAndFreq(postingsEnum, te.docFreq(), positions.get(i), t);
}
// sort by increasing docFreq order
@@ -298,7 +297,7 @@ public class PhraseQuery extends Query {
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
- Scorer scorer = scorer(context, context.reader().getLiveDocs());
+ Scorer scorer = scorer(context, PostingsEnum.FLAG_POSITIONS, context.reader().getLiveDocs());
if (scorer != null) {
int newDoc = scorer.advance(doc);
if (newDoc == doc) {
diff --git a/lucene/core/src/java/org/apache/lucene/search/QueryRescorer.java b/lucene/core/src/java/org/apache/lucene/search/QueryRescorer.java
index 2f17145..0f1e050 100644
--- a/lucene/core/src/java/org/apache/lucene/search/QueryRescorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/QueryRescorer.java
@@ -17,13 +17,14 @@ package org.apache.lucene.search;
* limitations under the License.
*/
+import org.apache.lucene.index.PostingsEnum;
+import org.apache.lucene.index.LeafReaderContext;
+
import java.io.IOException;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
-import org.apache.lucene.index.LeafReaderContext;
-
/** A {@link Rescorer} that uses a provided Query to assign
* scores to the first-pass hits.
*
@@ -82,7 +83,7 @@ public abstract class QueryRescorer extends Rescorer {
if (readerContext != null) {
// We advanced to another segment:
docBase = readerContext.docBase;
- scorer = weight.scorer(readerContext, null);
+ scorer = weight.scorer(readerContext, PostingsEnum.FLAG_NONE, null);
}
if(scorer != null) {
diff --git a/lucene/core/src/java/org/apache/lucene/search/QueryWrapperFilter.java b/lucene/core/src/java/org/apache/lucene/search/QueryWrapperFilter.java
index 8d8a010..2624e90 100644
--- a/lucene/core/src/java/org/apache/lucene/search/QueryWrapperFilter.java
+++ b/lucene/core/src/java/org/apache/lucene/search/QueryWrapperFilter.java
@@ -17,11 +17,12 @@ package org.apache.lucene.search;
* limitations under the License.
*/
-import java.io.IOException;
-
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.util.Bits;
+import java.io.IOException;
+
/**
* Constrains search results to only match those which also match a provided
* query.
@@ -56,7 +57,7 @@ public class QueryWrapperFilter extends Filter {
return new DocIdSet() {
@Override
public DocIdSetIterator iterator() throws IOException {
- return weight.scorer(privateContext, acceptDocs);
+ return weight.scorer(privateContext, PostingsEnum.FLAG_FREQS, acceptDocs);
}
@Override
diff --git a/lucene/core/src/java/org/apache/lucene/search/ReqExclScorer.java b/lucene/core/src/java/org/apache/lucene/search/ReqExclScorer.java
index 4e2a5f1..cf6aec1 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ReqExclScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ReqExclScorer.java
@@ -27,7 +27,7 @@ import java.util.Collections;
* This Scorer implements {@link Scorer#advance(int)},
* and it uses the skipTo() on the given scorers.
*/
-class ReqExclScorer extends Scorer {
+class ReqExclScorer extends FilterScorer {
private Scorer reqScorer;
private DocIdSetIterator exclDisi;
private int doc = -1;
@@ -37,7 +37,7 @@ class ReqExclScorer extends Scorer {
* @param exclDisi indicates exclusion.
*/
public ReqExclScorer(Scorer reqScorer, DocIdSetIterator exclDisi) {
- super(reqScorer.weight);
+ super(reqScorer);
this.reqScorer = reqScorer;
this.exclDisi = exclDisi;
}
@@ -103,11 +103,6 @@ class ReqExclScorer extends Scorer {
public float score() throws IOException {
return reqScorer.score(); // reqScorer may be null when next() or skipTo() already return false
}
-
- @Override
- public int freq() throws IOException {
- return reqScorer.freq();
- }
@Override
public Collection<ChildScorer> getChildren() {
@@ -129,8 +124,4 @@ class ReqExclScorer extends Scorer {
return doc = toNonExcluded();
}
- @Override
- public long cost() {
- return reqScorer.cost();
- }
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/ReqOptSumScorer.java b/lucene/core/src/java/org/apache/lucene/search/ReqOptSumScorer.java
index d7b4d86..38a9c0e 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ReqOptSumScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ReqOptSumScorer.java
@@ -20,6 +20,8 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
+import org.apache.lucene.util.BytesRef;
+
/** A Scorer for queries with a required part and an optional part.
* Delays skipTo() on the optional part until a score() is needed.
*
@@ -49,12 +51,14 @@ class ReqOptSumScorer extends Scorer {
@Override
public int nextDoc() throws IOException {
- return reqScorer.nextDoc();
+ int doc = reqScorer.nextDoc();
+ return doc;
}
@Override
public int advance(int target) throws IOException {
- return reqScorer.advance(target);
+ int doc = reqScorer.advance(target);
+ return doc;
}
@Override
@@ -93,6 +97,36 @@ class ReqOptSumScorer extends Scorer {
}
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public Collection<ChildScorer> getChildren() {
ArrayList<ChildScorer> children = new ArrayList<>(2);
children.add(new ChildScorer(reqScorer, "MUST"));
diff --git a/lucene/core/src/java/org/apache/lucene/search/Scorer.java b/lucene/core/src/java/org/apache/lucene/search/Scorer.java
index 929d3b9..b55cfc4 100644
--- a/lucene/core/src/java/org/apache/lucene/search/Scorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/Scorer.java
@@ -20,8 +20,9 @@ package org.apache.lucene.search;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
+import java.util.Locale;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
/**
* Expert: Common scoring functionality for different types of queries.
@@ -41,7 +42,7 @@ import org.apache.lucene.index.DocsEnum;
* TopScoreDocCollector}) will not properly collect hits
* with these scores.
*/
-public abstract class Scorer extends DocsEnum {
+public abstract class Scorer extends PostingsEnum {
/** the Scorer's parent Weight. in some cases this may be null */
// TODO can we clean this up?
protected final Weight weight;
@@ -60,6 +61,13 @@ public abstract class Scorer extends DocsEnum {
* {@link LeafCollector#collect}.
*/
public abstract float score() throws IOException;
+
+ /** Returns the score of the current interval spanned by this scorer.
+ * Initially invalid, until {@link #nextPosition()} is called.
+ */
+ public float intervalScore() throws IOException {
+ return 1;
+ }
/** returns parent Weight
* @lucene.experimental
@@ -67,6 +75,15 @@ public abstract class Scorer extends DocsEnum {
public Weight getWeight() {
return weight;
}
+
+ @Override
+ public String toString() {
+ try {
+ return String.format(Locale.ROOT, "%d:%d(%d)->%d(%d)", docID(), startPosition(), startOffset(), endPosition(), endOffset());
+ } catch (IOException e) {
+ return super.toString();
+ }
+ }
/** Returns child sub-scorers
* @lucene.experimental */
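
Because Scorer now extends PostingsEnum rather than DocsEnum, a scorer is itself a positional iterator: a consumer can interleave score() with nextPosition() on the same object, provided the scorer was built with position flags. A sketch (collect and the class name are invented; note that several scorers in this patch stub nextPosition() with -1, and SpanScorer throws instead):

import java.io.IOException;

import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Scorer;

class ScorerAsPostingsSketch {
  static void collect(Scorer scorer) throws IOException {
    while (scorer.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
      float docScore = scorer.score();
      for (int i = 0; i < scorer.freq(); i++) {
        // -1 here means the scorer carries no positions; span scorers throw instead.
        int pos = scorer.nextPosition();
        System.out.println(scorer.docID() + " score=" + docScore + " pos=" + pos);
      }
    }
  }
}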
diff --git a/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseScorer.java b/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseScorer.java
index 80a0270..cad273a 100644
--- a/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/SloppyPhraseScorer.java
@@ -27,6 +27,7 @@ import java.util.LinkedHashMap;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
final class SloppyPhraseScorer extends Scorer {
@@ -522,7 +523,37 @@ final class SloppyPhraseScorer extends Scorer {
public int freq() {
return numMatches;
}
-
+
+ @Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
float sloppyFreq() {
return sloppyFreq;
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/TermQuery.java b/lucene/core/src/java/org/apache/lucene/search/TermQuery.java
index 1bc2978..67e9c4a 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TermQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TermQuery.java
@@ -17,71 +17,75 @@ package org.apache.lucene.search;
* limitations under the License.
*/
-import java.io.IOException;
-import java.util.Set;
-
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReaderContext;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.index.TermState;
import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.search.similarities.Similarity.SimScorer;
import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.search.similarities.Similarity.SimScorer;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.ToStringUtils;
-/** A Query that matches documents containing a term.
- This may be combined with other terms with a {@link BooleanQuery}.
- */
+import java.io.IOException;
+import java.util.Set;
+
+/**
+ * A Query that matches documents containing a term. This may be combined with
+ * other terms with a {@link BooleanQuery}.
+ */
public class TermQuery extends Query {
private final Term term;
private final int docFreq;
private final TermContext perReaderTermState;
-
+
final class TermWeight extends Weight {
private final Similarity similarity;
private final Similarity.SimWeight stats;
private final TermContext termStates;
-
+
public TermWeight(IndexSearcher searcher, TermContext termStates)
- throws IOException {
+ throws IOException {
assert termStates != null : "TermContext must not be null";
this.termStates = termStates;
this.similarity = searcher.getSimilarity();
- this.stats = similarity.computeWeight(
- getBoost(),
- searcher.collectionStatistics(term.field()),
+ this.stats = similarity.computeWeight(getBoost(),
+ searcher.collectionStatistics(term.field()),
searcher.termStatistics(term, termStates));
}
-
+
@Override
- public String toString() { return "weight(" + TermQuery.this + ")"; }
-
+ public String toString() {
+ return "weight(" + TermQuery.this + ")";
+ }
+
@Override
- public Query getQuery() { return TermQuery.this; }
-
+ public Query getQuery() {
+ return TermQuery.this;
+ }
+
@Override
public float getValueForNormalization() {
return stats.getValueForNormalization();
}
-
+
@Override
public void normalize(float queryNorm, float topLevelBoost) {
stats.normalize(queryNorm, topLevelBoost);
}
-
+
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
assert termStates.topReaderContext == ReaderUtil.getTopLevelContext(context) : "The top-reader used to create Weight (" + termStates.topReaderContext + ") is not the same as the current reader's top-reader (" + ReaderUtil.getTopLevelContext(context);
final TermsEnum termsEnum = getTermsEnum(context);
if (termsEnum == null) {
return null;
}
- DocsEnum docs = termsEnum.docs(acceptDocs, null);
+ PostingsEnum docs = termsEnum.postings(acceptDocs, null, flags);
assert docs != null;
return new TermScorer(this, docs, similarity.simScorer(stats, context));
}
@@ -96,90 +100,100 @@ public class TermQuery extends Query {
assert termNotInReader(context.reader(), term) : "no termstate found but term exists in reader term=" + term;
return null;
}
- //System.out.println("LD=" + reader.getLiveDocs() + " set?=" + (reader.getLiveDocs() != null ? reader.getLiveDocs().get(0) : "null"));
- final TermsEnum termsEnum = context.reader().terms(term.field()).iterator(null);
+ // System.out.println("LD=" + reader.getLiveDocs() + " set?=" +
+ // (reader.getLiveDocs() != null ? reader.getLiveDocs().get(0) : "null"));
+ final TermsEnum termsEnum = context.reader().terms(term.field())
+ .iterator(null);
termsEnum.seekExact(term.bytes(), state);
return termsEnum;
}
private boolean termNotInReader(LeafReader reader, Term term) throws IOException {
// only called from assert
- //System.out.println("TQ.termNotInReader reader=" + reader + " term=" + field + ":" + bytes.utf8ToString());
+ // System.out.println("TQ.termNotInReader reader=" + reader + " term=" +
+ // field + ":" + bytes.utf8ToString());
return reader.docFreq(term) == 0;
}
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
- Scorer scorer = scorer(context, context.reader().getLiveDocs());
+ Scorer scorer = scorer(context, PostingsEnum.FLAG_FREQS, context.reader().getLiveDocs());
if (scorer != null) {
int newDoc = scorer.advance(doc);
if (newDoc == doc) {
float freq = scorer.freq();
SimScorer docScorer = similarity.simScorer(stats, context);
ComplexExplanation result = new ComplexExplanation();
- result.setDescription("weight("+getQuery()+" in "+doc+") [" + similarity.getClass().getSimpleName() + "], result of:");
- Explanation scoreExplanation = docScorer.explain(doc, new Explanation(freq, "termFreq=" + freq));
+ result.setDescription("weight(" + getQuery() + " in " + doc + ") ["
+ + similarity.getClass().getSimpleName() + "], result of:");
+ Explanation scoreExplanation = docScorer.explain(doc,
+ new Explanation(freq, "termFreq=" + freq));
result.addDetail(scoreExplanation);
result.setValue(scoreExplanation.getValue());
result.setMatch(true);
return result;
}
}
- return new ComplexExplanation(false, 0.0f, "no matching term");
+ return new ComplexExplanation(false, 0.0f, "no matching term");
}
}
-
+
/** Constructs a query for the term t. */
public TermQuery(Term t) {
this(t, -1);
}
-
- /** Expert: constructs a TermQuery that will use the
- * provided docFreq instead of looking up the docFreq
- * against the searcher. */
+
+ /**
+ * Expert: constructs a TermQuery that will use the provided docFreq instead
+ * of looking up the docFreq against the searcher.
+ */
public TermQuery(Term t, int docFreq) {
term = t;
this.docFreq = docFreq;
perReaderTermState = null;
}
- /** Expert: constructs a TermQuery that will use the
- * provided docFreq instead of looking up the docFreq
- * against the searcher. */
+ /**
+ * Expert: constructs a TermQuery that will use the provided docFreq instead
+ * of looking up the docFreq against the searcher.
+ */
public TermQuery(Term t, TermContext states) {
assert states != null;
term = t;
docFreq = states.docFreq();
perReaderTermState = states;
}
-
+
/** Returns the term of this query. */
- public Term getTerm() { return term; }
-
+ public Term getTerm() {
+ return term;
+ }
+
@Override
public Weight createWeight(IndexSearcher searcher) throws IOException {
final IndexReaderContext context = searcher.getTopReaderContext();
final TermContext termState;
- if (perReaderTermState == null || perReaderTermState.topReaderContext != context) {
- // make TermQuery single-pass if we don't have a PRTS or if the context differs!
+ if (perReaderTermState == null
+ || perReaderTermState.topReaderContext != context) {
+ // make TermQuery single-pass if we don't have a PRTS or if the context
+ // differs!
termState = TermContext.build(context, term);
} else {
- // PRTS was pre-build for this IS
- termState = this.perReaderTermState;
+ // PRTS was pre-build for this IS
+ termState = this.perReaderTermState;
}
-
+
// we must not ignore the given docFreq - if set use the given value (lie)
- if (docFreq != -1)
- termState.setDocFreq(docFreq);
+ if (docFreq != -1) termState.setDocFreq(docFreq);
return new TermWeight(searcher, termState);
}
-
+
@Override
public void extractTerms(Set<Term> terms) {
terms.add(getTerm());
}
-
+
/** Prints a user-readable version of this query. */
@Override
public String toString(String field) {
@@ -192,21 +206,20 @@ public class TermQuery extends Query {
buffer.append(ToStringUtils.boost(getBoost()));
return buffer.toString();
}
-
+
/** Returns true iff o is equal to this. */
@Override
public boolean equals(Object o) {
- if (!(o instanceof TermQuery))
- return false;
- TermQuery other = (TermQuery)o;
+ if (!(o instanceof TermQuery)) return false;
+ TermQuery other = (TermQuery) o;
return (this.getBoost() == other.getBoost())
- && this.term.equals(other.term);
+ && this.term.equals(other.term);
}
-
- /** Returns a hash code value for this object.*/
+
+ /** Returns a hash code value for this object. */
@Override
public int hashCode() {
return Float.floatToIntBits(getBoost()) ^ term.hashCode();
}
-
+
}
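
TermWeight.scorer now forwards its flags argument straight into termsEnum.postings, so the detail level a consumer requests at the Weight boundary is what the TermScorer's underlying enum actually carries. An end-to-end sketch (assumes IndexSearcher.createNormalizedWeight, present in this era of the API; positionScorer and the class name are invented):

import java.io.IOException;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.Weight;

class TermPositionsSketch {
  static Scorer positionScorer(IndexSearcher searcher, LeafReaderContext leaf, Term term) throws IOException {
    Weight weight = searcher.createNormalizedWeight(new TermQuery(term));
    // The flags travel from here through TermWeight.scorer into termsEnum.postings.
    return weight.scorer(leaf, PostingsEnum.FLAG_POSITIONS, leaf.reader().getLiveDocs());
  }
}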
diff --git a/lucene/core/src/java/org/apache/lucene/search/TermScorer.java b/lucene/core/src/java/org/apache/lucene/search/TermScorer.java
index 6697524..828fc97 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TermScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TermScorer.java
@@ -19,78 +19,109 @@ package org.apache.lucene.search;
import java.io.IOException;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.util.BytesRef;
/** Expert: A Scorer for documents matching a Term.
*/
final class TermScorer extends Scorer {
- private final DocsEnum docsEnum;
+ private final PostingsEnum postingsEnum;
private final Similarity.SimScorer docScorer;
-
+
/**
* Construct a TermScorer.
- *
+ *
* @param weight
* The weight of the Term in the query.
* @param td
* An iterator over the documents matching the Term.
* @param docScorer
- * The Similarity.SimScorer implementation
+ * The Similarity.SimScorer implementation
* to be used for score computations.
*/
- TermScorer(Weight weight, DocsEnum td, Similarity.SimScorer docScorer) {
+ TermScorer(Weight weight, PostingsEnum td, Similarity.SimScorer docScorer) {
super(weight);
this.docScorer = docScorer;
- this.docsEnum = td;
+ this.postingsEnum = td;
}
@Override
public int docID() {
- return docsEnum.docID();
+ return postingsEnum.docID();
}
@Override
public int freq() throws IOException {
- return docsEnum.freq();
+ return postingsEnum.freq();
+ }
+
+ @Override
+ public int nextPosition() throws IOException {
+ return postingsEnum.nextPosition();
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return postingsEnum.startPosition();
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return postingsEnum.endPosition();
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return postingsEnum.startOffset();
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return postingsEnum.endOffset();
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return postingsEnum.getPayload();
}
/**
* Advances to the next document matching the query.
- *
+ *
* @return the document matching the query or NO_MORE_DOCS if there are no more documents.
*/
@Override
public int nextDoc() throws IOException {
- return docsEnum.nextDoc();
+ return postingsEnum.nextDoc();
}
-
+
@Override
public float score() throws IOException {
assert docID() != NO_MORE_DOCS;
- return docScorer.score(docsEnum.docID(), docsEnum.freq());
+ return docScorer.score(postingsEnum.docID(), postingsEnum.freq());
}
/**
* Advances to the first match beyond the current whose document number is
* greater than or equal to a given target.
- * The implementation uses {@link DocsEnum#advance(int)}.
- *
+ * The implementation uses {@link org.apache.lucene.index.PostingsEnum#advance(int)}.
+ *
* @param target
* The target document number.
* @return the matching document or NO_MORE_DOCS if none exist.
*/
@Override
public int advance(int target) throws IOException {
- return docsEnum.advance(target);
+ return postingsEnum.advance(target);
}
-
+
@Override
public long cost() {
- return docsEnum.cost();
+ return postingsEnum.cost();
}
/** Returns a string representation of this TermScorer. */
@Override
- public String toString() { return "scorer(" + weight + ")"; }
+ public String toString() { return "scorer(" + weight + ")[" + super.toString() + "]"; }
}
diff --git a/lucene/core/src/java/org/apache/lucene/search/TimeLimitingCollector.java b/lucene/core/src/java/org/apache/lucene/search/TimeLimitingCollector.java
index e179089..9c72f2c 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TimeLimitingCollector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TimeLimitingCollector.java
@@ -17,12 +17,12 @@ package org.apache.lucene.search;
* limitations under the License.
*/
+import java.io.IOException;
+
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.util.Counter;
import org.apache.lucene.util.ThreadInterruptedException;
-import java.io.IOException;
-
/**
* The {@link TimeLimitingCollector} is used to timeout search requests that
* take longer than the maximum allowed search time limit. After this time is
@@ -156,7 +156,7 @@ public class TimeLimitingCollector implements Collector {
};
}
-
+
/**
* This is so the same timer can be used with a multi-phase search process such as grouping.
* We don't want to create a new TimeLimitingCollector for each phase because that would
diff --git a/lucene/core/src/java/org/apache/lucene/search/TotalHitCountCollector.java b/lucene/core/src/java/org/apache/lucene/search/TotalHitCountCollector.java
index fb06e0a..8d87b4a 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TotalHitCountCollector.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TotalHitCountCollector.java
@@ -17,7 +17,6 @@ package org.apache.lucene.search;
* limitations under the License.
*/
-
/**
* Just counts the total number of hits.
*/
diff --git a/lucene/core/src/java/org/apache/lucene/search/Weight.java b/lucene/core/src/java/org/apache/lucene/search/Weight.java
index 741dc88..70117fa 100644
--- a/lucene/core/src/java/org/apache/lucene/search/Weight.java
+++ b/lucene/core/src/java/org/apache/lucene/search/Weight.java
@@ -19,8 +19,8 @@ package org.apache.lucene.search;
import java.io.IOException;
+import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.IndexReaderContext; // javadocs
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.util.Bits;
@@ -34,7 +34,7 @@ import org.apache.lucene.util.Bits;
* {@link org.apache.lucene.index.LeafReader} dependent state should reside in the {@link Scorer}.
*
* Since {@link Weight} creates {@link Scorer} instances for a given
- * {@link org.apache.lucene.index.LeafReaderContext} ({@link #scorer(org.apache.lucene.index.LeafReaderContext, Bits)})
+ * {@link org.apache.lucene.index.LeafReaderContext} ({@link #scorer(org.apache.lucene.index.LeafReaderContext, int, Bits)})
* callers must maintain the relationship between the searcher's top-level
* {@link IndexReaderContext} and the context used to create a {@link Scorer}.
*
* <li>The query normalization factor is passed to {@link #normalize(float, float)}. At
* this point the weighting is complete.
*
* <li>A <code>Scorer</code> is constructed by
- * {@link #scorer(org.apache.lucene.index.LeafReaderContext, Bits)}.
+ * {@link #scorer(org.apache.lucene.index.LeafReaderContext, int, Bits)}.
*
*
* @since 2.9
@@ -91,7 +91,7 @@ public abstract class Weight {
* @return a {@link Scorer} which scores documents in/out-of order.
* @throws IOException if there is a low-level I/O error
*/
- public abstract Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException;
+ public abstract Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException;
/**
* Optional method, to return a {@link BulkScorer} to
@@ -111,9 +111,9 @@ public abstract class Weight {
* passes them to a collector.
* @throws IOException if there is a low-level I/O error
*/
- public BulkScorer bulkScorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public BulkScorer bulkScorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
- Scorer scorer = scorer(context, acceptDocs);
+ Scorer scorer = scorer(context, flags, acceptDocs);
if (scorer == null) {
// No docs match
return null;
@@ -178,4 +178,5 @@ public abstract class Weight {
}
}
}
+
}
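
Any concrete Weight outside this patch must adopt the widened scorer signature. A minimal sketch of a conforming subclass (MatchNoDocsWeight is invented for illustration; it matches nothing, so it may ignore the flags it receives):

import java.io.IOException;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.ComplexExplanation;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;

class MatchNoDocsWeight extends Weight {
  private final Query query;

  MatchNoDocsWeight(Query query) {
    this.query = query;
  }

  @Override
  public Query getQuery() {
    return query;
  }

  @Override
  public float getValueForNormalization() {
    return 0f;
  }

  @Override
  public void normalize(float queryNorm, float topLevelBoost) {
    // nothing to normalize: this weight never matches
  }

  @Override
  public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
    return null; // a null scorer means no documents match in this segment
  }

  @Override
  public Explanation explain(LeafReaderContext context, int doc) throws IOException {
    return new ComplexExplanation(false, 0.0f, "matches nothing");
  }
}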
diff --git a/lucene/core/src/java/org/apache/lucene/search/package.html b/lucene/core/src/java/org/apache/lucene/search/package.html
index 1635797..c0be57a 100644
--- a/lucene/core/src/java/org/apache/lucene/search/package.html
+++ b/lucene/core/src/java/org/apache/lucene/search/package.html
@@ -436,14 +436,14 @@ on the built-in available scoring models and extending or changing Similarity.
that scores via a {@link org.apache.lucene.search.similarities.Similarity Similarity} will just defer to the Similarity's implementation:
{@link org.apache.lucene.search.similarities.Similarity.SimWeight#normalize SimWeight#normalize(float,float)}.
- {@link org.apache.lucene.search.Weight#scorer(org.apache.lucene.index.LeafReaderContext, org.apache.lucene.util.Bits)
- scorer(LeafReaderContext context, Bits acceptDocs)} &mdash;
+ {@link org.apache.lucene.search.Weight#scorer(org.apache.lucene.index.LeafReaderContext, int, org.apache.lucene.util.Bits)
+ scorer(LeafReaderContext context, int flags, Bits acceptDocs)} &mdash;
Construct a new {@link org.apache.lucene.search.Scorer Scorer} for this Weight. See The Scorer Class
below for help defining a Scorer. As the name implies, the Scorer is responsible for doing the actual scoring of documents
given the Query.
- {@link org.apache.lucene.search.Weight#bulkScorer(org.apache.lucene.index.LeafReaderContext, org.apache.lucene.util.Bits)
+ {@link org.apache.lucene.search.Weight#bulkScorer(org.apache.lucene.index.LeafReaderContext, int, org.apache.lucene.util.Bits)
bulkScorer(LeafReaderContext context, int flags, Bits acceptDocs)} &mdash;
Construct a new {@link org.apache.lucene.search.BulkScorer BulkScorer} for this Weight. See The BulkScorer Class
below for help defining a BulkScorer. This is an optional method, and most queries do not implement it.
diff --git a/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java b/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
index 1be23e6..8899234 100644
--- a/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
@@ -17,11 +17,12 @@ package org.apache.lucene.search.payloads;
* limitations under the License.
*/
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.ComplexExplanation;
import org.apache.lucene.search.Explanation;
-import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.similarities.DefaultSimilarity;
import org.apache.lucene.search.similarities.Similarity;
@@ -148,14 +149,14 @@ public class PayloadNearQuery extends SpanNearQuery {
}
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
return new PayloadNearSpanScorer(query.getSpans(context, acceptDocs, termContexts), this,
similarity, similarity.simScorer(stats, context));
}
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
- PayloadNearSpanScorer scorer = (PayloadNearSpanScorer) scorer(context, context.reader().getLiveDocs());
+ PayloadNearSpanScorer scorer = (PayloadNearSpanScorer) scorer(context, PostingsEnum.FLAG_PAYLOADS, context.reader().getLiveDocs());
if (scorer != null) {
int newDoc = scorer.advance(doc);
if (newDoc == doc) {
diff --git a/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java b/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java
index 6afbdf2..f831e6c 100644
--- a/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java
@@ -19,7 +19,7 @@ package org.apache.lucene.search.payloads;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
@@ -79,7 +79,7 @@ public class PayloadTermQuery extends SpanTermQuery {
}
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
return new PayloadTermSpanScorer((TermSpans) query.getSpans(context, acceptDocs, termContexts),
this, similarity.simScorer(stats, context));
}
@@ -120,7 +120,7 @@ public class PayloadTermQuery extends SpanTermQuery {
protected void processPayload(Similarity similarity) throws IOException {
if (termSpans.isPayloadAvailable()) {
- final DocsAndPositionsEnum postings = termSpans.getPostings();
+ final PostingsEnum postings = termSpans.getPostings();
payload = postings.getPayload();
if (payload != null) {
payloadScore = function.currentScore(doc, term.field(),
@@ -176,7 +176,7 @@ public class PayloadTermQuery extends SpanTermQuery {
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
- PayloadTermSpanScorer scorer = (PayloadTermSpanScorer) scorer(context, context.reader().getLiveDocs());
+ PayloadTermSpanScorer scorer = (PayloadTermSpanScorer) scorer(context, PostingsEnum.FLAG_POSITIONS, context.reader().getLiveDocs());
if (scorer != null) {
int newDoc = scorer.advance(doc);
if (newDoc == doc) {
diff --git a/lucene/core/src/java/org/apache/lucene/search/similarities/Similarity.java b/lucene/core/src/java/org/apache/lucene/search/similarities/Similarity.java
index ea45f69..beb7b90 100644
--- a/lucene/core/src/java/org/apache/lucene/search/similarities/Similarity.java
+++ b/lucene/core/src/java/org/apache/lucene/search/similarities/Similarity.java
@@ -17,10 +17,8 @@ package org.apache.lucene.search.similarities;
* limitations under the License.
*/
-import java.io.IOException;
-
-import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.FieldInvertState;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.CollectionStatistics;
import org.apache.lucene.search.Explanation;
@@ -28,9 +26,11 @@ import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TermStatistics;
-import org.apache.lucene.search.spans.SpanQuery; // javadoc
+import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.SmallFloat; // javadoc
+import org.apache.lucene.util.SmallFloat;
+
+import java.io.IOException;
/**
* Similarity defines the components of Lucene scoring.
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanScorer.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanScorer.java
index 74a098d..416ba5c 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanScorer.java
@@ -19,9 +19,10 @@ package org.apache.lucene.search.spans;
import java.io.IOException;
-import org.apache.lucene.search.Weight;
import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
import org.apache.lucene.search.similarities.Similarity;
+import org.apache.lucene.util.BytesRef;
/**
* Public for extension only.
@@ -96,16 +97,47 @@ public class SpanScorer extends Scorer {
public int freq() throws IOException {
return numMatches;
}
-
+
+ @Override
+ public int nextPosition() throws IOException {
+ throw new UnsupportedOperationException("SpanQueries do not support nextPosition() iteration");
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
/** Returns the intermediate "sloppy freq" adjusted for edit distance
* @lucene.internal */
// only public so .payloads can see it.
public float sloppyFreq() throws IOException {
return freq;
}
-
+
@Override
public long cost() {
return spans.cost();
}
+
}
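
The patch leaves two stub conventions side by side: most non-positional scorers return -1/null from the new PostingsEnum methods, while SpanScorer throws UnsupportedOperationException. A defensive consumer has to allow for both (sketch; nextPositionOrSentinel and the class name are invented):

import java.io.IOException;

import org.apache.lucene.search.Scorer;

class SafePositionsSketch {
  static int nextPositionOrSentinel(Scorer scorer) throws IOException {
    try {
      return scorer.nextPosition(); // -1 already means "no positions" for most scorers
    } catch (UnsupportedOperationException e) {
      return -1; // span scorers signal lack of support by throwing
    }
  }
}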
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanTermQuery.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanTermQuery.java
index c6dab4e..a8b7c5f 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanTermQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanTermQuery.java
@@ -17,10 +17,13 @@ package org.apache.lucene.search.spans;
* limitations under the License.
*/
+import java.io.IOException;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.Fields;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.index.TermState;
import org.apache.lucene.index.Terms;
@@ -28,10 +31,6 @@ import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.ToStringUtils;
-import java.io.IOException;
-import java.util.Map;
-import java.util.Set;
-
/** Matches spans containing a term. */
public class SpanTermQuery extends SpanQuery {
protected Term term;
@@ -115,7 +114,7 @@ public class SpanTermQuery extends SpanQuery {
final TermsEnum termsEnum = context.reader().terms(term.field()).iterator(null);
termsEnum.seekExact(term.bytes(), state);
- final DocsAndPositionsEnum postings = termsEnum.docsAndPositions(acceptDocs, null, DocsAndPositionsEnum.FLAG_PAYLOADS);
+ final PostingsEnum postings = termsEnum.postings(acceptDocs, null, PostingsEnum.FLAG_PAYLOADS);
if (postings != null) {
return new TermSpans(postings, term);
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java b/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java
index 0e06343..5488649 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/SpanWeight.java
@@ -17,11 +17,18 @@ package org.apache.lucene.search.spans;
* limitations under the License.
*/
-import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReaderContext;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
-import org.apache.lucene.search.*;
+import org.apache.lucene.search.ComplexExplanation;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.TermStatistics;
+import org.apache.lucene.search.Weight;
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.search.similarities.Similarity.SimScorer;
import org.apache.lucene.util.Bits;
@@ -81,7 +88,7 @@ public class SpanWeight extends Weight {
}
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
if (stats == null) {
return null;
} else {
@@ -91,7 +98,7 @@ public class SpanWeight extends Weight {
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
- SpanScorer scorer = (SpanScorer) scorer(context, context.reader().getLiveDocs());
+ SpanScorer scorer = (SpanScorer) scorer(context, PostingsEnum.FLAG_POSITIONS, context.reader().getLiveDocs());
if (scorer != null) {
int newDoc = scorer.advance(doc);
if (newDoc == doc) {
diff --git a/lucene/core/src/java/org/apache/lucene/search/spans/TermSpans.java b/lucene/core/src/java/org/apache/lucene/search/spans/TermSpans.java
index d4974a5..bca88de 100644
--- a/lucene/core/src/java/org/apache/lucene/search/spans/TermSpans.java
+++ b/lucene/core/src/java/org/apache/lucene/search/spans/TermSpans.java
@@ -17,7 +17,7 @@ package org.apache.lucene.search.spans;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.BytesRef;
@@ -30,7 +30,7 @@ import java.util.Collection;
* Public for extension only
*/
public class TermSpans extends Spans {
- protected final DocsAndPositionsEnum postings;
+ protected final PostingsEnum postings;
protected final Term term;
protected int doc;
protected int freq;
@@ -38,7 +38,7 @@ public class TermSpans extends Spans {
protected int position;
protected boolean readPayload;
- public TermSpans(DocsAndPositionsEnum postings, Term term) {
+ public TermSpans(PostingsEnum postings, Term term) {
this.postings = postings;
this.term = term;
doc = -1;
@@ -132,7 +132,7 @@ public class TermSpans extends Spans {
(doc == -1 ? "START" : (doc == Integer.MAX_VALUE) ? "END" : doc + "-" + position);
}
- public DocsAndPositionsEnum getPostings() {
+ public PostingsEnum getPostings() {
return postings;
}
diff --git a/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java b/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java
index 4747557..c276f9f 100644
--- a/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java
+++ b/lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java
@@ -22,11 +22,28 @@ import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.Random;
-import org.apache.lucene.store.*;
-import org.apache.lucene.document.*;
-import org.apache.lucene.analysis.*;
-import org.apache.lucene.index.*;
-import org.apache.lucene.search.*;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.MergePolicy;
+import org.apache.lucene.index.StoredDocument;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
public class TestSearchForDuplicates extends LuceneTestCase {
diff --git a/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java b/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
index a4310c4..10619a3 100644
--- a/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
+++ b/lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java
@@ -25,9 +25,9 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.Directory;
@@ -84,7 +84,7 @@ public class TestCachingTokenFilter extends BaseTokenStreamTestCase {
writer.addDocument(doc);
IndexReader reader = writer.getReader();
- DocsAndPositionsEnum termPositions = MultiFields.getTermPositionsEnum(reader,
+ PostingsEnum termPositions = MultiFields.getTermPositionsEnum(reader,
MultiFields.getLiveDocs(reader),
"preanalyzed",
new BytesRef("term1"));
diff --git a/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java b/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java
index 485ea3f..aaa46ff 100644
--- a/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java
+++ b/lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java
@@ -25,7 +25,7 @@ import java.util.Random;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.LeafReader;
@@ -321,7 +321,7 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase {
final Terms terms = fields.terms("f");
final TermsEnum te = terms.iterator(null);
assertEquals(new BytesRef("a"), te.next());
- final DocsAndPositionsEnum dpe = te.docsAndPositions(null, null);
+ final PostingsEnum dpe = te.postings(null, null, PostingsEnum.FLAG_ALL);
assertEquals(0, dpe.nextDoc());
assertEquals(2, dpe.freq());
assertEquals(0, dpe.nextPosition());
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/asserting/TestAssertingDocValuesFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/asserting/TestAssertingDocValuesFormat.java
index c15ae9b..b6a2c61 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/asserting/TestAssertingDocValuesFormat.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/asserting/TestAssertingDocValuesFormat.java
@@ -27,5 +27,10 @@ public class TestAssertingDocValuesFormat extends BasePostingsFormatTestCase {
@Override
protected Codec getCodec() {
return codec;
- }
+ }
+
+ @Override
+ protected boolean isPostingsEnumReuseImplemented() {
+ return false;
+ }
}
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/asserting/TestAssertingPostingsFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/asserting/TestAssertingPostingsFormat.java
index 05879b4..a68d147 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/asserting/TestAssertingPostingsFormat.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/asserting/TestAssertingPostingsFormat.java
@@ -17,15 +17,22 @@ package org.apache.lucene.codecs.asserting;
* limitations under the License.
*/
+import com.carrotsearch.randomizedtesting.annotations.Seed;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.index.BasePostingsFormatTestCase;
/** Test AssertingPostingsFormat directly */
+@Seed("42C6785A63562842")
public class TestAssertingPostingsFormat extends BasePostingsFormatTestCase {
private final Codec codec = new AssertingCodec();
@Override
protected Codec getCodec() {
return codec;
- }
+ }
+
+ @Override
+ protected boolean isPostingsEnumReuseImplemented() {
+ return false;
+ }
}
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat3.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat3.java
index 195746c..ed1dc0a 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat3.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestBlockPostingsFormat3.java
@@ -33,8 +33,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
@@ -284,93 +283,93 @@ public class TestBlockPostingsFormat3 extends LuceneTestCase {
public void assertTermsEnum(TermsEnum leftTermsEnum, TermsEnum rightTermsEnum, boolean deep) throws Exception {
BytesRef term;
Bits randomBits = new RandomBits(MAXDOC, random().nextDouble(), random());
- DocsAndPositionsEnum leftPositions = null;
- DocsAndPositionsEnum rightPositions = null;
- DocsEnum leftDocs = null;
- DocsEnum rightDocs = null;
+ PostingsEnum leftPositions = null;
+ PostingsEnum rightPositions = null;
+ PostingsEnum leftDocs = null;
+ PostingsEnum rightDocs = null;
while ((term = leftTermsEnum.next()) != null) {
assertEquals(term, rightTermsEnum.next());
assertTermStats(leftTermsEnum, rightTermsEnum);
if (deep) {
// with payloads + off
- assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions),
- rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions));
- assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions),
- rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions));
+ assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.postings(null, leftPositions, PostingsEnum.FLAG_ALL),
+ rightPositions = rightTermsEnum.postings(null, rightPositions, PostingsEnum.FLAG_ALL));
+ assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.postings(randomBits, leftPositions, PostingsEnum.FLAG_ALL),
+ rightPositions = rightTermsEnum.postings(randomBits, rightPositions, PostingsEnum.FLAG_ALL));
assertPositionsSkipping(leftTermsEnum.docFreq(),
- leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions),
- rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions));
+ leftPositions = leftTermsEnum.postings(null, leftPositions, PostingsEnum.FLAG_ALL),
+ rightPositions = rightTermsEnum.postings(null, rightPositions, PostingsEnum.FLAG_ALL));
assertPositionsSkipping(leftTermsEnum.docFreq(),
- leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions),
- rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions));
+ leftPositions = leftTermsEnum.postings(randomBits, leftPositions, PostingsEnum.FLAG_ALL),
+ rightPositions = rightTermsEnum.postings(randomBits, rightPositions, PostingsEnum.FLAG_ALL));
// with payloads only
- assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions, DocsAndPositionsEnum.FLAG_PAYLOADS),
- rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions, DocsAndPositionsEnum.FLAG_PAYLOADS));
- assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions, DocsAndPositionsEnum.FLAG_PAYLOADS),
- rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions, DocsAndPositionsEnum.FLAG_PAYLOADS));
+ assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.postings(null, leftPositions, PostingsEnum.FLAG_PAYLOADS),
+ rightPositions = rightTermsEnum.postings(null, rightPositions, PostingsEnum.FLAG_PAYLOADS));
+ assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.postings(randomBits, leftPositions, PostingsEnum.FLAG_PAYLOADS),
+ rightPositions = rightTermsEnum.postings(randomBits, rightPositions, PostingsEnum.FLAG_PAYLOADS));
assertPositionsSkipping(leftTermsEnum.docFreq(),
- leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions, DocsAndPositionsEnum.FLAG_PAYLOADS),
- rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions, DocsAndPositionsEnum.FLAG_PAYLOADS));
+ leftPositions = leftTermsEnum.postings(null, leftPositions, PostingsEnum.FLAG_PAYLOADS),
+ rightPositions = rightTermsEnum.postings(null, rightPositions, PostingsEnum.FLAG_PAYLOADS));
assertPositionsSkipping(leftTermsEnum.docFreq(),
- leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions, DocsAndPositionsEnum.FLAG_PAYLOADS),
- rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions, DocsAndPositionsEnum.FLAG_PAYLOADS));
+ leftPositions = leftTermsEnum.postings(randomBits, leftPositions, PostingsEnum.FLAG_PAYLOADS),
+ rightPositions = rightTermsEnum.postings(randomBits, rightPositions, PostingsEnum.FLAG_PAYLOADS));
// with offsets only
- assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions, DocsAndPositionsEnum.FLAG_OFFSETS),
- rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions, DocsAndPositionsEnum.FLAG_OFFSETS));
- assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions, DocsAndPositionsEnum.FLAG_OFFSETS),
- rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions, DocsAndPositionsEnum.FLAG_OFFSETS));
+ assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.postings(null, leftPositions, PostingsEnum.FLAG_OFFSETS),
+ rightPositions = rightTermsEnum.postings(null, rightPositions, PostingsEnum.FLAG_OFFSETS));
+ assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.postings(randomBits, leftPositions, PostingsEnum.FLAG_OFFSETS),
+ rightPositions = rightTermsEnum.postings(randomBits, rightPositions, PostingsEnum.FLAG_OFFSETS));
assertPositionsSkipping(leftTermsEnum.docFreq(),
- leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions, DocsAndPositionsEnum.FLAG_OFFSETS),
- rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions, DocsAndPositionsEnum.FLAG_OFFSETS));
+ leftPositions = leftTermsEnum.postings(null, leftPositions, PostingsEnum.FLAG_OFFSETS),
+ rightPositions = rightTermsEnum.postings(null, rightPositions, PostingsEnum.FLAG_OFFSETS));
assertPositionsSkipping(leftTermsEnum.docFreq(),
- leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions, DocsAndPositionsEnum.FLAG_OFFSETS),
- rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions, DocsAndPositionsEnum.FLAG_OFFSETS));
+ leftPositions = leftTermsEnum.postings(randomBits, leftPositions, PostingsEnum.FLAG_OFFSETS),
+ rightPositions = rightTermsEnum.postings(randomBits, rightPositions, PostingsEnum.FLAG_OFFSETS));
// with positions only
- assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions, DocsEnum.FLAG_NONE),
- rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions, DocsEnum.FLAG_NONE));
- assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions, DocsEnum.FLAG_NONE),
- rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions, DocsEnum.FLAG_NONE));
+ assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.postings(null, leftPositions, PostingsEnum.FLAG_POSITIONS),
+ rightPositions = rightTermsEnum.postings(null, rightPositions, PostingsEnum.FLAG_POSITIONS));
+ assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.postings(randomBits, leftPositions, PostingsEnum.FLAG_POSITIONS),
+ rightPositions = rightTermsEnum.postings(randomBits, rightPositions, PostingsEnum.FLAG_POSITIONS));
assertPositionsSkipping(leftTermsEnum.docFreq(),
- leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions, DocsEnum.FLAG_NONE),
- rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions, DocsEnum.FLAG_NONE));
+ leftPositions = leftTermsEnum.postings(null, leftPositions, PostingsEnum.FLAG_POSITIONS),
+ rightPositions = rightTermsEnum.postings(null, rightPositions, PostingsEnum.FLAG_POSITIONS));
assertPositionsSkipping(leftTermsEnum.docFreq(),
- leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions, DocsEnum.FLAG_NONE),
- rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions, DocsEnum.FLAG_NONE));
+ leftPositions = leftTermsEnum.postings(randomBits, leftPositions, PostingsEnum.FLAG_POSITIONS),
+ rightPositions = rightTermsEnum.postings(randomBits, rightPositions, PostingsEnum.FLAG_POSITIONS));
// with freqs:
- assertDocsEnum(leftDocs = leftTermsEnum.docs(null, leftDocs),
- rightDocs = rightTermsEnum.docs(null, rightDocs));
- assertDocsEnum(leftDocs = leftTermsEnum.docs(randomBits, leftDocs),
- rightDocs = rightTermsEnum.docs(randomBits, rightDocs));
+ assertDocsEnum(leftDocs = leftTermsEnum.postings(null, leftDocs),
+ rightDocs = rightTermsEnum.postings(null, rightDocs));
+ assertDocsEnum(leftDocs = leftTermsEnum.postings(randomBits, leftDocs),
+ rightDocs = rightTermsEnum.postings(randomBits, rightDocs));
// w/o freqs:
- assertDocsEnum(leftDocs = leftTermsEnum.docs(null, leftDocs, DocsEnum.FLAG_NONE),
- rightDocs = rightTermsEnum.docs(null, rightDocs, DocsEnum.FLAG_NONE));
- assertDocsEnum(leftDocs = leftTermsEnum.docs(randomBits, leftDocs, DocsEnum.FLAG_NONE),
- rightDocs = rightTermsEnum.docs(randomBits, rightDocs, DocsEnum.FLAG_NONE));
+ assertDocsEnum(leftDocs = leftTermsEnum.postings(null, leftDocs, PostingsEnum.FLAG_NONE),
+ rightDocs = rightTermsEnum.postings(null, rightDocs, PostingsEnum.FLAG_NONE));
+ assertDocsEnum(leftDocs = leftTermsEnum.postings(randomBits, leftDocs, PostingsEnum.FLAG_NONE),
+ rightDocs = rightTermsEnum.postings(randomBits, rightDocs, PostingsEnum.FLAG_NONE));
// with freqs:
assertDocsSkipping(leftTermsEnum.docFreq(),
- leftDocs = leftTermsEnum.docs(null, leftDocs),
- rightDocs = rightTermsEnum.docs(null, rightDocs));
+ leftDocs = leftTermsEnum.postings(null, leftDocs),
+ rightDocs = rightTermsEnum.postings(null, rightDocs));
assertDocsSkipping(leftTermsEnum.docFreq(),
- leftDocs = leftTermsEnum.docs(randomBits, leftDocs),
- rightDocs = rightTermsEnum.docs(randomBits, rightDocs));
+ leftDocs = leftTermsEnum.postings(randomBits, leftDocs),
+ rightDocs = rightTermsEnum.postings(randomBits, rightDocs));
// w/o freqs:
assertDocsSkipping(leftTermsEnum.docFreq(),
- leftDocs = leftTermsEnum.docs(null, leftDocs, DocsEnum.FLAG_NONE),
- rightDocs = rightTermsEnum.docs(null, rightDocs, DocsEnum.FLAG_NONE));
+ leftDocs = leftTermsEnum.postings(null, leftDocs, PostingsEnum.FLAG_NONE),
+ rightDocs = rightTermsEnum.postings(null, rightDocs, PostingsEnum.FLAG_NONE));
assertDocsSkipping(leftTermsEnum.docFreq(),
- leftDocs = leftTermsEnum.docs(randomBits, leftDocs, DocsEnum.FLAG_NONE),
- rightDocs = rightTermsEnum.docs(randomBits, rightDocs, DocsEnum.FLAG_NONE));
+ leftDocs = leftTermsEnum.postings(randomBits, leftDocs, PostingsEnum.FLAG_NONE),
+ rightDocs = rightTermsEnum.postings(randomBits, rightDocs, PostingsEnum.FLAG_NONE));
}
}
assertNull(rightTermsEnum.next());
@@ -389,7 +388,7 @@ public class TestBlockPostingsFormat3 extends LuceneTestCase {
/**
* checks docs + freqs + positions + payloads, sequentially
*/
- public void assertDocsAndPositionsEnum(DocsAndPositionsEnum leftDocs, DocsAndPositionsEnum rightDocs) throws Exception {
+ public void assertDocsAndPositionsEnum(PostingsEnum leftDocs, PostingsEnum rightDocs) throws Exception {
if (leftDocs == null || rightDocs == null) {
assertNull(leftDocs);
assertNull(rightDocs);
@@ -413,7 +412,7 @@ public class TestBlockPostingsFormat3 extends LuceneTestCase {
/**
* checks docs + freqs, sequentially
*/
- public void assertDocsEnum(DocsEnum leftDocs, DocsEnum rightDocs) throws Exception {
+ public void assertDocsEnum(PostingsEnum leftDocs, PostingsEnum rightDocs) throws Exception {
if (leftDocs == null) {
assertNull(rightDocs);
return;
@@ -431,7 +430,7 @@ public class TestBlockPostingsFormat3 extends LuceneTestCase {
/**
* checks advancing docs
*/
- public void assertDocsSkipping(int docFreq, DocsEnum leftDocs, DocsEnum rightDocs) throws Exception {
+ public void assertDocsSkipping(int docFreq, PostingsEnum leftDocs, PostingsEnum rightDocs) throws Exception {
if (leftDocs == null) {
assertNull(rightDocs);
return;
@@ -462,7 +461,7 @@ public class TestBlockPostingsFormat3 extends LuceneTestCase {
/**
* checks advancing docs + positions
*/
- public void assertPositionsSkipping(int docFreq, DocsAndPositionsEnum leftDocs, DocsAndPositionsEnum rightDocs) throws Exception {
+ public void assertPositionsSkipping(int docFreq, PostingsEnum leftDocs, PostingsEnum rightDocs) throws Exception {
if (leftDocs == null || rightDocs == null) {
assertNull(leftDocs);
assertNull(rightDocs);
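
The rewrite in this test is mechanical once the flag ladder is understood: the single postings() entry point subsumes docs() and docsAndPositions(), and the flag states how much per-document detail the enum must expose. A sketch of each level, using only the constants that appear in this patch; positional requests may come back null when the field was not indexed with positions:

import java.io.IOException;

import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.TermsEnum;

class FlagLadderSketch {
  // One entry point, six detail levels; each enum exposes at least what
  // its flag asks for.
  static void requestEveryLevel(TermsEnum te) throws IOException {
    PostingsEnum docsOnly   = te.postings(null, null, PostingsEnum.FLAG_NONE);      // doc IDs only
    PostingsEnum withFreqs  = te.postings(null, null, PostingsEnum.FLAG_FREQS);     // + freq()
    PostingsEnum positions  = te.postings(null, null, PostingsEnum.FLAG_POSITIONS); // + nextPosition()
    PostingsEnum offsets    = te.postings(null, null, PostingsEnum.FLAG_OFFSETS);   // + startOffset()/endOffset()
    PostingsEnum payloads   = te.postings(null, null, PostingsEnum.FLAG_PAYLOADS);  // + getPayload()
    PostingsEnum everything = te.postings(null, null, PostingsEnum.FLAG_ALL);       // all of the above
  }
}
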
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50DocValuesFormat.java b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50DocValuesFormat.java
index e9b5620..87d88cf 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50DocValuesFormat.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/lucene50/TestLucene50DocValuesFormat.java
@@ -30,11 +30,11 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.document.StringField;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.BaseCompressingDocValuesFormatTestCase;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.SerialMergeScheduler;
import org.apache.lucene.index.Term;
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java b/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java
index 3b19087..5be8c78 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java
@@ -16,6 +16,7 @@ package org.apache.lucene.codecs.perfield;
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
import java.io.IOException;
import org.apache.lucene.analysis.MockAnalyzer;
@@ -265,7 +266,7 @@ public class TestPerFieldPostingsFormat2 extends LuceneTestCase {
}
dir.close();
}
-
+
public void testSameCodecDifferentInstance() throws Exception {
Codec codec = new AssertingCodec() {
@Override
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java b/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
index 43caaae..9d4313c 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
@@ -538,9 +538,9 @@ public class TestAddIndexes extends LuceneTestCase {
private void verifyTermDocs(Directory dir, Term term, int numDocs)
throws IOException {
IndexReader reader = DirectoryReader.open(dir);
- DocsEnum docsEnum = TestUtil.docs(random(), reader, term.field, term.bytes, null, null, DocsEnum.FLAG_NONE);
+ PostingsEnum postingsEnum = TestUtil.docs(random(), reader, term.field, term.bytes, null, null, PostingsEnum.FLAG_NONE);
int count = 0;
- while (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS)
+ while (postingsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS)
count++;
assertEquals(numDocs, count);
reader.close();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java b/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
index 5ae4d89..e723ba1 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
@@ -234,7 +234,7 @@ public class TestCodecs extends LuceneTestCase {
final TermsEnum termsEnum = terms2.iterator(null);
- DocsEnum docsEnum = null;
+ PostingsEnum postingsEnum = null;
          if (next >= maxDoc) {
assertEquals(DocIdSetIterator.NO_MORE_DOCS, advancedTo);
} else {
assertTrue("advanced to: " +advancedTo + " but should be <= " + next, next >= advancedTo);
}
} else {
- docsEnum.nextDoc();
+ postingsEnum.nextDoc();
}
}
}
- assertEquals("docBase: " + context.docBase + " maxDoc: " + maxDoc + " " + docsEnum.getClass(), DocIdSetIterator.NO_MORE_DOCS, docsEnum.docID());
+ assertEquals("docBase: " + context.docBase + " maxDoc: " + maxDoc + " " + postingsEnum.getClass(), DocIdSetIterator.NO_MORE_DOCS, postingsEnum.docID());
}
}
@@ -303,7 +303,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
IndexReaderContext topReaderContext = reader.getContext();
for (LeafReaderContext leafReaderContext : topReaderContext.leaves()) {
- DocsAndPositionsEnum docsAndPosEnum = getDocsAndPositions(
+ PostingsEnum docsAndPosEnum = getDocsAndPositions(
leafReaderContext.reader(), bytes, null);
assertNotNull(docsAndPosEnum);
@@ -336,7 +336,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
writer.addDocument(doc);
DirectoryReader reader = writer.getReader();
LeafReader r = getOnlySegmentReader(reader);
- DocsEnum disi = TestUtil.docs(random(), r, "foo", new BytesRef("bar"), null, null, DocsEnum.FLAG_NONE);
+ PostingsEnum disi = TestUtil.docs(random(), r, "foo", new BytesRef("bar"), null, null, PostingsEnum.FLAG_NONE);
int docid = disi.docID();
assertEquals(-1, docid);
assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -344,7 +344,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
// now reuse and check again
TermsEnum te = r.terms("foo").iterator(null);
assertTrue(te.seekExact(new BytesRef("bar")));
- disi = TestUtil.docs(random(), te, null, disi, DocsEnum.FLAG_NONE);
+ disi = TestUtil.docs(random(), te, null, disi, PostingsEnum.FLAG_NONE);
docid = disi.docID();
assertEquals(-1, docid);
assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -361,7 +361,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
writer.addDocument(doc);
DirectoryReader reader = writer.getReader();
LeafReader r = getOnlySegmentReader(reader);
- DocsAndPositionsEnum disi = r.termPositionsEnum(new Term("foo", "bar"));
+ PostingsEnum disi = r.termDocsEnum(new Term("foo", "bar"), PostingsEnum.FLAG_ALL);
int docid = disi.docID();
assertEquals(-1, docid);
assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -369,7 +369,7 @@ public class TestDocsAndPositions extends LuceneTestCase {
// now reuse and check again
TermsEnum te = r.terms("foo").iterator(null);
assertTrue(te.seekExact(new BytesRef("bar")));
- disi = te.docsAndPositions(null, disi);
+ disi = te.postings(null, disi, PostingsEnum.FLAG_ALL);
docid = disi.docID();
assertEquals(-1, docid);
assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java
index 37dc798..8f55775 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java
@@ -125,7 +125,7 @@ public class TestDocumentWriter extends LuceneTestCase {
writer.close();
SegmentReader reader = new SegmentReader(info, newIOContext(random()));
- DocsAndPositionsEnum termPositions = MultiFields.getTermPositionsEnum(reader, MultiFields.getLiveDocs(reader),
+ PostingsEnum termPositions = MultiFields.getTermPositionsEnum(reader, MultiFields.getLiveDocs(reader),
"repeated", new BytesRef("repeated"));
assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
int freq = termPositions.freq();
@@ -197,7 +197,7 @@ public class TestDocumentWriter extends LuceneTestCase {
writer.close();
SegmentReader reader = new SegmentReader(info, newIOContext(random()));
- DocsAndPositionsEnum termPositions = MultiFields.getTermPositionsEnum(reader, reader.getLiveDocs(), "f1", new BytesRef("a"));
+ PostingsEnum termPositions = MultiFields.getTermPositionsEnum(reader, reader.getLiveDocs(), "f1", new BytesRef("a"));
assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
int freq = termPositions.freq();
assertEquals(3, freq);
@@ -239,18 +239,18 @@ public class TestDocumentWriter extends LuceneTestCase {
writer.close();
SegmentReader reader = new SegmentReader(info, newIOContext(random()));
- DocsAndPositionsEnum termPositions = reader.termPositionsEnum(new Term("preanalyzed", "term1"));
+ PostingsEnum termPositions = reader.termDocsEnum(new Term("preanalyzed", "term1"), PostingsEnum.FLAG_ALL);
assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(1, termPositions.freq());
assertEquals(0, termPositions.nextPosition());
- termPositions = reader.termPositionsEnum(new Term("preanalyzed", "term2"));
+ termPositions = reader.termDocsEnum(new Term("preanalyzed", "term2"), PostingsEnum.FLAG_ALL);
assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(2, termPositions.freq());
assertEquals(1, termPositions.nextPosition());
assertEquals(3, termPositions.nextPosition());
- termPositions = reader.termPositionsEnum(new Term("preanalyzed", "term3"));
+ termPositions = reader.termDocsEnum(new Term("preanalyzed", "term3"), PostingsEnum.FLAG_ALL);
assertTrue(termPositions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(1, termPositions.freq());
assertEquals(2, termPositions.nextPosition());
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestDuelingCodecs.java b/lucene/core/src/test/org/apache/lucene/index/TestDuelingCodecs.java
index 521dd90..5535b11 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestDuelingCodecs.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestDuelingCodecs.java
@@ -17,9 +17,6 @@ package org.apache.lucene.index;
* limitations under the License.
*/
-import java.io.IOException;
-import java.util.Random;
-
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.document.Document;
@@ -33,6 +30,9 @@ import org.apache.lucene.util.LineFileDocs;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
+import java.io.IOException;
+import java.util.Random;
+
/**
* Compares one codec against another
*/
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestFilterLeafReader.java b/lucene/core/src/test/org/apache/lucene/index/TestFilterLeafReader.java
index 1dc07e5..d137d9b 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestFilterLeafReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestFilterLeafReader.java
@@ -76,14 +76,14 @@ public class TestFilterLeafReader extends LuceneTestCase {
}
@Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- return new TestPositions(super.docsAndPositions(liveDocs, reuse == null ? null : ((FilterDocsAndPositionsEnum) reuse).in, flags));
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
+ return new TestPositions(super.postings(liveDocs, reuse == null ? null : ((FilterDocsEnum) reuse).in, flags));
}
}
/** Filter that only returns odd numbered documents. */
- private static class TestPositions extends FilterDocsAndPositionsEnum {
- public TestPositions(DocsAndPositionsEnum in) {
+ private static class TestPositions extends FilterDocsEnum {
+ public TestPositions(PostingsEnum in) {
super(in);
}
@@ -151,7 +151,7 @@ public class TestFilterLeafReader extends LuceneTestCase {
assertEquals(TermsEnum.SeekStatus.FOUND, terms.seekCeil(new BytesRef("one")));
- DocsAndPositionsEnum positions = terms.docsAndPositions(MultiFields.getLiveDocs(reader), null);
+ PostingsEnum positions = terms.postings(MultiFields.getLiveDocs(reader), null, PostingsEnum.FLAG_ALL);
while (positions.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
assertTrue((positions.docID() % 2) == 1);
}
@@ -189,7 +189,6 @@ public class TestFilterLeafReader extends LuceneTestCase {
checkOverrideMethods(FilterLeafReader.FilterTerms.class);
checkOverrideMethods(FilterLeafReader.FilterTermsEnum.class);
checkOverrideMethods(FilterLeafReader.FilterDocsEnum.class);
- checkOverrideMethods(FilterLeafReader.FilterDocsAndPositionsEnum.class);
}
public void testUnwrap() throws IOException {
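
A side effect worth noting: with DocsAndPositionsEnum folded into PostingsEnum, FilterLeafReader keeps a single FilterDocsEnum wrapper, which now delegates the positional methods as well. A sketch of such a wrapper, in the spirit of the test's TestPositions but purely illustrative:

import java.io.IOException;

import org.apache.lucene.index.FilterLeafReader;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.search.DocIdSetIterator;

// Hypothetical filter: exposes only odd-numbered documents and passes
// every other call (freq, positions, payloads) through to the wrapped enum.
class OddDocsEnum extends FilterLeafReader.FilterDocsEnum {
  OddDocsEnum(PostingsEnum in) {
    super(in);
  }

  @Override
  public int nextDoc() throws IOException {
    int doc;
    do {
      doc = in.nextDoc();
    } while (doc != DocIdSetIterator.NO_MORE_DOCS && (doc % 2) == 0);
    return doc;
  }
}
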
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
index 1ba6355..7a222c4 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
@@ -502,12 +502,12 @@ public class TestIndexWriter extends LuceneTestCase {
assertEquals(1, reader.numDocs());
Term t = new Term("field", "a");
assertEquals(1, reader.docFreq(t));
- DocsEnum td = TestUtil.docs(random(), reader,
+ PostingsEnum td = TestUtil.docs(random(), reader,
"field",
new BytesRef("a"),
MultiFields.getLiveDocs(reader),
null,
- DocsEnum.FLAG_FREQS);
+ PostingsEnum.FLAG_FREQS);
td.nextDoc();
assertEquals(128*1024, td.freq());
reader.close();
@@ -833,14 +833,14 @@ public class TestIndexWriter extends LuceneTestCase {
Terms tpv = r.getTermVectors(0).terms("field");
TermsEnum termsEnum = tpv.iterator(null);
assertNotNull(termsEnum.next());
- DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
+ PostingsEnum dpEnum = termsEnum.postings(null, null, PostingsEnum.FLAG_ALL);
assertNotNull(dpEnum);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(1, dpEnum.freq());
assertEquals(100, dpEnum.nextPosition());
assertNotNull(termsEnum.next());
- dpEnum = termsEnum.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum.postings(null, dpEnum, PostingsEnum.FLAG_ALL);
assertNotNull(dpEnum);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(1, dpEnum.freq());
@@ -1239,12 +1239,12 @@ public class TestIndexWriter extends LuceneTestCase {
// test that the terms were indexed.
- assertTrue(TestUtil.docs(random(), ir, "binary", new BytesRef("doc1field1"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
- assertTrue(TestUtil.docs(random(), ir, "binary", new BytesRef("doc2field1"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
- assertTrue(TestUtil.docs(random(), ir, "binary", new BytesRef("doc3field1"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
- assertTrue(TestUtil.docs(random(), ir, "string", new BytesRef("doc1field2"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
- assertTrue(TestUtil.docs(random(), ir, "string", new BytesRef("doc2field2"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
- assertTrue(TestUtil.docs(random(), ir, "string", new BytesRef("doc3field2"), null, null, DocsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+ assertTrue(TestUtil.docs(random(), ir, "binary", new BytesRef("doc1field1"), null, null, PostingsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+ assertTrue(TestUtil.docs(random(), ir, "binary", new BytesRef("doc2field1"), null, null, PostingsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+ assertTrue(TestUtil.docs(random(), ir, "binary", new BytesRef("doc3field1"), null, null, PostingsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+ assertTrue(TestUtil.docs(random(), ir, "string", new BytesRef("doc1field2"), null, null, PostingsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+ assertTrue(TestUtil.docs(random(), ir, "string", new BytesRef("doc2field2"), null, null, PostingsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+ assertTrue(TestUtil.docs(random(), ir, "string", new BytesRef("doc3field2"), null, null, PostingsEnum.FLAG_NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
ir.close();
dir.close();
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
index eb1359b..09f0b3e 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
@@ -521,7 +521,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
// Make sure the doc that hit the exception was marked
// as deleted:
- DocsEnum tdocs = TestUtil.docs(random(), reader,
+ PostingsEnum tdocs = TestUtil.docs(random(), reader,
t.field(),
new BytesRef(t.text()),
MultiFields.getLiveDocs(reader),
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java
index 42e75dc..3ea6a42 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java
@@ -53,7 +53,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
public static int count(Term t, IndexReader r) throws IOException {
int count = 0;
- DocsEnum td = TestUtil.docs(random(), r,
+ PostingsEnum td = TestUtil.docs(random(), r,
t.field(), new BytesRef(t.text()),
MultiFields.getLiveDocs(r),
null,
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java
index 8a6c6ce..09f7830 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java
@@ -244,7 +244,7 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {
// Quick test to make sure index is not corrupt:
IndexReader reader = DirectoryReader.open(dir);
- DocsEnum tdocs = TestUtil.docs(random(), reader,
+ PostingsEnum tdocs = TestUtil.docs(random(), reader,
"field",
new BytesRef("aaa"),
MultiFields.getLiveDocs(reader),
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java
index 85c95dc..4a8e0a3 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexableField.java
@@ -332,14 +332,14 @@ public class TestIndexableField extends LuceneTestCase {
TermsEnum termsEnum = tfv.iterator(null);
assertEquals(new BytesRef(""+counter), termsEnum.next());
assertEquals(1, termsEnum.totalTermFreq());
- DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
+ PostingsEnum dpEnum = termsEnum.postings(null, null, PostingsEnum.FLAG_ALL);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(1, dpEnum.freq());
assertEquals(1, dpEnum.nextPosition());
assertEquals(new BytesRef("text"), termsEnum.next());
assertEquals(1, termsEnum.totalTermFreq());
- dpEnum = termsEnum.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum.postings(null, dpEnum, PostingsEnum.FLAG_ALL);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(1, dpEnum.freq());
assertEquals(0, dpEnum.nextPosition());
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestLazyProxSkipping.java b/lucene/core/src/test/org/apache/lucene/index/TestLazyProxSkipping.java
index 9e4ae9c..4341776 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestLazyProxSkipping.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestLazyProxSkipping.java
@@ -154,7 +154,7 @@ public class TestLazyProxSkipping extends LuceneTestCase {
writer.close();
IndexReader reader = DirectoryReader.open(directory);
- DocsAndPositionsEnum tp = MultiFields.getTermPositionsEnum(reader,
+ PostingsEnum tp = MultiFields.getTermPositionsEnum(reader,
MultiFields.getLiveDocs(reader),
this.field,
new BytesRef("b"));
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java b/lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java
index 0212f66..7314396 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestLongPostings.java
@@ -167,7 +167,7 @@ public class TestLongPostings extends LuceneTestCase {
System.out.println("\nTEST: iter=" + iter + " doS1=" + doS1);
}
- final DocsAndPositionsEnum postings = MultiFields.getTermPositionsEnum(r, null, "field", new BytesRef(term));
+ final PostingsEnum postings = MultiFields.getTermPositionsEnum(r, null, "field", new BytesRef(term));
int docID = -1;
while(docID < DocIdSetIterator.NO_MORE_DOCS) {
@@ -370,14 +370,14 @@ public class TestLongPostings extends LuceneTestCase {
System.out.println("\nTEST: iter=" + iter + " doS1=" + doS1 + " term=" + term);
}
- final DocsEnum docs;
- final DocsEnum postings;
+ final PostingsEnum docs;
+ final PostingsEnum postings;
if (options == IndexOptions.DOCS) {
- docs = TestUtil.docs(random(), r, "field", new BytesRef(term), null, null, DocsEnum.FLAG_NONE);
+ docs = TestUtil.docs(random(), r, "field", new BytesRef(term), null, null, PostingsEnum.FLAG_NONE);
postings = null;
} else {
- docs = postings = TestUtil.docs(random(), r, "field", new BytesRef(term), null, null, DocsEnum.FLAG_FREQS);
+ docs = postings = TestUtil.docs(random(), r, "field", new BytesRef(term), null, null, PostingsEnum.FLAG_FREQS);
assert postings != null;
}
assert docs != null;
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java b/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java
index a0b6170..72432b4 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java
@@ -17,12 +17,24 @@ package org.apache.lucene.index;
* limitations under the License.
*/
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.store.*;
-import org.apache.lucene.util.*;
-import org.apache.lucene.document.*;
-import org.apache.lucene.analysis.*;
-import java.util.*;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.util.UnicodeUtil;
public class TestMultiFields extends LuceneTestCase {
@@ -123,15 +135,15 @@ public class TestMultiFields extends LuceneTestCase {
System.out.println("TEST: seek term="+ UnicodeUtil.toHexString(term.utf8ToString()) + " " + term);
}
- DocsEnum docsEnum = TestUtil.docs(random(), reader, "field", term, liveDocs, null, DocsEnum.FLAG_NONE);
- assertNotNull(docsEnum);
+ PostingsEnum postingsEnum = TestUtil.docs(random(), reader, "field", term, liveDocs, null, PostingsEnum.FLAG_NONE);
+ assertNotNull(postingsEnum);
for(int docID : docs.get(term)) {
if (!deleted.contains(docID)) {
- assertEquals(docID, docsEnum.nextDoc());
+ assertEquals(docID, postingsEnum.nextDoc());
}
}
- assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsEnum.nextDoc());
+ assertEquals(DocIdSetIterator.NO_MORE_DOCS, postingsEnum.nextDoc());
}
reader.close();
@@ -164,8 +176,8 @@ public class TestMultiFields extends LuceneTestCase {
w.addDocument(d);
IndexReader r = w.getReader();
w.close();
- DocsEnum d1 = TestUtil.docs(random(), r, "f", new BytesRef("j"), null, null, DocsEnum.FLAG_NONE);
- DocsEnum d2 = TestUtil.docs(random(), r, "f", new BytesRef("j"), null, null, DocsEnum.FLAG_NONE);
+ PostingsEnum d1 = TestUtil.docs(random(), r, "f", new BytesRef("j"), null, null, PostingsEnum.FLAG_NONE);
+ PostingsEnum d2 = TestUtil.docs(random(), r, "f", new BytesRef("j"), null, null, PostingsEnum.FLAG_NONE);
assertEquals(0, d1.nextDoc());
assertEquals(0, d2.nextDoc());
r.close();
@@ -182,7 +194,7 @@ public class TestMultiFields extends LuceneTestCase {
w.addDocument(d);
IndexReader r = w.getReader();
w.close();
- DocsEnum de = MultiFields.getTermDocsEnum(r, null, "f", new BytesRef("j"));
+ PostingsEnum de = MultiFields.getTermDocsEnum(r, null, "f", new BytesRef("j"));
assertEquals(0, de.nextDoc());
assertEquals(1, de.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, de.nextDoc());
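
The doc-only call sites in this test all reduce to the same shape: obtain the enum without positional detail and treat it as a plain DocIdSetIterator. A compact sketch, with the reader, field, and term standing in for the test's values:

import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.BytesRef;

class DocsOnlySketch {
  // Counts documents containing field:term; no freqs, positions, or
  // payloads are requested, so doc-ID-only enumeration is enough.
  static int countDocs(IndexReader reader, String field, String term) throws IOException {
    PostingsEnum de = MultiFields.getTermDocsEnum(reader, MultiFields.getLiveDocs(reader), field, new BytesRef(term));
    if (de == null) {
      return 0; // field or term absent
    }
    int count = 0;
    while (de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
      count++;
    }
    return count;
  }
}
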
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java b/lucene/core/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
index d1a9cd8..b947280 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
@@ -84,7 +84,7 @@ public class TestMultiLevelSkipList extends LuceneTestCase {
for (int i = 0; i < 2; i++) {
counter = 0;
- DocsAndPositionsEnum tp = reader.termPositionsEnum(term);
+ PostingsEnum tp = reader.termDocsEnum(term, PostingsEnum.FLAG_ALL);
checkSkipTo(tp, 14, 185); // no skips
checkSkipTo(tp, 17, 190); // one skip on level 0
checkSkipTo(tp, 287, 200); // one skip on level 1, two on level 0
@@ -95,7 +95,7 @@ public class TestMultiLevelSkipList extends LuceneTestCase {
}
}
- public void checkSkipTo(DocsAndPositionsEnum tp, int target, int maxCounter) throws IOException {
+ public void checkSkipTo(PostingsEnum tp, int target, int maxCounter) throws IOException {
tp.advance(target);
if (maxCounter < counter) {
fail("Too many bytes read: " + counter + " vs " + maxCounter);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestOmitPositions.java b/lucene/core/src/test/org/apache/lucene/index/TestOmitPositions.java
index fb58903..6f7d39b 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestOmitPositions.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestOmitPositions.java
@@ -53,7 +53,7 @@ public class TestOmitPositions extends LuceneTestCase {
assertNull(MultiFields.getTermPositionsEnum(reader, null, "foo", new BytesRef("test")));
- DocsEnum de = TestUtil.docs(random(), reader, "foo", new BytesRef("test"), null, null, DocsEnum.FLAG_FREQS);
+ PostingsEnum de = TestUtil.docs(random(), reader, "foo", new BytesRef("test"), null, null, PostingsEnum.FLAG_FREQS);
while (de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
assertEquals(2, de.freq());
}
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java b/lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java
index 7fd9182..b1cdedc 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java
@@ -80,7 +80,7 @@ public class TestParallelTermEnum extends LuceneTestCase {
BytesRef b = te.next();
assertNotNull(b);
assertEquals(t, b.utf8ToString());
- DocsEnum td = TestUtil.docs(random(), te, liveDocs, null, DocsEnum.FLAG_NONE);
+ PostingsEnum td = TestUtil.docs(random(), te, liveDocs, null, PostingsEnum.FLAG_NONE);
assertTrue(td.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(0, td.docID());
assertEquals(td.nextDoc(), DocIdSetIterator.NO_MORE_DOCS);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java b/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java
index a9e068c..5eedc72 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestPayloads.java
@@ -26,17 +26,24 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import org.apache.lucene.analysis.*;
-import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.CannedTokenStream;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.Token;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
@@ -183,7 +190,7 @@ public class TestPayloads extends LuceneTestCase {
byte[] verifyPayloadData = new byte[payloadDataLength];
offset = 0;
- DocsAndPositionsEnum[] tps = new DocsAndPositionsEnum[numTerms];
+ PostingsEnum[] tps = new PostingsEnum[numTerms];
for (int i = 0; i < numTerms; i++) {
tps[i] = MultiFields.getTermPositionsEnum(reader,
MultiFields.getLiveDocs(reader),
@@ -214,7 +221,7 @@ public class TestPayloads extends LuceneTestCase {
/*
* test lazy skipping
*/
- DocsAndPositionsEnum tp = MultiFields.getTermPositionsEnum(reader,
+ PostingsEnum tp = MultiFields.getTermPositionsEnum(reader,
MultiFields.getLiveDocs(reader),
terms[0].field(),
new BytesRef(terms[0].text()));
@@ -481,10 +488,10 @@ public class TestPayloads extends LuceneTestCase {
IndexReader reader = DirectoryReader.open(dir);
TermsEnum terms = MultiFields.getFields(reader).terms(field).iterator(null);
Bits liveDocs = MultiFields.getLiveDocs(reader);
- DocsAndPositionsEnum tp = null;
+ PostingsEnum tp = null;
while (terms.next() != null) {
String termText = terms.term().utf8ToString();
- tp = terms.docsAndPositions(liveDocs, tp);
+ tp = terms.postings(liveDocs, tp, PostingsEnum.FLAG_PAYLOADS);
while(tp.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
int freq = tp.freq();
for (int i = 0; i < freq; i++) {
@@ -604,7 +611,7 @@ public class TestPayloads extends LuceneTestCase {
writer.addDocument(doc);
DirectoryReader reader = writer.getReader();
LeafReader sr = SlowCompositeReaderWrapper.wrap(reader);
- DocsAndPositionsEnum de = sr.termPositionsEnum(new Term("field", "withPayload"));
+ PostingsEnum de = sr.termDocsEnum(new Term("field", "withPayload"), PostingsEnum.FLAG_PAYLOADS);
de.nextDoc();
de.nextPosition();
assertEquals(new BytesRef("test"), de.getPayload());
@@ -638,7 +645,7 @@ public class TestPayloads extends LuceneTestCase {
writer.addDocument(doc);
DirectoryReader reader = writer.getReader();
SegmentReader sr = getOnlySegmentReader(reader);
- DocsAndPositionsEnum de = sr.termPositionsEnum(new Term("field", "withPayload"));
+ PostingsEnum de = sr.termDocsEnum(new Term("field", "withPayload"), PostingsEnum.FLAG_PAYLOADS);
de.nextDoc();
de.nextPosition();
assertEquals(new BytesRef("test"), de.getPayload());
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPayloadsOnVectors.java b/lucene/core/src/test/org/apache/lucene/index/TestPayloadsOnVectors.java
index 3fa9a81..08a2d41 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestPayloadsOnVectors.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestPayloadsOnVectors.java
@@ -72,7 +72,7 @@ public class TestPayloadsOnVectors extends LuceneTestCase {
assert terms != null;
TermsEnum termsEnum = terms.iterator(null);
assertTrue(termsEnum.seekExact(new BytesRef("withPayload")));
- DocsAndPositionsEnum de = termsEnum.docsAndPositions(null, null);
+ PostingsEnum de = termsEnum.postings(null, null, PostingsEnum.FLAG_ALL);
assertEquals(0, de.nextDoc());
assertEquals(0, de.nextPosition());
assertEquals(new BytesRef("test"), de.getPayload());
@@ -114,7 +114,7 @@ public class TestPayloadsOnVectors extends LuceneTestCase {
assert terms != null;
TermsEnum termsEnum = terms.iterator(null);
assertTrue(termsEnum.seekExact(new BytesRef("withPayload")));
- DocsAndPositionsEnum de = termsEnum.docsAndPositions(null, null);
+ PostingsEnum de = termsEnum.postings(null, null, PostingsEnum.FLAG_ALL);
assertEquals(0, de.nextDoc());
assertEquals(3, de.nextPosition());
assertEquals(new BytesRef("test"), de.getPayload());
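
Payload access goes through the same unified enum: request FLAG_PAYLOADS (FLAG_ALL also suffices), advance to a document, and read the payload only after nextPosition(). A sketch under those assumptions:

import java.io.IOException;

import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.BytesRef;

class PayloadSketch {
  // Prints every payload for the current term of termsEnum. getPayload()
  // is only valid after nextPosition(), and may return null for positions
  // that carry no payload.
  static void dumpPayloads(TermsEnum termsEnum) throws IOException {
    PostingsEnum de = termsEnum.postings(null, null, PostingsEnum.FLAG_PAYLOADS);
    if (de == null) {
      return; // positions/payloads not indexed for this field
    }
    while (de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
      int freq = de.freq();
      for (int i = 0; i < freq; i++) {
        de.nextPosition();
        BytesRef payload = de.getPayload();
        if (payload != null) {
          System.out.println(de.docID() + ": " + payload.utf8ToString());
        }
      }
    }
  }
}
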
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java b/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
index 6c5c2d3..135ec6c 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
@@ -226,16 +226,16 @@ public class TestPerSegmentDeletes extends LuceneTestCase {
Terms cterms = fields.terms(term.field);
TermsEnum ctermsEnum = cterms.iterator(null);
if (ctermsEnum.seekExact(new BytesRef(term.text()))) {
- DocsEnum docsEnum = TestUtil.docs(random(), ctermsEnum, bits, null, DocsEnum.FLAG_NONE);
- return toArray(docsEnum);
+ PostingsEnum postingsEnum = TestUtil.docs(random(), ctermsEnum, bits, null, PostingsEnum.FLAG_NONE);
+ return toArray(postingsEnum);
}
return null;
}
- public static int[] toArray(DocsEnum docsEnum) throws IOException {
+ public static int[] toArray(PostingsEnum postingsEnum) throws IOException {
List<Integer> docs = new ArrayList<>();
- while (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
- int docID = docsEnum.docID();
+ while (postingsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+ int docID = postingsEnum.docID();
docs.add(docID);
}
return ArrayUtil.toIntArray(docs);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java b/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java
index f7eac40..1728b1a 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java
@@ -82,7 +82,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
IndexReader r = w.getReader();
w.close();
- DocsAndPositionsEnum dp = MultiFields.getTermPositionsEnum(r, null, "content", new BytesRef("a"));
+ PostingsEnum dp = MultiFields.getTermPositionsEnum(r, null, "content", new BytesRef("a"));
assertNotNull(dp);
assertEquals(0, dp.nextDoc());
assertEquals(2, dp.freq());
@@ -154,7 +154,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
String terms[] = { "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "hundred" };
for (String term : terms) {
- DocsAndPositionsEnum dp = MultiFields.getTermPositionsEnum(reader, null, "numbers", new BytesRef(term));
+ PostingsEnum dp = MultiFields.getTermPositionsEnum(reader, null, "numbers", new BytesRef(term));
int doc;
while((doc = dp.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
String storedNumbers = reader.document(doc).get("numbers");
@@ -182,7 +182,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
for (int j = 0; j < numSkippingTests; j++) {
int num = TestUtil.nextInt(random(), 100, Math.min(numDocs - 1, 999));
- DocsAndPositionsEnum dp = MultiFields.getTermPositionsEnum(reader, null, "numbers", new BytesRef("hundred"));
+ PostingsEnum dp = MultiFields.getTermPositionsEnum(reader, null, "numbers", new BytesRef("hundred"));
int doc = dp.advance(num);
assertEquals(num, doc);
int freq = dp.freq();
@@ -207,7 +207,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
// check that other fields (without offsets) work correctly
for (int i = 0; i < numDocs; i++) {
- DocsEnum dp = MultiFields.getTermDocsEnum(reader, null, "id", new BytesRef("" + i), 0);
+ PostingsEnum dp = MultiFields.getTermDocsEnum(reader, null, "id", new BytesRef("" + i), 0);
assertEquals(i, dp.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, dp.nextDoc());
}
@@ -294,14 +294,14 @@ public class TestPostingsOffsets extends LuceneTestCase {
LeafReader sub = ctx.reader();
//System.out.println("\nsub=" + sub);
final TermsEnum termsEnum = sub.fields().terms("content").iterator(null);
- DocsEnum docs = null;
- DocsAndPositionsEnum docsAndPositions = null;
- DocsAndPositionsEnum docsAndPositionsAndOffsets = null;
+ PostingsEnum docs = null;
+ PostingsEnum docsAndPositions = null;
+ PostingsEnum docsAndPositionsAndOffsets = null;
final NumericDocValues docIDToID = DocValues.getNumeric(sub, "id");
for(String term : terms) {
//System.out.println(" term=" + term);
if (termsEnum.seekExact(new BytesRef(term))) {
- docs = termsEnum.docs(null, docs);
+ docs = termsEnum.postings(null, docs);
assertNotNull(docs);
int doc;
//System.out.println(" doc/freq");
@@ -313,7 +313,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
}
// explicitly exclude offsets here
- docsAndPositions = termsEnum.docsAndPositions(null, docsAndPositions, DocsAndPositionsEnum.FLAG_PAYLOADS);
+ docsAndPositions = termsEnum.postings(null, docsAndPositions, PostingsEnum.FLAG_PAYLOADS);
assertNotNull(docsAndPositions);
//System.out.println(" doc/freq/pos");
while((doc = docsAndPositions.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
@@ -328,7 +328,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
}
}
- docsAndPositionsAndOffsets = termsEnum.docsAndPositions(null, docsAndPositions);
+ docsAndPositionsAndOffsets = termsEnum.postings(null, docsAndPositions, PostingsEnum.FLAG_ALL);
assertNotNull(docsAndPositionsAndOffsets);
//System.out.println(" doc/freq/pos/offs");
while((doc = docsAndPositionsAndOffsets.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java b/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java
index d7b577f..0108502 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java
@@ -106,7 +106,7 @@ public class TestSegmentMerger extends LuceneTestCase {
assertTrue(newDoc2 != null);
assertTrue(DocHelper.numFields(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size());
- DocsEnum termDocs = TestUtil.docs(random(), mergedReader,
+ PostingsEnum termDocs = TestUtil.docs(random(), mergedReader,
DocHelper.TEXT_FIELD_2_KEY,
new BytesRef("field"),
MultiFields.getLiveDocs(mergedReader),
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java b/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java
index 918b915..5abf2c4 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java
@@ -127,7 +127,7 @@ public class TestSegmentReader extends LuceneTestCase {
}
}
- DocsEnum termDocs = TestUtil.docs(random(), reader,
+ PostingsEnum termDocs = TestUtil.docs(random(), reader,
DocHelper.TEXT_FIELD_1_KEY,
new BytesRef("field"),
MultiFields.getLiveDocs(reader),
@@ -145,7 +145,7 @@ public class TestSegmentReader extends LuceneTestCase {
assertTrue(termDocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
- DocsAndPositionsEnum positions = MultiFields.getTermPositionsEnum(reader,
+ PostingsEnum positions = MultiFields.getTermPositionsEnum(reader,
MultiFields.getLiveDocs(reader),
DocHelper.TEXT_FIELD_1_KEY,
new BytesRef("field"));
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java b/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java
index 3f5686e..b67f111 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java
@@ -58,7 +58,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
TermsEnum terms = reader.fields().terms(DocHelper.TEXT_FIELD_2_KEY).iterator(null);
terms.seekCeil(new BytesRef("field"));
- DocsEnum termDocs = TestUtil.docs(random(), terms, reader.getLiveDocs(), null, DocsEnum.FLAG_FREQS);
+ PostingsEnum termDocs = TestUtil.docs(random(), terms, reader.getLiveDocs(), null, PostingsEnum.FLAG_FREQS);
if (termDocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
int docId = termDocs.docID();
assertTrue(docId == 0);
@@ -73,7 +73,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
//After adding the document, we should be able to read it back in
SegmentReader reader = new SegmentReader(info, newIOContext(random()));
assertTrue(reader != null);
- DocsEnum termDocs = TestUtil.docs(random(), reader,
+ PostingsEnum termDocs = TestUtil.docs(random(), reader,
"textField2",
new BytesRef("bad"),
reader.getLiveDocs(),
@@ -87,7 +87,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
//After adding the document, we should be able to read it back in
SegmentReader reader = new SegmentReader(info, newIOContext(random()));
assertTrue(reader != null);
- DocsEnum termDocs = TestUtil.docs(random(), reader,
+ PostingsEnum termDocs = TestUtil.docs(random(), reader,
"junk",
new BytesRef("bad"),
reader.getLiveDocs(),
@@ -121,12 +121,12 @@ public class TestSegmentTermDocs extends LuceneTestCase {
IndexReader reader = DirectoryReader.open(dir);
- DocsEnum tdocs = TestUtil.docs(random(), reader,
+ PostingsEnum tdocs = TestUtil.docs(random(), reader,
ta.field(),
new BytesRef(ta.text()),
MultiFields.getLiveDocs(reader),
null,
- DocsEnum.FLAG_FREQS);
+ PostingsEnum.FLAG_FREQS);
// without optimization (assumption skipInterval == 16)
@@ -169,7 +169,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
new BytesRef(tb.text()),
MultiFields.getLiveDocs(reader),
null,
- DocsEnum.FLAG_FREQS);
+ PostingsEnum.FLAG_FREQS);
assertTrue(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(10, tdocs.docID());
@@ -193,7 +193,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
new BytesRef(tb.text()),
MultiFields.getLiveDocs(reader),
null,
- DocsEnum.FLAG_FREQS);
+ PostingsEnum.FLAG_FREQS);
assertTrue(tdocs.advance(5) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(10, tdocs.docID());
@@ -213,7 +213,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
new BytesRef(tc.text()),
MultiFields.getLiveDocs(reader),
null,
- DocsEnum.FLAG_FREQS);
+ PostingsEnum.FLAG_FREQS);
assertTrue(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(26, tdocs.docID());
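// Reference sketch, not taken from the patch: the iteration pattern that the
// TestSegment* and TestUtil.docs changes above converge on. `reader` and the
// term value are hypothetical; only APIs appearing elsewhere in this patch
// are assumed.
TermsEnum termsEnum = MultiFields.getTerms(reader, "field").iterator(null);
if (termsEnum.seekExact(new BytesRef("value"))) {
  // FLAG_FREQS requests only doc ids and frequencies; positions and offsets
  // are not decoded, which is the cheap path these term-doc tests exercise.
  PostingsEnum postings = termsEnum.postings(MultiFields.getLiveDocs(reader), null, PostingsEnum.FLAG_FREQS);
  int doc;
  while ((doc = postings.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
    System.out.println("doc=" + doc + " freq=" + postings.freq());
  }
}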
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java b/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java
index bd35b50..a28e3d0 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java
@@ -76,17 +76,17 @@ public class TestStressAdvance extends LuceneTestCase {
}
final TermsEnum te = getOnlySegmentReader(r).fields().terms("field").iterator(null);
- DocsEnum de = null;
+ PostingsEnum de = null;
for(int iter2=0;iter2<10;iter2++) {
if (VERBOSE) {
System.out.println("\nTEST: iter=" + iter + " iter2=" + iter2);
}
assertEquals(TermsEnum.SeekStatus.FOUND, te.seekCeil(new BytesRef("a")));
- de = TestUtil.docs(random(), te, null, de, DocsEnum.FLAG_NONE);
+ de = TestUtil.docs(random(), te, null, de, PostingsEnum.FLAG_NONE);
testOne(de, aDocIDs);
assertEquals(TermsEnum.SeekStatus.FOUND, te.seekCeil(new BytesRef("b")));
- de = TestUtil.docs(random(), te, null, de, DocsEnum.FLAG_NONE);
+ de = TestUtil.docs(random(), te, null, de, PostingsEnum.FLAG_NONE);
testOne(de, bDocIDs);
}
@@ -96,7 +96,7 @@ public class TestStressAdvance extends LuceneTestCase {
}
}
- private void testOne(DocsEnum docs, List expected) throws Exception {
+ private void testOne(PostingsEnum docs, List expected) throws Exception {
if (VERBOSE) {
System.out.println("test");
}
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java b/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java
index 926d8db..13a4577 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java
@@ -325,9 +325,9 @@ public class TestStressIndexing2 extends LuceneTestCase {
// make sure r1 is in fact empty (eg has only all
// deleted docs):
Bits liveDocs = MultiFields.getLiveDocs(r1);
- DocsEnum docs = null;
+ PostingsEnum docs = null;
while(termsEnum.next() != null) {
- docs = TestUtil.docs(random(), termsEnum, liveDocs, docs, DocsEnum.FLAG_NONE);
+ docs = TestUtil.docs(random(), termsEnum, liveDocs, docs, PostingsEnum.FLAG_NONE);
while(docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
fail("r1 is not empty but r2 is");
}
@@ -336,8 +336,8 @@ public class TestStressIndexing2 extends LuceneTestCase {
}
TermsEnum termsEnum2 = terms2.iterator(null);
- DocsEnum termDocs1 = null;
- DocsEnum termDocs2 = null;
+ PostingsEnum termDocs1 = null;
+ PostingsEnum termDocs2 = null;
while(true) {
BytesRef term = termsEnum.next();
@@ -346,9 +346,9 @@ public class TestStressIndexing2 extends LuceneTestCase {
break;
}
- termDocs1 = TestUtil.docs(random(), termsEnum, liveDocs1, termDocs1, DocsEnum.FLAG_NONE);
+ termDocs1 = TestUtil.docs(random(), termsEnum, liveDocs1, termDocs1, PostingsEnum.FLAG_NONE);
if (termsEnum2.seekExact(term)) {
- termDocs2 = TestUtil.docs(random(), termsEnum2, liveDocs2, termDocs2, DocsEnum.FLAG_NONE);
+ termDocs2 = TestUtil.docs(random(), termsEnum2, liveDocs2, termDocs2, PostingsEnum.FLAG_NONE);
} else {
termDocs2 = null;
}
@@ -386,8 +386,8 @@ public class TestStressIndexing2 extends LuceneTestCase {
Fields tv1 = r1.getTermVectors(id1);
System.out.println(" d1=" + tv1);
if (tv1 != null) {
- DocsAndPositionsEnum dpEnum = null;
- DocsEnum dEnum = null;
+ PostingsEnum dpEnum = null;
+ PostingsEnum dEnum = null;
for (String field : tv1) {
System.out.println(" " + field + ":");
Terms terms3 = tv1.terms(field);
@@ -396,7 +396,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
BytesRef term2;
while((term2 = termsEnum3.next()) != null) {
System.out.println(" " + term2.utf8ToString() + ": freq=" + termsEnum3.totalTermFreq());
- dpEnum = termsEnum3.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum3.postings(null, dpEnum, PostingsEnum.FLAG_ALL);
if (dpEnum != null) {
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
final int freq = dpEnum.freq();
@@ -405,7 +405,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
System.out.println(" pos=" + dpEnum.nextPosition());
}
} else {
- dEnum = TestUtil.docs(random(), termsEnum3, null, dEnum, DocsEnum.FLAG_FREQS);
+ dEnum = TestUtil.docs(random(), termsEnum3, null, dEnum, PostingsEnum.FLAG_FREQS);
assertNotNull(dEnum);
assertTrue(dEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
final int freq = dEnum.freq();
@@ -418,8 +418,8 @@ public class TestStressIndexing2 extends LuceneTestCase {
Fields tv2 = r2.getTermVectors(id2);
System.out.println(" d2=" + tv2);
if (tv2 != null) {
- DocsAndPositionsEnum dpEnum = null;
- DocsEnum dEnum = null;
+ PostingsEnum dpEnum = null;
+ PostingsEnum dEnum = null;
for (String field : tv2) {
System.out.println(" " + field + ":");
Terms terms3 = tv2.terms(field);
@@ -428,7 +428,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
BytesRef term2;
while((term2 = termsEnum3.next()) != null) {
System.out.println(" " + term2.utf8ToString() + ": freq=" + termsEnum3.totalTermFreq());
- dpEnum = termsEnum3.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum3.postings(null, dpEnum, PostingsEnum.FLAG_ALL);
if (dpEnum != null) {
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
final int freq = dpEnum.freq();
@@ -437,7 +437,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
System.out.println(" pos=" + dpEnum.nextPosition());
}
} else {
- dEnum = TestUtil.docs(random(), termsEnum3, null, dEnum, DocsEnum.FLAG_FREQS);
+ dEnum = TestUtil.docs(random(), termsEnum3, null, dEnum, PostingsEnum.FLAG_FREQS);
assertNotNull(dEnum);
assertTrue(dEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
final int freq = dEnum.freq();
@@ -464,7 +464,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
String field1=null, field2=null;
TermsEnum termsEnum1 = null;
termsEnum2 = null;
- DocsEnum docs1=null, docs2=null;
+ PostingsEnum docs1=null, docs2=null;
// pack both doc and freq into single element for easy sorting
long[] info1 = new long[r1.numDocs()];
@@ -496,7 +496,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
}
//System.out.println("TEST: term1=" + term1);
- docs1 = TestUtil.docs(random(), termsEnum1, liveDocs1, docs1, DocsEnum.FLAG_FREQS);
+ docs1 = TestUtil.docs(random(), termsEnum1, liveDocs1, docs1, PostingsEnum.FLAG_FREQS);
while (docs1.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
int d = docs1.docID();
int f = docs1.freq();
@@ -529,7 +529,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
}
//System.out.println("TEST: term1=" + term1);
- docs2 = TestUtil.docs(random(), termsEnum2, liveDocs2, docs2, DocsEnum.FLAG_FREQS);
+ docs2 = TestUtil.docs(random(), termsEnum2, liveDocs2, docs2, PostingsEnum.FLAG_FREQS);
while (docs2.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
int d = r2r1[docs2.docID()];
int f = docs2.freq();
@@ -606,10 +606,10 @@ public class TestStressIndexing2 extends LuceneTestCase {
assertNotNull(terms2);
TermsEnum termsEnum2 = terms2.iterator(null);
- DocsAndPositionsEnum dpEnum1 = null;
- DocsAndPositionsEnum dpEnum2 = null;
- DocsEnum dEnum1 = null;
- DocsEnum dEnum2 = null;
+ PostingsEnum dpEnum1 = null;
+ PostingsEnum dpEnum2 = null;
+ PostingsEnum dEnum1 = null;
+ PostingsEnum dEnum2 = null;
BytesRef term1;
while ((term1 = termsEnum1.next()) != null) {
@@ -618,8 +618,8 @@ public class TestStressIndexing2 extends LuceneTestCase {
assertEquals(termsEnum1.totalTermFreq(),
termsEnum2.totalTermFreq());
- dpEnum1 = termsEnum1.docsAndPositions(null, dpEnum1);
- dpEnum2 = termsEnum2.docsAndPositions(null, dpEnum2);
+ dpEnum1 = termsEnum1.postings(null, dpEnum1, PostingsEnum.FLAG_ALL);
+ dpEnum2 = termsEnum2.postings(null, dpEnum2, PostingsEnum.FLAG_ALL);
if (dpEnum1 != null) {
assertNotNull(dpEnum2);
int docID1 = dpEnum1.nextDoc();
@@ -655,8 +655,8 @@ public class TestStressIndexing2 extends LuceneTestCase {
assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum1.nextDoc());
assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum2.nextDoc());
} else {
- dEnum1 = TestUtil.docs(random(), termsEnum1, null, dEnum1, DocsEnum.FLAG_FREQS);
- dEnum2 = TestUtil.docs(random(), termsEnum2, null, dEnum2, DocsEnum.FLAG_FREQS);
+ dEnum1 = TestUtil.docs(random(), termsEnum1, null, dEnum1, PostingsEnum.FLAG_FREQS);
+ dEnum2 = TestUtil.docs(random(), termsEnum2, null, dEnum2, PostingsEnum.FLAG_FREQS);
assertNotNull(dEnum1);
assertNotNull(dEnum2);
int docID1 = dEnum1.nextDoc();
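// Sketch of the positions path, assuming `termsEnum` is positioned on a term
// of a field indexed with positions: the docsAndPositions(null, reuse) calls
// replaced throughout this file become postings(...) with FLAG_ALL.
PostingsEnum dp = termsEnum.postings(null, null, PostingsEnum.FLAG_ALL);
if (dp != null && dp.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
  int freq = dp.freq();
  for (int i = 0; i < freq; i++) {
    int pos = dp.nextPosition();
    // startOffset()/endOffset() mirror the old DocsAndPositionsEnum and are
    // expected to return -1 when offsets were not indexed.
    System.out.println("pos=" + pos + " offsets=[" + dp.startOffset() + "," + dp.endOffset() + "]");
  }
}
// As before, a null enum means the term carries no positions data, which the
// assertNull(...) term-vector checks below still rely on.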
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermVectors.java b/lucene/core/src/test/org/apache/lucene/index/TestTermVectors.java
index 356353e..d4bbf44 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTermVectors.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTermVectors.java
@@ -17,8 +17,6 @@ package org.apache.lucene.index;
* limitations under the License.
*/
-import java.io.IOException;
-
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
@@ -37,6 +35,8 @@ import org.apache.lucene.util.TestUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
+import java.io.IOException;
+
public class TestTermVectors extends LuceneTestCase {
private static IndexReader reader;
private static Directory directory;
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java b/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java
index a8adfb7..0d43cab 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsReader.java
@@ -220,7 +220,7 @@ public class TestTermVectorsReader extends LuceneTestCase {
assertNotNull(vector);
assertEquals(testTerms.length, vector.size());
TermsEnum termsEnum = vector.iterator(null);
- DocsEnum docsEnum = null;
+ PostingsEnum postingsEnum = null;
for (int i = 0; i < testTerms.length; i++) {
final BytesRef text = termsEnum.next();
assertNotNull(text);
@@ -228,12 +228,12 @@ public class TestTermVectorsReader extends LuceneTestCase {
//System.out.println("Term: " + term);
assertEquals(testTerms[i], term);
- docsEnum = TestUtil.docs(random(), termsEnum, null, docsEnum, DocsEnum.FLAG_NONE);
- assertNotNull(docsEnum);
- int doc = docsEnum.docID();
+ postingsEnum = TestUtil.docs(random(), termsEnum, null, postingsEnum, PostingsEnum.FLAG_NONE);
+ assertNotNull(postingsEnum);
+ int doc = postingsEnum.docID();
assertEquals(-1, doc);
- assertTrue(docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
- assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsEnum.nextDoc());
+ assertTrue(postingsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+ assertEquals(DocIdSetIterator.NO_MORE_DOCS, postingsEnum.nextDoc());
}
assertNull(termsEnum.next());
}
@@ -247,7 +247,7 @@ public class TestTermVectorsReader extends LuceneTestCase {
assertNotNull(vector);
assertEquals(testTerms.length, vector.size());
TermsEnum termsEnum = vector.iterator(null);
- DocsAndPositionsEnum dpEnum = null;
+ PostingsEnum dpEnum = null;
for (int i = 0; i < testTerms.length; i++) {
final BytesRef text = termsEnum.next();
assertNotNull(text);
@@ -255,7 +255,7 @@ public class TestTermVectorsReader extends LuceneTestCase {
//System.out.println("Term: " + term);
assertEquals(testTerms[i], term);
- dpEnum = termsEnum.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum.postings(null, dpEnum, PostingsEnum.FLAG_ALL);
assertNotNull(dpEnum);
int doc = dpEnum.docID();
assertEquals(-1, doc);
@@ -266,7 +266,7 @@ public class TestTermVectorsReader extends LuceneTestCase {
}
assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc());
- dpEnum = termsEnum.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum.postings(null, dpEnum, PostingsEnum.FLAG_ALL);
doc = dpEnum.docID();
assertEquals(-1, doc);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -291,8 +291,8 @@ public class TestTermVectorsReader extends LuceneTestCase {
String term = text.utf8ToString();
//System.out.println("Term: " + term);
assertEquals(testTerms[i], term);
- assertNotNull(termsEnum.docs(null, null));
- assertNull(termsEnum.docsAndPositions(null, null)); // no pos
+ assertNotNull(termsEnum.postings(null, null));
+ assertNull(termsEnum.postings(null, null, PostingsEnum.FLAG_ALL)); // no pos
}
reader.close();
}
@@ -304,14 +304,14 @@ public class TestTermVectorsReader extends LuceneTestCase {
TermsEnum termsEnum = vector.iterator(null);
assertNotNull(termsEnum);
assertEquals(testTerms.length, vector.size());
- DocsAndPositionsEnum dpEnum = null;
+ PostingsEnum dpEnum = null;
for (int i = 0; i < testTerms.length; i++) {
final BytesRef text = termsEnum.next();
assertNotNull(text);
String term = text.utf8ToString();
assertEquals(testTerms[i], term);
- dpEnum = termsEnum.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum.postings(null, dpEnum, PostingsEnum.FLAG_ALL);
assertNotNull(dpEnum);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertEquals(dpEnum.freq(), positions[i].length);
@@ -320,7 +320,7 @@ public class TestTermVectorsReader extends LuceneTestCase {
}
assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc());
- dpEnum = termsEnum.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum.postings(null, dpEnum, PostingsEnum.FLAG_ALL);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertNotNull(dpEnum);
assertEquals(dpEnum.freq(), positions[i].length);
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java
index edd1c9e..1a9ca1f 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTermVectorsWriter.java
@@ -68,7 +68,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
// Token "" occurred once
assertEquals(1, termsEnum.totalTermFreq());
- DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
+ PostingsEnum dpEnum = termsEnum.postings(null, null, PostingsEnum.FLAG_ALL);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(8, dpEnum.startOffset());
@@ -77,7 +77,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
// Token "abcd" occurred three times
assertEquals(new BytesRef("abcd"), termsEnum.next());
- dpEnum = termsEnum.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum.postings(null, dpEnum, PostingsEnum.FLAG_ALL);
assertEquals(3, termsEnum.totalTermFreq());
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -117,7 +117,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
IndexReader r = DirectoryReader.open(dir);
TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator(null);
assertNotNull(termsEnum.next());
- DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
+ PostingsEnum dpEnum = termsEnum.postings(null, null, PostingsEnum.FLAG_ALL);
assertEquals(2, termsEnum.totalTermFreq());
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -152,7 +152,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
IndexReader r = DirectoryReader.open(dir);
TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator(null);
assertNotNull(termsEnum.next());
- DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
+ PostingsEnum dpEnum = termsEnum.postings(null, null, PostingsEnum.FLAG_ALL);
assertEquals(2, termsEnum.totalTermFreq());
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -190,7 +190,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
IndexReader r = DirectoryReader.open(dir);
TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator(null);
assertNotNull(termsEnum.next());
- DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
+ PostingsEnum dpEnum = termsEnum.postings(null, null, PostingsEnum.FLAG_ALL);
assertEquals(2, termsEnum.totalTermFreq());
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -225,7 +225,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
IndexReader r = DirectoryReader.open(dir);
TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator(null);
assertNotNull(termsEnum.next());
- DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
+ PostingsEnum dpEnum = termsEnum.postings(null, null, PostingsEnum.FLAG_ALL);
assertEquals(2, termsEnum.totalTermFreq());
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -261,7 +261,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
IndexReader r = DirectoryReader.open(dir);
TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator(null);
assertNotNull(termsEnum.next());
- DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
+ PostingsEnum dpEnum = termsEnum.postings(null, null, PostingsEnum.FLAG_ALL);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
@@ -269,14 +269,14 @@ public class TestTermVectorsWriter extends LuceneTestCase {
assertEquals(4, dpEnum.endOffset());
assertNotNull(termsEnum.next());
- dpEnum = termsEnum.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum.postings(null, dpEnum, PostingsEnum.FLAG_ALL);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(11, dpEnum.startOffset());
assertEquals(17, dpEnum.endOffset());
assertNotNull(termsEnum.next());
- dpEnum = termsEnum.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum.postings(null, dpEnum, PostingsEnum.FLAG_ALL);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(18, dpEnum.startOffset());
@@ -305,7 +305,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
IndexReader r = DirectoryReader.open(dir);
TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator(null);
assertNotNull(termsEnum.next());
- DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
+ PostingsEnum dpEnum = termsEnum.postings(null, null, PostingsEnum.FLAG_ALL);
assertEquals(1, (int) termsEnum.totalTermFreq());
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -314,7 +314,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
assertEquals(7, dpEnum.endOffset());
assertNotNull(termsEnum.next());
- dpEnum = termsEnum.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum.postings(null, dpEnum, PostingsEnum.FLAG_ALL);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(8, dpEnum.startOffset());
@@ -347,7 +347,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
IndexReader r = DirectoryReader.open(dir);
TermsEnum termsEnum = r.getTermVectors(0).terms("field").iterator(null);
assertNotNull(termsEnum.next());
- DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
+ PostingsEnum dpEnum = termsEnum.postings(null, null, PostingsEnum.FLAG_ALL);
assertEquals(1, (int) termsEnum.totalTermFreq());
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -356,7 +356,7 @@ public class TestTermVectorsWriter extends LuceneTestCase {
assertEquals(4, dpEnum.endOffset());
assertNotNull(termsEnum.next());
- dpEnum = termsEnum.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum.postings(null, dpEnum, PostingsEnum.FLAG_ALL);
assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
dpEnum.nextPosition();
assertEquals(6, dpEnum.startOffset());
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java b/lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java
index a1d3a77..f2e33bd 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestTermdocPerf.java
@@ -119,11 +119,11 @@ public class TestTermdocPerf extends LuceneTestCase {
start = System.currentTimeMillis();
int ret=0;
- DocsEnum tdocs = null;
+ PostingsEnum tdocs = null;
final Random random = new Random(random().nextLong());
for (int i=0; i<iter; i++) {
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanQuery.java
List<ScoreDoc> hits = new ArrayList<>();
@@ -252,7 +251,7 @@ public class TestBooleanQuery extends LuceneTestCase {
for(int iter2=0;iter2<10;iter2++) {
weight = s.createNormalizedWeight(q);
- scorer = weight.scorer(s.leafContexts.get(0), null);
+ scorer = weight.scorer(s.leafContexts.get(0), PostingsEnum.FLAG_FREQS, null);
if (VERBOSE) {
System.out.println(" iter2=" + iter2);
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java
index 8b9e0bb..dcdefca 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java
@@ -30,15 +30,13 @@ import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
-import org.apache.lucene.search.BooleanQuery.BooleanWeight;
import org.apache.lucene.search.Scorer.ChildScorer;
-import org.apache.lucene.search.Weight.DefaultBulkScorer;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.LuceneTestCase;
@@ -279,8 +277,8 @@ public class TestBooleanQueryVisitSubscorers extends LuceneTestCase {
public Weight createWeight(IndexSearcher searcher) throws IOException {
return new BooleanWeight(searcher, false) {
@Override
- public BulkScorer bulkScorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
- Scorer scorer = scorer(context, acceptDocs);
+ public BulkScorer bulkScorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
+ Scorer scorer = scorer(context, flags, acceptDocs);
if (scorer == null) {
return null;
}
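// Sketch of the Weight signature change these search tests adapt to: scorer()
// and bulkScorer() take a new int flags argument declaring which postings data
// the consumer needs. Hypothetical setup; only calls visible in this patch:
Weight w = searcher.createNormalizedWeight(query);
LeafReaderContext ctx = searcher.getIndexReader().leaves().get(0);
// Plain scoring needs frequencies only; the span tests later in the patch
// pass PostingsEnum.FLAG_POSITIONS instead.
Scorer s = w.scorer(ctx, PostingsEnum.FLAG_FREQS, ctx.reader().getLiveDocs());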
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java
index 58f4f38..436a0dc 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java
@@ -92,12 +92,12 @@ public class TestBooleanScorer extends LuceneTestCase {
}
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) {
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) {
throw new UnsupportedOperationException();
}
@Override
- public BulkScorer bulkScorer(LeafReaderContext context, Bits acceptDocs) {
+ public BulkScorer bulkScorer(LeafReaderContext context, int flags, Bits acceptDocs) {
return new BulkScorer() {
@Override
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java b/lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java
index 0b6e02a..2bf3d8e 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java
@@ -19,6 +19,7 @@ package org.apache.lucene.search;
import java.io.IOException;
+import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
public class TestCachingCollector extends LuceneTestCase {
@@ -38,6 +39,36 @@ public class TestCachingCollector extends LuceneTestCase {
public int freq() throws IOException { return 0; }
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public int docID() { return 0; }
@Override
@@ -45,7 +76,7 @@ public class TestCachingCollector extends LuceneTestCase {
@Override
public int advance(int target) throws IOException { return 0; }
-
+
@Override
public long cost() {
return 1;
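// The anonymous Scorer stubs in this test (and in TestMinShouldMatch2,
// TestPositiveScoresOnlyCollector, TestScoreCachingWrappingScorer and
// TestQueryRescorer below) all gain the same boilerplate, apparently because
// Scorer now exposes the PostingsEnum position/offset API. The "no data"
// defaults, condensed:
@Override public int nextPosition() throws IOException { return -1; }      // no positions
@Override public int startOffset() throws IOException { return -1; }       // no offsets
@Override public int endOffset() throws IOException { return -1; }
@Override public BytesRef getPayload() throws IOException { return null; } // no payloads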
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java
index 2cb8f52..17906cd 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java
@@ -17,6 +17,8 @@ package org.apache.lucene.search;
* limitations under the License.
*/
+import java.io.IOException;
+
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
@@ -26,8 +28,6 @@ import org.apache.lucene.search.similarities.DefaultSimilarity;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
-import java.io.IOException;
-
/** This class only tests some basic functionality in CSQ, the main parts are mostly
* tested by MultiTermQuery tests, explanations seems to be tested in TestExplanations! */
public class TestConstantScoreQuery extends LuceneTestCase {
@@ -56,9 +56,9 @@ public class TestConstantScoreQuery extends LuceneTestCase {
public void setScorer(Scorer scorer) {
this.scorer = scorer;
assertEquals("Scorer is implemented by wrong class", scorerClassName, scorer.getClass().getName());
- if (innerScorerClassName != null && scorer instanceof ConstantScoreQuery.ConstantScorer) {
- final ConstantScoreQuery.ConstantScorer innerScorer = (ConstantScoreQuery.ConstantScorer) scorer;
- assertEquals("inner Scorer is implemented by wrong class", innerScorerClassName, innerScorer.docIdSetIterator.getClass().getName());
+ if (innerScorerClassName != null && scorer instanceof ConstantScoreQuery.ConstantScoreScorer) {
+ final ConstantScoreQuery.ConstantScoreScorer innerScorer = (ConstantScoreQuery.ConstantScoreScorer) scorer;
+ assertEquals("inner Scorer is implemented by wrong class", innerScorerClassName, innerScorer.in.getClass().getName());
}
}
@@ -108,13 +108,13 @@ public class TestConstantScoreQuery extends LuceneTestCase {
final Query csqbq = new ConstantScoreQuery(bq);
csqbq.setBoost(17.0f);
- checkHits(searcher, csq1, csq1.getBoost(), ConstantScoreQuery.ConstantScorer.class.getName(), null);
- checkHits(searcher, csq2, csq2.getBoost(), ConstantScoreQuery.ConstantScorer.class.getName(), ConstantScoreQuery.ConstantScorer.class.getName());
+ checkHits(searcher, csq1, csq1.getBoost(), ConstantScoreQuery.ConstantScoreScorer.class.getName(), null);
+ checkHits(searcher, csq2, csq2.getBoost(), ConstantScoreQuery.ConstantScoreScorer.class.getName(), ConstantScoreQuery.ConstantScoreScorer.class.getName());
// for the combined BQ, the scorer should always be BooleanScorer's BucketScorer, because our scorer supports out-of order collection!
final String bucketScorerClass = FakeScorer.class.getName();
checkHits(searcher, bq, csq1.getBoost() + csq2.getBoost(), bucketScorerClass, null);
- checkHits(searcher, csqbq, csqbq.getBoost(), ConstantScoreQuery.ConstantScorer.class.getName(), bucketScorerClass);
+ checkHits(searcher, csqbq, csqbq.getBoost(), ConstantScoreQuery.ConstantScoreScorer.class.getName(), bucketScorerClass);
} finally {
if (reader != null) reader.close();
if (directory != null) directory.close();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
index 34923e3..6c533c1 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
@@ -17,21 +17,21 @@ package org.apache.lucene.search;
* limitations under the License.
*/
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.PostingsEnum;
+import org.apache.lucene.index.FieldInvertState;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.SlowCompositeReaderWrapper;
-import org.apache.lucene.index.FieldInvertState;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.index.StoredDocument;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.similarities.DefaultSimilarity;
@@ -39,11 +39,12 @@ import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import java.io.IOException;
import java.text.DecimalFormat;
import java.text.DecimalFormatSymbols;
import java.util.Locale;
-import java.io.IOException;
/**
* Test of the DisjunctionMaxQuery.
@@ -180,7 +181,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase {
assertTrue(s.getTopReaderContext() instanceof LeafReaderContext);
final Weight dw = s.createNormalizedWeight(dq);
LeafReaderContext context = (LeafReaderContext)s.getTopReaderContext();
- final Scorer ds = dw.scorer(context, context.reader().getLiveDocs());
+ final Scorer ds = dw.scorer(context, PostingsEnum.FLAG_FREQS, context.reader().getLiveDocs());
final boolean skipOk = ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS;
if (skipOk) {
fail("firsttime skipTo found a match? ... "
@@ -196,7 +197,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase {
QueryUtils.check(random(), dq, s);
final Weight dw = s.createNormalizedWeight(dq);
LeafReaderContext context = (LeafReaderContext)s.getTopReaderContext();
- final Scorer ds = dw.scorer(context, context.reader().getLiveDocs());
+ final Scorer ds = dw.scorer(context, PostingsEnum.FLAG_FREQS, context.reader().getLiveDocs());
assertTrue("firsttime skipTo found no match",
ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals("found wrong docid", "d4", r.document(ds.docID()).get("id"));
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestFieldValueFilter.java b/lucene/core/src/test/org/apache/lucene/search/TestFieldValueFilter.java
index 052bd43..9703c06 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestFieldValueFilter.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestFieldValueFilter.java
@@ -16,6 +16,7 @@ package org.apache.lucene.search;
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
import java.io.IOException;
import org.apache.lucene.analysis.MockAnalyzer;
@@ -26,7 +27,6 @@ import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.FilterLeafReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestFilteredQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestFilteredQuery.java
index c91ef33..de55435 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestFilteredQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestFilteredQuery.java
@@ -27,7 +27,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
@@ -432,13 +432,13 @@ public class TestFilteredQuery extends LuceneTestCase {
Bits acceptDocs) throws IOException {
final boolean nullBitset = random().nextInt(10) == 5;
final LeafReader reader = context.reader();
- DocsEnum termDocsEnum = reader.termDocsEnum(new Term("field", "0"));
- if (termDocsEnum == null) {
+ PostingsEnum termPostingsEnum = reader.termDocsEnum(new Term("field", "0"));
+ if (termPostingsEnum == null) {
return null; // no docs -- return null
}
final BitSet bitSet = new BitSet(reader.maxDoc());
int d;
- while ((d = termDocsEnum.nextDoc()) != DocsEnum.NO_MORE_DOCS) {
+ while ((d = termPostingsEnum.nextDoc()) != PostingsEnum.NO_MORE_DOCS) {
bitSet.set(d, true);
}
return new DocIdSet() {
@@ -526,8 +526,8 @@ public class TestFilteredQuery extends LuceneTestCase {
}
@Override
public DocIdSetIterator iterator() throws IOException {
- final DocsEnum termDocsEnum = context.reader().termDocsEnum(new Term("field", "0"));
- if (termDocsEnum == null) {
+ final PostingsEnum termPostingsEnum = context.reader().termDocsEnum(new Term("field", "0"));
+ if (termPostingsEnum == null) {
return null;
}
return new DocIdSetIterator() {
@@ -537,24 +537,24 @@ public class TestFilteredQuery extends LuceneTestCase {
public int nextDoc() throws IOException {
assertTrue("queryFirst: "+ queryFirst + " advanced: " + advanceCalled + " next: "+ nextCalled, nextCalled || advanceCalled ^ !queryFirst);
nextCalled = true;
- return termDocsEnum.nextDoc();
+ return termPostingsEnum.nextDoc();
}
@Override
public int docID() {
- return termDocsEnum.docID();
+ return termPostingsEnum.docID();
}
@Override
public int advance(int target) throws IOException {
assertTrue("queryFirst: "+ queryFirst + " advanced: " + advanceCalled + " next: "+ nextCalled, advanceCalled || nextCalled ^ queryFirst);
advanceCalled = true;
- return termDocsEnum.advance(target);
+ return termPostingsEnum.advance(target);
}
@Override
public long cost() {
- return termDocsEnum.cost();
+ return termPostingsEnum.cost();
}
};
}
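// Condensed sketch of the filter pattern above: a DocIdSet is built by walking
// one term's PostingsEnum; `reader` stands for the LeafReader of the filter's
// context, exactly as in the hunk above.
PostingsEnum pe = reader.termDocsEnum(new Term("field", "0"));
if (pe != null) {
  BitSet bits = new BitSet(reader.maxDoc());
  int d;
  while ((d = pe.nextDoc()) != PostingsEnum.NO_MORE_DOCS) {
    bits.set(d); // mark every live doc containing the term
  }
}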
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java b/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java
index 2216ed5..7c479c1 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestMinShouldMatch2.java
@@ -29,8 +29,9 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.document.StringField;
-import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.PostingsEnum;
+import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.index.Term;
@@ -124,7 +125,7 @@ public class TestMinShouldMatch2 extends LuceneTestCase {
if (slow) {
return new SlowMinShouldMatchScorer(weight, reader, searcher);
} else {
- return weight.scorer(reader.getContext(), null);
+ return weight.scorer(reader.getContext(), PostingsEnum.FLAG_FREQS, null);
}
}
@@ -315,6 +316,36 @@ public class TestMinShouldMatch2 extends LuceneTestCase {
}
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public int docID() {
return currentDoc;
}
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java
index 26cf76a..3de48e9 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java
@@ -17,19 +17,19 @@ package org.apache.lucene.search;
* limitations under the License.
*/
-import org.apache.lucene.document.Field;
-import org.apache.lucene.util.LuceneTestCase;
+import java.io.IOException;
+import java.util.LinkedList;
+
import org.apache.lucene.document.Document;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
import org.apache.lucene.index.MultiFields;
-import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.Directory;
-
-import java.io.IOException;
-import java.util.LinkedList;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
/**
* This class tests PhrasePrefixQuery class.
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java b/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java
index 68e958e..4ac6bff 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java
@@ -17,17 +17,27 @@ package org.apache.lucene.search;
* limitations under the License.
*/
-import java.io.*;
-import java.util.*;
-
-import org.apache.lucene.analysis.*;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenFilter;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-import org.apache.lucene.document.*;
-import org.apache.lucene.index.*;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
import org.apache.lucene.search.similarities.DefaultSimilarity;
import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.*;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.TestUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -35,7 +45,7 @@ import org.junit.BeforeClass;
* Tests {@link PhraseQuery}.
*
* @see TestPositionIncrement
- */
+ */
public class TestPhraseQuery extends LuceneTestCase {
/** threshold for comparing floats */
@@ -184,7 +194,7 @@ public class TestPhraseQuery extends LuceneTestCase {
* slop is the total number of positional moves allowed
* to line up a phrase
*/
- public void testMulipleTerms() throws Exception {
+ public void testMultipleTerms() throws Exception {
query.setSlop(2);
query.add(new Term("field", "one"));
query.add(new Term("field", "three"));
@@ -670,7 +680,7 @@ public class TestPhraseQuery extends LuceneTestCase {
}
}
- assertTrue("phrase '" + sb + "' not found; start=" + start, found);
+ assertTrue("phrase '" + sb + "' not found; start=" + start + ", it=" + i + ", expected doc " + docID, found);
}
reader.close();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java b/lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java
index 6086ff6..b5edd9e 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java
@@ -31,7 +31,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.MultiFields;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
@@ -100,7 +100,7 @@ public class TestPositionIncrement extends LuceneTestCase {
IndexSearcher searcher = newSearcher(reader);
- DocsAndPositionsEnum pos = MultiFields.getTermPositionsEnum(searcher.getIndexReader(),
+ PostingsEnum pos = MultiFields.getTermPositionsEnum(searcher.getIndexReader(),
MultiFields.getLiveDocs(searcher.getIndexReader()),
"field",
new BytesRef("1"));
@@ -212,7 +212,7 @@ public class TestPositionIncrement extends LuceneTestCase {
final IndexReader readerFromWriter = writer.getReader();
LeafReader r = SlowCompositeReaderWrapper.wrap(readerFromWriter);
- DocsAndPositionsEnum tp = r.termPositionsEnum(new Term("content", "a"));
+ PostingsEnum tp = r.termDocsEnum(new Term("content", "a"), PostingsEnum.FLAG_ALL);
int count = 0;
assertTrue(tp.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
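// Sketch of the LeafReader-level replacement, with a hypothetical LeafReader
// `leaf`: the removed termPositionsEnum(term) is folded into
// termDocsEnum(term, flags), as the hunk above shows.
PostingsEnum tp2 = leaf.termDocsEnum(new Term("content", "a"), PostingsEnum.FLAG_ALL);
if (tp2 != null && tp2.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
  int freq = tp2.freq();
  for (int i = 0; i < freq; i++) {
    System.out.println("position=" + tp2.nextPosition());
  }
}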
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java b/lucene/core/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java
index f464ff9..a3fdfa4 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java
@@ -17,12 +17,15 @@ package org.apache.lucene.search;
* limitations under the License.
*/
+import java.io.IOException;
+
+import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.document.Document;
public class TestPositiveScoresOnlyCollector extends LuceneTestCase {
@@ -41,6 +44,36 @@ public class TestPositiveScoresOnlyCollector extends LuceneTestCase {
return 1;
}
+ @Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
@Override public int docID() { return idx; }
@Override public int nextDoc() {
@@ -51,7 +84,7 @@ public class TestPositiveScoresOnlyCollector extends LuceneTestCase {
idx = target;
return idx < scores.length ? idx : NO_MORE_DOCS;
}
-
+
@Override
public long cost() {
return scores.length;
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestQueryRescorer.java b/lucene/core/src/test/org/apache/lucene/search/TestQueryRescorer.java
index 20e337c..f60c34b 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestQueryRescorer.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestQueryRescorer.java
@@ -25,8 +25,8 @@ import java.util.Set;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericDocValuesField;
-import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
@@ -36,6 +36,7 @@ import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
@@ -443,7 +444,7 @@ public class TestQueryRescorer extends LuceneTestCase {
}
@Override
- public Scorer scorer(final LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public Scorer scorer(final LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
return new Scorer(null) {
int docID = -1;
@@ -459,6 +460,36 @@ public class TestQueryRescorer extends LuceneTestCase {
}
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public long cost() {
return 1;
}
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java b/lucene/core/src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java
index 3fd3c2b..b447b0b 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java
@@ -23,6 +23,7 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
public class TestScoreCachingWrappingScorer extends LuceneTestCase {
@@ -47,6 +48,36 @@ public class TestScoreCachingWrappingScorer extends LuceneTestCase {
return 1;
}
+ @Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
@Override public int docID() { return doc; }
@Override public int nextDoc() {
@@ -57,7 +88,7 @@ public class TestScoreCachingWrappingScorer extends LuceneTestCase {
doc = target;
return doc < scores.length ? doc : NO_MORE_DOCS;
}
-
+
@Override
public long cost() {
return scores.length;
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSearchWithThreads.java b/lucene/core/src/test/org/apache/lucene/search/TestSearchWithThreads.java
index cd5ac03..58de70d 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSearchWithThreads.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSearchWithThreads.java
@@ -20,16 +20,18 @@ package org.apache.lucene.search;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
+import com.carrotsearch.randomizedtesting.annotations.Seed;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
@SuppressCodecs({ "SimpleText", "Memory", "Direct" })
+@Seed("12017F5C55C9DD62")
public class TestSearchWithThreads extends LuceneTestCase {
int NUM_DOCS;
final int NUM_SEARCH_THREADS = 5;
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestSimpleExplanations.java b/lucene/core/src/test/org/apache/lucene/search/TestSimpleExplanations.java
index afbabc2..ec9b4de 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestSimpleExplanations.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestSimpleExplanations.java
@@ -242,6 +242,13 @@ public class TestSimpleExplanations extends BaseExplanationTestCase {
}
/* MultiPhraseQuery */
+
+ /*
+ "w1 w2 w3 w4 w5",
+ "w1 w3 w2 w3 zz",
+ "w1 xx w2 yy w3",
+ "w1 w3 xx w2 yy w3 zz"
+ */
public void testMPQ1() throws Exception {
MultiPhraseQuery q = new MultiPhraseQuery();
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java b/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java
index 30a3581..0377c2d 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java
@@ -24,8 +24,9 @@ import java.util.List;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.index.Term;
@@ -78,7 +79,7 @@ public class TestTermScorer extends LuceneTestCase {
Weight weight = indexSearcher.createNormalizedWeight(termQuery);
assertTrue(indexSearcher.getTopReaderContext() instanceof LeafReaderContext);
LeafReaderContext context = (LeafReaderContext)indexSearcher.getTopReaderContext();
- BulkScorer ts = weight.bulkScorer(context, context.reader().getLiveDocs());
+ BulkScorer ts = weight.bulkScorer(context, PostingsEnum.FLAG_FREQS, context.reader().getLiveDocs());
// we have 2 documents with the term all in them, one document for all the
// other values
final List docs = new ArrayList<>();
@@ -135,7 +136,7 @@ public class TestTermScorer extends LuceneTestCase {
Weight weight = indexSearcher.createNormalizedWeight(termQuery);
assertTrue(indexSearcher.getTopReaderContext() instanceof LeafReaderContext);
LeafReaderContext context = (LeafReaderContext) indexSearcher.getTopReaderContext();
- Scorer ts = weight.scorer(context, context.reader().getLiveDocs());
+ Scorer ts = weight.scorer(context, PostingsEnum.FLAG_FREQS, context.reader().getLiveDocs());
assertTrue("next did not return a doc",
ts.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
assertTrue("score is not correct", ts.score() == 1.6931472f);
@@ -154,7 +155,7 @@ public class TestTermScorer extends LuceneTestCase {
Weight weight = indexSearcher.createNormalizedWeight(termQuery);
assertTrue(indexSearcher.getTopReaderContext() instanceof LeafReaderContext);
LeafReaderContext context = (LeafReaderContext) indexSearcher.getTopReaderContext();
- Scorer ts = weight.scorer(context, context.reader().getLiveDocs());
+ Scorer ts = weight.scorer(context, PostingsEnum.FLAG_FREQS, context.reader().getLiveDocs());
assertTrue("Didn't skip", ts.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
// The next doc should be doc 5
assertTrue("doc should be number 5", ts.docID() == 5);
diff --git a/lucene/core/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java b/lucene/core/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
index 649f301..3eb9b42 100644
--- a/lucene/core/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
+++ b/lucene/core/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
@@ -20,16 +20,17 @@ package org.apache.lucene.search.spans;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.IndexReaderContext;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.CheckHits;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.Weight;
import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
@@ -183,7 +184,7 @@ public class TestNearSpansOrdered extends LuceneTestCase {
Weight w = searcher.createNormalizedWeight(q);
IndexReaderContext topReaderContext = searcher.getTopReaderContext();
LeafReaderContext leave = topReaderContext.leaves().get(0);
- Scorer s = w.scorer(leave, leave.reader().getLiveDocs());
+ Scorer s = w.scorer(leave, PostingsEnum.FLAG_POSITIONS, leave.reader().getLiveDocs());
assertEquals(1, s.advance(1));
}
diff --git a/lucene/core/src/test/org/apache/lucene/search/spans/TestSpans.java b/lucene/core/src/test/org/apache/lucene/search/spans/TestSpans.java
index 5a960f3..bab56b9 100644
--- a/lucene/core/src/test/org/apache/lucene/search/spans/TestSpans.java
+++ b/lucene/core/src/test/org/apache/lucene/search/spans/TestSpans.java
@@ -17,14 +17,12 @@ package org.apache.lucene.search.spans;
* limitations under the License.
*/
-import java.io.IOException;
-import java.util.List;
-
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.IndexWriter;
@@ -43,6 +41,9 @@ import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
+import java.io.IOException;
+import java.util.List;
+
public class TestSpans extends LuceneTestCase {
private IndexSearcher searcher;
private IndexReader reader;
@@ -429,7 +430,7 @@ public class TestSpans extends LuceneTestCase {
slop,
ordered);
- spanScorer = searcher.createNormalizedWeight(snq).scorer(ctx, ctx.reader().getLiveDocs());
+ spanScorer = searcher.createNormalizedWeight(snq).scorer(ctx, PostingsEnum.FLAG_POSITIONS, ctx.reader().getLiveDocs());
} finally {
searcher.setSimilarity(oldSim);
}
diff --git a/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionRescorer.java b/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionRescorer.java
index 18ec02c..8f77e95 100644
--- a/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionRescorer.java
+++ b/lucene/expressions/src/java/org/apache/lucene/expressions/ExpressionRescorer.java
@@ -18,7 +18,6 @@ package org.apache.lucene.expressions;
*/
import java.io.IOException;
-import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -27,12 +26,11 @@ import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.FakeScorer;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Rescorer;
-import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortRescorer;
-import org.apache.lucene.search.Weight;
/**
* A {@link Rescorer} that uses an expression to re-score
@@ -58,56 +56,6 @@ class ExpressionRescorer extends SortRescorer {
this.bindings = bindings;
}
- private static class FakeScorer extends Scorer {
- float score;
- int doc = -1;
- int freq = 1;
-
- public FakeScorer() {
- super(null);
- }
-
- @Override
- public int advance(int target) {
- throw new UnsupportedOperationException("FakeScorer doesn't support advance(int)");
- }
-
- @Override
- public int docID() {
- return doc;
- }
-
- @Override
- public int freq() {
- return freq;
- }
-
- @Override
- public int nextDoc() {
- throw new UnsupportedOperationException("FakeScorer doesn't support nextDoc()");
- }
-
- @Override
- public float score() {
- return score;
- }
-
- @Override
- public long cost() {
- return 1;
- }
-
- @Override
- public Weight getWeight() {
- throw new UnsupportedOperationException();
- }
-
- @Override
-    public Collection<ChildScorer> getChildren() {
- throw new UnsupportedOperationException();
- }
- }
-
@Override
public Explanation explain(IndexSearcher searcher, Explanation firstPassExplanation, int docID) throws IOException {
Explanation result = super.explain(searcher, firstPassExplanation, docID);
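
The private FakeScorer deleted here now lives once in org.apache.lucene.search (note the new import above). A sketch of how rescoring code drives such a stub, assuming the shared class keeps the public doc and score fields of the deleted copy and is visible to the caller; all other names are placeholders:

```java
import java.util.HashMap;
import java.util.Map;

import org.apache.lucene.search.FakeScorer;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TopDocs;

class RescoreStubSketch {
  // Sketch: expose per-hit doc and first-pass score to a ValueSource
  // through the ordinary Scorer API, without a real query execution.
  static void feed(TopDocs firstPass) {
    FakeScorer fakeScorer = new FakeScorer();
    Map<String, Scorer> context = new HashMap<>();
    context.put("scorer", fakeScorer);          // conventional context key
    for (ScoreDoc hit : firstPass.scoreDocs) {
      fakeScorer.doc = hit.doc;                 // assumes public fields, as in
      fakeScorer.score = hit.score;             // the copy removed above
      // ... evaluate FunctionValues.doubleVal(hit.doc) here ...
    }
  }
}
```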
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysQuery.java b/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysQuery.java
index 6fde4ae..648fa34 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysQuery.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysQuery.java
@@ -19,8 +19,9 @@ package org.apache.lucene.facet;
import java.io.IOException;
import java.util.Arrays;
-import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.BulkScorer;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
@@ -29,10 +30,8 @@ import org.apache.lucene.search.Filter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.BulkScorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;
-
/** Only purpose is to punch through and return a
* DrillSidewaysScorer */
@@ -111,17 +110,17 @@ class DrillSidewaysQuery extends Query {
}
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
// We can only run as a top scorer:
throw new UnsupportedOperationException();
}
@Override
- public BulkScorer bulkScorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public BulkScorer bulkScorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
// TODO: it could be better if we take acceptDocs
// into account instead of baseScorer?
- Scorer baseScorer = baseWeight.scorer(context, acceptDocs);
+ Scorer baseScorer = baseWeight.scorer(context, flags, acceptDocs);
DrillSidewaysScorer.DocsAndCost[] dims = new DrillSidewaysScorer.DocsAndCost[drillDowns.length];
int nullCount = 0;
@@ -166,7 +165,7 @@ class DrillSidewaysQuery extends Query {
dims[dim].disi = disi;
}
} else {
- DocIdSetIterator disi = ((Weight) drillDowns[dim]).scorer(context, null);
+ DocIdSetIterator disi = ((Weight) drillDowns[dim]).scorer(context, flags, null);
if (disi == null) {
nullCount++;
continue;
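
The change above illustrates the general rule for wrapping weights: thread the flags through untouched, otherwise a caller asking for positions silently gets a docs-only scorer. A minimal sketch of a delegating Weight under the new signature; the wrapper name and field are illustrative:

```java
import java.io.IOException;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;

abstract class DelegatingWeightSketch extends Weight {
  private final Weight in;                      // the wrapped weight

  DelegatingWeightSketch(Weight in) { this.in = in; }

  @Override
  public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
    // Pass flags through unchanged; dropping them here would downgrade
    // position- or offset-aware scorers produced by the inner weight.
    return in.scorer(context, flags, acceptDocs);
  }
}
```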
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysScorer.java b/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysScorer.java
index 7988cb2..5cdb948 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysScorer.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/DrillSidewaysScorer.java
@@ -21,15 +21,16 @@ import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.search.LeafCollector;
+import org.apache.lucene.search.BulkScorer;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.BulkScorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
class DrillSidewaysScorer extends BulkScorer {
@@ -168,7 +169,7 @@ class DrillSidewaysScorer extends BulkScorer {
//}
int docID = baseScorer.docID();
- nextDoc: while (docID != DocsEnum.NO_MORE_DOCS) {
+ nextDoc: while (docID != PostingsEnum.NO_MORE_DOCS) {
LeafCollector failedCollector = null;
     for (int i=0;i<dims.length;i++) {
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetSumValueSource.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/TaxonomyFacetSumValueSource.java
-    public Collection<ChildScorer> getChildren() {
- throw new UnsupportedOperationException();
- }
- }
-
  private final void sumValues(List<MatchingDocs> matchingDocs, boolean keepScores, ValueSource valueSource) throws IOException {
    final FakeScorer scorer = new FakeScorer();
    Map<String, Scorer> context = new HashMap<>();
@@ -104,7 +81,7 @@ public class TaxonomyFacetSumValueSource extends FloatTaxonomyFacets {
while ((doc = docs.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
ords.get(doc, scratch);
if (keepScores) {
- scorer.docID = doc;
+ scorer.doc = doc;
scorer.score = scores[scoresIdx++];
}
float value = (float) functionValues.doubleVal(doc);
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyReader.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyReader.java
index a216bb9..35d2210 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyReader.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyReader.java
@@ -12,7 +12,7 @@ import org.apache.lucene.facet.taxonomy.ParallelTaxonomyArrays;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
import org.apache.lucene.index.CorruptIndexException; // javadocs
import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.StoredDocument;
@@ -273,7 +273,7 @@ public class DirectoryTaxonomyReader extends TaxonomyReader {
// If we're still here, we have a cache miss. We need to fetch the
// value from disk, and then also put it in the cache:
int ret = TaxonomyReader.INVALID_ORDINAL;
- DocsEnum docs = MultiFields.getTermDocsEnum(indexReader, null, Consts.FULL, new BytesRef(FacetsConfig.pathToString(cp.components, cp.length)), 0);
+ PostingsEnum docs = MultiFields.getTermDocsEnum(indexReader, null, Consts.FULL, new BytesRef(FacetsConfig.pathToString(cp.components, cp.length)), 0);
if (docs != null && docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
ret = docs.docID();
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyWriter.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyWriter.java
index 03cc88f..7472b3a 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyWriter.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyWriter.java
@@ -28,7 +28,7 @@ import org.apache.lucene.facet.taxonomy.writercache.LruTaxonomyWriterCache;
import org.apache.lucene.facet.taxonomy.writercache.TaxonomyWriterCache;
import org.apache.lucene.index.CorruptIndexException; // javadocs
import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
@@ -382,14 +382,14 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
try {
final BytesRef catTerm = new BytesRef(FacetsConfig.pathToString(categoryPath.components, categoryPath.length));
TermsEnum termsEnum = null; // reuse
- DocsEnum docs = null; // reuse
+ PostingsEnum docs = null; // reuse
for (LeafReaderContext ctx : reader.leaves()) {
Terms terms = ctx.reader().terms(Consts.FULL);
if (terms != null) {
termsEnum = terms.iterator(termsEnum);
if (termsEnum.seekExact(catTerm)) {
// liveDocs=null because the taxonomy has no deletes
- docs = termsEnum.docs(null, docs, 0 /* freqs not required */);
+ docs = termsEnum.postings(null, docs, 0 /* freqs not required */);
// if the term was found, we know it has exactly one document.
doc = docs.nextDoc() + ctx.docBase;
break;
@@ -675,7 +675,7 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
DirectoryReader reader = readerManager.acquire();
try {
TermsEnum termsEnum = null;
- DocsEnum docsEnum = null;
+ PostingsEnum postingsEnum = null;
for (LeafReaderContext ctx : reader.leaves()) {
Terms terms = ctx.reader().terms(Consts.FULL);
if (terms != null) { // cannot really happen, but be on the safe side
@@ -689,8 +689,8 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
// is sufficient to call next(), and then doc(), exactly once with no
// 'validation' checks.
FacetLabel cp = new FacetLabel(FacetsConfig.stringToPath(t.utf8ToString()));
- docsEnum = termsEnum.docs(null, docsEnum, DocsEnum.FLAG_NONE);
- boolean res = cache.put(cp, docsEnum.nextDoc() + ctx.docBase);
+ postingsEnum = termsEnum.postings(null, postingsEnum, PostingsEnum.FLAG_NONE);
+ boolean res = cache.put(cp, postingsEnum.nextDoc() + ctx.docBase);
assert !res : "entries should not have been evicted from the cache";
} else {
// the cache is full and the next put() will evict entries from it, therefore abort the iteration.
@@ -771,7 +771,7 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
ordinalMap.setSize(size);
int base = 0;
TermsEnum te = null;
- DocsEnum docs = null;
+ PostingsEnum docs = null;
for (final LeafReaderContext ctx : r.leaves()) {
final LeafReader ar = ctx.reader();
final Terms terms = ar.terms(Consts.FULL);
@@ -779,7 +779,7 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
while (te.next() != null) {
FacetLabel cp = new FacetLabel(FacetsConfig.stringToPath(te.term().utf8ToString()));
final int ordinal = addCategory(cp);
- docs = te.docs(null, docs, DocsEnum.FLAG_NONE);
+ docs = te.postings(null, docs, PostingsEnum.FLAG_NONE);
ordinalMap.addMapping(docs.nextDoc() + base, ordinal);
}
base += ar.maxDoc(); // no deletions, so we're ok
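
Isolated from the taxonomy details, the reuse idiom this file now uses everywhere looks like the sketch below: one PostingsEnum threaded through termsEnum.postings(...) across segments, with FLAG_NONE because only docIDs are needed. Field and term values are placeholders:

```java
import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;

class PostingsReuseSketch {
  // Find the global docID of the first document holding 'term', reusing enums.
  static int firstDoc(IndexReader reader, String field, BytesRef term) throws IOException {
    TermsEnum termsEnum = null;
    PostingsEnum postings = null;                 // reused across segments
    for (LeafReaderContext ctx : reader.leaves()) {
      Terms terms = ctx.reader().terms(field);
      if (terms == null) continue;
      termsEnum = terms.iterator(termsEnum);
      if (termsEnum.seekExact(term)) {
        postings = termsEnum.postings(null, postings, PostingsEnum.FLAG_NONE);
        return postings.nextDoc() + ctx.docBase;  // leaf docID -> global docID
      }
    }
    return -1;
  }
}
```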
diff --git a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/TaxonomyIndexArrays.java b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/TaxonomyIndexArrays.java
index 6a59db8..b4dbe68 100644
--- a/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/TaxonomyIndexArrays.java
+++ b/lucene/facet/src/java/org/apache/lucene/facet/taxonomy/directory/TaxonomyIndexArrays.java
@@ -1,16 +1,16 @@
package org.apache.lucene.facet.taxonomy.directory;
-import java.io.IOException;
-
import org.apache.lucene.facet.taxonomy.ParallelTaxonomyArrays;
import org.apache.lucene.facet.taxonomy.TaxonomyReader;
import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.ArrayUtil;
+import java.io.IOException;
+
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@@ -129,9 +129,9 @@ class TaxonomyIndexArrays extends ParallelTaxonomyArrays {
// it's ok to use MultiFields because we only iterate on one posting list.
// breaking it to loop over the leaves() only complicates code for no
// apparent gain.
- DocsAndPositionsEnum positions = MultiFields.getTermPositionsEnum(reader, null,
+ PostingsEnum positions = MultiFields.getTermPositionsEnum(reader, null,
Consts.FIELD_PAYLOADS, Consts.PAYLOAD_PARENT_BYTES_REF,
- DocsAndPositionsEnum.FLAG_PAYLOADS);
+ PostingsEnum.FLAG_PAYLOADS);
// shouldn't really happen, if it does, something's wrong
if (positions == null || positions.advance(first) == DocIdSetIterator.NO_MORE_DOCS) {
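
The payload path follows the same shape: PostingsEnum.FLAG_PAYLOADS replaces DocsAndPositionsEnum.FLAG_PAYLOADS, and a position must still be consumed before getPayload() means anything. A sketch with placeholder arguments:

```java
import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.BytesRef;

class PayloadReadSketch {
  // Read the payload stored at the first position of 'term' in the first doc >= target.
  static BytesRef payloadAt(IndexReader reader, String field, BytesRef term, int target)
      throws IOException {
    PostingsEnum positions = MultiFields.getTermPositionsEnum(
        reader, null, field, term, PostingsEnum.FLAG_PAYLOADS);
    if (positions == null || positions.advance(target) == DocIdSetIterator.NO_MORE_DOCS) {
      return null;                  // term absent, or no doc at/after target
    }
    positions.nextPosition();       // must position before asking for the payload
    return positions.getPayload();
  }
}
```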
diff --git a/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java b/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java
index 16cbf40..473d13f 100644
--- a/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java
+++ b/lucene/grouping/src/java/org/apache/lucene/search/grouping/BlockGroupingCollector.java
@@ -17,13 +17,24 @@ package org.apache.lucene.search.grouping;
* limitations under the License.
*/
-
import java.io.IOException;
-import java.util.Collection;
-import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.search.*;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.FakeScorer;
+import org.apache.lucene.search.FieldComparator;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.LeafCollector;
+import org.apache.lucene.search.LeafFieldComparator;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.SimpleCollector;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.TopDocsCollector;
+import org.apache.lucene.search.TopFieldCollector;
+import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.PriorityQueue;
@@ -86,56 +97,6 @@ public class BlockGroupingCollector extends SimpleCollector {
private final GroupQueue groupQueue;
private boolean groupCompetes;
- private final static class FakeScorer extends Scorer {
-
- float score;
- int doc;
-
- public FakeScorer() {
- super(null);
- }
-
- @Override
- public float score() {
- return score;
- }
-
- @Override
- public int freq() {
- throw new UnsupportedOperationException(); // TODO: wtf does this class do?
- }
-
- @Override
- public int docID() {
- return doc;
- }
-
- @Override
- public int advance(int target) {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public int nextDoc() {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public long cost() {
- return 1;
- }
-
- @Override
- public Weight getWeight() {
- throw new UnsupportedOperationException();
- }
-
- @Override
-    public Collection<ChildScorer> getChildren() {
- throw new UnsupportedOperationException();
- }
- }
-
private static final class OneGroup {
LeafReaderContext readerContext;
//int groupOrd;
diff --git a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
index ec3e829..943c15b 100644
--- a/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
+++ b/lucene/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
@@ -18,10 +18,27 @@
package org.apache.lucene.search.grouping;
import java.io.IOException;
-import java.util.*;
-
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import com.carrotsearch.randomizedtesting.annotations.Seed;
import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.*;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.SortedDocValuesField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexReaderContext;
@@ -34,7 +51,20 @@ import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.valuesource.BytesRefFieldSource;
-import org.apache.lucene.search.*;
+import org.apache.lucene.search.CachingCollector;
+import org.apache.lucene.search.CachingWrapperFilter;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.FieldDoc;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MultiCollector;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryWrapperFilter;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.Weight;
import org.apache.lucene.search.grouping.function.FunctionAllGroupsCollector;
import org.apache.lucene.search.grouping.function.FunctionFirstPassGroupingCollector;
import org.apache.lucene.search.grouping.function.FunctionSecondPassGroupingCollector;
@@ -54,6 +84,7 @@ import org.apache.lucene.util.mutable.MutableValueStr;
// - test ties
// - test compound sort
+@Seed("3C4E441C6A8DA6A2:4E026113DBED10D1")
public class TestGrouping extends LuceneTestCase {
public void testBasic() throws Exception {
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermVector.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermVector.java
index 936ac2b..c381fad 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermVector.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermVector.java
@@ -16,6 +16,7 @@ package org.apache.lucene.search.highlight;
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
import java.io.IOException;
import org.apache.lucene.analysis.TokenStream;
@@ -24,7 +25,7 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PackedTokenAttributeImpl;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.AttributeFactory;
@@ -122,7 +123,7 @@ public final class TokenStreamFromTermVector extends TokenStream {
final TermsEnum termsEnum = vector.iterator(null);
BytesRef termBytesRef;
- DocsAndPositionsEnum dpEnum = null;
+ PostingsEnum dpEnum = null;
//int sumFreq = 0;
while ((termBytesRef = termsEnum.next()) != null) {
//Grab the term (in same way as BytesRef.utf8ToString() but we don't want a String obj)
@@ -130,7 +131,7 @@ public final class TokenStreamFromTermVector extends TokenStream {
final char[] termChars = new char[termBytesRef.length];
final int termCharsLen = UnicodeUtil.UTF8toUTF16(termBytesRef, termChars);
- dpEnum = termsEnum.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum.postings(null, dpEnum, PostingsEnum.FLAG_POSITIONS);
assert dpEnum != null; // presumably checked by TokenSources.hasPositions earlier
dpEnum.nextDoc();
final int freq = dpEnum.freq();
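
Stripped of the highlighter specifics, the rewritten term-vector loop above reduces to this sketch: postings(..., FLAG_POSITIONS) replaces docsAndPositions(...), and nextDoc() is called exactly once because a term vector models a single document:

```java
import java.io.IOException;

import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;

class TermVectorPositionsSketch {
  static void dump(Terms vector) throws IOException { // vector: one doc's term vector
    TermsEnum termsEnum = vector.iterator(null);
    PostingsEnum dpEnum = null;                       // reused per term
    BytesRef term;
    while ((term = termsEnum.next()) != null) {
      dpEnum = termsEnum.postings(null, dpEnum, PostingsEnum.FLAG_POSITIONS);
      dpEnum.nextDoc();                               // the single pseudo-document
      for (int i = 0, freq = dpEnum.freq(); i < freq; i++) {
        int position = dpEnum.nextPosition();         // returned in increasing order
      }
    }
  }
}
```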
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/MultiTermHighlighting.java b/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/MultiTermHighlighting.java
index 67cdf91..4655564 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/MultiTermHighlighting.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/MultiTermHighlighting.java
@@ -26,7 +26,7 @@ import java.util.List;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.AutomatonQuery;
import org.apache.lucene.search.BooleanClause;
@@ -47,10 +47,10 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util.automaton.Automata;
-import org.apache.lucene.util.automaton.Operations;
+import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.CharacterRunAutomaton;
import org.apache.lucene.util.automaton.LevenshteinAutomata;
-import org.apache.lucene.util.automaton.Automaton;
+import org.apache.lucene.util.automaton.Operations;
/**
* Support for highlighting multiterm queries in PostingsHighlighter.
@@ -197,7 +197,7 @@ class MultiTermHighlighting {
*
* This is solely used internally by PostingsHighlighter: DO NOT USE THIS METHOD!
*/
- static DocsAndPositionsEnum getDocsEnum(final TokenStream ts, final CharacterRunAutomaton[] matchers) throws IOException {
+ static PostingsEnum getDocsEnum(final TokenStream ts, final CharacterRunAutomaton[] matchers) throws IOException {
final CharTermAttribute charTermAtt = ts.addAttribute(CharTermAttribute.class);
final OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
ts.reset();
@@ -207,7 +207,7 @@ class MultiTermHighlighting {
// would only serve to make this method less bogus.
// instead, we always return freq() = Integer.MAX_VALUE and let PH terminate based on offset...
- return new DocsAndPositionsEnum() {
+ return new PostingsEnum() {
int currentDoc = -1;
int currentMatch = -1;
int currentStartOffset = -1;
@@ -237,7 +237,19 @@ class MultiTermHighlighting {
currentStartOffset = currentEndOffset = Integer.MAX_VALUE;
return Integer.MAX_VALUE;
}
-
+
+ @Override
+ public int startPosition() throws IOException {
+ if (currentStartOffset < Integer.MAX_VALUE)
+ return 0;
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return startPosition();
+ }
+
@Override
public int freq() throws IOException {
return Integer.MAX_VALUE; // lie
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java b/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java
index 14f364b..17044fe 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java
@@ -31,7 +31,7 @@ import java.util.SortedSet;
import java.util.TreeSet;
import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexReader;
@@ -455,7 +455,7 @@ public class PostingsHighlighter {
  private Map<Integer,Object> highlightField(String field, String contents[], BreakIterator bi, BytesRef terms[], int[] docids, List<LeafReaderContext> leaves, int maxPassages, Query query) throws IOException {
    Map<Integer,Object> highlights = new HashMap<>();
-
+
PassageFormatter fieldFormatter = getFormatter(field);
if (fieldFormatter == null) {
throw new NullPointerException("PassageFormatter cannot be null");
@@ -477,7 +477,7 @@ public class PostingsHighlighter {
// we are processing in increasing docid order, so we only need to reinitialize stuff on segment changes
// otherwise, we will just advance() existing enums to the new document in the same segment.
- DocsAndPositionsEnum postings[] = null;
+ PostingsEnum postings[] = null;
TermsEnum termsEnum = null;
int lastLeaf = -1;
@@ -499,7 +499,7 @@ public class PostingsHighlighter {
Terms t = r.terms(field);
if (t != null) {
termsEnum = t.iterator(null);
- postings = new DocsAndPositionsEnum[terms.length];
+ postings = new PostingsEnum[terms.length];
}
}
if (termsEnum == null) {
@@ -508,7 +508,7 @@ public class PostingsHighlighter {
// if there are multi-term matches, we have to initialize the "fake" enum for each document
if (automata.length > 0) {
- DocsAndPositionsEnum dp = MultiTermHighlighting.getDocsEnum(analyzer.tokenStream(field, content), automata);
+ PostingsEnum dp = MultiTermHighlighting.getDocsEnum(analyzer.tokenStream(field, content), automata);
dp.advance(doc - subContext.docBase);
postings[terms.length-1] = dp; // last term is the multiterm matcher
}
@@ -534,7 +534,7 @@ public class PostingsHighlighter {
  // we can intersect these with the postings lists via BreakIterator.preceding(offset),
// score each sentence as norm(sentenceStartOffset) * sum(weight * tf(freq))
private Passage[] highlightDoc(String field, BytesRef terms[], int contentLength, BreakIterator bi, int doc,
- TermsEnum termsEnum, DocsAndPositionsEnum[] postings, int n) throws IOException {
+ TermsEnum termsEnum, PostingsEnum[] postings, int n) throws IOException {
PassageScorer scorer = getScorer(field);
if (scorer == null) {
throw new NullPointerException("PassageScorer cannot be null");
@@ -543,7 +543,7 @@ public class PostingsHighlighter {
float weights[] = new float[terms.length];
// initialize postings
for (int i = 0; i < terms.length; i++) {
- DocsAndPositionsEnum de = postings[i];
+ PostingsEnum de = postings[i];
int pDoc;
if (de == EMPTY) {
continue;
@@ -552,7 +552,7 @@ public class PostingsHighlighter {
if (!termsEnum.seekExact(terms[i])) {
continue; // term not found
}
- de = postings[i] = termsEnum.docsAndPositions(null, null, DocsAndPositionsEnum.FLAG_OFFSETS);
+ de = postings[i] = termsEnum.postings(null, null, PostingsEnum.FLAG_OFFSETS);
if (de == null) {
// no positions available
throw new IllegalArgumentException("field '" + field + "' was indexed without offsets, cannot highlight");
@@ -590,7 +590,7 @@ public class PostingsHighlighter {
OffsetsEnum off;
while ((off = pq.poll()) != null) {
- final DocsAndPositionsEnum dp = off.dp;
+ final PostingsEnum dp = off.dp;
int start = dp.startOffset();
if (start == -1) {
throw new IllegalArgumentException("field '" + field + "' was indexed without offsets, cannot highlight");
@@ -698,11 +698,11 @@ public class PostingsHighlighter {
}
  private static class OffsetsEnum implements Comparable<OffsetsEnum> {
- DocsAndPositionsEnum dp;
+ PostingsEnum dp;
int pos;
int id;
- OffsetsEnum(DocsAndPositionsEnum dp, int id) throws IOException {
+ OffsetsEnum(PostingsEnum dp, int id) throws IOException {
this.dp = dp;
this.id = id;
this.pos = 1;
@@ -724,10 +724,20 @@ public class PostingsHighlighter {
}
}
- private static final DocsAndPositionsEnum EMPTY = new DocsAndPositionsEnum() {
+ private static final PostingsEnum EMPTY = new PostingsEnum() {
+
+ @Override
+ public int nextPosition() throws IOException { return -1; }
@Override
- public int nextPosition() throws IOException { return 0; }
+ public int startPosition() throws IOException {
+ assert false; return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
@Override
public int startOffset() throws IOException { return Integer.MAX_VALUE; }
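
With DocsEnum and DocsAndPositionsEnum merged, every PostingsEnum implementation, including sentinels like EMPTY above, must answer the position accessors. Roughly the minimal surface of an always-empty enum (a sketch, not the branch's exact EMPTY):

```java
import java.io.IOException;

import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.util.BytesRef;

final class EmptyPostingsSketch extends PostingsEnum {
  @Override public int nextPosition() throws IOException { return -1; }
  @Override public int startPosition() throws IOException { return -1; }
  @Override public int endPosition() throws IOException { return -1; }
  @Override public int startOffset() throws IOException { return -1; }
  @Override public int endOffset() throws IOException { return -1; }
  @Override public BytesRef getPayload() throws IOException { return null; }
  @Override public int freq() throws IOException { return 0; }
  @Override public int docID() { return NO_MORE_DOCS; }
  @Override public int nextDoc() throws IOException { return NO_MORE_DOCS; }
  @Override public int advance(int target) throws IOException { return NO_MORE_DOCS; }
  @Override public long cost() { return 0; }
}
```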
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java
index 29c307a..80de2d8 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java
@@ -22,7 +22,7 @@ import java.util.Iterator;
import java.util.LinkedList;
import java.util.Set;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
@@ -93,7 +93,7 @@ public class FieldTermStack {
final CharsRefBuilder spare = new CharsRefBuilder();
final TermsEnum termsEnum = vector.iterator(null);
- DocsAndPositionsEnum dpEnum = null;
+ PostingsEnum dpEnum = null;
BytesRef text;
int numDocs = reader.maxDoc();
@@ -104,7 +104,7 @@ public class FieldTermStack {
if (!termSet.contains(term)) {
continue;
}
- dpEnum = termsEnum.docsAndPositions(null, dpEnum);
+ dpEnum = termsEnum.postings(null, dpEnum, PostingsEnum.FLAG_POSITIONS);
if (dpEnum == null) {
// null snippet
return;
diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/highlight/custom/HighlightCustomQueryTest.java b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/custom/HighlightCustomQueryTest.java
index 536259a..a52d9b7 100644
--- a/lucene/highlighter/src/test/org/apache/lucene/search/highlight/custom/HighlightCustomQueryTest.java
+++ b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/custom/HighlightCustomQueryTest.java
@@ -16,9 +16,6 @@ package org.apache.lucene.search.highlight.custom;
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-import java.io.IOException;
-import java.util.Map;
-
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenFilter;
import org.apache.lucene.analysis.MockTokenizer;
@@ -36,6 +33,9 @@ import org.apache.lucene.search.highlight.WeightedSpanTerm;
import org.apache.lucene.search.highlight.WeightedSpanTermExtractor;
import org.apache.lucene.util.LuceneTestCase;
+import java.io.IOException;
+import java.util.Map;
+
/**
* Tests the extensibility of {@link WeightedSpanTermExtractor} and
* {@link QueryScorer} in a user defined package
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/FakeScorer.java b/lucene/join/src/java/org/apache/lucene/search/join/FakeScorer.java
deleted file mode 100644
index cbd1ff8..0000000
--- a/lucene/join/src/java/org/apache/lucene/search/join/FakeScorer.java
+++ /dev/null
@@ -1,75 +0,0 @@
-package org.apache.lucene.search.join;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.util.Collection;
-
-import org.apache.lucene.search.LeafCollector;
-import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.Weight;
-
-/** Passed to {@link LeafCollector#setScorer} during join collection. */
-final class FakeScorer extends Scorer {
- float score;
- int doc = -1;
- int freq = 1;
-
- public FakeScorer() {
- super(null);
- }
-
- @Override
- public int advance(int target) {
- throw new UnsupportedOperationException("FakeScorer doesn't support advance(int)");
- }
-
- @Override
- public int docID() {
- return doc;
- }
-
- @Override
- public int freq() {
- throw new UnsupportedOperationException("FakeScorer doesn't support freq()");
- }
-
- @Override
- public int nextDoc() {
- throw new UnsupportedOperationException("FakeScorer doesn't support nextDoc()");
- }
-
- @Override
- public float score() {
- return score;
- }
-
- @Override
- public long cost() {
- return 1;
- }
-
- @Override
- public Weight getWeight() {
- throw new UnsupportedOperationException();
- }
-
- @Override
-  public Collection<ChildScorer> getChildren() {
- throw new UnsupportedOperationException();
- }
-}
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/TermsIncludingScoreQuery.java b/lucene/join/src/java/org/apache/lucene/search/join/TermsIncludingScoreQuery.java
index ce7f445..6743b92 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/TermsIncludingScoreQuery.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/TermsIncludingScoreQuery.java
@@ -21,18 +21,16 @@ import java.io.IOException;
import java.util.Locale;
import java.util.Set;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.search.BulkScorer;
import org.apache.lucene.search.ComplexExplanation;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
@@ -137,11 +135,11 @@ class TermsIncludingScoreQuery extends Query {
if (terms != null) {
segmentTermsEnum = terms.iterator(segmentTermsEnum);
BytesRef spare = new BytesRef();
- DocsEnum docsEnum = null;
+ PostingsEnum postingsEnum = null;
for (int i = 0; i < TermsIncludingScoreQuery.this.terms.size(); i++) {
if (segmentTermsEnum.seekExact(TermsIncludingScoreQuery.this.terms.get(ords[i], spare))) {
- docsEnum = segmentTermsEnum.docs(null, docsEnum, DocsEnum.FLAG_NONE);
- if (docsEnum.advance(doc) == doc) {
+ postingsEnum = segmentTermsEnum.postings(null, postingsEnum, PostingsEnum.FLAG_NONE);
+ if (postingsEnum.advance(doc) == doc) {
final float score = TermsIncludingScoreQuery.this.scores[ords[i]];
return new ComplexExplanation(true, score, "Score based on join value " + segmentTermsEnum.term().utf8ToString());
}
@@ -167,7 +165,7 @@ class TermsIncludingScoreQuery extends Query {
}
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
Terms terms = context.reader().terms(field);
if (terms == null) {
return null;
@@ -183,9 +181,10 @@ class TermsIncludingScoreQuery extends Query {
return new SVInOrderScorer(this, acceptDocs, segmentTermsEnum, context.reader().maxDoc(), cost);
}
}
+
};
}
-
+
class SVInOrderScorer extends Scorer {
final DocIdSetIterator matchingDocsIterator;
@@ -205,12 +204,12 @@ class TermsIncludingScoreQuery extends Query {
protected void fillDocsAndScores(FixedBitSet matchingDocs, Bits acceptDocs, TermsEnum termsEnum) throws IOException {
BytesRef spare = new BytesRef();
- DocsEnum docsEnum = null;
+ PostingsEnum postingsEnum = null;
for (int i = 0; i < terms.size(); i++) {
if (termsEnum.seekExact(terms.get(ords[i], spare))) {
- docsEnum = termsEnum.docs(acceptDocs, docsEnum, DocsEnum.FLAG_NONE);
+ postingsEnum = termsEnum.postings(acceptDocs, postingsEnum, PostingsEnum.FLAG_NONE);
float score = TermsIncludingScoreQuery.this.scores[ords[i]];
- for (int doc = docsEnum.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = docsEnum.nextDoc()) {
+ for (int doc = postingsEnum.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = postingsEnum.nextDoc()) {
matchingDocs.set(doc);
          // In the case the same doc is also related to another doc, a score might be overwritten. I think this
// can only happen in a many-to-many relation
@@ -231,6 +230,36 @@ class TermsIncludingScoreQuery extends Query {
}
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public int docID() {
return currentDoc;
}
@@ -261,12 +290,12 @@ class TermsIncludingScoreQuery extends Query {
@Override
protected void fillDocsAndScores(FixedBitSet matchingDocs, Bits acceptDocs, TermsEnum termsEnum) throws IOException {
BytesRef spare = new BytesRef();
- DocsEnum docsEnum = null;
+ PostingsEnum postingsEnum = null;
for (int i = 0; i < terms.size(); i++) {
if (termsEnum.seekExact(terms.get(ords[i], spare))) {
- docsEnum = termsEnum.docs(acceptDocs, docsEnum, DocsEnum.FLAG_NONE);
+ postingsEnum = termsEnum.postings(acceptDocs, postingsEnum, PostingsEnum.FLAG_NONE);
float score = TermsIncludingScoreQuery.this.scores[ords[i]];
- for (int doc = docsEnum.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = docsEnum.nextDoc()) {
+ for (int doc = postingsEnum.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = postingsEnum.nextDoc()) {
// I prefer this:
/*if (scores[doc] < score) {
scores[doc] = score;
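
The rewritten explain() above relies on a compact membership test: with FLAG_NONE the enum carries docIDs only, and advance(doc) == doc asks whether that exact document contains the term. The test in isolation, with placeholder names:

```java
import java.io.IOException;

import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;

class TermMembershipSketch {
  // Does segment-local document 'doc' contain 'term'? A docs-only enum suffices.
  static boolean contains(TermsEnum termsEnum, BytesRef term, int doc) throws IOException {
    if (!termsEnum.seekExact(term)) {
      return false;                                  // term absent in this segment
    }
    PostingsEnum postings = termsEnum.postings(null, null, PostingsEnum.FLAG_NONE);
    return postings.advance(doc) == doc;             // lands exactly on 'doc' iff present
  }
}
```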
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/ToChildBlockJoinQuery.java b/lucene/join/src/java/org/apache/lucene/search/join/ToChildBlockJoinQuery.java
index 5a39106..26790f0 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/ToChildBlockJoinQuery.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/ToChildBlockJoinQuery.java
@@ -25,7 +25,6 @@ import java.util.Set;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
-import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
@@ -34,6 +33,7 @@ import org.apache.lucene.search.Weight;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
/**
* Just like {@link ToParentBlockJoinQuery}, except this
@@ -128,9 +128,9 @@ public class ToChildBlockJoinQuery extends Query {
// NOTE: acceptDocs applies (and is checked) only in the
// child document space
@Override
- public Scorer scorer(LeafReaderContext readerContext, Bits acceptDocs) throws IOException {
+ public Scorer scorer(LeafReaderContext readerContext, int flags, Bits acceptDocs) throws IOException {
- final Scorer parentScorer = parentWeight.scorer(readerContext, null);
+ final Scorer parentScorer = parentWeight.scorer(readerContext, flags, null);
if (parentScorer == null) {
// No matches
@@ -274,6 +274,36 @@ public class ToChildBlockJoinQuery extends Query {
}
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public int advance(int childTarget) throws IOException {
//System.out.println("Q.advance childTarget=" + childTarget);
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinCollector.java b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinCollector.java
index 36fc37e..bd0e5ae 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinCollector.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinCollector.java
@@ -17,17 +17,34 @@ package org.apache.lucene.search.join;
* limitations under the License.
*/
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Queue;
+
+import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.IndexWriter; // javadocs
-import org.apache.lucene.search.*;
+import org.apache.lucene.search.Collector;
+import org.apache.lucene.search.FakeScorer;
+import org.apache.lucene.search.FieldComparator;
+import org.apache.lucene.search.FieldValueHitQueue;
+import org.apache.lucene.search.LeafCollector;
+import org.apache.lucene.search.LeafFieldComparator;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.ScoreCachingWrappingScorer;
+import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Scorer.ChildScorer;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.TopDocsCollector;
+import org.apache.lucene.search.TopFieldCollector;
+import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.search.grouping.GroupDocs;
import org.apache.lucene.search.grouping.TopGroups;
import org.apache.lucene.util.ArrayUtil;
-import java.io.IOException;
-import java.util.*;
-
/** Collects parent document hits for a Query containing one or more
* BlockJoinQuery clauses, sorted by the
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinIndexSearcher.java b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinIndexSearcher.java
index 31a0463..bf45f69 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinIndexSearcher.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinIndexSearcher.java
@@ -21,6 +21,7 @@ import java.io.IOException;
import java.util.List;
import java.util.concurrent.ExecutorService;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Collector;
@@ -55,7 +56,7 @@ public class ToParentBlockJoinIndexSearcher extends IndexSearcher {
// we force the use of Scorer (not BulkScorer) to make sure
// that the scorer passed to LeafCollector.setScorer supports
// Scorer.getChildren
- Scorer scorer = weight.scorer(ctx, ctx.reader().getLiveDocs());
+ Scorer scorer = weight.scorer(ctx, PostingsEnum.FLAG_NONE, ctx.reader().getLiveDocs());
if (scorer != null) {
final LeafCollector leafCollector = collector.getLeafCollector(ctx);
leafCollector.setScorer(scorer);
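
The comment above is the reason this searcher cannot use bulkScorer(): only Scorer exposes getChildren(), which block-join collection needs. A sketch of the per-leaf loop under that constraint, with the collector wiring simplified:

```java
import java.io.IOException;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;

class LeafScorerLoopSketch {
  static void search(LeafReaderContext ctx, Weight weight, Collector collector) throws IOException {
    // FLAG_NONE: join collection needs matches, not positions or offsets.
    Scorer scorer = weight.scorer(ctx, PostingsEnum.FLAG_NONE, ctx.reader().getLiveDocs());
    if (scorer == null) return;              // no matches in this segment
    LeafCollector leafCollector = collector.getLeafCollector(ctx);
    leafCollector.setScorer(scorer);         // child scorers reachable via getChildren()
    for (int doc = scorer.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = scorer.nextDoc()) {
      leafCollector.collect(doc);
    }
  }
}
```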
diff --git a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinQuery.java b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinQuery.java
index bba2225..90aebb4 100644
--- a/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinQuery.java
+++ b/lucene/join/src/java/org/apache/lucene/search/join/ToParentBlockJoinQuery.java
@@ -23,6 +23,7 @@ import java.util.Collections;
import java.util.Locale;
import java.util.Set;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.LeafReaderContext;
@@ -39,6 +40,7 @@ import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
/**
* This query requires that you index
@@ -160,9 +162,9 @@ public class ToParentBlockJoinQuery extends Query {
// NOTE: acceptDocs applies (and is checked) only in the
// parent document space
@Override
- public Scorer scorer(LeafReaderContext readerContext, Bits acceptDocs) throws IOException {
+ public Scorer scorer(LeafReaderContext readerContext, int flags, Bits acceptDocs) throws IOException {
- final Scorer childScorer = childWeight.scorer(readerContext, readerContext.reader().getLiveDocs());
+ final Scorer childScorer = childWeight.scorer(readerContext, flags, readerContext.reader().getLiveDocs());
if (childScorer == null) {
// No matches
return null;
@@ -188,7 +190,7 @@ public class ToParentBlockJoinQuery extends Query {
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
- BlockJoinScorer scorer = (BlockJoinScorer) scorer(context, context.reader().getLiveDocs());
+ BlockJoinScorer scorer = (BlockJoinScorer) scorer(context, PostingsEnum.FLAG_FREQS, context.reader().getLiveDocs());
if (scorer != null && scorer.advance(doc) == doc) {
return scorer.explain(context.docBase);
}
@@ -370,6 +372,36 @@ public class ToParentBlockJoinQuery extends Query {
}
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public int advance(int parentTarget) throws IOException {
//System.out.println("Q.advance parentTarget=" + parentTarget);
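
Taken together, the join changes show the flag ladder in use: the index searcher asks for FLAG_NONE, explain() for FLAG_FREQS, and the span tests earlier for FLAG_POSITIONS. All six constants appear in this patch; how they nest is inferred from their use here, so treat the comments as assumptions:

```java
import org.apache.lucene.index.PostingsEnum;

class PostingsFlagsSketch {
  // Detail levels used across this patch; callers request the weakest one they need.
  static final int DOCS_ONLY  = PostingsEnum.FLAG_NONE;      // docIDs, nothing else
  static final int WITH_FREQS = PostingsEnum.FLAG_FREQS;     // adds per-doc term frequency
  static final int WITH_POS   = PostingsEnum.FLAG_POSITIONS; // adds term positions
  static final int WITH_OFFS  = PostingsEnum.FLAG_OFFSETS;   // positions plus character offsets
  static final int WITH_PAY   = PostingsEnum.FLAG_PAYLOADS;  // positions plus payload bytes
  static final int EVERYTHING = PostingsEnum.FLAG_ALL;       // all of the above
}
```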
diff --git a/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java b/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java
index 63ba2bb..c213f27 100644
--- a/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java
+++ b/lucene/join/src/test/org/apache/lucene/search/join/TestBlockJoin.java
@@ -25,14 +25,55 @@ import java.util.List;
import java.util.Locale;
import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.*;
-import org.apache.lucene.index.*;
-import org.apache.lucene.search.*;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.SortedDocValuesField;
+import org.apache.lucene.document.StoredField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.PostingsEnum;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.LogDocMergePolicy;
+import org.apache.lucene.index.MultiFields;
+import org.apache.lucene.index.NoMergePolicy;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.ReaderUtil;
+import org.apache.lucene.index.StoredDocument;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.FieldDoc;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.MultiTermQuery;
+import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryUtils;
+import org.apache.lucene.search.QueryWrapperFilter;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.Weight;
import org.apache.lucene.search.grouping.GroupDocs;
import org.apache.lucene.search.grouping.TopGroups;
import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.*;
+import org.apache.lucene.util.BitSet;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.NumericUtils;
+import org.apache.lucene.util.TestUtil;
public class TestBlockJoin extends LuceneTestCase {
@@ -624,9 +665,9 @@ public class TestBlockJoin extends LuceneTestCase {
      for(int docIDX=0;docIDX<results.totalHits;docIDX++) {
diff --git a/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java b/lucene/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java
      TreeSet<BytesRef> joinValues = new TreeSet<>(BytesRef.getUTF8SortedAsUnicodeComparator());
joinValues.addAll(joinValueToJoinScores.keySet());
for (BytesRef joinValue : joinValues) {
termsEnum = terms.iterator(termsEnum);
if (termsEnum.seekExact(joinValue)) {
- docsEnum = termsEnum.docs(slowCompositeReader.getLiveDocs(), docsEnum, DocsEnum.FLAG_NONE);
+ postingsEnum = termsEnum.postings(slowCompositeReader.getLiveDocs(), postingsEnum, PostingsEnum.FLAG_NONE);
JoinScore joinScore = joinValueToJoinScores.get(joinValue);
- for (int doc = docsEnum.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = docsEnum.nextDoc()) {
+ for (int doc = postingsEnum.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = postingsEnum.nextDoc()) {
// First encountered join value determines the score.
// Something to keep in mind for many-to-many relations.
if (!docToJoinScore.containsKey(doc)) {
@@ -828,9 +828,9 @@ public class TestJoinUtil extends LuceneTestCase {
}
for (RandomDoc otherSideDoc : otherMatchingDocs) {
- DocsEnum docsEnum = MultiFields.getTermDocsEnum(topLevelReader, MultiFields.getLiveDocs(topLevelReader), "id", new BytesRef(otherSideDoc.id), 0);
- assert docsEnum != null;
- int doc = docsEnum.nextDoc();
+ PostingsEnum postingsEnum = MultiFields.getTermDocsEnum(topLevelReader, MultiFields.getLiveDocs(topLevelReader), "id", new BytesRef(otherSideDoc.id), 0);
+ assert postingsEnum != null;
+ int doc = postingsEnum.nextDoc();
expectedResult.set(doc);
}
}
diff --git a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
index a578102..85a691b 100644
--- a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
+++ b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
@@ -34,8 +34,7 @@ import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.FieldInvertState;
@@ -979,20 +978,12 @@ public class MemoryIndex {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) {
- if (reuse == null || !(reuse instanceof MemoryDocsEnum)) {
- reuse = new MemoryDocsEnum();
- }
- return ((MemoryDocsEnum) reuse).reset(liveDocs, info.sliceArray.freq[info.sortedTerms[termUpto]]);
- }
-
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) {
- if (reuse == null || !(reuse instanceof MemoryDocsAndPositionsEnum)) {
- reuse = new MemoryDocsAndPositionsEnum();
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) {
+ if (reuse == null || !(reuse instanceof MemoryPostingsEnum)) {
+ reuse = new MemoryPostingsEnum();
}
final int ord = info.sortedTerms[termUpto];
- return ((MemoryDocsAndPositionsEnum) reuse).reset(liveDocs, info.sliceArray.start[ord], info.sliceArray.end[ord], info.sliceArray.freq[ord]);
+ return ((MemoryPostingsEnum) reuse).reset(liveDocs, info.sliceArray.start[ord], info.sliceArray.end[ord], info.sliceArray.freq[ord]);
}
@Override
@@ -1009,69 +1000,26 @@ public class MemoryIndex {
}
}
- private class MemoryDocsEnum extends DocsEnum {
- private boolean hasNext;
- private Bits liveDocs;
- private int doc = -1;
- private int freq;
-
- public DocsEnum reset(Bits liveDocs, int freq) {
- this.liveDocs = liveDocs;
- hasNext = true;
- doc = -1;
- this.freq = freq;
- return this;
- }
-
- @Override
- public int docID() {
- return doc;
- }
-
- @Override
- public int nextDoc() {
- if (hasNext && (liveDocs == null || liveDocs.get(0))) {
- hasNext = false;
- return doc = 0;
- } else {
- return doc = NO_MORE_DOCS;
- }
- }
-
- @Override
- public int advance(int target) throws IOException {
- return slowAdvance(target);
- }
+ private class MemoryPostingsEnum extends PostingsEnum {
- @Override
- public int freq() throws IOException {
- return freq;
- }
-
- @Override
- public long cost() {
- return 1;
- }
- }
-
- private class MemoryDocsAndPositionsEnum extends DocsAndPositionsEnum {
private final SliceReader sliceReader;
private int posUpto; // for assert
private boolean hasNext;
private Bits liveDocs;
private int doc = -1;
private int freq;
+ private int pos;
private int startOffset;
private int endOffset;
private int payloadIndex;
private final BytesRefBuilder payloadBuilder;//only non-null when storePayloads
- public MemoryDocsAndPositionsEnum() {
+ public MemoryPostingsEnum() {
this.sliceReader = new SliceReader(intBlockPool);
this.payloadBuilder = storePayloads ? new BytesRefBuilder() : null;
}
- public DocsAndPositionsEnum reset(Bits liveDocs, int start, int end, int freq) {
+ public PostingsEnum reset(Bits liveDocs, int start, int end, int freq) {
this.liveDocs = liveDocs;
this.sliceReader.reset(start, end);
posUpto = 0; // for assert
@@ -1089,6 +1037,7 @@ public class MemoryIndex {
@Override
public int nextDoc() {
+ pos = -1;
if (hasNext && (liveDocs == null || liveDocs.get(0))) {
hasNext = false;
return doc = 0;
@@ -1109,10 +1058,12 @@ public class MemoryIndex {
@Override
public int nextPosition() {
- assert posUpto++ < freq;
+ posUpto++;
+ assert posUpto <= freq;
assert !sliceReader.endOfSlice() : " stores offsets : " + startOffset;
-      int pos = sliceReader.readInt();
+      pos = sliceReader.readInt();
if (storeOffsets) {
startOffset = sliceReader.readInt();
endOffset = sliceReader.readInt();
}
@@ -1123,6 +1074,16 @@ public class MemoryIndex {
}
@Override
+ public int startPosition() throws IOException {
+ return pos;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return pos;
+ }
+
+ @Override
public int startOffset() {
return startOffset;
}
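
One behavioral addition visible in the MemoryIndex hunks: after nextDoc() the current position resets to -1, and startPosition()/endPosition() report the value most recently returned by nextPosition(). A consumer-side sketch of that contract:

```java
import java.io.IOException;

import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.search.DocIdSetIterator;

class PositionContractSketch {
  // Walk one document's positions, checking the start/end-position contract
  // that MemoryPostingsEnum above implements (start == end for a single term).
  static void walk(PostingsEnum postings) throws IOException {
    if (postings.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) return;
    assert postings.startPosition() == -1;   // nothing consumed yet on this doc
    int freq = postings.freq();
    for (int i = 0; i < freq; i++) {
      int pos = postings.nextPosition();
      assert postings.startPosition() == pos;
      assert postings.endPosition() == pos;  // a single term occupies one position
    }
  }
}
```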
diff --git a/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstRAMDir.java b/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstRAMDir.java
index a507552..2b14858 100644
--- a/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstRAMDir.java
+++ b/lucene/memory/src/test/org/apache/lucene/index/memory/TestMemoryIndexAgainstRAMDir.java
@@ -42,8 +42,7 @@ import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.CompositeReader;
import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexReader;
@@ -199,9 +198,9 @@ public class TestMemoryIndexAgainstRAMDir extends BaseTokenStreamTestCase {
while(iwTermsIter.next() != null) {
assertNotNull(memTermsIter.next());
assertEquals(iwTermsIter.term(), memTermsIter.term());
- DocsAndPositionsEnum iwDocsAndPos = iwTermsIter.docsAndPositions(null, null);
- DocsAndPositionsEnum memDocsAndPos = memTermsIter.docsAndPositions(null, null);
- while(iwDocsAndPos.nextDoc() != DocsAndPositionsEnum.NO_MORE_DOCS) {
+ PostingsEnum iwDocsAndPos = iwTermsIter.postings(null, null, PostingsEnum.FLAG_ALL);
+ PostingsEnum memDocsAndPos = memTermsIter.postings(null, null, PostingsEnum.FLAG_ALL);
+ while(iwDocsAndPos.nextDoc() != PostingsEnum.NO_MORE_DOCS) {
assertEquals(iwDocsAndPos.docID(), memDocsAndPos.nextDoc());
assertEquals(iwDocsAndPos.freq(), memDocsAndPos.freq());
for (int i = 0; i < iwDocsAndPos.freq(); i++) {
@@ -222,9 +221,9 @@ public class TestMemoryIndexAgainstRAMDir extends BaseTokenStreamTestCase {
} else {
while(iwTermsIter.next() != null) {
assertEquals(iwTermsIter.term(), memTermsIter.term());
- DocsEnum iwDocsAndPos = iwTermsIter.docs(null, null);
- DocsEnum memDocsAndPos = memTermsIter.docs(null, null);
- while(iwDocsAndPos.nextDoc() != DocsAndPositionsEnum.NO_MORE_DOCS) {
+ PostingsEnum iwDocsAndPos = iwTermsIter.postings(null, null);
+ PostingsEnum memDocsAndPos = memTermsIter.postings(null, null);
+ while(iwDocsAndPos.nextDoc() != PostingsEnum.NO_MORE_DOCS) {
assertEquals(iwDocsAndPos.docID(), memDocsAndPos.nextDoc());
assertEquals(iwDocsAndPos.freq(), memDocsAndPos.freq());
}
@@ -319,7 +318,7 @@ public class TestMemoryIndexAgainstRAMDir extends BaseTokenStreamTestCase {
MemoryIndex memory = new MemoryIndex(random().nextBoolean(), false, random().nextInt(50) * 1024 * 1024);
memory.addField("foo", "bar", analyzer);
LeafReader reader = (LeafReader) memory.createSearcher().getIndexReader();
- DocsEnum disi = TestUtil.docs(random(), reader, "foo", new BytesRef("bar"), null, null, DocsEnum.FLAG_NONE);
+ PostingsEnum disi = TestUtil.docs(random(), reader, "foo", new BytesRef("bar"), null, null, PostingsEnum.FLAG_NONE);
int docid = disi.docID();
assertEquals(-1, docid);
assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -327,7 +326,7 @@ public class TestMemoryIndexAgainstRAMDir extends BaseTokenStreamTestCase {
// now reuse and check again
TermsEnum te = reader.terms("foo").iterator(null);
assertTrue(te.seekExact(new BytesRef("bar")));
- disi = te.docs(null, disi, DocsEnum.FLAG_NONE);
+ disi = te.postings(null, disi, PostingsEnum.FLAG_NONE);
docid = disi.docID();
assertEquals(-1, docid);
assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -354,7 +353,7 @@ public class TestMemoryIndexAgainstRAMDir extends BaseTokenStreamTestCase {
memory.addField("foo", "bar", analyzer);
LeafReader reader = (LeafReader) memory.createSearcher().getIndexReader();
assertEquals(1, reader.terms("foo").getSumTotalTermFreq());
- DocsAndPositionsEnum disi = reader.termPositionsEnum(new Term("foo", "bar"));
+ PostingsEnum disi = reader.termDocsEnum(new Term("foo", "bar"), PostingsEnum.FLAG_ALL);
int docid = disi.docID();
assertEquals(-1, docid);
assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -365,7 +364,7 @@ public class TestMemoryIndexAgainstRAMDir extends BaseTokenStreamTestCase {
// now reuse and check again
TermsEnum te = reader.terms("foo").iterator(null);
assertTrue(te.seekExact(new BytesRef("bar")));
- disi = te.docsAndPositions(null, disi);
+ disi = te.postings(null, disi);
docid = disi.docID();
assertEquals(-1, docid);
assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -426,7 +425,7 @@ public class TestMemoryIndexAgainstRAMDir extends BaseTokenStreamTestCase {
assertNull(reader.getNumericDocValues("not-in-index"));
assertNull(reader.getNormValues("not-in-index"));
assertNull(reader.termDocsEnum(new Term("not-in-index", "foo")));
- assertNull(reader.termPositionsEnum(new Term("not-in-index", "foo")));
+ assertNull(reader.termDocsEnum(new Term("not-in-index", "foo"), PostingsEnum.FLAG_ALL));
assertNull(reader.terms("not-in-index"));
}
@@ -526,8 +525,8 @@ public class TestMemoryIndexAgainstRAMDir extends BaseTokenStreamTestCase {
assertNotNull(memTermEnum.next());
assertThat(termEnum.totalTermFreq(), equalTo(memTermEnum.totalTermFreq()));
- DocsAndPositionsEnum docsPosEnum = termEnum.docsAndPositions(null, null, 0);
- DocsAndPositionsEnum memDocsPosEnum = memTermEnum.docsAndPositions(null, null, 0);
+ PostingsEnum docsPosEnum = termEnum.postings(null, null, PostingsEnum.FLAG_POSITIONS);
+ PostingsEnum memDocsPosEnum = memTermEnum.postings(null, null, PostingsEnum.FLAG_POSITIONS);
String currentTerm = termEnum.term().utf8ToString();
assertThat("Token mismatch for field: " + field_name, currentTerm, equalTo(memTermEnum.term().utf8ToString()));
diff --git a/lucene/misc/src/java/org/apache/lucene/index/Sorter.java b/lucene/misc/src/java/org/apache/lucene/index/Sorter.java
index 22912bc..93b0eeb 100644
--- a/lucene/misc/src/java/org/apache/lucene/index/Sorter.java
+++ b/lucene/misc/src/java/org/apache/lucene/index/Sorter.java
@@ -20,6 +20,7 @@ package org.apache.lucene.index;
import java.io.IOException;
import java.util.Comparator;
+import org.apache.lucene.search.FakeScorer;
import org.apache.lucene.search.LeafFieldComparator;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Sort;
@@ -258,25 +259,6 @@ final class Sorter {
return getID();
}
- static final Scorer FAKESCORER = new Scorer(null) {
-
- @Override
- public float score() throws IOException { throw new UnsupportedOperationException(); }
-
- @Override
- public int freq() throws IOException { throw new UnsupportedOperationException(); }
-
- @Override
- public int docID() { throw new UnsupportedOperationException(); }
-
- @Override
- public int nextDoc() throws IOException { throw new UnsupportedOperationException(); }
-
- @Override
- public int advance(int target) throws IOException { throw new UnsupportedOperationException(); }
-
- @Override
- public long cost() { throw new UnsupportedOperationException(); }
- };
+ static final Scorer FAKESCORER = new FakeScorer();
}
diff --git a/lucene/misc/src/java/org/apache/lucene/index/SortingLeafReader.java b/lucene/misc/src/java/org/apache/lucene/index/SortingLeafReader.java
index ee6140b..2b23e2a 100644
--- a/lucene/misc/src/java/org/apache/lucene/index/SortingLeafReader.java
+++ b/lucene/misc/src/java/org/apache/lucene/index/SortingLeafReader.java
@@ -20,21 +20,6 @@ package org.apache.lucene.index;
import java.io.IOException;
import java.util.Arrays;
-import org.apache.lucene.index.FilterLeafReader;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.BinaryDocValues;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.IndexOptions;
-import org.apache.lucene.index.FieldInfos;
-import org.apache.lucene.index.Fields;
-import org.apache.lucene.index.NumericDocValues;
-import org.apache.lucene.index.SortedDocValues;
-import org.apache.lucene.index.SortedNumericDocValues;
-import org.apache.lucene.index.SortedSetDocValues;
-import org.apache.lucene.index.StoredFieldVisitor;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.index.Sorter.DocMap;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Sort;
@@ -52,7 +37,7 @@ import org.apache.lucene.util.automaton.CompiledAutomaton;
/**
* An {@link org.apache.lucene.index.LeafReader} which supports sorting documents by a given
* {@link Sort}. You can use this class to sort an index as follows:
- *
+ *
* <pre class="prettyprint">
* IndexWriter writer; // writer to which the sorted index will be added
* DirectoryReader reader; // reader on the input index
@@ -62,7 +47,7 @@ import org.apache.lucene.util.automaton.CompiledAutomaton;
* writer.close();
* reader.close();
* </pre>
- *
+ *
* @lucene.experimental
*/
public class SortingLeafReader extends FilterLeafReader {
@@ -94,7 +79,7 @@ public class SortingLeafReader extends FilterLeafReader {
private final Sorter.DocMap docMap;
private final IndexOptions indexOptions;
-
+
public SortingTerms(final Terms in, IndexOptions indexOptions, final Sorter.DocMap docMap) {
super(in);
this.docMap = docMap;
@@ -118,7 +103,7 @@ public class SortingLeafReader extends FilterLeafReader {
final Sorter.DocMap docMap; // pkg-protected to avoid synthetic accessor methods
private final IndexOptions indexOptions;
-
+
public SortingTermsEnum(final TermsEnum in, Sorter.DocMap docMap, IndexOptions indexOptions) {
super(in);
this.docMap = docMap;
@@ -145,8 +130,35 @@ public class SortingLeafReader extends FilterLeafReader {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, final int flags) throws IOException {
- final DocsEnum inReuse;
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, final int flags) throws IOException {
+
+ if (PostingsEnum.requiresPositions(flags)) {
+ final PostingsEnum inReuse;
+ final SortingPostingsEnum wrapReuse;
+ if (reuse != null && reuse instanceof SortingPostingsEnum) {
+ // if we're asked to reuse the given PostingsEnum and it is Sorting, return
+ // the wrapped one, since some Codecs expect it.
+ wrapReuse = (SortingPostingsEnum) reuse;
+ inReuse = wrapReuse.getWrapped();
+ } else {
+ wrapReuse = null;
+ inReuse = reuse;
+ }
+
+ final PostingsEnum inDocsAndPositions = in.postings(newToOld(liveDocs), inReuse, flags);
+ if (inDocsAndPositions == null) {
+ return null;
+ }
+
+ // we ignore the fact that offsets may be stored but not asked for,
+ // since this code is expected to be used during addIndexes which will
+ // ask for everything. if that assumption changes in the future, we can
+ // factor in whether 'flags' says offsets are not required.
+ final boolean storeOffsets = indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
+ return new SortingPostingsEnum(docMap.size(), wrapReuse, inDocsAndPositions, docMap, storeOffsets);
+ }
+
+ final PostingsEnum inReuse;
final SortingDocsEnum wrapReuse;
if (reuse != null && reuse instanceof SortingDocsEnum) {
// if we're asked to reuse the given DocsEnum and it is Sorting, return
@@ -158,45 +170,18 @@ public class SortingLeafReader extends FilterLeafReader {
inReuse = reuse;
}
- final DocsEnum inDocs = in.docs(newToOld(liveDocs), inReuse, flags);
- final boolean withFreqs = indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS) >=0 && (flags & DocsEnum.FLAG_FREQS) != 0;
+ final PostingsEnum inDocs = in.postings(newToOld(liveDocs), inReuse, flags);
+ final boolean withFreqs = indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS) >=0 && (flags & PostingsEnum.FLAG_FREQS) != 0;
return new SortingDocsEnum(docMap.size(), wrapReuse, inDocs, withFreqs, docMap);
}
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, final int flags) throws IOException {
- final DocsAndPositionsEnum inReuse;
- final SortingDocsAndPositionsEnum wrapReuse;
- if (reuse != null && reuse instanceof SortingDocsAndPositionsEnum) {
- // if we're asked to reuse the given DocsEnum and it is Sorting, return
- // the wrapped one, since some Codecs expect it.
- wrapReuse = (SortingDocsAndPositionsEnum) reuse;
- inReuse = wrapReuse.getWrapped();
- } else {
- wrapReuse = null;
- inReuse = reuse;
- }
-
- final DocsAndPositionsEnum inDocsAndPositions = in.docsAndPositions(newToOld(liveDocs), inReuse, flags);
- if (inDocsAndPositions == null) {
- return null;
- }
-
- // we ignore the fact that offsets may be stored but not asked for,
- // since this code is expected to be used during addIndexes which will
- // ask for everything. if that assumption changes in the future, we can
- // factor in whether 'flags' says offsets are not required.
- final boolean storeOffsets = indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
- return new SortingDocsAndPositionsEnum(docMap.size(), wrapReuse, inDocsAndPositions, docMap, storeOffsets);
- }
-
}
private static class SortingBinaryDocValues extends BinaryDocValues {
-
+
private final BinaryDocValues in;
private final Sorter.DocMap docMap;
-
+
SortingBinaryDocValues(BinaryDocValues in, Sorter.DocMap docMap) {
this.in = in;
this.docMap = docMap;
@@ -207,7 +192,7 @@ public class SortingLeafReader extends FilterLeafReader {
return in.get(docMap.newToOld(docID));
}
}
-
+
private static class SortingNumericDocValues extends NumericDocValues {
private final NumericDocValues in;
@@ -223,33 +208,33 @@ public class SortingLeafReader extends FilterLeafReader {
return in.get(docMap.newToOld(docID));
}
}
-
+
private static class SortingSortedNumericDocValues extends SortedNumericDocValues {
-
+
private final SortedNumericDocValues in;
private final Sorter.DocMap docMap;
-
+
SortingSortedNumericDocValues(SortedNumericDocValues in, DocMap docMap) {
this.in = in;
this.docMap = docMap;
}
-
+
@Override
public int count() {
return in.count();
}
-
+
@Override
public void setDocument(int doc) {
in.setDocument(docMap.newToOld(doc));
}
-
+
@Override
public long valueAt(int index) {
return in.valueAt(index);
}
}
-
+
private static class SortingBits implements Bits {
private final Bits in;
@@ -270,12 +255,12 @@ public class SortingLeafReader extends FilterLeafReader {
return in.length();
}
}
-
+
private static class SortingSortedDocValues extends SortedDocValues {
-
+
private final SortedDocValues in;
private final Sorter.DocMap docMap;
-
+
SortingSortedDocValues(SortedDocValues in, Sorter.DocMap docMap) {
this.in = in;
this.docMap = docMap;
@@ -306,12 +291,12 @@ public class SortingLeafReader extends FilterLeafReader {
return in.lookupTerm(key);
}
}
-
+
private static class SortingSortedSetDocValues extends SortedSetDocValues {
-
+
private final SortedSetDocValues in;
private final Sorter.DocMap docMap;
-
+
SortingSortedSetDocValues(SortedSetDocValues in, Sorter.DocMap docMap) {
this.in = in;
this.docMap = docMap;
@@ -344,14 +329,14 @@ public class SortingLeafReader extends FilterLeafReader {
}
static class SortingDocsEnum extends FilterDocsEnum {
-
+
private static final class DocFreqSorter extends TimSorter {
-
+
private int[] docs;
private int[] freqs;
private final int[] tmpDocs;
private int[] tmpFreqs;
-
+
public DocFreqSorter(int maxDoc) {
super(maxDoc / 64);
this.tmpDocs = new int[maxDoc / 64];
@@ -369,13 +354,13 @@ public class SortingLeafReader extends FilterLeafReader {
protected int compare(int i, int j) {
return docs[i] - docs[j];
}
-
+
@Override
protected void swap(int i, int j) {
int tmpDoc = docs[i];
docs[i] = docs[j];
docs[j] = tmpDoc;
-
+
if (freqs != null) {
int tmpFreq = freqs[i];
freqs[i] = freqs[j];
@@ -421,7 +406,7 @@ public class SortingLeafReader extends FilterLeafReader {
private final int upto;
private final boolean withFreqs;
- SortingDocsEnum(int maxDoc, SortingDocsEnum reuse, final DocsEnum in, boolean withFreqs, final Sorter.DocMap docMap) throws IOException {
+ SortingDocsEnum(int maxDoc, SortingDocsEnum reuse, final PostingsEnum in, boolean withFreqs, final Sorter.DocMap docMap) throws IOException {
super(in);
this.maxDoc = maxDoc;
this.withFreqs = withFreqs;
@@ -470,7 +455,7 @@ public class SortingLeafReader extends FilterLeafReader {
}
// for testing
- boolean reused(DocsEnum other) {
+ boolean reused(PostingsEnum other) {
if (other == null || !(other instanceof SortingDocsEnum)) {
return false;
}
@@ -483,43 +468,43 @@ public class SortingLeafReader extends FilterLeafReader {
// don't bother to implement efficiently for now.
return slowAdvance(target);
}
-
+
@Override
public int docID() {
return docIt < 0 ? -1 : docIt >= upto ? NO_MORE_DOCS : docs[docIt];
}
-
+
@Override
public int freq() throws IOException {
return withFreqs && docIt < upto ? freqs[docIt] : 1;
}
-
+
@Override
public int nextDoc() throws IOException {
if (++docIt >= upto) return NO_MORE_DOCS;
return docs[docIt];
}
-
- /** Returns the wrapped {@link DocsEnum}. */
- DocsEnum getWrapped() {
+
+ /** Returns the wrapped {@link PostingsEnum}. */
+ PostingsEnum getWrapped() {
return in;
}
}
-
- static class SortingDocsAndPositionsEnum extends FilterDocsAndPositionsEnum {
-
+
+ static class SortingPostingsEnum extends FilterDocsEnum {
+
/**
* A {@link TimSorter} which sorts two parallel arrays of doc IDs and
* offsets in one go. Everytime a doc ID is 'swapped', its corresponding offset
* is swapped too.
*/
private static final class DocOffsetSorter extends TimSorter {
-
+
private int[] docs;
private long[] offsets;
private final int[] tmpDocs;
private final long[] tmpOffsets;
-
+
public DocOffsetSorter(int maxDoc) {
super(maxDoc / 64);
this.tmpDocs = new int[maxDoc / 64];
@@ -535,13 +520,13 @@ public class SortingLeafReader extends FilterLeafReader {
protected int compare(int i, int j) {
return docs[i] - docs[j];
}
-
+
@Override
protected void swap(int i, int j) {
int tmpDoc = docs[i];
docs[i] = docs[j];
docs[j] = tmpDoc;
-
+
long tmpOffset = offsets[i];
offsets[i] = offsets[j];
offsets[j] = tmpOffset;
@@ -570,16 +555,16 @@ public class SortingLeafReader extends FilterLeafReader {
return tmpDocs[i] - docs[j];
}
}
-
+
private final int maxDoc;
private final DocOffsetSorter sorter;
private int[] docs;
private long[] offsets;
private final int upto;
-
+
private final IndexInput postingInput;
private final boolean storeOffsets;
-
+
private int docIt = -1;
private int pos;
private int startOffset = -1;
@@ -589,7 +574,7 @@ public class SortingLeafReader extends FilterLeafReader {
private final RAMFile file;
- SortingDocsAndPositionsEnum(int maxDoc, SortingDocsAndPositionsEnum reuse, final DocsAndPositionsEnum in, Sorter.DocMap docMap, boolean storeOffsets) throws IOException {
+ SortingPostingsEnum(int maxDoc, SortingPostingsEnum reuse, final PostingsEnum in, Sorter.DocMap docMap, boolean storeOffsets) throws IOException {
super(in);
this.maxDoc = maxDoc;
this.storeOffsets = storeOffsets;
@@ -632,14 +617,14 @@ public class SortingLeafReader extends FilterLeafReader {
}
// for testing
- boolean reused(DocsAndPositionsEnum other) {
- if (other == null || !(other instanceof SortingDocsAndPositionsEnum)) {
+ boolean reused(PostingsEnum other) {
+ if (other == null || !(other instanceof SortingPostingsEnum)) {
return false;
}
- return docs == ((SortingDocsAndPositionsEnum) other).docs;
+ return docs == ((SortingPostingsEnum) other).docs;
}
- private void addPositions(final DocsAndPositionsEnum in, final IndexOutput out) throws IOException {
+ private void addPositions(final PostingsEnum in, final IndexOutput out) throws IOException {
int freq = in.freq();
out.writeVInt(freq);
int previousPosition = 0;
@@ -648,7 +633,7 @@ public class SortingLeafReader extends FilterLeafReader {
final int pos = in.nextPosition();
final BytesRef payload = in.getPayload();
// The low-order bit of token is set only if there is a payload, the
- // previous bits are the delta-encoded position.
+ // previous bits are the delta-encoded position.
final int token = (pos - previousPosition) << 1 | (payload == null ? 0 : 1);
out.writeVInt(token);
previousPosition = pos;
@@ -665,34 +650,34 @@ public class SortingLeafReader extends FilterLeafReader {
}
}
}
-
+
@Override
public int advance(final int target) throws IOException {
// need to support it for checkIndex, but in practice it won't be called, so
// don't bother to implement efficiently for now.
return slowAdvance(target);
}
-
+
@Override
public int docID() {
return docIt < 0 ? -1 : docIt >= upto ? NO_MORE_DOCS : docs[docIt];
}
-
+
@Override
public int endOffset() throws IOException {
return endOffset;
}
-
+
@Override
public int freq() throws IOException {
return currFreq;
}
-
+
@Override
public BytesRef getPayload() throws IOException {
return payload.length == 0 ? null : payload;
}
-
+
@Override
public int nextDoc() throws IOException {
if (++docIt >= upto) return DocIdSetIterator.NO_MORE_DOCS;
@@ -703,7 +688,7 @@ public class SortingLeafReader extends FilterLeafReader {
endOffset = 0;
return docs[docIt];
}
-
+
@Override
public int nextPosition() throws IOException {
final int token = postingInput.readVInt();
@@ -724,14 +709,14 @@ public class SortingLeafReader extends FilterLeafReader {
}
return pos;
}
-
+
@Override
public int startOffset() throws IOException {
return startOffset;
}
- /** Returns the wrapped {@link DocsAndPositionsEnum}. */
- DocsAndPositionsEnum getWrapped() {
+ /** Returns the wrapped {@link PostingsEnum}. */
+ PostingsEnum getWrapped() {
return in;
}
}
@@ -767,12 +752,12 @@ public class SortingLeafReader extends FilterLeafReader {
public void document(final int docID, final StoredFieldVisitor visitor) throws IOException {
in.document(docMap.newToOld(docID), visitor);
}
-
+
@Override
public Fields fields() throws IOException {
return new SortingFields(in.fields(), in.getFieldInfos(), docMap);
}
-
+
@Override
public BinaryDocValues getBinaryDocValues(String field) throws IOException {
BinaryDocValues oldDocValues = in.getBinaryDocValues(field);
@@ -782,7 +767,7 @@ public class SortingLeafReader extends FilterLeafReader {
return new SortingBinaryDocValues(oldDocValues, docMap);
}
}
-
+
@Override
public Bits getLiveDocs() {
final Bits inLiveDocs = in.getLiveDocs();
@@ -792,7 +777,7 @@ public class SortingLeafReader extends FilterLeafReader {
return new SortingBits(inLiveDocs, docMap);
}
}
-
+
@Override
public NumericDocValues getNormValues(String field) throws IOException {
final NumericDocValues norm = in.getNormValues(field);
@@ -809,7 +794,7 @@ public class SortingLeafReader extends FilterLeafReader {
if (oldDocValues == null) return null;
return new SortingNumericDocValues(oldDocValues, docMap);
}
-
+
@Override
public SortedNumericDocValues getSortedNumericDocValues(String field)
throws IOException {
@@ -830,7 +815,7 @@ public class SortingLeafReader extends FilterLeafReader {
return new SortingSortedDocValues(sortedDV, docMap);
}
}
-
+
@Override
public SortedSetDocValues getSortedSetDocValues(String field) throws IOException {
SortedSetDocValues sortedSetDV = in.getSortedSetDocValues(field);
@@ -838,7 +823,7 @@ public class SortingLeafReader extends FilterLeafReader {
return null;
} else {
return new SortingSortedSetDocValues(sortedSetDV, docMap);
- }
+ }
}
@Override
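
SortingPostingsEnum.addPositions/nextPosition above round-trip each position through a RAMFile using a token whose low bit flags a payload and whose remaining bits carry the position delta, so small deltas stay small under writeVInt. The arithmetic in isolation (plain Java, no Lucene types):

    final class PositionTokenDemo {
      static int encode(int pos, int previousPosition, boolean hasPayload) {
        return (pos - previousPosition) << 1 | (hasPayload ? 1 : 0);
      }

      public static void main(String[] args) {
        int prev = 0;
        for (int pos : new int[] {3, 7, 42}) {
          int token = encode(pos, prev, pos == 7);   // pretend position 7 carries a payload
          boolean hasPayload = (token & 1) != 0;     // decode: low bit
          int decoded = prev + (token >>> 1);        // decode: delta plus previous position
          if (decoded != pos) throw new AssertionError();
          System.out.println(pos + " -> token " + token + ", payload=" + hasPayload);
          prev = pos;
        }
      }
    }
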
diff --git a/lucene/misc/src/java/org/apache/lucene/uninverting/DocTermOrds.java b/lucene/misc/src/java/org/apache/lucene/uninverting/DocTermOrds.java
index 6b6eb8f..aebb33e 100644
--- a/lucene/misc/src/java/org/apache/lucene/uninverting/DocTermOrds.java
+++ b/lucene/misc/src/java/org/apache/lucene/uninverting/DocTermOrds.java
@@ -25,8 +25,7 @@ import java.util.List;
import org.apache.lucene.codecs.PostingsFormat; // javadocs
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.SortedSetDocValues;
@@ -165,7 +164,7 @@ public class DocTermOrds implements Accountable {
protected int ordBase;
/** Used while uninverting. */
- protected DocsEnum docsEnum;
+ protected PostingsEnum postingsEnum;
/** Returns total bytes used. */
public long ramBytesUsed() {
@@ -326,7 +325,7 @@ public class DocTermOrds implements Accountable {
// frequent terms ahead of time.
int termNum = 0;
- docsEnum = null;
+ postingsEnum = null;
// Loop begins with te positioned to first term (we call
// seek above):
@@ -366,13 +365,13 @@ public class DocTermOrds implements Accountable {
final int df = te.docFreq();
if (df <= maxTermDocFreq) {
- docsEnum = te.docs(liveDocs, docsEnum, DocsEnum.FLAG_NONE);
+ postingsEnum = te.postings(liveDocs, postingsEnum, PostingsEnum.FLAG_NONE);
// dF, but takes deletions into account
int actualDF = 0;
for (;;) {
- int doc = docsEnum.nextDoc();
+ int doc = postingsEnum.nextDoc();
if (doc == DocIdSetIterator.NO_MORE_DOCS) {
break;
}
@@ -613,13 +612,8 @@ public class DocTermOrds implements Accountable {
}
@Override
- public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
- return termsEnum.docs(liveDocs, reuse, flags);
- }
-
- @Override
- public DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- return termsEnum.docsAndPositions(liveDocs, reuse, flags);
+ public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
+ return termsEnum.postings(liveDocs, reuse, flags);
}
@Override
diff --git a/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCacheImpl.java b/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCacheImpl.java
index 6b18fbf..b7f98f1 100644
--- a/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCacheImpl.java
+++ b/lucene/misc/src/java/org/apache/lucene/uninverting/FieldCacheImpl.java
@@ -30,7 +30,7 @@ import java.util.WeakHashMap;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.LeafReader;
@@ -280,7 +280,7 @@ class FieldCacheImpl implements FieldCache {
final TermsEnum termsEnum = termsEnum(terms);
- DocsEnum docs = null;
+ PostingsEnum docs = null;
FixedBitSet docsWithField = null;
while(true) {
final BytesRef term = termsEnum.next();
@@ -288,7 +288,7 @@ class FieldCacheImpl implements FieldCache {
break;
}
visitTerm(term);
- docs = termsEnum.docs(null, docs, DocsEnum.FLAG_NONE);
+ docs = termsEnum.postings(null, docs, PostingsEnum.FLAG_NONE);
while (true) {
final int docID = docs.nextDoc();
if (docID == DocIdSetIterator.NO_MORE_DOCS) {
@@ -408,7 +408,7 @@ class FieldCacheImpl implements FieldCache {
return new BitsEntry(new Bits.MatchAllBits(maxDoc));
}
final TermsEnum termsEnum = terms.iterator(null);
- DocsEnum docs = null;
+ PostingsEnum docs = null;
while(true) {
final BytesRef term = termsEnum.next();
if (term == null) {
@@ -419,7 +419,7 @@ class FieldCacheImpl implements FieldCache {
res = new FixedBitSet(maxDoc);
}
- docs = termsEnum.docs(null, docs, DocsEnum.FLAG_NONE);
+ docs = termsEnum.postings(null, docs, PostingsEnum.FLAG_NONE);
// TODO: use bulk API
while (true) {
final int docID = docs.nextDoc();
@@ -686,7 +686,7 @@ class FieldCacheImpl implements FieldCache {
if (terms != null) {
final TermsEnum termsEnum = terms.iterator(null);
- DocsEnum docs = null;
+ PostingsEnum docs = null;
while(true) {
final BytesRef term = termsEnum.next();
@@ -698,7 +698,7 @@ class FieldCacheImpl implements FieldCache {
}
termOrdToBytesOffset.add(bytes.copyUsingLengthPrefix(term));
- docs = termsEnum.docs(null, docs, DocsEnum.FLAG_NONE);
+ docs = termsEnum.postings(null, docs, PostingsEnum.FLAG_NONE);
while (true) {
final int docID = docs.nextDoc();
if (docID == DocIdSetIterator.NO_MORE_DOCS) {
@@ -836,7 +836,7 @@ class FieldCacheImpl implements FieldCache {
if (terms != null) {
int termCount = 0;
final TermsEnum termsEnum = terms.iterator(null);
- DocsEnum docs = null;
+ PostingsEnum docs = null;
while(true) {
if (termCount++ == termCountHardLimit) {
// app is misusing the API (there is more than
@@ -850,7 +850,7 @@ class FieldCacheImpl implements FieldCache {
break;
}
final long pointer = bytes.copyUsingLengthPrefix(term);
- docs = termsEnum.docs(null, docs, DocsEnum.FLAG_NONE);
+ docs = termsEnum.postings(null, docs, PostingsEnum.FLAG_NONE);
while (true) {
final int docID = docs.nextDoc();
if (docID == DocIdSetIterator.NO_MORE_DOCS) {
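
Every FieldCacheImpl site above follows the same uninversion loop: walk the TermsEnum, open docs-only postings with FLAG_NONE, and reuse the enum across terms. Pulled out as a standalone sketch (the class and method names are illustrative):

    import java.io.IOException;

    import org.apache.lucene.index.PostingsEnum;
    import org.apache.lucene.index.Terms;
    import org.apache.lucene.index.TermsEnum;
    import org.apache.lucene.search.DocIdSetIterator;
    import org.apache.lucene.util.BytesRef;
    import org.apache.lucene.util.FixedBitSet;

    final class UninvertSketch {
      /** Marks every document that has at least one term in the given field. */
      static FixedBitSet docsWithField(Terms terms, int maxDoc) throws IOException {
        FixedBitSet bits = new FixedBitSet(maxDoc);
        TermsEnum te = terms.iterator(null); // pre-5.0 iterator(TermsEnum reuse) signature
        PostingsEnum postings = null;        // reused across terms, as in the patch
        BytesRef term;
        while ((term = te.next()) != null) {
          postings = te.postings(null, postings, PostingsEnum.FLAG_NONE); // docIDs only
          int doc;
          while ((doc = postings.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
            bits.set(doc);
          }
        }
        return bits;
      }
    }
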
diff --git a/lucene/misc/src/test/org/apache/lucene/index/SorterTestBase.java b/lucene/misc/src/test/org/apache/lucene/index/SorterTestBase.java
index 30b0be7..c6596cc 100644
--- a/lucene/misc/src/test/org/apache/lucene/index/SorterTestBase.java
+++ b/lucene/misc/src/test/org/apache/lucene/index/SorterTestBase.java
@@ -31,8 +31,8 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.document.BinaryDocValuesField;
import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;
@@ -40,27 +40,8 @@ import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.BinaryDocValues;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.FieldInvertState;
-import org.apache.lucene.index.IndexOptions;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.NumericDocValues;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.SlowCompositeReaderWrapper;
-import org.apache.lucene.index.SortedDocValues;
-import org.apache.lucene.index.SortedNumericDocValues;
-import org.apache.lucene.index.SortedSetDocValues;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum.SeekStatus;
-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.index.SortingLeafReader.SortingDocsAndPositionsEnum;
import org.apache.lucene.index.SortingLeafReader.SortingDocsEnum;
+import org.apache.lucene.index.TermsEnum.SeekStatus;
import org.apache.lucene.search.CollectionStatistics;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.TermStatistics;
@@ -254,7 +235,7 @@ public abstract class SorterTestBase extends LuceneTestCase {
public void testDocsAndPositionsEnum() throws Exception {
TermsEnum termsEnum = sortedReader.terms(DOC_POSITIONS_FIELD).iterator(null);
assertEquals(SeekStatus.FOUND, termsEnum.seekCeil(new BytesRef(DOC_POSITIONS_TERM)));
- DocsAndPositionsEnum sortedPositions = termsEnum.docsAndPositions(null, null);
+ PostingsEnum sortedPositions = termsEnum.postings(null, null, PostingsEnum.FLAG_ALL);
int doc;
// test nextDoc()
@@ -270,10 +251,10 @@ public abstract class SorterTestBase extends LuceneTestCase {
}
// test advance()
- final DocsAndPositionsEnum reuse = sortedPositions;
- sortedPositions = termsEnum.docsAndPositions(null, reuse);
- if (sortedPositions instanceof SortingDocsAndPositionsEnum) {
- assertTrue(((SortingDocsAndPositionsEnum) sortedPositions).reused(reuse)); // make sure reuse worked
+ final PostingsEnum reuse = sortedPositions;
+ sortedPositions = termsEnum.postings(null, reuse, PostingsEnum.FLAG_ALL);
+ if (sortedPositions instanceof SortingDocsEnum) {
+ assertTrue(((SortingDocsEnum) sortedPositions).reused(reuse)); // make sure reuse worked
}
doc = 0;
while ((doc = sortedPositions.advance(doc + TestUtil.nextInt(random(), 1, 5))) != DocIdSetIterator.NO_MORE_DOCS) {
@@ -315,7 +296,7 @@ public abstract class SorterTestBase extends LuceneTestCase {
Bits mappedLiveDocs = randomLiveDocs(sortedReader.maxDoc());
TermsEnum termsEnum = sortedReader.terms(DOCS_ENUM_FIELD).iterator(null);
assertEquals(SeekStatus.FOUND, termsEnum.seekCeil(new BytesRef(DOCS_ENUM_TERM)));
- DocsEnum docs = termsEnum.docs(mappedLiveDocs, null);
+ PostingsEnum docs = termsEnum.postings(mappedLiveDocs, null);
int doc;
int prev = -1;
@@ -330,8 +311,8 @@ public abstract class SorterTestBase extends LuceneTestCase {
assertFalse("document " + prev + " not marked as deleted", mappedLiveDocs == null || mappedLiveDocs.get(prev));
}
- DocsEnum reuse = docs;
- docs = termsEnum.docs(mappedLiveDocs, reuse);
+ PostingsEnum reuse = docs;
+ docs = termsEnum.postings(mappedLiveDocs, reuse);
if (docs instanceof SortingDocsEnum) {
assertTrue(((SortingDocsEnum) docs).reused(reuse)); // make sure reuse worked
}
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/CustomScoreQuery.java b/lucene/queries/src/java/org/apache/lucene/queries/CustomScoreQuery.java
index 3aae0cc..00cc97c 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/CustomScoreQuery.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/CustomScoreQuery.java
@@ -18,23 +18,24 @@ package org.apache.lucene.queries;
*/
import java.io.IOException;
+import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Set;
-import java.util.Arrays;
-import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.function.FunctionQuery;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.search.ComplexExplanation;
import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Weight;
import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.ToStringUtils;
/**
@@ -234,14 +235,14 @@ public class CustomScoreQuery extends Query {
}
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
- Scorer subQueryScorer = subQueryWeight.scorer(context, acceptDocs);
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
+ Scorer subQueryScorer = subQueryWeight.scorer(context, flags, acceptDocs);
if (subQueryScorer == null) {
return null;
}
Scorer[] valSrcScorers = new Scorer[valSrcWeights.length];
for(int i = 0; i < valSrcScorers.length; i++) {
- valSrcScorers[i] = valSrcWeights[i].scorer(context, acceptDocs);
+ valSrcScorers[i] = valSrcWeights[i].scorer(context, flags, acceptDocs);
}
return new CustomScorer(CustomScoreQuery.this.getCustomScoreProvider(context), this, queryWeight, subQueryScorer, valSrcScorers);
}
@@ -286,6 +287,8 @@ public class CustomScoreQuery extends Query {
private final CustomScoreProvider provider;
private final float[] vScores; // reused in score() to avoid allocating this array for each doc
+ // TODO : can we use FilterScorer here instead?
+
// constructor
private CustomScorer(CustomScoreProvider provider, CustomWeight w, float qWeight,
Scorer subQueryScorer, Scorer[] valSrcScorers) {
@@ -328,6 +331,36 @@ public class CustomScoreQuery extends Query {
}
@Override
+ public int nextPosition() throws IOException {
+ return subQueryScorer.nextPosition();
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return subQueryScorer.startPosition();
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return subQueryScorer.endPosition();
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return subQueryScorer.startOffset();
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return subQueryScorer.endOffset();
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return subQueryScorer.getPayload();
+ }
+
+ @Override
public Collection<ChildScorer> getChildren() {
return Collections.singleton(new ChildScorer(subQueryScorer, "CUSTOM"));
}
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/TermFilter.java b/lucene/queries/src/java/org/apache/lucene/queries/TermFilter.java
index c075984..a6be527 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/TermFilter.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/TermFilter.java
@@ -18,7 +18,7 @@ package org.apache.lucene.queries;
*/
import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
@@ -69,7 +69,7 @@ final public class TermFilter extends Filter {
return new DocIdSet() {
@Override
public DocIdSetIterator iterator() throws IOException {
- return termsEnum.docs(acceptDocs, null, DocsEnum.FLAG_NONE);
+ return termsEnum.postings(acceptDocs, null, PostingsEnum.FLAG_NONE);
}
@Override
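
TermFilter can return the postings enum directly because PostingsEnum, like DocsEnum before it, extends DocIdSetIterator — a docs-only postings list is already a valid iterator for a DocIdSet. Minimal illustration (helper name assumed):

    import java.io.IOException;

    import org.apache.lucene.index.PostingsEnum;
    import org.apache.lucene.index.TermsEnum;
    import org.apache.lucene.search.DocIdSetIterator;
    import org.apache.lucene.util.Bits;

    final class TermIterator {
      // FLAG_NONE: the filter needs docIDs only, never freqs or positions.
      static DocIdSetIterator forSeekedTerm(TermsEnum te, Bits acceptDocs) throws IOException {
        return te.postings(acceptDocs, null, PostingsEnum.FLAG_NONE);
      }
    }
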
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/TermsFilter.java b/lucene/queries/src/java/org/apache/lucene/queries/TermsFilter.java
index b9b7e0f..8c07d68 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/TermsFilter.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/TermsFilter.java
@@ -24,7 +24,7 @@ import java.util.Collections;
import java.util.Iterator;
import java.util.List;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
@@ -199,7 +199,7 @@ public final class TermsFilter extends Filter implements Accountable {
final BytesRef spare = new BytesRef(this.termsBytes);
Terms terms = null;
TermsEnum termsEnum = null;
- DocsEnum docs = null;
+ PostingsEnum docs = null;
for (TermsAndField termsAndField : this.termsAndFields) {
if ((terms = fields.terms(termsAndField.field)) != null) {
termsEnum = terms.iterator(termsEnum); // this won't return null
@@ -207,7 +207,7 @@ public final class TermsFilter extends Filter implements Accountable {
spare.offset = offsets[i];
spare.length = offsets[i+1] - offsets[i];
if (termsEnum.seekExact(spare)) {
- docs = termsEnum.docs(acceptDocs, docs, DocsEnum.FLAG_NONE); // no freq since we don't need them
+ docs = termsEnum.postings(acceptDocs, docs, PostingsEnum.FLAG_NONE); // no freq since we don't need them
builder.or(docs);
}
}
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/BoostedQuery.java b/lucene/queries/src/java/org/apache/lucene/queries/function/BoostedQuery.java
index 90bb2e0..cb46dec 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/BoostedQuery.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/BoostedQuery.java
@@ -17,18 +17,24 @@ package org.apache.lucene.queries.function;
* limitations under the License.
*/
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.search.*;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.ToStringUtils;
-
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
-import java.util.Set;
import java.util.Map;
+import java.util.Set;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.ComplexExplanation;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.FilterScorer;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.ToStringUtils;
/**
* Query that is boosted by a ValueSource
@@ -97,8 +103,8 @@ public class BoostedQuery extends Query {
}
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
- Scorer subQueryScorer = qWeight.scorer(context, acceptDocs);
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
+ Scorer subQueryScorer = qWeight.scorer(context, flags, acceptDocs);
if (subQueryScorer == null) {
return null;
}
@@ -122,41 +128,24 @@ public class BoostedQuery extends Query {
}
- private class CustomScorer extends Scorer {
+ private class CustomScorer extends FilterScorer {
private final BoostedQuery.BoostedWeight weight;
private final float qWeight;
- private final Scorer scorer;
private final FunctionValues vals;
private final LeafReaderContext readerContext;
private CustomScorer(LeafReaderContext readerContext, BoostedQuery.BoostedWeight w, float qWeight,
Scorer scorer, ValueSource vs) throws IOException {
- super(w);
+ super(scorer);
this.weight = w;
this.qWeight = qWeight;
- this.scorer = scorer;
this.readerContext = readerContext;
this.vals = vs.getValues(weight.fcontext, readerContext);
}
- @Override
- public int docID() {
- return scorer.docID();
- }
-
- @Override
- public int advance(int target) throws IOException {
- return scorer.advance(target);
- }
-
- @Override
- public int nextDoc() throws IOException {
- return scorer.nextDoc();
- }
-
@Override
public float score() throws IOException {
- float score = qWeight * scorer.score() * vals.floatVal(scorer.docID());
+ float score = qWeight * in.score() * vals.floatVal(in.docID());
// Current Lucene priority queues can't handle NaN and -Infinity, so
// map to -Float.MAX_VALUE. This conditional handles both -infinity
@@ -165,13 +154,8 @@ public class BoostedQuery extends Query {
}
@Override
- public int freq() throws IOException {
- return scorer.freq();
- }
-
- @Override
public Collection<ChildScorer> getChildren() {
- return Collections.singleton(new ChildScorer(scorer, "CUSTOM"));
+ return Collections.singleton(new ChildScorer(in, "CUSTOM"));
}
public Explanation explain(int doc) throws IOException {
@@ -187,10 +171,6 @@ public class BoostedQuery extends Query {
return res;
}
- @Override
- public long cost() {
- return scorer.cost();
- }
}
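
The BoostedQuery rewrite above is the FilterScorer pattern: the base class forwards docID(), nextDoc(), advance(), freq() and cost() to the wrapped scorer (held in the protected field 'in', as the super(scorer) call and in.score() above show), so a decorator overrides only what it changes. A compact sketch with an illustrative name:

    import java.io.IOException;

    import org.apache.lucene.search.FilterScorer;
    import org.apache.lucene.search.Scorer;

    final class ConstantBoostScorer extends FilterScorer {
      private final float boost;

      ConstantBoostScorer(Scorer in, float boost) {
        super(in);           // FilterScorer keeps the delegate in its 'in' field
        this.boost = boost;
      }

      @Override
      public float score() throws IOException {
        return boost * in.score();  // everything else is forwarded by FilterScorer
      }
    }
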
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/FunctionQuery.java b/lucene/queries/src/java/org/apache/lucene/queries/function/FunctionQuery.java
index 4abc312..b67bfb3 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/FunctionQuery.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/FunctionQuery.java
@@ -17,15 +17,22 @@ package org.apache.lucene.queries.function;
* limitations under the License.
*/
-import org.apache.lucene.index.LeafReaderContext;
+import java.io.IOException;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
-import org.apache.lucene.search.*;
+import org.apache.lucene.search.ComplexExplanation;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;
-
-import java.io.IOException;
-import java.util.Set;
-import java.util.Map;
+import org.apache.lucene.util.BytesRef;
/**
@@ -89,13 +96,13 @@ public class FunctionQuery extends Query {
}
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
return new AllScorer(context, acceptDocs, this, queryWeight);
}
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
- return ((AllScorer)scorer(context, context.reader().getLiveDocs())).explain(doc);
+ return ((AllScorer)scorer(context, PostingsEnum.FLAG_FREQS, context.reader().getLiveDocs())).explain(doc);
}
}
@@ -166,6 +173,36 @@ public class FunctionQuery extends Query {
return 1;
}
+ @Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
public Explanation explain(int doc) throws IOException {
float sc = qWeight * vals.floatVal(doc);
@@ -177,6 +214,7 @@ public class FunctionQuery extends Query {
result.addDetail(new Explanation(weight.queryNorm,"queryNorm"));
return result;
}
+
}
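
FunctionQuery's AllScorer (and ValueSourceScorer below) adopt a sentinel convention for scorers that match documents without any positional data: the position and offset accessors answer -1, and getPayload() answers null, instead of throwing. A caller probing for positions might look like this — a sketch of the convention, not an API defined by the patch:

    import java.io.IOException;

    import org.apache.lucene.search.Scorer;

    final class PositionProbe {
      /** True if the scorer can report real positions on its current document. */
      static boolean hasPositions(Scorer scorer) throws IOException {
        // Positionless scorers in this patch report -1 rather than throwing.
        return scorer.startPosition() != -1;
      }
    }
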
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSourceScorer.java b/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSourceScorer.java
index ec8aced..4a54151 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSourceScorer.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/ValueSourceScorer.java
@@ -17,12 +17,13 @@ package org.apache.lucene.queries.function;
* limitations under the License.
*/
+import java.io.IOException;
+
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.util.Bits;
-
-import java.io.IOException;
+import org.apache.lucene.util.BytesRef;
/**
* {@link Scorer} which returns the result of {@link FunctionValues#floatVal(int)} as
@@ -93,6 +94,36 @@ public class ValueSourceScorer extends Scorer {
}
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public long cost() {
return maxDoc;
}
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/QueryValueSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/QueryValueSource.java
index 3d57315..00e6fb7 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/QueryValueSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/QueryValueSource.java
@@ -17,12 +17,16 @@
package org.apache.lucene.queries.function.valuesource;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.docvalues.FloatDocValues;
-import org.apache.lucene.search.*;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.mutable.MutableValue;
import org.apache.lucene.util.mutable.MutableValueFloat;
@@ -123,7 +127,7 @@ class QueryDocValues extends FloatDocValues {
try {
if (doc < lastDocRequested) {
if (noMatches) return defVal;
- scorer = weight.scorer(readerContext, acceptDocs);
+ scorer = weight.scorer(readerContext, PostingsEnum.FLAG_FREQS, acceptDocs);
if (scorer==null) {
noMatches = true;
return defVal;
@@ -154,7 +158,7 @@ class QueryDocValues extends FloatDocValues {
try {
if (doc < lastDocRequested) {
if (noMatches) return false;
- scorer = weight.scorer(readerContext, acceptDocs);
+ scorer = weight.scorer(readerContext, PostingsEnum.FLAG_FREQS, acceptDocs);
scorerDoc = -1;
if (scorer==null) {
noMatches = true;
@@ -212,7 +216,7 @@ class QueryDocValues extends FloatDocValues {
mval.exists = false;
return;
}
- scorer = weight.scorer(readerContext, acceptDocs);
+ scorer = weight.scorer(readerContext, PostingsEnum.FLAG_FREQS, acceptDocs);
scorerDoc = -1;
if (scorer==null) {
noMatches = true;
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TFValueSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TFValueSource.java
index 4d73d55..4a91ffb 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TFValueSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TFValueSource.java
@@ -17,7 +17,14 @@ package org.apache.lucene.queries.function.valuesource;
* limitations under the License.
*/
-import org.apache.lucene.index.*;
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.lucene.index.PostingsEnum;
+import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.docvalues.FloatDocValues;
import org.apache.lucene.search.DocIdSetIterator;
@@ -25,9 +32,6 @@ import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.similarities.TFIDFSimilarity;
import org.apache.lucene.util.BytesRef;
-import java.io.IOException;
-import java.util.Map;
-
/**
* Function that returns {@link TFIDFSimilarity#tf(float)}
* for every document.
@@ -56,7 +60,7 @@ public class TFValueSource extends TermFreqValueSource {
}
return new FloatDocValues(this) {
- DocsEnum docs ;
+ PostingsEnum docs;
int atDoc;
int lastDocRequested = -1;
@@ -68,7 +72,7 @@ public class TFValueSource extends TermFreqValueSource {
if (terms != null) {
final TermsEnum termsEnum = terms.iterator(null);
if (termsEnum.seekExact(indexedBytes)) {
- docs = termsEnum.docs(null, null);
+ docs = termsEnum.postings(null, null);
} else {
docs = null;
}
@@ -77,13 +81,43 @@ public class TFValueSource extends TermFreqValueSource {
}
if (docs == null) {
- docs = new DocsEnum() {
+ docs = new PostingsEnum() {
@Override
public int freq() {
return 0;
}
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public int docID() {
return DocIdSetIterator.NO_MORE_DOCS;
}
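
When the term is missing, TFValueSource (and TermFreqValueSource below) install an already-exhausted enum instead of leaving docs null, so floatVal() needs no per-document null checks. The pattern as a named class — a sketch assuming the abstract-method set this WIP branch gives PostingsEnum:

    import java.io.IOException;

    import org.apache.lucene.index.PostingsEnum;
    import org.apache.lucene.search.DocIdSetIterator;
    import org.apache.lucene.util.BytesRef;

    final class ExhaustedPostingsEnum extends PostingsEnum {
      @Override public int freq() { return 0; }
      @Override public int docID() { return DocIdSetIterator.NO_MORE_DOCS; }
      @Override public int nextDoc() { return DocIdSetIterator.NO_MORE_DOCS; }
      @Override public int advance(int target) { return DocIdSetIterator.NO_MORE_DOCS; }
      @Override public long cost() { return 0; }
      @Override public int nextPosition() throws IOException { return -1; }
      @Override public int startPosition() throws IOException { return -1; }
      @Override public int endPosition() throws IOException { return -1; }
      @Override public int startOffset() throws IOException { return -1; }
      @Override public int endOffset() throws IOException { return -1; }
      @Override public BytesRef getPayload() throws IOException { return null; }
    }
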
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TermFreqValueSource.java b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TermFreqValueSource.java
index b5e4bc2..f865013 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TermFreqValueSource.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/valuesource/TermFreqValueSource.java
@@ -17,17 +17,21 @@
package org.apache.lucene.queries.function.valuesource;
-import org.apache.lucene.index.*;
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.lucene.index.PostingsEnum;
+import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.docvalues.IntDocValues;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.BytesRef;
-import java.io.IOException;
-import java.util.Map;
-
/**
- * Function that returns {@link DocsEnum#freq()} for the
+ * Function that returns {@link org.apache.lucene.index.PostingsEnum#freq()} for the
* supplied term in every document.
*
* If the term does not exist in the document, returns 0.
@@ -49,7 +53,7 @@ public class TermFreqValueSource extends DocFreqValueSource {
final Terms terms = fields.terms(indexedField);
return new IntDocValues(this) {
- DocsEnum docs ;
+ PostingsEnum docs;
int atDoc;
int lastDocRequested = -1;
@@ -61,7 +65,7 @@ public class TermFreqValueSource extends DocFreqValueSource {
if (terms != null) {
final TermsEnum termsEnum = terms.iterator(null);
if (termsEnum.seekExact(indexedBytes)) {
- docs = termsEnum.docs(null, null);
+ docs = termsEnum.postings(null, null);
} else {
docs = null;
}
@@ -70,13 +74,43 @@ public class TermFreqValueSource extends DocFreqValueSource {
}
if (docs == null) {
- docs = new DocsEnum() {
+ docs = new PostingsEnum() {
@Override
public int freq() {
return 0;
}
@Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
public int docID() {
return DocIdSetIterator.NO_MORE_DOCS;
}
diff --git a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionPostingsReader.java b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionPostingsReader.java
index b716acc..718ea39 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionPostingsReader.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionPostingsReader.java
@@ -22,8 +22,7 @@ import java.io.IOException;
import org.apache.lucene.codecs.BlockTermState;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.codecs.PostingsReaderBase;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.store.DataInput;
@@ -64,9 +63,22 @@ final class IDVersionPostingsReader extends PostingsReaderBase {
}
@Override
- public DocsEnum docs(FieldInfo fieldInfo, BlockTermState termState, Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+ public PostingsEnum docs(FieldInfo fieldInfo, BlockTermState termState, Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
SingleDocsEnum docsEnum;
+ if (PostingsEnum.requiresPositions(flags)) {
+ SinglePostingsEnum posEnum;
+
+ if (reuse instanceof SinglePostingsEnum) {
+ posEnum = (SinglePostingsEnum) reuse;
+ } else {
+ posEnum = new SinglePostingsEnum();
+ }
+ IDVersionTermState _termState = (IDVersionTermState) termState;
+ posEnum.reset(_termState.docID, _termState.idVersion, liveDocs);
+ return posEnum;
+ }
+
if (reuse instanceof SingleDocsEnum) {
docsEnum = (SingleDocsEnum) reuse;
} else {
@@ -78,21 +90,6 @@ final class IDVersionPostingsReader extends PostingsReaderBase {
}
@Override
- public DocsAndPositionsEnum docsAndPositions(FieldInfo fieldInfo, BlockTermState _termState, Bits liveDocs,
- DocsAndPositionsEnum reuse, int flags) {
- SingleDocsAndPositionsEnum posEnum;
-
- if (reuse instanceof SingleDocsAndPositionsEnum) {
- posEnum = (SingleDocsAndPositionsEnum) reuse;
- } else {
- posEnum = new SingleDocsAndPositionsEnum();
- }
- IDVersionTermState termState = (IDVersionTermState) _termState;
- posEnum.reset(termState.docID, termState.idVersion, liveDocs);
- return posEnum;
- }
-
- @Override
public long ramBytesUsed() {
return 0;
}
diff --git a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionSegmentTermsEnum.java b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionSegmentTermsEnum.java
index 481e74d..a89655e 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionSegmentTermsEnum.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionSegmentTermsEnum.java
@@ -21,9 +21,7 @@ import java.io.IOException;
import java.io.PrintStream;
import org.apache.lucene.codecs.BlockTermState;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.TermState;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.ByteArrayDataInput;
@@ -997,7 +995,7 @@ public final class IDVersionSegmentTermsEnum extends TermsEnum {
}
@Override
- public DocsEnum docs(Bits skipDocs, DocsEnum reuse, int flags) throws IOException {
+ public PostingsEnum postings(Bits skipDocs, PostingsEnum reuse, int flags) throws IOException {
assert !eof;
//if (DEBUG) {
//System.out.println("BTTR.docs seg=" + segment);
@@ -1010,18 +1008,6 @@ public final class IDVersionSegmentTermsEnum extends TermsEnum {
}
@Override
- public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- if (fr.fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
- // Positions were not indexed:
- return null;
- }
-
- assert !eof;
- currentFrame.decodeMetaData();
- return fr.parent.postingsReader.docsAndPositions(fr.fieldInfo, currentFrame.state, skipDocs, reuse, flags);
- }
-
- @Override
public void seekExact(BytesRef target, TermState otherState) {
// if (DEBUG) {
// System.out.println("BTTR.seekExact termState seg=" + segment + " target=" + target.utf8ToString() + " " + target + " state=" + otherState);
diff --git a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/SingleDocsAndPositionsEnum.java b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/SingleDocsAndPositionsEnum.java
deleted file mode 100644
index eecc700..0000000
--- a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/SingleDocsAndPositionsEnum.java
+++ /dev/null
@@ -1,105 +0,0 @@
-package org.apache.lucene.codecs.idversion;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.BytesRef;
-
-class SingleDocsAndPositionsEnum extends DocsAndPositionsEnum {
- private int doc;
- private int pos;
- private int singleDocID;
- private Bits liveDocs;
- private long version;
- private final BytesRef payload;
-
- public SingleDocsAndPositionsEnum() {
- payload = new BytesRef(8);
- payload.length = 8;
- }
-
- /** For reuse */
- public void reset(int singleDocID, long version, Bits liveDocs) {
- doc = -1;
- this.liveDocs = liveDocs;
- this.singleDocID = singleDocID;
- this.version = version;
- }
-
- @Override
- public int nextDoc() {
- if (doc == -1 && (liveDocs == null || liveDocs.get(singleDocID))) {
- doc = singleDocID;
- } else {
- doc = NO_MORE_DOCS;
- }
- pos = -1;
-
- return doc;
- }
-
- @Override
- public int docID() {
- return doc;
- }
-
- @Override
- public int advance(int target) {
- if (doc == -1 && target <= singleDocID && (liveDocs == null || liveDocs.get(singleDocID))) {
- doc = singleDocID;
- pos = -1;
- } else {
- doc = NO_MORE_DOCS;
- }
- return doc;
- }
-
- @Override
- public long cost() {
- return 1;
- }
-
- @Override
- public int freq() {
- return 1;
- }
-
- @Override
- public int nextPosition() {
- assert pos == -1;
- pos = 0;
- IDVersionPostingsFormat.longToBytes(version, payload);
- return pos;
- }
-
- @Override
- public BytesRef getPayload() {
- return payload;
- }
-
- @Override
- public int startOffset() {
- return -1;
- }
-
- @Override
- public int endOffset() {
- return -1;
- }
-}
diff --git a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/SingleDocsEnum.java b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/SingleDocsEnum.java
index b29619c..4b307e0 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/SingleDocsEnum.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/SingleDocsEnum.java
@@ -17,10 +17,13 @@ package org.apache.lucene.codecs.idversion;
* limitations under the License.
*/
-import org.apache.lucene.index.DocsEnum;
+import java.io.IOException;
+
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
-class SingleDocsEnum extends DocsEnum {
+class SingleDocsEnum extends PostingsEnum {
private int doc;
private int singleDocID;
@@ -68,4 +71,34 @@ class SingleDocsEnum extends DocsEnum {
public int freq() {
return 1;
}
+
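+  // SingleDocsEnum carries no position data: the PostingsEnum methods below are inert
+  // stubs that return -1, and getPayload() is unsupported.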
+ @Override
+ public int nextPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return -1;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ throw new UnsupportedOperationException();
+ }
}
diff --git a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/SinglePostingsEnum.java b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/SinglePostingsEnum.java
new file mode 100644
index 0000000..c418a52
--- /dev/null
+++ b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/SinglePostingsEnum.java
@@ -0,0 +1,117 @@
+package org.apache.lucene.codecs.idversion;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.index.PostingsEnum;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+
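+/** A single-document {@link PostingsEnum} with one position, whose payload is the 8-byte encoded version. */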
+class SinglePostingsEnum extends PostingsEnum {
+ private int doc;
+ private int pos;
+ private int singleDocID;
+ private Bits liveDocs;
+ private long version;
+ private final BytesRef payload;
+
+ public SinglePostingsEnum() {
+ payload = new BytesRef(8);
+ payload.length = 8;
+ }
+
+ /** For reuse */
+ public void reset(int singleDocID, long version, Bits liveDocs) {
+ doc = -1;
+ this.liveDocs = liveDocs;
+ this.singleDocID = singleDocID;
+ this.version = version;
+ }
+
+ @Override
+ public int nextDoc() {
+ if (doc == -1 && (liveDocs == null || liveDocs.get(singleDocID))) {
+ doc = singleDocID;
+ } else {
+ doc = NO_MORE_DOCS;
+ }
+ pos = -1;
+
+ return doc;
+ }
+
+ @Override
+ public int docID() {
+ return doc;
+ }
+
+ @Override
+ public int advance(int target) {
+ if (doc == -1 && target <= singleDocID && (liveDocs == null || liveDocs.get(singleDocID))) {
+ doc = singleDocID;
+ pos = -1;
+ } else {
+ doc = NO_MORE_DOCS;
+ }
+ return doc;
+ }
+
+ @Override
+ public long cost() {
+ return 1;
+ }
+
+ @Override
+ public int freq() {
+ return 1;
+ }
+
+ @Override
+ public int nextPosition() {
+ assert pos == -1;
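+    // There is exactly one position (0); the version is encoded into the payload here, on demand.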
+ pos = 0;
+ IDVersionPostingsFormat.longToBytes(version, payload);
+ return pos;
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return pos;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return pos;
+ }
+
+ @Override
+ public BytesRef getPayload() {
+ return payload;
+ }
+
+ @Override
+ public int startOffset() {
+ return -1;
+ }
+
+ @Override
+ public int endOffset() {
+ return -1;
+ }
+}
diff --git a/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/DuplicateFilter.java b/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/DuplicateFilter.java
index 826614c..3648003 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/DuplicateFilter.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/sandbox/queries/DuplicateFilter.java
@@ -18,7 +18,7 @@ package org.apache.lucene.sandbox.queries;
import java.io.IOException;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
@@ -100,13 +100,13 @@ public class DuplicateFilter extends Filter {
if (terms != null) {
TermsEnum termsEnum = terms.iterator(null);
- DocsEnum docs = null;
+ PostingsEnum docs = null;
while (true) {
BytesRef currTerm = termsEnum.next();
if (currTerm == null) {
break;
} else {
- docs = termsEnum.docs(acceptDocs, docs, DocsEnum.FLAG_NONE);
+ docs = termsEnum.postings(acceptDocs, docs, PostingsEnum.FLAG_NONE);
int doc = docs.nextDoc();
if (doc != DocIdSetIterator.NO_MORE_DOCS) {
if (keepMode == KeepMode.KM_USE_FIRST_OCCURRENCE) {
@@ -136,7 +136,7 @@ public class DuplicateFilter extends Filter {
if (terms != null) {
TermsEnum termsEnum = terms.iterator(null);
- DocsEnum docs = null;
+ PostingsEnum docs = null;
while (true) {
BytesRef currTerm = termsEnum.next();
if (currTerm == null) {
@@ -144,7 +144,7 @@ public class DuplicateFilter extends Filter {
} else {
if (termsEnum.docFreq() > 1) {
// unset potential duplicates
- docs = termsEnum.docs(acceptDocs, docs, DocsEnum.FLAG_NONE);
+ docs = termsEnum.postings(acceptDocs, docs, PostingsEnum.FLAG_NONE);
int doc = docs.nextDoc();
if (doc != DocIdSetIterator.NO_MORE_DOCS) {
if (keepMode == KeepMode.KM_USE_FIRST_OCCURRENCE) {
diff --git a/lucene/sandbox/src/java/org/apache/lucene/search/TermAutomatonQuery.java b/lucene/sandbox/src/java/org/apache/lucene/search/TermAutomatonQuery.java
index 7120dc2..20be332 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/search/TermAutomatonQuery.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/search/TermAutomatonQuery.java
@@ -24,7 +24,7 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
@@ -325,7 +325,7 @@ public class TermAutomatonQuery extends Query {
static class EnumAndScorer {
public final int termID;
- public final DocsAndPositionsEnum posEnum;
+ public final PostingsEnum posEnum;
// How many positions left in the current document:
public int posLeft;
@@ -333,7 +333,7 @@ public class TermAutomatonQuery extends Query {
// Current position
public int pos;
- public EnumAndScorer(int termID, DocsAndPositionsEnum posEnum) {
+ public EnumAndScorer(int termID, PostingsEnum posEnum) {
this.termID = termID;
this.posEnum = posEnum;
}
@@ -385,7 +385,7 @@ public class TermAutomatonQuery extends Query {
}
@Override
- public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+ public Scorer scorer(LeafReaderContext context, int flags, Bits acceptDocs) throws IOException {
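+    // New flags argument on scorer(): presumably how the searcher now tells the Weight which
+    // postings level (frequencies, positions, offsets) its Scorer will need.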
// Initialize the enums; null for a given slot means that term didn't appear in this reader
EnumAndScorer[] enums = new EnumAndScorer[idToTerm.size()];
@@ -399,8 +399,7 @@ public class TermAutomatonQuery extends Query {
TermsEnum termsEnum = context.reader().terms(field).iterator(null);
termsEnum.seekExact(term, state);
- enums[ent.getKey()] = new EnumAndScorer(ent.getKey(),
- termsEnum.docsAndPositions(acceptDocs, null, 0));
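+        // The removed docsAndPositions() implied positions; postings() must request them via FLAG_POSITIONS.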
+ enums[ent.getKey()] = new EnumAndScorer(ent.getKey(), termsEnum.postings(acceptDocs, null, PostingsEnum.FLAG_POSITIONS));
}
}
diff --git a/lucene/sandbox/src/java/org/apache/lucene/search/TermAutomatonScorer.java b/lucene/sandbox/src/java/org/apache/lucene/search/TermAutomatonScorer.java
index 106c307..e30efd7 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/search/TermAutomatonScorer.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/search/TermAutomatonScorer.java
@@ -326,6 +326,36 @@ class TermAutomatonScorer extends Scorer {
}
@Override
+ public int nextPosition() throws IOException {
+ return -1; // TODO can we get positional information out of this Scorer?
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ return 0;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return 0;
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ return 0;
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ return 0;
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ return null;
+ }
+
+ @Override
public int docID() {
return docID;
}
diff --git a/lucene/sandbox/src/test/org/apache/lucene/codecs/idversion/TestIDVersionPostingsFormat.java b/lucene/sandbox/src/test/org/apache/lucene/codecs/idversion/TestIDVersionPostingsFormat.java
index b8cfe3d..64137aa 100644
--- a/lucene/sandbox/src/test/org/apache/lucene/codecs/idversion/TestIDVersionPostingsFormat.java
+++ b/lucene/sandbox/src/test/org/apache/lucene/codecs/idversion/TestIDVersionPostingsFormat.java
@@ -30,7 +30,6 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.Analyzer.TokenStreamComponents;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenFilter;
import org.apache.lucene.analysis.MockTokenizer;
@@ -39,7 +38,7 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.ConcurrentMergeScheduler;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
@@ -331,9 +330,9 @@ public class TestIDVersionPostingsFormat extends LuceneTestCase {
if (VERBOSE) {
System.out.println(" found in seg=" + termsEnums[seg]);
}
- docsEnums[seg] = termsEnums[seg].docs(liveDocs[seg], docsEnums[seg], 0);
- int docID = docsEnums[seg].nextDoc();
- if (docID != DocsEnum.NO_MORE_DOCS) {
+ postingsEnums[seg] = termsEnums[seg].postings(liveDocs[seg], postingsEnums[seg], 0);
+ int docID = postingsEnums[seg].nextDoc();
+ if (docID != PostingsEnum.NO_MORE_DOCS) {
lastVersion = ((IDVersionSegmentTermsEnum) termsEnums[seg]).getVersion();
return docBases[seg] + docID;
}
diff --git a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java
index ffe8c34..6ba5e56 100644
--- a/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java
+++ b/lucene/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java
@@ -134,7 +134,7 @@ public class DuplicateFilterTest extends LuceneTestCase {
for (ScoreDoc hit : hits) {
StoredDocument d = searcher.doc(hit.doc);
String url = d.get(KEY_FIELD);
- DocsEnum td = TestUtil.docs(random(), reader,
+ PostingsEnum td = TestUtil.docs(random(), reader,
KEY_FIELD,
new BytesRef(url),
MultiFields.getLiveDocs(reader),
@@ -158,7 +158,7 @@ public class DuplicateFilterTest extends LuceneTestCase {
for (ScoreDoc hit : hits) {
StoredDocument d = searcher.doc(hit.doc);
String url = d.get(KEY_FIELD);
- DocsEnum td = TestUtil.docs(random(), reader,
+ PostingsEnum td = TestUtil.docs(random(), reader,
KEY_FIELD,
new BytesRef(url),
MultiFields.getLiveDocs(reader),
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/AbstractPrefixTreeFilter.java b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/AbstractPrefixTreeFilter.java
index 6a2e283..47f3bc5 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/AbstractPrefixTreeFilter.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/AbstractPrefixTreeFilter.java
@@ -20,7 +20,7 @@ package org.apache.lucene.spatial.prefix;
import java.io.IOException;
import com.spatial4j.core.shape.Shape;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Terms;
@@ -80,7 +80,7 @@ public abstract class AbstractPrefixTreeFilter extends Filter {
protected final int maxDoc;
protected TermsEnum termsEnum;//remember to check for null!
- protected DocsEnum docsEnum;
+ protected PostingsEnum postingsEnum;
public BaseTermsEnumTraverser(LeafReaderContext context, Bits acceptDocs) throws IOException {
this.context = context;
@@ -94,8 +94,8 @@ public abstract class AbstractPrefixTreeFilter extends Filter {
protected void collectDocs(BitSet bitSet) throws IOException {
assert termsEnum != null;
- docsEnum = termsEnum.docs(acceptDocs, docsEnum, DocsEnum.FLAG_NONE);
- bitSet.or(docsEnum);
+ postingsEnum = termsEnum.postings(acceptDocs, postingsEnum, PostingsEnum.FLAG_NONE);
+ bitSet.or(postingsEnum);
}
}
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/ContainsPrefixTreeFilter.java b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/ContainsPrefixTreeFilter.java
index 2ffa86b..108effe 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/ContainsPrefixTreeFilter.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/ContainsPrefixTreeFilter.java
@@ -21,7 +21,7 @@ import com.spatial4j.core.shape.Shape;
import com.spatial4j.core.shape.SpatialRelation;
import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.spatial.prefix.tree.Cell;
@@ -176,9 +176,9 @@ public class ContainsPrefixTreeFilter extends AbstractPrefixTreeFilter {
private SmallDocSet collectDocs(Bits acceptContains) throws IOException {
SmallDocSet set = null;
- docsEnum = termsEnum.docs(acceptContains, docsEnum, DocsEnum.FLAG_NONE);
+ postingsEnum = termsEnum.postings(acceptContains, postingsEnum, PostingsEnum.FLAG_NONE);
int docid;
- while ((docid = docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
+ while ((docid = postingsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
if (set == null) {
int size = termsEnum.docFreq();
if (size <= 0)
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/PrefixTreeFacetCounter.java b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/PrefixTreeFacetCounter.java
index 036346d..41d7cb8 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/PrefixTreeFacetCounter.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/prefix/PrefixTreeFacetCounter.java
@@ -20,7 +20,7 @@ package org.apache.lucene.spatial.prefix;
import java.io.IOException;
import com.spatial4j.core.shape.Shape;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSet;
@@ -169,8 +169,8 @@ public class PrefixTreeFacetCounter {
return termsEnum.docFreq();
}
int count = 0;
- docsEnum = termsEnum.docs(acceptDocs, docsEnum, DocsEnum.FLAG_NONE);
- while (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+ postingsEnum = termsEnum.postings(acceptDocs, postingsEnum, PostingsEnum.FLAG_NONE);
+ while (postingsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
count++;
}
return count;
@@ -180,8 +180,8 @@ public class PrefixTreeFacetCounter {
if (acceptDocs == null) {
return true;
}
- docsEnum = termsEnum.docs(acceptDocs, docsEnum, DocsEnum.FLAG_NONE);
- return (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+ postingsEnum = termsEnum.postings(acceptDocs, postingsEnum, PostingsEnum.FLAG_NONE);
+ return (postingsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
}
}.getDocIdSet();
diff --git a/lucene/spatial/src/java/org/apache/lucene/spatial/util/ShapeFieldCacheProvider.java b/lucene/spatial/src/java/org/apache/lucene/spatial/util/ShapeFieldCacheProvider.java
index bf5b726..cb29959 100644
--- a/lucene/spatial/src/java/org/apache/lucene/spatial/util/ShapeFieldCacheProvider.java
+++ b/lucene/spatial/src/java/org/apache/lucene/spatial/util/ShapeFieldCacheProvider.java
@@ -61,7 +61,7 @@ public abstract class ShapeFieldCacheProvider {
log.fine("Building Cache [" + reader.maxDoc() + "]");
idx = new ShapeFieldCache<>(reader.maxDoc(),defaultSize);
int count = 0;
- DocsEnum docs = null;
+ PostingsEnum docs = null;
Terms terms = reader.terms(shapeField);
TermsEnum te = null;
if (terms != null) {
@@ -70,7 +70,7 @@ public abstract class ShapeFieldCacheProvider {
while (term != null) {
T shape = readShape(term);
if( shape != null ) {
- docs = te.docs(null, docs, DocsEnum.FLAG_NONE);
+ docs = te.postings(null, docs, PostingsEnum.FLAG_NONE);
Integer docid = docs.nextDoc();
while (docid != DocIdSetIterator.NO_MORE_DOCS) {
idx.add( docid, shape );
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java
index 7c05665..ae941f6 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/analyzing/BlendedInfixSuggester.java
@@ -29,7 +29,7 @@ import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.BinaryDocValues;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.MultiDocValues;
import org.apache.lucene.index.Terms;
@@ -263,7 +263,7 @@ public class BlendedInfixSuggester extends AnalyzingInfixSuggester {
if (matchedTokens.contains(docTerm) || docTerm.startsWith(prefixToken)) {
- DocsAndPositionsEnum docPosEnum = it.docsAndPositions(null, null, DocsAndPositionsEnum.FLAG_OFFSETS);
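+      // FLAG_OFFSETS is assumed to imply position data in the merged API; the offsets are used just
+      // below to locate the first occurrence of the term.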
+ PostingsEnum docPosEnum = it.postings(null, null, PostingsEnum.FLAG_OFFSETS);
docPosEnum.nextDoc();
// use the first occurrence of the term
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java
index d08a72f..8d060e0 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java
@@ -25,8 +25,7 @@ import org.apache.lucene.codecs.FieldsConsumer;
import org.apache.lucene.codecs.FieldsProducer;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.index.AssertingLeafReader;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexOptions;
@@ -160,8 +159,7 @@ public final class AssertingPostingsFormat extends PostingsFormat {
termsEnum = terms.iterator(termsEnum);
BytesRefBuilder lastTerm = null;
- DocsEnum docsEnum = null;
- DocsAndPositionsEnum posEnum = null;
+ PostingsEnum postingsEnum = null;
boolean hasFreqs = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) >= 0;
boolean hasPositions = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
@@ -187,46 +185,46 @@ public final class AssertingPostingsFormat extends PostingsFormat {
int flags = 0;
if (hasPositions == false) {
if (hasFreqs) {
- flags = flags | DocsEnum.FLAG_FREQS;
+ flags = flags | PostingsEnum.FLAG_FREQS;
}
- docsEnum = termsEnum.docs(null, docsEnum, flags);
+ postingsEnum = termsEnum.postings(null, postingsEnum, flags);
} else {
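+        // Positions path: the single flags int must request positions explicitly before OR-ing in extras.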
+ flags = PostingsEnum.FLAG_POSITIONS;
if (hasPayloads) {
- flags |= DocsAndPositionsEnum.FLAG_PAYLOADS;
+ flags |= PostingsEnum.FLAG_PAYLOADS;
}
if (hasOffsets) {
- flags = flags | DocsAndPositionsEnum.FLAG_OFFSETS;
+ flags = flags | PostingsEnum.FLAG_OFFSETS;
}
- posEnum = termsEnum.docsAndPositions(null, posEnum, flags);
- docsEnum = posEnum;
+ postingsEnum = termsEnum.postings(null, postingsEnum, flags);
}
- assert docsEnum != null : "termsEnum=" + termsEnum + " hasPositions=" + hasPositions;
+ assert postingsEnum != null : "termsEnum=" + termsEnum + " hasPositions=" + hasPositions;
int lastDocID = -1;
while(true) {
- int docID = docsEnum.nextDoc();
- if (docID == DocsEnum.NO_MORE_DOCS) {
+ int docID = postingsEnum.nextDoc();
+ if (docID == PostingsEnum.NO_MORE_DOCS) {
break;
}
assert docID > lastDocID;
lastDocID = docID;
if (hasFreqs) {
- int freq = docsEnum.freq();
+ int freq = postingsEnum.freq();
assert freq > 0;
if (hasPositions) {
int lastPos = -1;
int lastStartOffset = -1;
          for(int i=0;i<freq;i++) {
-           int pos = posEnum.nextPosition();
+           int pos = postingsEnum.nextPosition();
            assert pos >= lastPos: "pos=" + pos + " vs lastPos=" + lastPos + " i=" + i + " freq=" + freq;
lastPos = pos;
if (hasOffsets) {
- int startOffset = posEnum.startOffset();
- int endOffset = posEnum.endOffset();
+ int startOffset = postingsEnum.startOffset();
+ int endOffset = postingsEnum.endOffset();
assert endOffset >= startOffset;
assert startOffset >= lastStartOffset;
lastStartOffset = startOffset;
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java
index aeb3521..eca0164 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java
@@ -34,8 +34,7 @@ import org.apache.lucene.codecs.FieldsConsumer;
import org.apache.lucene.codecs.FieldsProducer;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.codecs.TermStats;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexFileNames;
@@ -252,8 +251,7 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat {
FixedBitSet docsSeen = new FixedBitSet(state.segmentInfo.getDocCount());
long sumTotalTermFreq = 0;
long sumDocFreq = 0;
- DocsEnum docsEnum = null;
- DocsAndPositionsEnum posEnum = null;
+ PostingsEnum postingsEnum = null;
int enumFlags;
IndexOptions indexOptions = fieldInfo.getIndexOptions();
@@ -265,18 +263,18 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat {
if (writeFreqs == false) {
enumFlags = 0;
} else if (writePositions == false) {
- enumFlags = DocsEnum.FLAG_FREQS;
+ enumFlags = PostingsEnum.FLAG_FREQS;
} else if (writeOffsets == false) {
if (writePayloads) {
- enumFlags = DocsAndPositionsEnum.FLAG_PAYLOADS;
+ enumFlags = PostingsEnum.FLAG_PAYLOADS;
} else {
enumFlags = 0;
}
} else {
if (writePayloads) {
- enumFlags = DocsAndPositionsEnum.FLAG_PAYLOADS | DocsAndPositionsEnum.FLAG_OFFSETS;
+ enumFlags = PostingsEnum.FLAG_PAYLOADS | PostingsEnum.FLAG_OFFSETS;
} else {
- enumFlags = DocsAndPositionsEnum.FLAG_OFFSETS;
+ enumFlags = PostingsEnum.FLAG_OFFSETS;
}
}
@@ -286,20 +284,13 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat {
break;
}
RAMPostingsWriterImpl postingsWriter = termsConsumer.startTerm(term);
-
- if (writePositions) {
- posEnum = termsEnum.docsAndPositions(null, posEnum, enumFlags);
- docsEnum = posEnum;
- } else {
- docsEnum = termsEnum.docs(null, docsEnum, enumFlags);
- posEnum = null;
- }
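+      // A single postings() call now covers both cases; enumFlags selects the level of detail.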
+ postingsEnum = termsEnum.postings(null, postingsEnum, enumFlags);
int docFreq = 0;
long totalTermFreq = 0;
while (true) {
- int docID = docsEnum.nextDoc();
- if (docID == DocsEnum.NO_MORE_DOCS) {
+ int docID = postingsEnum.nextDoc();
+ if (docID == PostingsEnum.NO_MORE_DOCS) {
break;
}
docsSeen.set(docID);
@@ -307,7 +298,7 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat {
int freq;
if (writeFreqs) {
- freq = docsEnum.freq();
+ freq = postingsEnum.freq();
totalTermFreq += freq;
} else {
freq = -1;
@@ -316,13 +307,13 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat {
postingsWriter.startDoc(docID, freq);
if (writePositions) {
            for (int i=0;i<freq;i++) {
-             int pos = posEnum.nextPosition();
+             int pos = postingsEnum.nextPosition();
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java b/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java
--- a/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java
      int nextDoc = super.nextDoc();
      assert nextDoc > doc : "backwards nextDoc from " + doc + " to " + nextDoc + " " + in;
if (nextDoc == DocIdSetIterator.NO_MORE_DOCS) {
state = DocsEnumState.FINISHED;
- } else {
- state = DocsEnumState.ITERATING;
- }
- assert super.docID() == nextDoc;
- return doc = nextDoc;
- }
-
- @Override
- public int advance(int target) throws IOException {
- assertThread("Docs enums", creationThread);
- assert state != DocsEnumState.FINISHED : "advance() called after NO_MORE_DOCS";
- assert target > doc : "target must be > docID(), got " + target + " <= " + doc;
- int advanced = super.advance(target);
- assert advanced >= target : "backwards advance from: " + target + " to: " + advanced;
- if (advanced == DocIdSetIterator.NO_MORE_DOCS) {
- state = DocsEnumState.FINISHED;
- } else {
- state = DocsEnumState.ITERATING;
- }
- assert super.docID() == advanced;
- return doc = advanced;
- }
-
- @Override
- public int docID() {
- assertThread("Docs enums", creationThread);
- assert doc == super.docID() : " invalid docID() in " + in.getClass() + " " + super.docID() + " instead of " + doc;
- return doc;
- }
-
- @Override
- public int freq() throws IOException {
- assertThread("Docs enums", creationThread);
- assert state != DocsEnumState.START : "freq() called before nextDoc()/advance()";
- assert state != DocsEnumState.FINISHED : "freq() called after NO_MORE_DOCS";
- int freq = super.freq();
- assert freq > 0;
- return freq;
- }
- }
-
- static class AssertingDocsAndPositionsEnum extends FilterDocsAndPositionsEnum {
- private final Thread creationThread = Thread.currentThread();
- private DocsEnumState state = DocsEnumState.START;
- private int positionMax = 0;
- private int positionCount = 0;
- private int doc;
-
- public AssertingDocsAndPositionsEnum(DocsAndPositionsEnum in) {
- super(in);
- int docid = in.docID();
- assert docid == -1 : "invalid initial doc id: " + docid;
- doc = -1;
- }
-
- @Override
- public int nextDoc() throws IOException {
- assertThread("Docs enums", creationThread);
- assert state != DocsEnumState.FINISHED : "nextDoc() called after NO_MORE_DOCS";
- int nextDoc = super.nextDoc();
- assert nextDoc > doc : "backwards nextDoc from " + doc + " to " + nextDoc;
- positionCount = 0;
- if (nextDoc == DocIdSetIterator.NO_MORE_DOCS) {
- state = DocsEnumState.FINISHED;
positionMax = 0;
} else {
state = DocsEnumState.ITERATING;
positionMax = super.freq();
}
+ positionCount = 0;
assert super.docID() == nextDoc;
return doc = nextDoc;
}
@@ -372,7 +297,6 @@ public class AssertingLeafReader extends FilterLeafReader {
assert target > doc : "target must be > docID(), got " + target + " <= " + doc;
int advanced = super.advance(target);
assert advanced >= target : "backwards advance from: " + target + " to: " + advanced;
- positionCount = 0;
if (advanced == DocIdSetIterator.NO_MORE_DOCS) {
state = DocsEnumState.FINISHED;
positionMax = 0;
@@ -380,6 +304,7 @@ public class AssertingLeafReader extends FilterLeafReader {
state = DocsEnumState.ITERATING;
positionMax = super.freq();
}
+ positionCount = 0;
assert super.docID() == advanced;
return doc = advanced;
}
@@ -403,7 +328,6 @@ public class AssertingLeafReader extends FilterLeafReader {
@Override
public int nextPosition() throws IOException {
- assertThread("Docs enums", creationThread);
assert state != DocsEnumState.START : "nextPosition() called before nextDoc()/advance()";
assert state != DocsEnumState.FINISHED : "nextPosition() called after NO_MORE_DOCS";
assert positionCount < positionMax : "nextPosition() called more than freq() times!";
@@ -415,7 +339,6 @@ public class AssertingLeafReader extends FilterLeafReader {
@Override
public int startOffset() throws IOException {
- assertThread("Docs enums", creationThread);
assert state != DocsEnumState.START : "startOffset() called before nextDoc()/advance()";
assert state != DocsEnumState.FINISHED : "startOffset() called after NO_MORE_DOCS";
assert positionCount > 0 : "startOffset() called before nextPosition()!";
@@ -424,7 +347,6 @@ public class AssertingLeafReader extends FilterLeafReader {
@Override
public int endOffset() throws IOException {
- assertThread("Docs enums", creationThread);
assert state != DocsEnumState.START : "endOffset() called before nextDoc()/advance()";
assert state != DocsEnumState.FINISHED : "endOffset() called after NO_MORE_DOCS";
assert positionCount > 0 : "endOffset() called before nextPosition()!";
@@ -432,17 +354,32 @@ public class AssertingLeafReader extends FilterLeafReader {
}
@Override
+ public int startPosition() throws IOException {
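+      // Mirrors the startOffset()/endOffset() checks: only legal after nextPosition() and before exhaustion.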
+ assert state != DocsEnumState.START : "startPosition() called before nextDoc()/advance()";
+ assert state != DocsEnumState.FINISHED : "startPosition() called after NO_MORE_DOCS";
+ assert positionCount > 0 : "startPosition() called before nextPosition()!";
+ return super.startPosition();
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ assert state != DocsEnumState.START : "endPosition() called before nextDoc()/advance()";
+ assert state != DocsEnumState.FINISHED : "endPosition() called after NO_MORE_DOCS";
+ assert positionCount > 0 : "endPosition() called before nextPosition()!";
+ return super.endPosition();
+ }
+
+ @Override
public BytesRef getPayload() throws IOException {
- assertThread("Docs enums", creationThread);
assert state != DocsEnumState.START : "getPayload() called before nextDoc()/advance()";
assert state != DocsEnumState.FINISHED : "getPayload() called after NO_MORE_DOCS";
assert positionCount > 0 : "getPayload() called before nextPosition()!";
BytesRef payload = super.getPayload();
- assert payload == null || payload.isValid() && payload.length > 0 : "getPayload() returned payload with invalid length!";
+ assert payload == null || payload.length > 0 : "getPayload() returned payload with invalid length!";
return payload;
}
}
-
+
/** Wraps a NumericDocValues but with additional asserts */
public static class AssertingNumericDocValues extends NumericDocValues {
private final Thread creationThread = Thread.currentThread();
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java
index 16845f9..5d970ff 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseDocValuesFormatTestCase.java
@@ -17,8 +17,6 @@ package org.apache.lucene.index;
* limitations under the License.
*/
-import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS;
-
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
@@ -61,6 +59,8 @@ import org.apache.lucene.util.BytesRefHash;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.TestUtil;
+import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS;
+
/**
* Abstract class to do basic tests for a docvalues format.
* NOTE: This test focuses on the docvalues impl, nothing else.
@@ -1154,8 +1154,8 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
for (Entry entry : entrySet) {
// pk lookup
- DocsEnum termDocsEnum = slowR.termDocsEnum(new Term("id", entry.getKey()));
- int docId = termDocsEnum.nextDoc();
+ PostingsEnum termPostingsEnum = slowR.termDocsEnum(new Term("id", entry.getKey()));
+ int docId = termPostingsEnum.nextDoc();
expected = new BytesRef(entry.getValue());
final BytesRef actual = docValues.get(docId);
assertEquals(expected, actual);
@@ -2083,7 +2083,7 @@ public abstract class BaseDocValuesFormatTestCase extends BaseIndexFileFormatTes
);
}
}
-
+
public void testSortedNumericsMultipleValuesVsStoredFields() throws Exception {
assumeTrue("Codec does not support SORTED_NUMERIC", codecSupportsSortedNumeric());
int numIterations = atLeast(1);
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java
index 6024c7e..45fdec1 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java
@@ -97,7 +97,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
// Sometimes use .advance():
SKIPPING,
- // Sometimes reuse the Docs/AndPositionsEnum across terms:
+ // Sometimes reuse the PostingsEnum across terms:
REUSE_ENUMS,
// Sometimes pass non-null live docs:
@@ -121,7 +121,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
/** Given the same random seed this always enumerates the
* same random postings */
- private static class SeedPostings extends DocsAndPositionsEnum {
+ private static class SeedPostings extends PostingsEnum {
// Used only to generate docIDs; this way if you pull w/
// or w/o positions you get the same docID sequence:
private final Random docRandom;
@@ -234,7 +234,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
return 0;
}
assert posUpto < freq;
-
+
if (posUpto == 0 && random.nextBoolean()) {
// Sometimes index pos = 0
} else if (posSpacing == 1) {
@@ -270,7 +270,17 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
posUpto++;
return pos;
}
-
+
+ @Override
+ public int startPosition() throws IOException {
+ return pos;
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ return pos;
+ }
+
@Override
public int startOffset() {
return startOffset;
@@ -414,10 +424,10 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
// NOTE: sort of silly: we enum all the docs just to
// get the maxDoc
- DocsEnum docsEnum = getSeedPostings(term, termSeed, false, IndexOptions.DOCS, true);
+ PostingsEnum postingsEnum = getSeedPostings(term, termSeed, false, IndexOptions.DOCS, true);
int doc;
int lastDoc = 0;
- while((doc = docsEnum.nextDoc()) != DocsEnum.NO_MORE_DOCS) {
+ while((doc = postingsEnum.nextDoc()) != PostingsEnum.NO_MORE_DOCS) {
lastDoc = doc;
}
maxDoc = Math.max(lastDoc, maxDoc);
@@ -639,32 +649,27 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
}
@Override
- public final DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+ public final PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
if (liveDocs != null) {
throw new IllegalArgumentException("liveDocs must be null");
}
- if ((flags & DocsEnum.FLAG_FREQS) != 0 && maxAllowed.compareTo(IndexOptions.DOCS_AND_FREQS) < 0) {
- return null;
- }
- return getSeedPostings(current.getKey().utf8ToString(), current.getValue().seed, false, maxAllowed, allowPayloads);
- }
-
- @Override
- public final DocsAndPositionsEnum docsAndPositions(Bits liveDocs, DocsAndPositionsEnum reuse, int flags) throws IOException {
- if (liveDocs != null) {
- throw new IllegalArgumentException("liveDocs must be null");
- }
- if (maxAllowed.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
- return null;
- }
- if ((flags & DocsAndPositionsEnum.FLAG_OFFSETS) != 0 && maxAllowed.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) < 0) {
- return null;
+ if (PostingsEnum.requiresPositions(flags)) {
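+      // Position-level flags can only be honored if the field indexed positions (and offsets/payloads
+      // only if they were indexed/allowed); otherwise return null, as the old docsAndPositions() did.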
+ if (maxAllowed.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) {
+ return null;
+ }
+ if ((flags & PostingsEnum.FLAG_OFFSETS) == PostingsEnum.FLAG_OFFSETS && maxAllowed.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) < 0) {
+ return null;
+ }
+ if ((flags & PostingsEnum.FLAG_PAYLOADS) == PostingsEnum.FLAG_PAYLOADS && allowPayloads == false) {
+ return null;
+ }
}
- if ((flags & DocsAndPositionsEnum.FLAG_PAYLOADS) != 0 && allowPayloads == false) {
+ if ((flags & PostingsEnum.FLAG_FREQS) != 0 && maxAllowed.compareTo(IndexOptions.DOCS_AND_FREQS) < 0) {
return null;
}
return getSeedPostings(current.getKey().utf8ToString(), current.getValue().seed, false, maxAllowed, allowPayloads);
}
+
}
// TODO maybe instead of @BeforeClass just make a single test run: build postings & index & test it?
@@ -745,8 +750,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
private static class ThreadState {
// Only used with REUSE option:
- public DocsEnum reuseDocsEnum;
- public DocsAndPositionsEnum reuseDocsAndPositionsEnum;
+ public PostingsEnum reusePostingsEnum;
}
private void verifyEnum(ThreadState threadState,
@@ -809,78 +813,74 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
boolean doCheckPayloads = options.contains(Option.PAYLOADS) && allowPositions && fieldInfo.hasPayloads() && (alwaysTestMax || random().nextInt(3) <= 2);
- DocsEnum prevDocsEnum = null;
+ PostingsEnum prevPostingsEnum = null;
- DocsEnum docsEnum;
- DocsAndPositionsEnum docsAndPositionsEnum;
+ PostingsEnum postingsEnum;
if (!doCheckPositions) {
if (allowPositions && random().nextInt(10) == 7) {
// 10% of the time, even though we will not check positions, pull a DocsAndPositions enum
if (options.contains(Option.REUSE_ENUMS) && random().nextInt(10) < 9) {
- prevDocsEnum = threadState.reuseDocsAndPositionsEnum;
+ prevPostingsEnum = threadState.reusePostingsEnum;
}
- int flags = 0;
+ int flags = PostingsEnum.FLAG_POSITIONS;
if (alwaysTestMax || random().nextBoolean()) {
- flags |= DocsAndPositionsEnum.FLAG_OFFSETS;
+ flags |= PostingsEnum.FLAG_OFFSETS;
}
if (alwaysTestMax || random().nextBoolean()) {
- flags |= DocsAndPositionsEnum.FLAG_PAYLOADS;
+ flags |= PostingsEnum.FLAG_PAYLOADS;
}
if (VERBOSE) {
- System.out.println(" get DocsAndPositionsEnum (but we won't check positions) flags=" + flags);
+ System.out.println(" get DocsEnum (but we won't check positions) flags=" + flags);
}
- threadState.reuseDocsAndPositionsEnum = termsEnum.docsAndPositions(liveDocs, (DocsAndPositionsEnum) prevDocsEnum, flags);
- docsEnum = threadState.reuseDocsAndPositionsEnum;
- docsAndPositionsEnum = threadState.reuseDocsAndPositionsEnum;
+ threadState.reusePostingsEnum = termsEnum.postings(liveDocs, prevPostingsEnum, flags);
+ postingsEnum = threadState.reusePostingsEnum;
} else {
if (VERBOSE) {
System.out.println(" get DocsEnum");
}
if (options.contains(Option.REUSE_ENUMS) && random().nextInt(10) < 9) {
- prevDocsEnum = threadState.reuseDocsEnum;
+ prevPostingsEnum = threadState.reusePostingsEnum;
}
- threadState.reuseDocsEnum = termsEnum.docs(liveDocs, prevDocsEnum, doCheckFreqs ? DocsEnum.FLAG_FREQS : DocsEnum.FLAG_NONE);
- docsEnum = threadState.reuseDocsEnum;
- docsAndPositionsEnum = null;
+ threadState.reusePostingsEnum = termsEnum.postings(liveDocs, prevPostingsEnum, doCheckFreqs ? PostingsEnum.FLAG_FREQS : PostingsEnum.FLAG_NONE);
+ postingsEnum = threadState.reusePostingsEnum;
}
} else {
if (options.contains(Option.REUSE_ENUMS) && random().nextInt(10) < 9) {
- prevDocsEnum = threadState.reuseDocsAndPositionsEnum;
+ prevPostingsEnum = threadState.reusePostingsEnum;
}
- int flags = 0;
+ int flags = PostingsEnum.FLAG_POSITIONS;
if (alwaysTestMax || doCheckOffsets || random().nextInt(3) == 1) {
- flags |= DocsAndPositionsEnum.FLAG_OFFSETS;
+ flags |= PostingsEnum.FLAG_OFFSETS;
}
if (alwaysTestMax || doCheckPayloads|| random().nextInt(3) == 1) {
- flags |= DocsAndPositionsEnum.FLAG_PAYLOADS;
+ flags |= PostingsEnum.FLAG_PAYLOADS;
}
if (VERBOSE) {
- System.out.println(" get DocsAndPositionsEnum flags=" + flags);
+ System.out.println(" get DocsEnum flags=" + flags);
}
- threadState.reuseDocsAndPositionsEnum = termsEnum.docsAndPositions(liveDocs, (DocsAndPositionsEnum) prevDocsEnum, flags);
- docsEnum = threadState.reuseDocsAndPositionsEnum;
- docsAndPositionsEnum = threadState.reuseDocsAndPositionsEnum;
+ threadState.reusePostingsEnum = termsEnum.postings(liveDocs, prevPostingsEnum, flags);
+ postingsEnum = threadState.reusePostingsEnum;
}
- assertNotNull("null DocsEnum", docsEnum);
- int initialDocID = docsEnum.docID();
- assertEquals("inital docID should be -1" + docsEnum, -1, initialDocID);
+ assertNotNull("null DocsEnum", postingsEnum);
+ int initialDocID = postingsEnum.docID();
+ assertEquals("inital docID should be -1" + postingsEnum, -1, initialDocID);
if (VERBOSE) {
- if (prevDocsEnum == null) {
- System.out.println(" got enum=" + docsEnum);
- } else if (prevDocsEnum == docsEnum) {
- System.out.println(" got reuse enum=" + docsEnum);
+ if (prevPostingsEnum == null) {
+ System.out.println(" got enum=" + postingsEnum);
+ } else if (prevPostingsEnum == postingsEnum) {
+ System.out.println(" got reuse enum=" + postingsEnum);
} else {
- System.out.println(" got enum=" + docsEnum + " (reuse of " + prevDocsEnum + " failed)");
+ System.out.println(" got enum=" + postingsEnum + " (reuse of " + prevPostingsEnum + " failed)");
}
}
@@ -930,10 +930,10 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
while (expected.upto <= stopAt) {
if (expected.upto == stopAt) {
if (stopAt == expected.docFreq) {
- assertEquals("DocsEnum should have ended but didn't", DocsEnum.NO_MORE_DOCS, docsEnum.nextDoc());
+ assertEquals("DocsEnum should have ended but didn't", PostingsEnum.NO_MORE_DOCS, postingsEnum.nextDoc());
// Common bug is to forget to set this.doc=NO_MORE_DOCS in the enum!:
- assertEquals("DocsEnum should have ended but didn't", DocsEnum.NO_MORE_DOCS, docsEnum.docID());
+ assertEquals("DocsEnum should have ended but didn't", PostingsEnum.NO_MORE_DOCS, postingsEnum.docID());
}
break;
}
@@ -944,7 +944,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
// Pick target we know exists:
final int skipCount = TestUtil.nextInt(random(), 1, skipInc);
          for(int skip=0;skip<skipCount;skip++) {
-           if (expected.nextDoc() == DocsEnum.NO_MORE_DOCS) {
+           if (expected.nextDoc() == PostingsEnum.NO_MORE_DOCS) {
              break;
            }
          }
          if (expected.upto >= stopAt) {
- int target = random().nextBoolean() ? maxDoc : DocsEnum.NO_MORE_DOCS;
+ int target = random().nextBoolean() ? maxDoc : PostingsEnum.NO_MORE_DOCS;
if (VERBOSE) {
System.out.println(" now advance to end (target=" + target + ")");
}
- assertEquals("DocsEnum should have ended but didn't", DocsEnum.NO_MORE_DOCS, docsEnum.advance(target));
+ assertEquals("DocsEnum should have ended but didn't", PostingsEnum.NO_MORE_DOCS, postingsEnum.advance(target));
break;
} else {
if (VERBOSE) {
if (targetDocID != -1) {
- System.out.println(" now advance to random target=" + targetDocID + " (" + expected.upto + " of " + stopAt + ") current=" + docsEnum.docID());
+ System.out.println(" now advance to random target=" + targetDocID + " (" + expected.upto + " of " + stopAt + ") current=" + postingsEnum.docID());
} else {
- System.out.println(" now advance to known-exists target=" + expected.docID() + " (" + expected.upto + " of " + stopAt + ") current=" + docsEnum.docID());
+ System.out.println(" now advance to known-exists target=" + expected.docID() + " (" + expected.upto + " of " + stopAt + ") current=" + postingsEnum.docID());
}
}
- int docID = docsEnum.advance(targetDocID != -1 ? targetDocID : expected.docID());
+ int docID = postingsEnum.advance(targetDocID != -1 ? targetDocID : expected.docID());
assertEquals("docID is wrong", expected.docID(), docID);
}
} else {
@@ -980,9 +980,9 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
if (VERBOSE) {
System.out.println(" now nextDoc to " + expected.docID() + " (" + expected.upto + " of " + stopAt + ")");
}
- int docID = docsEnum.nextDoc();
+ int docID = postingsEnum.nextDoc();
assertEquals("docID is wrong", expected.docID(), docID);
- if (docID == DocsEnum.NO_MORE_DOCS) {
+ if (docID == PostingsEnum.NO_MORE_DOCS) {
break;
}
}
@@ -991,12 +991,12 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
if (VERBOSE) {
System.out.println(" now freq()=" + expected.freq());
}
- int freq = docsEnum.freq();
+ int freq = postingsEnum.freq();
assertEquals("freq is wrong", expected.freq(), freq);
}
if (doCheckPositions) {
- int freq = docsEnum.freq();
+ int freq = postingsEnum.freq();
int numPosToConsume;
if (!alwaysTestMax && options.contains(Option.PARTIAL_POS_CONSUME) && random().nextInt(5) == 1) {
numPosToConsume = random().nextInt(freq);
@@ -1009,7 +1009,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
if (VERBOSE) {
System.out.println(" now nextPosition to " + pos);
}
- assertEquals("position is wrong", pos, docsAndPositionsEnum.nextPosition());
+ assertEquals("position is wrong", pos, postingsEnum.nextPosition());
if (doCheckPayloads) {
BytesRef expectedPayload = expected.getPayload();
@@ -1018,9 +1018,9 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
System.out.println(" now check expectedPayload length=" + (expectedPayload == null ? 0 : expectedPayload.length));
}
if (expectedPayload == null || expectedPayload.length == 0) {
- assertNull("should not have payload", docsAndPositionsEnum.getPayload());
+ assertNull("should not have payload", postingsEnum.getPayload());
} else {
- BytesRef payload = docsAndPositionsEnum.getPayload();
+ BytesRef payload = postingsEnum.getPayload();
assertNotNull("should have payload but doesn't", payload);
assertEquals("payload length is wrong", expectedPayload.length, payload.length);
@@ -1032,7 +1032,7 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
// make a deep copy
payload = BytesRef.deepCopyOf(payload);
- assertEquals("2nd call to getPayload returns something different!", payload, docsAndPositionsEnum.getPayload());
+ assertEquals("2nd call to getPayload returns something different!", payload, postingsEnum.getPayload());
}
} else {
if (VERBOSE) {
@@ -1046,8 +1046,8 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
if (VERBOSE) {
System.out.println(" now check offsets: startOff=" + expected.startOffset() + " endOffset=" + expected.endOffset());
}
- assertEquals("startOffset is wrong", expected.startOffset(), docsAndPositionsEnum.startOffset());
- assertEquals("endOffset is wrong", expected.endOffset(), docsAndPositionsEnum.endOffset());
+ assertEquals("startOffset is wrong", expected.startOffset(), postingsEnum.startOffset());
+ assertEquals("endOffset is wrong", expected.endOffset(), postingsEnum.endOffset());
} else {
if (VERBOSE) {
System.out.println(" skip check offsets");
@@ -1057,8 +1057,8 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
if (VERBOSE) {
System.out.println(" now check offsets are -1");
}
- assertEquals("startOffset isn't -1", -1, docsAndPositionsEnum.startOffset());
- assertEquals("endOffset isn't -1", -1, docsAndPositionsEnum.endOffset());
+ assertEquals("startOffset isn't -1", -1, postingsEnum.startOffset());
+ assertEquals("endOffset isn't -1", -1, postingsEnum.endOffset());
}
}
}
@@ -1406,6 +1406,41 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
IOUtils.rm(path);
}
}
+
+ protected boolean isPostingsEnumReuseImplemented() {
+ return true;
+ }
+
+ public void testPostingsEnumReuse() throws Exception {
+
+ Path path = createTempDir("testPostingsEnumReuse");
+ Directory dir = newFSDirectory(path);
+
+ FieldsProducer fieldsProducer = buildIndex(dir, IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS, random().nextBoolean(), true);
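+    // Index with the richest options so every flag combination requested below can be satisfied.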
+ Collections.shuffle(allTerms, random());
+ FieldAndTerm fieldAndTerm = allTerms.get(0);
+
+ Terms terms = fieldsProducer.terms(fieldAndTerm.field);
+ TermsEnum te = terms.iterator(null);
+
+ te.seekExact(fieldAndTerm.term);
+ checkReuse(te, PostingsEnum.FLAG_FREQS, PostingsEnum.FLAG_ALL, false);
+ if (isPostingsEnumReuseImplemented())
+ checkReuse(te, PostingsEnum.FLAG_ALL, PostingsEnum.FLAG_ALL, true);
+
+ fieldsProducer.close();
+ dir.close();
+ IOUtils.rm(path);
+ }
+
+ protected static void checkReuse(TermsEnum termsEnum, int firstFlags, int secondFlags, boolean shouldReuse) throws IOException {
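+    // Pull postings for the same term twice, passing the first enum in as "reuse", and assert
+    // whether the codec actually handed the same instance back.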
+ PostingsEnum postings1 = termsEnum.postings(null, null, firstFlags);
+ PostingsEnum postings2 = termsEnum.postings(null, postings1, secondFlags);
+ if (shouldReuse)
+ assertSame("Expected PostingsEnum " + postings1.getClass().getName() + " to be reused", postings1, postings2);
+ else
+ assertNotSame("Expected PostingsEnum " + postings1.getClass().getName() + " to not be reused", postings1, postings2);
+ }
public void testJustEmptyField() throws Exception {
Directory dir = newDirectory();
@@ -1483,8 +1518,8 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
TermsEnum termsEnum = terms.iterator(null);
BytesRef term = termsEnum.next();
if (term != null) {
- DocsEnum docsEnum = termsEnum.docs(null, null);
- assertTrue(docsEnum.nextDoc() == DocsEnum.NO_MORE_DOCS);
+ PostingsEnum postingsEnum = termsEnum.postings(null, null);
+ assertTrue(postingsEnum.nextDoc() == PostingsEnum.NO_MORE_DOCS);
}
}
ir.close();
@@ -1569,27 +1604,24 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
assert terms != null;
TermsEnum termsEnum = terms.iterator(null);
- DocsEnum docs = null;
+ PostingsEnum docs = null;
while(termsEnum.next() != null) {
BytesRef term = termsEnum.term();
-
- if (random().nextBoolean()) {
- docs = termsEnum.docs(null, docs, DocsEnum.FLAG_FREQS);
- } else if (docs instanceof DocsAndPositionsEnum) {
- docs = termsEnum.docsAndPositions(null, (DocsAndPositionsEnum) docs, 0);
+ boolean noPositions = random().nextBoolean();
+ if (noPositions) {
+ docs = termsEnum.postings(null, docs, PostingsEnum.FLAG_FREQS);
} else {
- docs = termsEnum.docsAndPositions(null, null, 0);
+ docs = termsEnum.postings(null, null, PostingsEnum.FLAG_POSITIONS);
}
int docFreq = 0;
long totalTermFreq = 0;
- while (docs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
+ while (docs.nextDoc() != PostingsEnum.NO_MORE_DOCS) {
docFreq++;
totalTermFreq += docs.freq();
- if (docs instanceof DocsAndPositionsEnum) {
- DocsAndPositionsEnum posEnum = (DocsAndPositionsEnum) docs;
- int limit = TestUtil.nextInt(random(), 1, docs.freq());
-          for(int i=0;i<limit;i++) {
-            posEnum.nextPosition();
-          }
-        }
+        if (noPositions == false) {
+          int limit = TestUtil.nextInt(random(), 1, docs.freq());
+          for(int i=0;i<limit;i++) {
+            docs.nextPosition();
+          }
+        }
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseTermVectorsFormatTestCase.java
   private final ThreadLocal<TermsEnum> termsEnum = new ThreadLocal<>();
-  private final ThreadLocal<DocsEnum> docsEnum = new ThreadLocal<>();
-  private final ThreadLocal<DocsAndPositionsEnum> docsAndPositionsEnum = new ThreadLocal<>();
+  private final ThreadLocal<PostingsEnum> docsEnum = new ThreadLocal<>();
+  private final ThreadLocal<PostingsEnum> docsAndPositionsEnum = new ThreadLocal<>();
protected void assertEquals(RandomTokenStream tk, FieldType ft, Terms terms) throws IOException {
assertEquals(1, terms.getDocCount());
@@ -440,27 +439,27 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
assertEquals(1, termsEnum.docFreq());
final FixedBitSet bits = new FixedBitSet(1);
- DocsEnum docsEnum = termsEnum.docs(bits, random().nextBoolean() ? null : this.docsEnum.get());
- assertEquals(DocsEnum.NO_MORE_DOCS, docsEnum.nextDoc());
+ PostingsEnum postingsEnum = termsEnum.postings(bits, random().nextBoolean() ? null : this.docsEnum.get());
+ assertEquals(PostingsEnum.NO_MORE_DOCS, postingsEnum.nextDoc());
bits.set(0);
- docsEnum = termsEnum.docs(random().nextBoolean() ? bits : null, random().nextBoolean() ? null : docsEnum);
- assertNotNull(docsEnum);
- assertEquals(0, docsEnum.nextDoc());
- assertEquals(0, docsEnum.docID());
- assertEquals(tk.freqs.get(termsEnum.term().utf8ToString()), (Integer) docsEnum.freq());
- assertEquals(DocsEnum.NO_MORE_DOCS, docsEnum.nextDoc());
- this.docsEnum.set(docsEnum);
+ postingsEnum = termsEnum.postings(random().nextBoolean() ? bits : null, random().nextBoolean() ? null : postingsEnum);
+ assertNotNull(postingsEnum);
+ assertEquals(0, postingsEnum.nextDoc());
+ assertEquals(0, postingsEnum.docID());
+ assertEquals(tk.freqs.get(termsEnum.term().utf8ToString()), (Integer) postingsEnum.freq());
+ assertEquals(PostingsEnum.NO_MORE_DOCS, postingsEnum.nextDoc());
+ this.docsEnum.set(postingsEnum);
bits.clear(0);
- DocsAndPositionsEnum docsAndPositionsEnum = termsEnum.docsAndPositions(bits, random().nextBoolean() ? null : this.docsAndPositionsEnum.get());
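+      // Positions now come from the same postings() entry point, selected via FLAG_POSITIONS.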
+ PostingsEnum docsAndPositionsEnum = termsEnum.postings(bits, random().nextBoolean() ? null : this.docsEnum.get(), PostingsEnum.FLAG_POSITIONS);
assertEquals(ft.storeTermVectorOffsets() || ft.storeTermVectorPositions(), docsAndPositionsEnum != null);
if (docsAndPositionsEnum != null) {
- assertEquals(DocsEnum.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc());
+ assertEquals(PostingsEnum.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc());
}
bits.set(0);
- docsAndPositionsEnum = termsEnum.docsAndPositions(random().nextBoolean() ? bits : null, random().nextBoolean() ? null : docsAndPositionsEnum);
+ docsAndPositionsEnum = termsEnum.postings(random().nextBoolean() ? bits : null, random().nextBoolean() ? null : docsAndPositionsEnum, PostingsEnum.FLAG_POSITIONS);
assertEquals(ft.storeTermVectorOffsets() || ft.storeTermVectorPositions(), docsAndPositionsEnum != null);
if (terms.hasPositions() || terms.hasOffsets()) {
assertEquals(0, docsAndPositionsEnum.nextDoc());
@@ -515,9 +514,9 @@ public abstract class BaseTermVectorsFormatTestCase extends BaseIndexFileFormatT
// ok
}
}
- assertEquals(DocsEnum.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc());
+ assertEquals(PostingsEnum.NO_MORE_DOCS, docsAndPositionsEnum.nextDoc());
}
- this.docsAndPositionsEnum.set(docsAndPositionsEnum);
+ this.docsEnum.set(docsAndPositionsEnum);
}
assertNull(termsEnum.next());
for (int i = 0; i < 5; ++i) {
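The pattern applied across these test-framework hunks is mechanical: every `docs()`/`docsAndPositions()` call becomes a single `postings()` call whose flags say how much per-document data to decode. A minimal sketch of the new-style iteration, assuming the three-argument `postings(Bits, PostingsEnum, int)` signature used in this patch; the wrapper class and method names are hypothetical:

```java
import java.io.IOException;

import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.Bits;

final class PostingsMigrationSketch {
  // Before this patch (two types, chosen by which method you called):
  //   DocsEnum docs = termsEnum.docs(liveDocs, null, DocsEnum.FLAG_FREQS);
  //   DocsAndPositionsEnum pos = termsEnum.docsAndPositions(liveDocs, null);
  // After: one type; the flags argument selects what gets decoded.
  static void visitPositions(TermsEnum termsEnum, Bits liveDocs) throws IOException {
    PostingsEnum postings = termsEnum.postings(liveDocs, null, PostingsEnum.FLAG_POSITIONS);
    if (postings == null) {
      return; // like docsAndPositions(), may be null when positions were not indexed
    }
    while (postings.nextDoc() != PostingsEnum.NO_MORE_DOCS) {
      int freq = postings.freq(); // positions imply freqs in Lucene's postings model
      for (int i = 0; i < freq; i++) {
        int position = postings.nextPosition();
      }
    }
  }
}
```

Requesting only what a consumer needs lets the codec skip decoding positions, offsets, and payloads entirely, which is why the asserts above exercise every flag combination.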
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/PerThreadPKLookup.java b/lucene/test-framework/src/java/org/apache/lucene/index/PerThreadPKLookup.java
index 26c8912..f1554ab 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/PerThreadPKLookup.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/PerThreadPKLookup.java
@@ -35,7 +35,7 @@ import org.apache.lucene.util.BytesRef;
public class PerThreadPKLookup {
protected final TermsEnum[] termsEnums;
- protected final DocsEnum[] docsEnums;
+ protected final PostingsEnum[] postingsEnums;
protected final Bits[] liveDocs;
protected final int[] docBases;
protected final int numSegs;
@@ -54,7 +54,7 @@ public class PerThreadPKLookup {
});
termsEnums = new TermsEnum[leaves.size()];
- docsEnums = new DocsEnum[leaves.size()];
+ postingsEnums = new PostingsEnum[leaves.size()];
liveDocs = new Bits[leaves.size()];
docBases = new int[leaves.size()];
int numSegs = 0;
@@ -78,9 +78,9 @@ public class PerThreadPKLookup {
public int lookup(BytesRef id) throws IOException {
     for(int seg=0;seg<numSegs;seg++) {
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
     CORE_DIRECTORIES = new ArrayList<>(FS_DIRECTORIES);
CORE_DIRECTORIES.add("RAMDirectory");
- };
+ }
- /** A {@link FilterCachingPolicy} that randomly caches. */
+ /** A {@link org.apache.lucene.search.FilterCachingPolicy} that randomly caches. */
public static final FilterCachingPolicy MAYBE_CACHE_POLICY = new FilterCachingPolicy() {
@Override
@@ -482,7 +465,7 @@ public abstract class LuceneTestCase extends Assert {
}
};
-
+
// -----------------------------------------------------------------
// Fields initialized in class or instance rules.
// -----------------------------------------------------------------
@@ -1305,10 +1288,6 @@ public abstract class LuceneTestCase extends Assert {
String fsdirClass = TEST_DIRECTORY;
if (fsdirClass.equals("random")) {
fsdirClass = RandomPicks.randomFrom(random(), FS_DIRECTORIES);
- if (fsdirClass.equals("SimpleFSDirectory")) {
- // pick again
- fsdirClass = RandomPicks.randomFrom(random(), FS_DIRECTORIES);
- }
}
   Class<? extends FSDirectory> clazz;
@@ -1347,7 +1326,7 @@ public abstract class LuceneTestCase extends Assert {
if (rarely(random) && !bare) {
directory = new NRTCachingDirectory(directory, random.nextDouble(), random.nextDouble());
}
-
+
if (bare) {
BaseDirectoryWrapper base = new BaseDirectoryWrapper(directory);
closeAfterSuite(new CloseableDirectory(base, suiteFailureMarker));
@@ -1468,7 +1447,7 @@ public abstract class LuceneTestCase extends Assert {
/**
* Return a random Locale from the available locales on the system.
- * @see LUCENE-4020
+ * @see "https://issues.apache.org/jira/browse/LUCENE-4020"
*/
public static Locale randomLocale(Random random) {
Locale locales[] = Locale.getAvailableLocales();
@@ -1477,7 +1456,7 @@ public abstract class LuceneTestCase extends Assert {
/**
* Return a random TimeZone from the available timezones on the system
- * @see LUCENE-4020
+ * @see "https://issues.apache.org/jira/browse/LUCENE-4020"
*/
public static TimeZone randomTimeZone(Random random) {
String tzIds[] = TimeZone.getAvailableIDs();
@@ -1514,10 +1493,6 @@ public abstract class LuceneTestCase extends Assert {
if (clazzName.equals("random")) {
if (rarely(random)) {
clazzName = RandomPicks.randomFrom(random, CORE_DIRECTORIES);
- if (clazzName.equals("SimpleFSDirectory")) {
- // pick again
- clazzName = RandomPicks.randomFrom(random, CORE_DIRECTORIES);
- }
} else {
clazzName = "RAMDirectory";
}
@@ -1937,61 +1912,61 @@ public abstract class LuceneTestCase extends Assert {
public void assertTermsEnumEquals(String info, IndexReader leftReader, TermsEnum leftTermsEnum, TermsEnum rightTermsEnum, boolean deep) throws IOException {
BytesRef term;
Bits randomBits = new RandomBits(leftReader.maxDoc(), random().nextDouble(), random());
- DocsAndPositionsEnum leftPositions = null;
- DocsAndPositionsEnum rightPositions = null;
- DocsEnum leftDocs = null;
- DocsEnum rightDocs = null;
+ PostingsEnum leftPositions = null;
+ PostingsEnum rightPositions = null;
+ PostingsEnum leftDocs = null;
+ PostingsEnum rightDocs = null;
while ((term = leftTermsEnum.next()) != null) {
assertEquals(info, term, rightTermsEnum.next());
assertTermStatsEquals(info, leftTermsEnum, rightTermsEnum);
if (deep) {
- assertDocsAndPositionsEnumEquals(info, leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions),
- rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions));
- assertDocsAndPositionsEnumEquals(info, leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions),
- rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions));
+ assertDocsAndPositionsEnumEquals(info, leftPositions = leftTermsEnum.postings(null, leftPositions, PostingsEnum.FLAG_ALL),
+ rightPositions = rightTermsEnum.postings(null, rightPositions, PostingsEnum.FLAG_ALL));
+ assertDocsAndPositionsEnumEquals(info, leftPositions = leftTermsEnum.postings(randomBits, leftPositions, PostingsEnum.FLAG_ALL),
+ rightPositions = rightTermsEnum.postings(randomBits, rightPositions, PostingsEnum.FLAG_ALL));
assertPositionsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(),
- leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions),
- rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions));
+ leftPositions = leftTermsEnum.postings(null, leftPositions, PostingsEnum.FLAG_ALL),
+ rightPositions = rightTermsEnum.postings(null, rightPositions, PostingsEnum.FLAG_ALL));
assertPositionsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(),
- leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions),
- rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions));
+ leftPositions = leftTermsEnum.postings(randomBits, leftPositions, PostingsEnum.FLAG_ALL),
+ rightPositions = rightTermsEnum.postings(randomBits, rightPositions, PostingsEnum.FLAG_ALL));
// with freqs:
- assertDocsEnumEquals(info, leftDocs = leftTermsEnum.docs(null, leftDocs),
- rightDocs = rightTermsEnum.docs(null, rightDocs),
+ assertDocsEnumEquals(info, leftDocs = leftTermsEnum.postings(null, leftDocs),
+ rightDocs = rightTermsEnum.postings(null, rightDocs),
true);
- assertDocsEnumEquals(info, leftDocs = leftTermsEnum.docs(randomBits, leftDocs),
- rightDocs = rightTermsEnum.docs(randomBits, rightDocs),
+ assertDocsEnumEquals(info, leftDocs = leftTermsEnum.postings(randomBits, leftDocs),
+ rightDocs = rightTermsEnum.postings(randomBits, rightDocs),
true);
// w/o freqs:
- assertDocsEnumEquals(info, leftDocs = leftTermsEnum.docs(null, leftDocs, DocsEnum.FLAG_NONE),
- rightDocs = rightTermsEnum.docs(null, rightDocs, DocsEnum.FLAG_NONE),
+ assertDocsEnumEquals(info, leftDocs = leftTermsEnum.postings(null, leftDocs, PostingsEnum.FLAG_NONE),
+ rightDocs = rightTermsEnum.postings(null, rightDocs, PostingsEnum.FLAG_NONE),
false);
- assertDocsEnumEquals(info, leftDocs = leftTermsEnum.docs(randomBits, leftDocs, DocsEnum.FLAG_NONE),
- rightDocs = rightTermsEnum.docs(randomBits, rightDocs, DocsEnum.FLAG_NONE),
+ assertDocsEnumEquals(info, leftDocs = leftTermsEnum.postings(randomBits, leftDocs, PostingsEnum.FLAG_NONE),
+ rightDocs = rightTermsEnum.postings(randomBits, rightDocs, PostingsEnum.FLAG_NONE),
false);
// with freqs:
assertDocsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(),
- leftDocs = leftTermsEnum.docs(null, leftDocs),
- rightDocs = rightTermsEnum.docs(null, rightDocs),
+ leftDocs = leftTermsEnum.postings(null, leftDocs),
+ rightDocs = rightTermsEnum.postings(null, rightDocs),
true);
assertDocsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(),
- leftDocs = leftTermsEnum.docs(randomBits, leftDocs),
- rightDocs = rightTermsEnum.docs(randomBits, rightDocs),
+ leftDocs = leftTermsEnum.postings(randomBits, leftDocs),
+ rightDocs = rightTermsEnum.postings(randomBits, rightDocs),
true);
// w/o freqs:
assertDocsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(),
- leftDocs = leftTermsEnum.docs(null, leftDocs, DocsEnum.FLAG_NONE),
- rightDocs = rightTermsEnum.docs(null, rightDocs, DocsEnum.FLAG_NONE),
+ leftDocs = leftTermsEnum.postings(null, leftDocs, PostingsEnum.FLAG_NONE),
+ rightDocs = rightTermsEnum.postings(null, rightDocs, PostingsEnum.FLAG_NONE),
false);
assertDocsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(),
- leftDocs = leftTermsEnum.docs(randomBits, leftDocs, DocsEnum.FLAG_NONE),
- rightDocs = rightTermsEnum.docs(randomBits, rightDocs, DocsEnum.FLAG_NONE),
+ leftDocs = leftTermsEnum.postings(randomBits, leftDocs, PostingsEnum.FLAG_NONE),
+ rightDocs = rightTermsEnum.postings(randomBits, rightDocs, PostingsEnum.FLAG_NONE),
false);
}
}
@@ -2002,7 +1977,7 @@ public abstract class LuceneTestCase extends Assert {
/**
* checks docs + freqs + positions + payloads, sequentially
*/
- public void assertDocsAndPositionsEnumEquals(String info, DocsAndPositionsEnum leftDocs, DocsAndPositionsEnum rightDocs) throws IOException {
+ public void assertDocsAndPositionsEnumEquals(String info, PostingsEnum leftDocs, PostingsEnum rightDocs) throws IOException {
if (leftDocs == null || rightDocs == null) {
assertNull(leftDocs);
assertNull(rightDocs);
@@ -2028,7 +2003,7 @@ public abstract class LuceneTestCase extends Assert {
/**
* checks docs + freqs, sequentially
*/
- public void assertDocsEnumEquals(String info, DocsEnum leftDocs, DocsEnum rightDocs, boolean hasFreqs) throws IOException {
+ public void assertDocsEnumEquals(String info, PostingsEnum leftDocs, PostingsEnum rightDocs, boolean hasFreqs) throws IOException {
if (leftDocs == null) {
assertNull(rightDocs);
return;
@@ -2048,7 +2023,7 @@ public abstract class LuceneTestCase extends Assert {
/**
* checks advancing docs
*/
- public void assertDocsSkippingEquals(String info, IndexReader leftReader, int docFreq, DocsEnum leftDocs, DocsEnum rightDocs, boolean hasFreqs) throws IOException {
+ public void assertDocsSkippingEquals(String info, IndexReader leftReader, int docFreq, PostingsEnum leftDocs, PostingsEnum rightDocs, boolean hasFreqs) throws IOException {
if (leftDocs == null) {
assertNull(rightDocs);
return;
@@ -2081,7 +2056,7 @@ public abstract class LuceneTestCase extends Assert {
/**
* checks advancing docs + positions
*/
- public void assertPositionsSkippingEquals(String info, IndexReader leftReader, int docFreq, DocsAndPositionsEnum leftDocs, DocsAndPositionsEnum rightDocs) throws IOException {
+ public void assertPositionsSkippingEquals(String info, IndexReader leftReader, int docFreq, PostingsEnum leftDocs, PostingsEnum rightDocs) throws IOException {
if (leftDocs == null || rightDocs == null) {
assertNull(leftDocs);
assertNull(rightDocs);
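The `assertDocsEnumEquals`/`assertPositionsSkippingEquals` family above now takes the unified type, so one helper can compare postings produced with any flag combination. A stripped-down sketch of the lockstep walk these assertions perform; the class and method names here are hypothetical, not part of the patch:

```java
import java.io.IOException;

import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.search.DocIdSetIterator;

final class PostingsComparisonSketch {
  /** Lockstep walk over two postings lists; returns false at the first divergence. */
  static boolean sameDocsAndFreqs(PostingsEnum left, PostingsEnum right, boolean hasFreqs)
      throws IOException {
    if (left == null || right == null) {
      return left == right; // both must be absent together, as asserted above
    }
    int doc;
    while ((doc = left.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
      if (right.nextDoc() != doc) {
        return false;
      }
      if (hasFreqs && left.freq() != right.freq()) {
        return false;
      }
    }
    return right.nextDoc() == DocIdSetIterator.NO_MORE_DOCS;
  }
}
```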
diff --git a/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java b/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java
index aa1f18a..015209e 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/util/TestUtil.java
@@ -46,6 +46,8 @@ import java.util.regex.PatternSyntaxException;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.DocValuesFormat;
import org.apache.lucene.codecs.PostingsFormat;
@@ -71,8 +73,7 @@ import org.apache.lucene.index.CheckIndex;
import org.apache.lucene.index.ConcurrentMergeScheduler;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FilterLeafReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
@@ -92,8 +93,8 @@ import org.apache.lucene.index.TieredMergePolicy;
import org.apache.lucene.mockfile.FilterFileSystem;
import org.apache.lucene.mockfile.WindowsFS;
import org.apache.lucene.search.FieldDoc;
-import org.apache.lucene.search.FilteredQuery.FilterStrategy;
import org.apache.lucene.search.FilteredQuery;
+import org.apache.lucene.search.FilteredQuery.FilterStrategy;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
@@ -102,8 +103,6 @@ import org.apache.lucene.store.FilterDirectory;
import org.apache.lucene.store.NoLockFactory;
import org.junit.Assert;
-import com.carrotsearch.randomizedtesting.generators.RandomInts;
-import com.carrotsearch.randomizedtesting.generators.RandomPicks;
/**
* General utility methods for Lucene unit tests.
@@ -997,7 +996,7 @@ public final class TestUtil {
// Returns a DocsEnum, but randomly sometimes uses a
// DocsAndFreqsEnum, DocsAndPositionsEnum. Returns null
// if field/term doesn't exist:
- public static DocsEnum docs(Random random, IndexReader r, String field, BytesRef term, Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+ public static PostingsEnum docs(Random random, IndexReader r, String field, BytesRef term, Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
final Terms terms = MultiFields.getTerms(r, field);
if (terms == null) {
return null;
@@ -1011,25 +1010,24 @@ public final class TestUtil {
// Returns a DocsEnum from a positioned TermsEnum, but
// randomly sometimes uses a DocsAndFreqsEnum, DocsAndPositionsEnum.
- public static DocsEnum docs(Random random, TermsEnum termsEnum, Bits liveDocs, DocsEnum reuse, int flags) throws IOException {
+ public static PostingsEnum docs(Random random, TermsEnum termsEnum, Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
if (random.nextBoolean()) {
if (random.nextBoolean()) {
final int posFlags;
switch (random.nextInt(4)) {
- case 0: posFlags = 0; break;
- case 1: posFlags = DocsAndPositionsEnum.FLAG_OFFSETS; break;
- case 2: posFlags = DocsAndPositionsEnum.FLAG_PAYLOADS; break;
- default: posFlags = DocsAndPositionsEnum.FLAG_OFFSETS | DocsAndPositionsEnum.FLAG_PAYLOADS; break;
+ case 0: posFlags = PostingsEnum.FLAG_POSITIONS; break;
+ case 1: posFlags = PostingsEnum.FLAG_OFFSETS; break;
+ case 2: posFlags = PostingsEnum.FLAG_PAYLOADS; break;
+ default: posFlags = PostingsEnum.FLAG_OFFSETS | PostingsEnum.FLAG_PAYLOADS; break;
}
- // TODO: cast to DocsAndPositionsEnum?
- DocsAndPositionsEnum docsAndPositions = termsEnum.docsAndPositions(liveDocs, null, posFlags);
+ PostingsEnum docsAndPositions = termsEnum.postings(liveDocs, null, posFlags);
if (docsAndPositions != null) {
return docsAndPositions;
}
}
- flags |= DocsEnum.FLAG_FREQS;
+ flags |= PostingsEnum.FLAG_FREQS;
}
- return termsEnum.docs(liveDocs, reuse, flags);
+ return termsEnum.postings(liveDocs, reuse, flags);
}
public static CharSequence stringToCharSequence(String string, Random random) {
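With both `docs(...)` overloads retyped, callers get back a `PostingsEnum` that may carry freqs, positions, offsets, or payloads depending on the random flag choice, but can always be driven as a plain doc iterator. A hedged usage sketch, assuming the post-patch `TestUtil.docs` signature shown above; the `countDocs` helper is illustrative only:

```java
import java.io.IOException;
import java.util.Random;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.TestUtil;

final class TestUtilDocsUsageSketch {
  /** Counts documents containing a term, whatever enum flavor the helper randomly picked. */
  static int countDocs(Random random, IndexReader reader, String field, String term)
      throws IOException {
    PostingsEnum docs = TestUtil.docs(random, reader, field, new BytesRef(term),
                                      null, null, PostingsEnum.FLAG_NONE);
    if (docs == null) {
      return 0; // field or term does not exist
    }
    int count = 0;
    while (docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
      count++;
    }
    return count;
  }
}
```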
diff --git a/solr/build.xml b/solr/build.xml
index 8379f71..b97175a 100644
--- a/solr/build.xml
+++ b/solr/build.xml
@@ -378,7 +378,7 @@
-
+
@@ -579,7 +579,7 @@
-
+
diff --git a/solr/common-build.xml b/solr/common-build.xml
index 41dd7f5..7de0b25 100644
--- a/solr/common-build.xml
+++ b/solr/common-build.xml
@@ -41,10 +41,10 @@
-
+
-
+
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java b/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
index a4cdc48..9cb3ac0 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
@@ -385,18 +385,18 @@ public class LukeRequestHandler extends RequestHandlerBase
// Is there a better way to do this? Shouldn't actually be very costly
// to do it this way.
private static StoredDocument getFirstLiveDoc(Terms terms, LeafReader reader) throws IOException {
- DocsEnum docsEnum = null;
+ PostingsEnum postingsEnum = null;
TermsEnum termsEnum = terms.iterator(null);
BytesRef text;
// Deal with the chance that the first bunch of terms are in deleted documents. Is there a better way?
- for (int idx = 0; idx < 1000 && docsEnum == null; ++idx) {
+ for (int idx = 0; idx < 1000 && postingsEnum == null; ++idx) {
text = termsEnum.next();
if (text == null) { // Ran off the end of the terms enum without finding any live docs with that field in them.
return null;
}
- docsEnum = termsEnum.docs(reader.getLiveDocs(), docsEnum, DocsEnum.FLAG_NONE);
- if (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
- return reader.document(docsEnum.docID());
+ postingsEnum = termsEnum.postings(reader.getLiveDocs(), postingsEnum, PostingsEnum.FLAG_NONE);
+ if (postingsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+ return reader.document(postingsEnum.docID());
}
}
return null;
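The `getFirstLiveDoc` change above is representative of the doc-id-only migration: the enum type and flag constant change, but the iteration protocol does not. A self-contained sketch of the same first-live-doc pattern, with hypothetical class and method names; `terms.iterator(null)` follows the pre-5.0 reuse signature this patch still uses:

```java
import java.io.IOException;

import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.Bits;

final class FirstLiveDocSketch {
  /** Returns the first live docid holding any term of this field, or -1 if none is found. */
  static int firstLiveDoc(Terms terms, Bits liveDocs) throws IOException {
    TermsEnum termsEnum = terms.iterator(null);
    PostingsEnum postings = null; // reused across terms, as in the handler above
    while (termsEnum.next() != null) {
      // FLAG_NONE: no freqs or positions needed, only a docid
      postings = termsEnum.postings(liveDocs, postings, PostingsEnum.FLAG_NONE);
      int doc = postings.nextDoc();
      if (doc != DocIdSetIterator.NO_MORE_DOCS) {
        return doc;
      }
    }
    return -1;
  }
}
```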
diff --git a/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java b/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
index 1a60660..89b9db7 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/ExpandComponent.java
@@ -21,21 +21,21 @@ import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.HashMap;
+import java.util.Iterator;
import java.util.List;
import java.util.Map;
-import java.util.Iterator;
-import java.util.Arrays;
import com.carrotsearch.hppc.IntObjectOpenHashMap;
-import com.carrotsearch.hppc.LongOpenHashSet;
+import com.carrotsearch.hppc.IntOpenHashSet;
+import com.carrotsearch.hppc.LongObjectMap;
import com.carrotsearch.hppc.LongObjectOpenHashMap;
+import com.carrotsearch.hppc.LongOpenHashSet;
import com.carrotsearch.hppc.cursors.IntObjectCursor;
import com.carrotsearch.hppc.cursors.LongCursor;
import com.carrotsearch.hppc.cursors.LongObjectCursor;
-import com.carrotsearch.hppc.IntOpenHashSet;
import com.carrotsearch.hppc.cursors.ObjectCursor;
-import com.carrotsearch.hppc.LongObjectMap;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.FieldInfo;
@@ -73,11 +73,11 @@ import org.apache.solr.core.PluginInfo;
import org.apache.solr.core.SolrCore;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.schema.FieldType;
+import org.apache.solr.schema.StrField;
+import org.apache.solr.schema.TrieDoubleField;
import org.apache.solr.schema.TrieFloatField;
import org.apache.solr.schema.TrieIntField;
import org.apache.solr.schema.TrieLongField;
-import org.apache.solr.schema.TrieDoubleField;
-import org.apache.solr.schema.StrField;
import org.apache.solr.search.CollapsingQParserPlugin;
import org.apache.solr.search.DocIterator;
import org.apache.solr.search.DocList;
diff --git a/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java b/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
index a3520ea..95b4f02 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/QueryComponent.java
@@ -17,6 +17,21 @@
package org.apache.solr.handler.component;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.net.URL;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
@@ -94,20 +109,6 @@ import org.apache.solr.util.SolrPluginUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.io.StringWriter;
-import java.net.URL;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
/**
* TODO!
@@ -1349,6 +1350,36 @@ public class QueryComponent extends SearchComponent
}
@Override
+ public int nextPosition() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int startPosition() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int endPosition() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int startOffset() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int endOffset() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public BytesRef getPayload() throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
public int nextDoc() throws IOException {
throw new UnsupportedOperationException();
}
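Because the merged `PostingsEnum` exposes positional accessors on every instance, a doc-id-only implementation such as this `QueryComponent` stub must now override them to throw, rather than simply implementing a narrower interface. A sketch of that shape, assuming this patch's intermediate API in which `startPosition()`/`endPosition()` still exist alongside `nextPosition()`; the class name is hypothetical:

```java
import java.io.IOException;

import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.util.BytesRef;

// Doc-id-only enum: iteration methods are left abstract for the subclass,
// while the positional methods exist on the merged type but are unsupported here.
abstract class DocsOnlyPostingsEnum extends PostingsEnum {
  @Override
  public int nextPosition() throws IOException {
    throw new UnsupportedOperationException();
  }

  @Override
  public int startPosition() throws IOException {
    throw new UnsupportedOperationException();
  }

  @Override
  public int endPosition() throws IOException {
    throw new UnsupportedOperationException();
  }

  @Override
  public int startOffset() throws IOException {
    throw new UnsupportedOperationException();
  }

  @Override
  public int endOffset() throws IOException {
    throw new UnsupportedOperationException();
  }

  @Override
  public BytesRef getPayload() throws IOException {
    throw new UnsupportedOperationException();
  }
}
```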
diff --git a/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java b/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
index 3c5b24f..9bbb5a4 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
@@ -24,7 +24,7 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
@@ -563,7 +563,7 @@ public class QueryElevationComponent extends SearchComponent implements SolrCore
     List<LeafReaderContext> leaves = indexSearcher.getTopReaderContext().leaves();
TermsEnum termsEnum = null;
- DocsEnum docsEnum = null;
+ PostingsEnum postingsEnum = null;
for(LeafReaderContext leaf : leaves) {
LeafReader reader = leaf.reader();
int docBase = leaf.docBase;
@@ -574,9 +574,9 @@ public class QueryElevationComponent extends SearchComponent implements SolrCore
while(it.hasNext()) {
BytesRef ref = it.next();
if(termsEnum.seekExact(ref)) {
- docsEnum = termsEnum.docs(liveDocs, docsEnum);
- int doc = docsEnum.nextDoc();
- if(doc != DocsEnum.NO_MORE_DOCS) {
+ postingsEnum = termsEnum.postings(liveDocs, postingsEnum);
+ int doc = postingsEnum.nextDoc();
+ if(doc != PostingsEnum.NO_MORE_DOCS) {
//Found the document.
int p = boosted.get(ref);
boostDocs.put(doc+docBase, p);
@@ -637,7 +637,7 @@ public class QueryElevationComponent extends SearchComponent implements SolrCore
private int bottomVal;
private int topVal;
private TermsEnum termsEnum;
- private DocsEnum docsEnum;
+ private PostingsEnum postingsEnum;
     Set<String> seen = new HashSet<>(elevations.ids.size());
@Override
@@ -692,13 +692,13 @@ public class QueryElevationComponent extends SearchComponent implements SolrCore
for (String id : elevations.ids) {
term.copyChars(id);
if (seen.contains(id) == false && termsEnum.seekExact(term.get())) {
- docsEnum = termsEnum.docs(liveDocs, docsEnum, DocsEnum.FLAG_NONE);
- if (docsEnum != null) {
- int docId = docsEnum.nextDoc();
+ postingsEnum = termsEnum.postings(liveDocs, postingsEnum, PostingsEnum.FLAG_NONE);
+ if (postingsEnum != null) {
+ int docId = postingsEnum.nextDoc();
if (docId == DocIdSetIterator.NO_MORE_DOCS ) continue; // must have been deleted
termValues[ordSet.put(docId)] = term.toBytesRef();
seen.add(id);
- assert docsEnum.nextDoc() == DocIdSetIterator.NO_MORE_DOCS;
+ assert postingsEnum.nextDoc() == DocIdSetIterator.NO_MORE_DOCS;
}
}
}
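The elevation lookup above is a common primary-key idiom: `seekExact` positions the terms enum, then a `FLAG_NONE` postings enum yields the (at most one) matching docid. A compact sketch under the same assumptions, with hypothetical class and method names:

```java
import java.io.IOException;

import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;

final class PrimaryKeyLookupSketch {
  /** Resolves a unique-key term to its live docid, or -1 when absent or deleted. */
  static int docIdFor(TermsEnum termsEnum, Bits liveDocs, BytesRef id) throws IOException {
    if (termsEnum.seekExact(id) == false) {
      return -1; // term not in this segment
    }
    PostingsEnum postings = termsEnum.postings(liveDocs, null, PostingsEnum.FLAG_NONE);
    int doc = postings.nextDoc();
    return doc == DocIdSetIterator.NO_MORE_DOCS ? -1 : doc;
  }
}
```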
diff --git a/solr/core/src/java/org/apache/solr/handler/component/TermVectorComponent.java b/solr/core/src/java/org/apache/solr/handler/component/TermVectorComponent.java
index e807daa..2df05d2 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/TermVectorComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/TermVectorComponent.java
@@ -1,19 +1,17 @@
package org.apache.solr.handler.component;
import java.io.IOException;
-import java.util.Arrays;
import java.util.ArrayList;
-import java.util.Collection;
+import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
-import java.util.LinkedHashSet;
-import java.util.Set;
import java.util.Iterator;
+import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
-import java.util.Map.Entry;
+import java.util.Set;
-import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
@@ -24,17 +22,15 @@ import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.params.CommonParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.params.TermVectorParams;
import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.StrUtils;
import org.apache.solr.core.SolrCore;
import org.apache.solr.schema.IndexSchema;
import org.apache.solr.schema.SchemaField;
-import org.apache.solr.search.ReturnFields;
import org.apache.solr.search.DocList;
import org.apache.solr.search.DocListAndSet;
+import org.apache.solr.search.ReturnFields;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.search.SolrReturnFields;
import org.apache.solr.util.SolrPluginUtils;
@@ -335,7 +331,7 @@ public class TermVectorComponent extends SearchComponent implements SolrCoreAwar
docNL.add(field, fieldNL);
BytesRef text;
- DocsAndPositionsEnum dpEnum = null;
+ PostingsEnum dpEnum = null;
while((text = termsEnum.next()) != null) {
String term = text.utf8ToString();
NamedList