Index: lucene/CHANGES.txt
===================================================================
--- lucene/CHANGES.txt	(revision 931597)
+++ lucene/CHANGES.txt	(working copy)
@@ -178,8 +178,14 @@
   DocsEnum, DocsAndPositionsEnum).  One big difference is that field
   and terms are now enumerated separately: a TermsEnum provides a
   BytesRef (wraps a byte[]) per term within a single field, not a
-  Term.
+  Term.  Another is that when asking for a Docs/AndPositionsEnum, you
+  now specify the skipDocs explicitly (typically this will be the
+  deleted docs, but in general you can provide any Bits).
 
+* LUCENE-1458, LUCENE-2111: IndexReader now directly exposes its
+  deleted docs (getDeletedDocs), providing a new Bits interface to
+  directly query by doc ID.
+
 Bug fixes
 
 * LUCENE-2119: Don't throw NegativeArraySizeException if you pass
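
Note (illustration only, not part of the patch): the CHANGES entry above
describes the flex enumeration chain. Below is a minimal sketch of a caller
walking one field's terms and docs with the new API. The helper name
dumpField is made up, and MultiFields.getFields plus BytesRef.utf8ToString
are assumed available on the flex branch; everything else is named in this
patch:

    import java.io.IOException;
    import org.apache.lucene.index.*;
    import org.apache.lucene.util.Bits;
    import org.apache.lucene.util.BytesRef;

    static void dumpField(IndexReader reader, String field) throws IOException {
      Fields fields = MultiFields.getFields(reader);      // may be null (no postings at all)
      if (fields == null) return;
      Terms terms = fields.terms(field);                  // may be null (field absent)
      if (terms == null) return;
      TermsEnum termsEnum = terms.iterator();
      Bits skipDocs = MultiFields.getDeletedDocs(reader); // typically deletions; any Bits works
      BytesRef term;
      DocsEnum docsEnum = null;
      while ((term = termsEnum.next()) != null) {         // a BytesRef per term, not a Term
        docsEnum = termsEnum.docs(skipDocs, docsEnum);    // skipDocs is now passed explicitly
        if (docsEnum == null) continue;
        int doc;
        while ((doc = docsEnum.nextDoc()) != DocsEnum.NO_MORE_DOCS) {
          System.out.println(field + ":" + term.utf8ToString() + " -> doc " + doc);
        }
      }
    }
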
Index: lucene/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java	(revision 931509)
+++ lucene/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java	(working copy)
@@ -66,9 +66,7 @@
     Query q = new TermQuery(new Term("body","body"));
 
     // test id, bounded on both ends
-    FieldCacheRangeFilter fcrf;
-    result = search.search(q,fcrf = FieldCacheRangeFilter.newStringRange("id",minIP,maxIP,T,T), numDocs).scoreDocs;
-    assertTrue(fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q, FieldCacheRangeFilter.newStringRange("id",minIP,maxIP,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
 
     result = search.search(q,FieldCacheRangeFilter.newStringRange("id",minIP,maxIP,T,F), numDocs).scoreDocs;
@@ -213,9 +211,7 @@
     Query q = new TermQuery(new Term("body","body"));
 
     // test id, bounded on both ends
-    FieldCacheRangeFilter fcrf;
-    result = search.search(q,fcrf=FieldCacheRangeFilter.newShortRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
-    assertTrue(fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
 
     result = search.search(q,FieldCacheRangeFilter.newShortRange("id",minIdO,maxIdO,T,F), numDocs).scoreDocs;
@@ -305,9 +301,7 @@
 
     // test id, bounded on both ends
 
-    FieldCacheRangeFilter fcrf;
-    result = search.search(q,fcrf=FieldCacheRangeFilter.newIntRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
-    assertTrue(fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
 
     result = search.search(q,FieldCacheRangeFilter.newIntRange("id",minIdO,maxIdO,T,F), numDocs).scoreDocs;
@@ -397,9 +391,7 @@
 
     // test id, bounded on both ends
 
-    FieldCacheRangeFilter fcrf;
-    result = search.search(q,fcrf=FieldCacheRangeFilter.newLongRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
-    assertTrue(fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
 
     result = search.search(q,FieldCacheRangeFilter.newLongRange("id",minIdO,maxIdO,T,F), numDocs).scoreDocs;
@@ -529,7 +521,7 @@
     assertEquals("infinity special case", 0, result.length);
   }
 
-  // test using a sparse index (with deleted docs). The DocIdSet should be not cacheable, as it uses TermDocs if the range contains 0
+  // test using a sparse index (with deleted docs).
   public void testSparseIndex() throws IOException {
     RAMDirectory dir = new RAMDirectory();
     IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new SimpleAnalyzer(TEST_VERSION_CURRENT)));
@@ -550,27 +542,21 @@
     assertTrue(reader.hasDeletions());
 
     ScoreDoc[] result;
-    FieldCacheRangeFilter fcrf;
     Query q = new TermQuery(new Term("body","body"));
 
-    result = search.search(q,fcrf=FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;
-    assertFalse("DocIdSet must be not cacheable", fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;
     assertEquals("find all", 40, result.length);
 
-    result = search.search(q,fcrf=FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) 0),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;
-    assertFalse("DocIdSet must be not cacheable", fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) 0),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;
     assertEquals("find all", 20, result.length);
 
-    result = search.search(q,fcrf=FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) 0),T,T), 100).scoreDocs;
-    assertFalse("DocIdSet must be not cacheable", fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) 0),T,T), 100).scoreDocs;
     assertEquals("find all", 20, result.length);
 
-    result = search.search(q,fcrf=FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) 10),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;
-    assertTrue("DocIdSet must be cacheable", fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) 10),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;
     assertEquals("find all", 11, result.length);
 
-    result = search.search(q,fcrf=FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) -10),T,T), 100).scoreDocs;
-    assertTrue("DocIdSet must be cacheable", fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) -10),T,T), 100).scoreDocs;
     assertEquals("find all", 11, result.length);
   }
Index: lucene/src/test/org/apache/lucene/index/TestIndexReader.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestIndexReader.java	(revision 931509)
+++ lucene/src/test/org/apache/lucene/index/TestIndexReader.java	(working copy)
@@ -1570,7 +1570,6 @@
   // LUCENE-1579: Ensure that on a cloned reader, segments
   // reuse the doc values arrays in FieldCache
   public void testFieldCacheReuseAfterClone() throws Exception {
-    //Codec.DEBUG = true;
     Directory dir = new MockRAMDirectory();
     IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
     Document doc = new Document();
Index: lucene/src/java/org/apache/lucene/search/FieldCacheRangeFilter.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/FieldCacheRangeFilter.java	(revision 931509)
+++ lucene/src/java/org/apache/lucene/search/FieldCacheRangeFilter.java	(working copy)
@@ -518,68 +518,94 @@
     abstract boolean matchDoc(int doc) throws ArrayIndexOutOfBoundsException;
 
     /**
-     * this DocIdSet is cacheable, if it can ignore deletions
+     * this DocIdSet is always cacheable (does not go back
+     * to the reader for iteration)
      */
     @Override
    public boolean isCacheable() {
-      return canIgnoreDeletedDocs || !reader.hasDeletions();
+      return true;
    }
 
     @Override
     public DocIdSetIterator iterator() throws IOException {
-      // Synchronization needed because deleted docs BitVector
-      // can change after call to hasDeletions until TermDocs creation.
-      // We only use an iterator with termDocs, when this was requested (e.g.
-      // range contains 0) and the index has deletions
-      final Bits skipDocs;
-      synchronized (reader) {
-        if (isCacheable()) {
-          skipDocs = null;
-        } else {
-          skipDocs = MultiFields.getDeletedDocs(reader);
-        }
-      }
-      final int maxDoc = reader.maxDoc();
+      final Bits skipDocs = canIgnoreDeletedDocs ? null : MultiFields.getDeletedDocs(reader);
 
-      // a DocIdSetIterator generating docIds by
-      // incrementing a variable & checking skipDocs
-      return new DocIdSetIterator() {
-        private int doc = -1;
-        @Override
-        public int docID() {
-          return doc;
-        }
+      if (skipDocs == null) {
+        // Specialization optimization: disregards deletions
+        return new DocIdSetIterator() {
+          private int doc = -1;
+          @Override
+          public int docID() {
+            return doc;
+          }
 
-        @Override
-        public int nextDoc() {
-          try {
-            do {
-              doc++;
-            } while ((skipDocs != null && doc < maxDoc && skipDocs.get(doc))
-                     || !matchDoc(doc));
-            return doc;
-          } catch (ArrayIndexOutOfBoundsException e) {
-            return doc = NO_MORE_DOCS;
-          }
-        }
+          @Override
+          public int nextDoc() {
+            try {
+              do {
+                doc++;
+              } while (!matchDoc(doc));
+              return doc;
+            } catch (ArrayIndexOutOfBoundsException e) {
+              return doc = NO_MORE_DOCS;
+            }
+          }
 
-        @Override
-        public int advance(int target) {
-          try {
-            doc = target;
-            while (!matchDoc(doc)) {
-              doc++;
-            }
-            return doc;
-          } catch (ArrayIndexOutOfBoundsException e) {
-            return doc = NO_MORE_DOCS;
-          }
-        }
-      };
+          @Override
+          public int advance(int target) {
+            try {
+              doc = target;
+              while (!matchDoc(doc)) {
+                doc++;
+              }
+              return doc;
+            } catch (ArrayIndexOutOfBoundsException e) {
+              return doc = NO_MORE_DOCS;
+            }
+          }
+        };
+      } else {
+        // Must consult deletions
+
+        final int maxDoc = reader.maxDoc();
+
+        // a DocIdSetIterator generating docIds by
+        // incrementing a variable & checking skipDocs
+        return new DocIdSetIterator() {
+          private int doc = -1;
+          @Override
+          public int docID() {
+            return doc;
+          }
+
+          @Override
+          public int nextDoc() {
+            try {
+              do {
+                doc++;
+              } while ((doc < maxDoc && skipDocs.get(doc)) || !matchDoc(doc));
+              return doc;
+            } catch (ArrayIndexOutOfBoundsException e) {
+              return doc = NO_MORE_DOCS;
+            }
+          }
+
+          @Override
+          public int advance(int target) {
+            try {
+              doc = target;
+              while ((doc < maxDoc && skipDocs.get(doc)) || !matchDoc(doc)) {
+                doc++;
+              }
+              return doc;
+            } catch (ArrayIndexOutOfBoundsException e) {
+              return doc = NO_MORE_DOCS;
+            }
+          }
+        };
+      }
     }
   }
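
Note (illustration only, not part of the patch): with isCacheable() now
unconditionally true, the filter's DocIdSet can be cached per reader without
the old deleted-docs caveat. A sketch, assuming the existing
CachingWrapperFilter and an already-open searcher and query:

    Filter byId = new CachingWrapperFilter(
        FieldCacheRangeFilter.newIntRange("id", 10, 20, true, true));
    TopDocs hits = searcher.search(query, byId, 100);

This is also why the isCacheable() assertions disappear from
TestFieldCacheRangeFilter above.
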
Index: lucene/src/java/org/apache/lucene/search/TermScorer.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/TermScorer.java	(revision 931509)
+++ lucene/src/java/org/apache/lucene/search/TermScorer.java	(working copy)
@@ -160,7 +160,7 @@
     // not found in readahead cache, seek underlying stream
     int newDoc = docsEnum.advance(target);
     //System.out.println("ts.advance docsEnum=" + docsEnum);
-    if (newDoc != DocsEnum.NO_MORE_DOCS) {
+    if (newDoc != NO_MORE_DOCS) {
       doc = newDoc;
       freq = docsEnum.freq();
     } else {
Index: lucene/src/java/org/apache/lucene/index/CheckIndex.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/CheckIndex.java	(revision 931509)
+++ lucene/src/java/org/apache/lucene/index/CheckIndex.java	(working copy)
@@ -282,7 +282,7 @@
     return checkIndex(null);
   }
 
-  protected Status checkIndex(List<String> onlySegments) throws IOException {
+  public Status checkIndex(List<String> onlySegments) throws IOException {
     return checkIndex(onlySegments, CodecProvider.getDefault());
   }
 
@@ -298,7 +298,7 @@
    *  <p><b>WARNING</b>: make sure
    *  you only call this when the index is not opened by any
    *  writer. */
-  protected Status checkIndex(List<String> onlySegments, CodecProvider codecs) throws IOException {
+  public Status checkIndex(List<String> onlySegments, CodecProvider codecs) throws IOException {
     NumberFormat nf = NumberFormat.getInstance();
     SegmentInfos sis = new SegmentInfos();
     Status result = new Status();
Index: lucene/src/java/org/apache/lucene/index/SegmentMerger.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/SegmentMerger.java	(revision 931509)
+++ lucene/src/java/org/apache/lucene/index/SegmentMerger.java	(working copy)
@@ -572,11 +572,14 @@
       docBase = new ReaderUtil.Gather(readers.get(i)) {
           @Override
           protected void add(int base, IndexReader r) throws IOException {
-            subReaders.add(r);
-            fields.add(r.fields());
-            slices.add(new ReaderUtil.Slice(base, r.maxDoc(), fields.size()-1));
-            bits.add(r.getDeletedDocs());
-            bitsStarts.add(base);
+            final Fields f = r.fields();
+            if (f != null) {
+              subReaders.add(r);
+              fields.add(f);
+              slices.add(new ReaderUtil.Slice(base, r.maxDoc(), fields.size()-1));
+              bits.add(r.getDeletedDocs());
+              bitsStarts.add(base);
+            }
           }
         }.run(docBase);
     }
Index: lucene/src/java/org/apache/lucene/index/DocumentsWriter.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/DocumentsWriter.java	(revision 931509)
+++ lucene/src/java/org/apache/lucene/index/DocumentsWriter.java	(working copy)
@@ -1050,58 +1050,60 @@
     // Delete by term
     if (deletesFlushed.terms.size() > 0) {
-      try {
-        Fields fields = reader.fields();
-        TermsEnum termsEnum = null;
+      Fields fields = reader.fields();
+      if (fields == null) {
+        // This reader has no postings
+        return false;
+      }
+
+      TermsEnum termsEnum = null;
 
-        String currentField = null;
-        BytesRef termRef = new BytesRef();
-        DocsEnum docs = null;
+      String currentField = null;
+      BytesRef termRef = new BytesRef();
+      DocsEnum docs = null;
 
-        for (Entry<Term,BufferedDeletes.Num> entry: deletesFlushed.terms.entrySet()) {
-          Term term = entry.getKey();
-          // Since we visit terms sorted, we gain performance
-          // by re-using the same TermsEnum and seeking only
-          // forwards
-          if (term.field() != currentField) {
-            assert currentField == null || currentField.compareTo(term.field()) < 0;
-            currentField = term.field();
-            Terms terms = fields.terms(currentField);
-            if (terms != null) {
-              termsEnum = terms.iterator();
-            } else {
-              termsEnum = null;
-            }
+      for (Entry<Term,BufferedDeletes.Num> entry: deletesFlushed.terms.entrySet()) {
+        Term term = entry.getKey();
+        // Since we visit terms sorted, we gain performance
+        // by re-using the same TermsEnum and seeking only
+        // forwards
+        if (term.field() != currentField) {
+          assert currentField == null || currentField.compareTo(term.field()) < 0;
+          currentField = term.field();
+          Terms terms = fields.terms(currentField);
+          if (terms != null) {
+            termsEnum = terms.iterator();
+          } else {
+            termsEnum = null;
           }
+        }
 
-          if (termsEnum == null) {
-            continue;
-          }
-          assert checkDeleteTerm(term);
+        if (termsEnum == null) {
+          continue;
+        }
+        assert checkDeleteTerm(term);
 
-          termRef.copy(term.text());
+        termRef.copy(term.text());
 
-          if (termsEnum.seek(termRef, false) == TermsEnum.SeekStatus.FOUND) {
-            DocsEnum docsEnum = termsEnum.docs(reader.getDeletedDocs(), docs);
+        if (termsEnum.seek(termRef, false) == TermsEnum.SeekStatus.FOUND) {
+          DocsEnum docsEnum = termsEnum.docs(reader.getDeletedDocs(), docs);
 
-            if (docsEnum != null) {
-              docs = docsEnum;
-              int limit = entry.getValue().getNum();
-              while (true) {
-                final int docID = docs.nextDoc();
-                if (docID == DocsEnum.NO_MORE_DOCS || docIDStart+docID >= limit) {
-                  break;
-                }
-                reader.deleteDocument(docID);
-                any = true;
+          if (docsEnum != null) {
+            docs = docsEnum;
+            int limit = entry.getValue().getNum();
+            while (true) {
+              final int docID = docs.nextDoc();
+              if (docID == DocsEnum.NO_MORE_DOCS || docIDStart+docID >= limit) {
+                break;
              }
+              reader.deleteDocument(docID);
+              any = true;
             }
           }
         }
-      } finally {
-        //docs.close();
       }
     }
+
     // Delete by docID
     for (Integer docIdInt : deletesFlushed.docIDs) {
       int docID = docIdInt.intValue();
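
Note (illustration only, not part of the patch): the DocumentsWriter loop
above leans on the sorted seek-then-iterate pattern. A condensed sketch,
with termsEnum and reader assumed in scope and "apple" a stand-in term:

    BytesRef termRef = new BytesRef();
    termRef.copy("apple");
    if (termsEnum.seek(termRef, false) == TermsEnum.SeekStatus.FOUND) {   // false: skip the terms cache
      DocsEnum docsEnum = termsEnum.docs(reader.getDeletedDocs(), null);  // already-deleted docs are skipped
      int doc;
      while (docsEnum != null && (doc = docsEnum.nextDoc()) != DocsEnum.NO_MORE_DOCS) {
        // doc carries termRef and is live; e.g. reader.deleteDocument(doc)
      }
    }
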
Index: lucene/src/java/org/apache/lucene/index/SegmentWriteState.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/SegmentWriteState.java	(revision 931509)
+++ lucene/src/java/org/apache/lucene/index/SegmentWriteState.java	(working copy)
@@ -26,9 +26,6 @@
 import org.apache.lucene.index.codecs.CodecProvider;
 
 /**
- * This class is not meant for public usage; it's only
- * public in order to expose access across packages.  It's
- * used internally when updating the index.
  * @lucene.experimental
  */
 public class SegmentWriteState {
Index: lucene/src/java/org/apache/lucene/index/codecs/sep/SingleIntIndexInput.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/codecs/sep/SingleIntIndexInput.java	(revision 931509)
+++ lucene/src/java/org/apache/lucene/index/codecs/sep/SingleIntIndexInput.java	(working copy)
@@ -66,17 +66,13 @@
   class Index extends IntIndexInput.Index {
     private long fp;
 
-    // nocmmit: only for asserts
-    boolean first = true;
 
     @Override
     public void read(IndexInput indexIn, boolean absolute) throws IOException {
       if (absolute) {
         fp = indexIn.readVLong();
-        first = false;
       } else {
-        assert !first;
         fp += indexIn.readVLong();
       }
     }
@@ -84,7 +80,6 @@
     @Override
     public void set(IntIndexInput.Index other) {
       fp = ((Index) other).fp;
-      first = false;
     }
 
     @Override
@@ -100,7 +95,6 @@
     @Override
     public Object clone() {
       Index other = new Index();
-      other.first = first;
       other.fp = fp;
       return other;
     }
Index: lucene/src/java/org/apache/lucene/index/MultiFields.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/MultiFields.java	(revision 931509)
+++ lucene/src/java/org/apache/lucene/index/MultiFields.java	(working copy)
@@ -77,8 +77,11 @@
       new ReaderUtil.Gather(r) {
         @Override
         protected void add(int base, IndexReader r) throws IOException {
-          fields.add(r.fields());
-          slices.add(new ReaderUtil.Slice(base, r.maxDoc(), fields.size()-1));
+          final Fields f = r.fields();
+          if (f != null) {
+            fields.add(f);
+            slices.add(new ReaderUtil.Slice(base, r.maxDoc(), fields.size()-1));
+          }
         }
       }.run();
 
Index: lucene/src/java/org/apache/lucene/util/UnicodeUtil.java
===================================================================
--- lucene/src/java/org/apache/lucene/util/UnicodeUtil.java	(revision 931509)
+++ lucene/src/java/org/apache/lucene/util/UnicodeUtil.java	(working copy)
@@ -79,12 +79,6 @@
     public int[] offsets = new int[10];
     public int length;
 
-    /*
-    public String toString() {
-      return new String(result, 0, length);
-    }
-    */
-
     public void setLength(int newLength) {
       if (result.length < newLength)
         result = ArrayUtil.grow(result, newLength);
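
Note (illustration only, not part of the patch): the SegmentMerger and
MultiFields changes in this patch adopt the same defensive pattern:
r.fields() may now return null for a sub-reader that has no postings, and
such readers are simply skipped. A sketch of the pattern, with topReader
assumed in scope:

    final List<Fields> fields = new ArrayList<Fields>();
    new ReaderUtil.Gather(topReader) {
      @Override
      protected void add(int base, IndexReader r) throws IOException {
        final Fields f = r.fields();   // null when this segment has no postings
        if (f != null) {
          fields.add(f);               // only then register the sub-reader
        }
      }
    }.run();
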
Index: lucene/backwards/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java
===================================================================
--- lucene/backwards/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java	(revision 931509)
+++ lucene/backwards/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java	(working copy)
@@ -66,9 +66,7 @@
     Query q = new TermQuery(new Term("body","body"));
 
     // test id, bounded on both ends
-    FieldCacheRangeFilter fcrf;
-    result = search.search(q,fcrf = FieldCacheRangeFilter.newStringRange("id",minIP,maxIP,T,T), numDocs).scoreDocs;
-    assertTrue(fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q, FieldCacheRangeFilter.newStringRange("id",minIP,maxIP,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
 
     result = search.search(q,FieldCacheRangeFilter.newStringRange("id",minIP,maxIP,T,F), numDocs).scoreDocs;
@@ -213,9 +211,7 @@
     Query q = new TermQuery(new Term("body","body"));
 
     // test id, bounded on both ends
-    FieldCacheRangeFilter fcrf;
-    result = search.search(q,fcrf=FieldCacheRangeFilter.newShortRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
-    assertTrue(fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newShortRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
 
     result = search.search(q,FieldCacheRangeFilter.newShortRange("id",minIdO,maxIdO,T,F), numDocs).scoreDocs;
@@ -305,9 +301,7 @@
 
     // test id, bounded on both ends
 
-    FieldCacheRangeFilter fcrf;
-    result = search.search(q,fcrf=FieldCacheRangeFilter.newIntRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
-    assertTrue(fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newIntRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
 
     result = search.search(q,FieldCacheRangeFilter.newIntRange("id",minIdO,maxIdO,T,F), numDocs).scoreDocs;
@@ -397,9 +391,7 @@
 
     // test id, bounded on both ends
 
-    FieldCacheRangeFilter fcrf;
-    result = search.search(q,fcrf=FieldCacheRangeFilter.newLongRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
-    assertTrue(fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newLongRange("id",minIdO,maxIdO,T,T), numDocs).scoreDocs;
     assertEquals("find all", numDocs, result.length);
 
     result = search.search(q,FieldCacheRangeFilter.newLongRange("id",minIdO,maxIdO,T,F), numDocs).scoreDocs;
@@ -529,7 +521,7 @@
     assertEquals("infinity special case", 0, result.length);
   }
 
-  // test using a sparse index (with deleted docs). The DocIdSet should be not cacheable, as it uses TermDocs if the range contains 0
+  // test using a sparse index (with deleted docs).
   public void testSparseIndex() throws IOException {
     RAMDirectory dir = new RAMDirectory();
     IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(), T, IndexWriter.MaxFieldLength.LIMITED);
@@ -550,27 +542,21 @@
     assertTrue(reader.hasDeletions());
 
     ScoreDoc[] result;
-    FieldCacheRangeFilter fcrf;
     Query q = new TermQuery(new Term("body","body"));
 
-    result = search.search(q,fcrf=FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;
-    assertFalse("DocIdSet must be not cacheable", fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;
     assertEquals("find all", 40, result.length);
 
-    result = search.search(q,fcrf=FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) 0),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;
-    assertFalse("DocIdSet must be not cacheable", fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) 0),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;
     assertEquals("find all", 20, result.length);
 
-    result = search.search(q,fcrf=FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) 0),T,T), 100).scoreDocs;
-    assertFalse("DocIdSet must be not cacheable", fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) 0),T,T), 100).scoreDocs;
     assertEquals("find all", 20, result.length);
 
-    result = search.search(q,fcrf=FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) 10),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;
-    assertTrue("DocIdSet must be cacheable", fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) 10),Byte.valueOf((byte) 20),T,T), 100).scoreDocs;
     assertEquals("find all", 11, result.length);
 
-    result = search.search(q,fcrf=FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) -10),T,T), 100).scoreDocs;
-    assertTrue("DocIdSet must be cacheable", fcrf.getDocIdSet(reader.getSequentialSubReaders()[0]).isCacheable());
+    result = search.search(q,FieldCacheRangeFilter.newByteRange("id",Byte.valueOf((byte) -20),Byte.valueOf((byte) -10),T,T), 100).scoreDocs;
    assertEquals("find all", 11, result.length);
   }