Index: lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java	(revision 963689)
+++ lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java	(working copy)
@@ -20,12 +20,15 @@
 import java.io.BufferedReader;
 import java.io.InputStream;
 import java.io.InputStreamReader;
+import java.util.Random;
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
@@ -55,6 +58,13 @@
 public class TestFuzzyQuery2 extends LuceneTestCase {
   /** epsilon for score comparisons */
   static final float epsilon = 0.00001f;
+  private Random random;
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    random = newRandom();
+  }
 
   public void testFromTestData() throws Exception {
     // TODO: randomize!
@@ -78,8 +88,8 @@
     int terms = (int) Math.pow(2, bits);
 
     RAMDirectory dir = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new MockAnalyzer(MockTokenizer.KEYWORD, false),
-        IndexWriter.MaxFieldLength.UNLIMITED);
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir,
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.KEYWORD, false)));
 
     Document doc = new Document();
     Field field = new Field("field", "", Field.Store.NO, Field.Index.ANALYZED);
@@ -88,12 +98,11 @@
     for (int i = 0; i < terms; i++) {
       field.setValue(mapInt(codePointTable, i));
       writer.addDocument(doc);
-    }
+    }
 
-    writer.optimize();
-    writer.close();
-
-    IndexSearcher searcher = new IndexSearcher(dir);
+    IndexReader r = writer.getReader();
+    IndexSearcher searcher = new IndexSearcher(r);
+    writer.close();
     String line;
     while ((line = reader.readLine()) != null) {
       String params[] = line.split(",");
@@ -113,6 +122,7 @@
       }
     }
     searcher.close();
+    r.close();
    dir.close();
  }
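The conversion above is the shape every file in this patch follows: build the index with RandomIndexWriter, obtain an IndexReader via getReader() before closing the writer, search through that reader, and close searcher, reader, and directory explicitly. The writer.optimize() calls disappear along the way, since the point of RandomIndexWriter is to exercise varied, randomized segment structures. A minimal sketch of that lifecycle, assuming the LuceneTestCase.newRandom() and RandomIndexWriter helpers of this branch (names are illustrative):

    Random random = newRandom();                 // reproducible per-test seed
    RAMDirectory dir = new RAMDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random, dir,
        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
    Document doc = new Document();
    doc.add(new Field("field", "some text", Field.Store.NO, Field.Index.ANALYZED));
    writer.addDocument(doc);
    IndexReader reader = writer.getReader();     // must be pulled before writer.close()
    writer.close();
    IndexSearcher searcher = new IndexSearcher(reader);
    // ... run queries against searcher ...
    searcher.close();
    reader.close();
    dir.close();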
Index: lucene/src/test/org/apache/lucene/search/TestNot.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestNot.java	(revision 963689)
+++ lucene/src/test/org/apache/lucene/search/TestNot.java	(working copy)
@@ -19,8 +19,9 @@
 
 import org.apache.lucene.util.LuceneTestCase;
 
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.queryParser.QueryParser;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.analysis.MockAnalyzer;
@@ -39,21 +40,24 @@
 
   public void testNot() throws Exception {
     RAMDirectory store = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(store, new IndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer()));
+    RandomIndexWriter writer = new RandomIndexWriter(newRandom(), store,
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
 
     Document d1 = new Document();
     d1.add(new Field("field", "a b", Field.Store.YES, Field.Index.ANALYZED));
 
     writer.addDocument(d1);
-    writer.optimize();
-    writer.close();
+    IndexReader reader = writer.getReader();
 
-    Searcher searcher = new IndexSearcher(store, true);
+    Searcher searcher = new IndexSearcher(reader);
     QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer());
     Query query = parser.parse("a NOT b");
     //System.out.println(query);
     ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
     assertEquals(0, hits.length);
+    writer.close();
+    searcher.close();
+    reader.close();
+    store.close();
   }
 }
Index: lucene/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java	(revision 963689)
+++ lucene/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java	(working copy)
@@ -24,8 +24,8 @@
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.queryParser.QueryParser;
 import org.apache.lucene.search.TimeLimitingCollector.TimeExceededException;
 import org.apache.lucene.store.Directory;
@@ -51,6 +51,9 @@
   private static final int N_THREADS = 50;
 
   private Searcher searcher;
+  private Directory directory;
+  private IndexReader reader;
+
   private final String FIELD_NAME = "body";
   private Query query;
@@ -74,14 +77,16 @@
         "blueberry strudel",
         "blueberry pizza",
     };
-    Directory directory = new RAMDirectory();
-    IndexWriter iw = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+    directory = new RAMDirectory();
+    RandomIndexWriter iw = new RandomIndexWriter(newRandom(), directory,
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
    for (int i=0; i> docs = new ArrayList>();
     Document d = new Document();
     Field f = new Field("f", "", Field.Store.NO, Field.Index.ANALYZED);
     d.add(f);
-    Random r = newRandom();
+    Random r = random;
     int NUM_DOCS = 10*_TestUtil.getRandomMultiplier();
    for(int i=0;i 0);
-      boolean shouldBePosVector = (hits[i].doc % 2 == 0) ? true : false;
-      assertTrue((shouldBePosVector == false) || (shouldBePosVector == true && (vector[0] instanceof TermPositionVector == true)));
-
-      boolean shouldBeOffVector = (hits[i].doc % 3 == 0) ? true : false;
-      assertTrue((shouldBeOffVector == false) || (shouldBeOffVector == true && (vector[0] instanceof TermPositionVector == true)));
-
-      if(shouldBePosVector || shouldBeOffVector){
-        TermPositionVector posVec = (TermPositionVector)vector[0];
-        BytesRef [] terms = posVec.getTerms();
-        assertTrue(terms != null && terms.length > 0);
+        for (int j = 0; j < terms.length; j++) {
+          int[] positions = posVec.getTermPositions(j);
+          TermVectorOffsetInfo[] offsets = posVec.getOffsets(j);
-        for (int j = 0; j < terms.length; j++) {
-          int [] positions = posVec.getTermPositions(j);
-          TermVectorOffsetInfo [] offsets = posVec.getOffsets(j);
-
-          if(shouldBePosVector){
-            assertTrue(positions != null);
-            assertTrue(positions.length > 0);
-          }
-          else
-            assertTrue(positions == null);
-
-          if(shouldBeOffVector){
-            assertTrue(offsets != null);
-            assertTrue(offsets.length > 0);
-          }
-          else
-            assertTrue(offsets == null);
-        }
-      }
-      else{
-        try{
-          assertTrue(false);
-        }
-        catch(ClassCastException ignore){
-          TermFreqVector freqVec = vector[0];
-          BytesRef [] terms = freqVec.getTerms();
-          assertTrue(terms != null && terms.length > 0);
-        }
+          if (shouldBePosVector) {
+            assertTrue(positions != null);
+            assertTrue(positions.length > 0);
+          } else assertTrue(positions == null);
+          if (shouldBeOffVector) {
+            assertTrue(offsets != null);
+            assertTrue(offsets.length > 0);
+          } else assertTrue(offsets == null);
         }
-
+      } else {
+        try {
+          assertTrue(false);
+        } catch (ClassCastException ignore) {
+          TermFreqVector freqVec = vector[0];
+          BytesRef[] terms = freqVec.getTerms();
+          assertTrue(terms != null && terms.length > 0);
+        }
+      }
-    } catch (IOException e) {
-      assertTrue(false);
     }
   }
@@ -216,7 +218,7 @@
     }
   }
 
-  public void testKnownSetOfDocuments() {
+  public void testKnownSetOfDocuments() throws IOException {
     String test1 = "eating chocolate in a computer lab"; //6 terms
     String test2 = "computer in a computer lab"; //5 terms
     String test3 = "a chocolate lab grows old"; //5 terms
@@ -242,112 +244,109 @@
     setupDoc(testDoc3, test3);
     Document testDoc4 = new Document();
     setupDoc(testDoc4, test4);
-
+    
     Directory dir = new MockRAMDirectory();
-    try {
-      IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
-          TEST_VERSION_CURRENT,
-          new MockAnalyzer(MockTokenizer.SIMPLE, true))
-          .setOpenMode(OpenMode.CREATE));
-      writer.addDocument(testDoc1);
-      writer.addDocument(testDoc2);
-      writer.addDocument(testDoc3);
-      writer.addDocument(testDoc4);
-      writer.close();
-      IndexSearcher knownSearcher = new IndexSearcher(dir, true);
-      FieldsEnum fields = MultiFields.getFields(knownSearcher.reader).iterator();
-      
-      DocsEnum docs = null;
-      while(fields.next() != null) {
-        TermsEnum terms = fields.terms();
-        while(terms.next() != null) {
-          String text = terms.term().utf8ToString();
-          docs = terms.docs(MultiFields.getDeletedDocs(knownSearcher.reader), docs);
-          
-          while (docs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
-            int docId = docs.docID();
-            int freq = docs.freq();
-            //System.out.println("Doc Id: " + docId + " freq " + freq);
-            TermFreqVector vector = knownSearcher.reader.getTermFreqVector(docId, "field");
-            //float tf = sim.tf(freq);
-            //float idf = sim.idf(knownSearcher.docFreq(term), knownSearcher.maxDoc());
-            //float qNorm = sim.queryNorm()
-            //This is fine since we don't have stop words
-            //float lNorm = sim.lengthNorm("field", vector.getTerms().length);
-            //float coord = sim.coord()
-            //System.out.println("TF: " + tf + " IDF: " + idf + " LenNorm: " + lNorm);
-            assertTrue(vector != null);
-            BytesRef[] vTerms = vector.getTerms();
-            int [] freqs = vector.getTermFrequencies();
-            for (int i = 0; i < vTerms.length; i++)
-            {
-              if (text.equals(vTerms[i].utf8ToString()))
-              {
-                assertTrue(freqs[i] == freq);
-              }
-            }
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir,
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true))
+            .setOpenMode(OpenMode.CREATE));
+    writer.addDocument(testDoc1);
+    writer.addDocument(testDoc2);
+    writer.addDocument(testDoc3);
+    writer.addDocument(testDoc4);
+    IndexReader reader = writer.getReader();
+    writer.close();
+    IndexSearcher knownSearcher = new IndexSearcher(reader);
+    FieldsEnum fields = MultiFields.getFields(knownSearcher.reader).iterator();
+    
+    DocsEnum docs = null;
+    while(fields.next() != null) {
+      TermsEnum terms = fields.terms();
+      while(terms.next() != null) {
+        String text = terms.term().utf8ToString();
+        docs = terms.docs(MultiFields.getDeletedDocs(knownSearcher.reader), docs);
+        
+        while (docs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
+          int docId = docs.docID();
+          int freq = docs.freq();
+          //System.out.println("Doc Id: " + docId + " freq " + freq);
+          TermFreqVector vector = knownSearcher.reader.getTermFreqVector(docId, "field");
+          //float tf = sim.tf(freq);
+          //float idf = sim.idf(knownSearcher.docFreq(term), knownSearcher.maxDoc());
+          //float qNorm = sim.queryNorm()
+          //This is fine since we don't have stop words
+          //float lNorm = sim.lengthNorm("field", vector.getTerms().length);
+          //float coord = sim.coord()
+          //System.out.println("TF: " + tf + " IDF: " + idf + " LenNorm: " + lNorm);
+          assertTrue(vector != null);
+          BytesRef[] vTerms = vector.getTerms();
+          int [] freqs = vector.getTermFrequencies();
+          for (int i = 0; i < vTerms.length; i++)
+          {
+            if (text.equals(vTerms[i].utf8ToString()))
+            {
+              assertTrue(freqs[i] == freq);
+            }
           }
         }
-        //System.out.println("--------");
       }
-      Query query = new TermQuery(new Term("field", "chocolate"));
-      ScoreDoc[] hits = knownSearcher.search(query, null, 1000).scoreDocs;
-      //doc 3 should be the first hit b/c it is the shortest match
-      assertTrue(hits.length == 3);
-      /*System.out.println("Hit 0: " + hits.id(0) + " Score: " + hits.score(0) + " String: " + hits.doc(0).toString());
      System.out.println("Explain: " + knownSearcher.explain(query, hits.id(0)));
      System.out.println("Hit 1: " + hits.id(1) + " Score: " + hits.score(1) + " String: " + hits.doc(1).toString());
      System.out.println("Explain: " + knownSearcher.explain(query, hits.id(1)));
      System.out.println("Hit 2: " + hits.id(2) + " Score: " + hits.score(2) + " String: " + hits.doc(2).toString());
      System.out.println("Explain: " + knownSearcher.explain(query, hits.id(2)));*/
-      assertTrue(hits[0].doc == 2);
-      assertTrue(hits[1].doc == 3);
-      assertTrue(hits[2].doc == 0);
-      TermFreqVector vector = knownSearcher.reader.getTermFreqVector(hits[1].doc, "field");
-      assertTrue(vector != null);
-      //System.out.println("Vector: " + vector);
-      BytesRef[] terms = vector.getTerms();
-      int [] freqs = vector.getTermFrequencies();
-      assertTrue(terms != null && terms.length == 10);
-      for (int i = 0; i < terms.length; i++) {
-        String term = terms[i].utf8ToString();
-        //System.out.println("Term: " + term);
-        int freq = freqs[i];
-        assertTrue(test4.indexOf(term) != -1);
-        Integer freqInt = test4Map.get(term);
-        assertTrue(freqInt != null);
-        assertTrue(freqInt.intValue() == freq);
+      //System.out.println("--------");
+    }
+    Query query = new TermQuery(new Term("field", "chocolate"));
+    ScoreDoc[] hits = knownSearcher.search(query, null, 1000).scoreDocs;
+    //doc 3 should be the first hit b/c it is the shortest match
+    assertTrue(hits.length == 3);
+    /*System.out.println("Hit 0: " + hits.id(0) + " Score: " + hits.score(0) + " String: " + hits.doc(0).toString());
     System.out.println("Explain: " + knownSearcher.explain(query, hits.id(0)));
     System.out.println("Hit 1: " + hits.id(1) + " Score: " + hits.score(1) + " String: " + hits.doc(1).toString());
     System.out.println("Explain: " + knownSearcher.explain(query, hits.id(1)));
     System.out.println("Hit 2: " + hits.id(2) + " Score: " + hits.score(2) + " String: " + hits.doc(2).toString());
     System.out.println("Explain: " + knownSearcher.explain(query, hits.id(2)));*/
+    assertTrue(hits[0].doc == 2);
+    assertTrue(hits[1].doc == 3);
+    assertTrue(hits[2].doc == 0);
+    TermFreqVector vector = knownSearcher.reader.getTermFreqVector(hits[1].doc, "field");
+    assertTrue(vector != null);
+    //System.out.println("Vector: " + vector);
+    BytesRef[] terms = vector.getTerms();
+    int [] freqs = vector.getTermFrequencies();
+    assertTrue(terms != null && terms.length == 10);
+    for (int i = 0; i < terms.length; i++) {
+      String term = terms[i].utf8ToString();
+      //System.out.println("Term: " + term);
+      int freq = freqs[i];
+      assertTrue(test4.indexOf(term) != -1);
+      Integer freqInt = test4Map.get(term);
+      assertTrue(freqInt != null);
+      assertTrue(freqInt.intValue() == freq);
+    }
+    SortedTermVectorMapper mapper = new SortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
+    knownSearcher.reader.getTermFreqVector(hits[1].doc, mapper);
+    SortedSet<TermVectorEntry> vectorEntrySet = mapper.getTermVectorEntrySet();
+    assertTrue("mapper.getTermVectorEntrySet() Size: " + vectorEntrySet.size() + " is not: " + 10, vectorEntrySet.size() == 10);
+    TermVectorEntry last = null;
+    for (final TermVectorEntry tve : vectorEntrySet) {
+      if (tve != null && last != null)
+      {
+        assertTrue("terms are not properly sorted", last.getFrequency() >= tve.getFrequency());
+        Integer expectedFreq = test4Map.get(tve.getTerm().utf8ToString());
+        //we expect double the expectedFreq, since there are two fields with the exact same text and we are collapsing all fields
+        assertTrue("Frequency is not correct:", tve.getFrequency() == 2*expectedFreq.intValue());
       }
-      SortedTermVectorMapper mapper = new SortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
-      knownSearcher.reader.getTermFreqVector(hits[1].doc, mapper);
-      SortedSet<TermVectorEntry> vectorEntrySet = mapper.getTermVectorEntrySet();
-      assertTrue("mapper.getTermVectorEntrySet() Size: " + vectorEntrySet.size() + " is not: " + 10, vectorEntrySet.size() == 10);
-      TermVectorEntry last = null;
-      for (final TermVectorEntry tve : vectorEntrySet) {
-        if (tve != null && last != null)
-        {
-          assertTrue("terms are not properly sorted", last.getFrequency() >= tve.getFrequency());
-          Integer expectedFreq = test4Map.get(tve.getTerm().utf8ToString());
-          //we expect double the expectedFreq, since there are two fields with the exact same text and we are collapsing all fields
-          assertTrue("Frequency is not correct:", tve.getFrequency() == 2*expectedFreq.intValue());
-        }
-        last = tve;
-
-      }
-
-      FieldSortedTermVectorMapper fieldMapper = new FieldSortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
-      knownSearcher.reader.getTermFreqVector(hits[1].doc, fieldMapper);
-      Map<String,SortedSet<TermVectorEntry>> map = fieldMapper.getFieldToTerms();
-      assertTrue("map Size: " + map.size() + " is not: " + 2, map.size() == 2);
-      vectorEntrySet = map.get("field");
-      assertTrue("vectorEntrySet is null and it shouldn't be", vectorEntrySet != null);
-      assertTrue("vectorEntrySet Size: " + vectorEntrySet.size() + " is not: " + 10, vectorEntrySet.size() == 10);
-      knownSearcher.close();
-    } catch (IOException e) {
-      e.printStackTrace();
-      assertTrue(false);
+      last = tve;
     }
+    
+    FieldSortedTermVectorMapper fieldMapper = new FieldSortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
+    knownSearcher.reader.getTermFreqVector(hits[1].doc, fieldMapper);
+    Map<String,SortedSet<TermVectorEntry>> map = fieldMapper.getFieldToTerms();
+    assertTrue("map Size: " + map.size() + " is not: " + 2, map.size() == 2);
+    vectorEntrySet = map.get("field");
+    assertTrue("vectorEntrySet is null and it shouldn't be", vectorEntrySet != null);
+    assertTrue("vectorEntrySet Size: " + vectorEntrySet.size() + " is not: " + 10, vectorEntrySet.size() == 10);
+    knownSearcher.close();
+    reader.close();
+    dir.close();
   }
 
   private void setupDoc(Document doc, String text)
@@ -361,8 +360,8 @@
 
   // Test only a few docs having vectors
   public void testRareVectors() throws IOException {
-    IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true))
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true))
         .setOpenMode(OpenMode.CREATE));
     for (int i = 0; i < 100; i++) {
       Document doc = new Document();
@@ -377,8 +376,9 @@
       writer.addDocument(doc);
     }
 
+    IndexReader reader = writer.getReader();
     writer.close();
-    searcher = new IndexSearcher(directory, true);
+    searcher = new IndexSearcher(reader);
 
     Query query = new TermQuery(new Term("field", "hundred"));
     ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
@@ -388,14 +388,15 @@
       assertTrue(vector != null);
      assertTrue(vector.length == 1);
     }
+    reader.close();
   }
 
   // In a single doc, for the same field, mix the term
   // vectors up
   public void testMixedVectrosVectors() throws IOException {
-    IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
-        TEST_VERSION_CURRENT,
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+        new IndexWriterConfig(TEST_VERSION_CURRENT,
         new MockAnalyzer(MockTokenizer.SIMPLE, true)).setOpenMode(OpenMode.CREATE));
     Document doc = new Document();
     doc.add(new Field("field", "one",
@@ -409,9 +410,10 @@
     doc.add(new Field("field", "one",
                       Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
     writer.addDocument(doc);
+    IndexReader reader = writer.getReader();
     writer.close();
 
-    searcher = new IndexSearcher(directory, true);
+    searcher = new IndexSearcher(reader);
 
     Query query = new TermQuery(new Term("field", "one"));
     ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
@@ -437,6 +439,7 @@
       assertEquals(4*i, offsets[i].getStartOffset());
       assertEquals(4*i+3, offsets[i].getEndOffset());
     }
+    reader.close();
   }
 
   private static class TestTermVectorMapper extends TermVectorMapper {
Index: lucene/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java	(revision 963689)
+++ lucene/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java	(working copy)
@@ -26,8 +26,9 @@
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.NumericField;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util._TestUtil;
@@ -44,7 +45,8 @@
     final Random rnd = newRandom();
 
     RAMDirectory directory = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+    RandomIndexWriter writer = new RandomIndexWriter(rnd, directory,
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
 
     DecimalFormat format = new DecimalFormat("00000000000", new DecimalFormatSymbols(Locale.US));
 
@@ -56,10 +58,11 @@
         doc.add(new NumericField("trie", Field.Store.NO, true).setIntValue(value));
       }
       writer.addDocument(doc);
-    }
+    }
+    IndexReader reader = writer.getReader();
     writer.close();
 
-    Searcher searcher=new IndexSearcher(directory, true);
+    Searcher searcher=new IndexSearcher(reader);
     for (int i=0; i<50*_TestUtil.getRandomMultiplier(); i++) {
       int lower=rnd.nextInt(Integer.MAX_VALUE);
       int upper=rnd.nextInt(Integer.MAX_VALUE);
@@ -73,7 +76,7 @@
       assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", trTopDocs.totalHits, nrTopDocs.totalHits );
     }
     searcher.close();
-
+    reader.close();
    directory.close();
  }
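TestMultiValuedNumericRangeQuery above asserts that a NumericRangeQuery and a TermRangeQuery report the same totalHits. That only works because each value is indexed twice: trie-encoded in the "trie" NumericField, and as a string padded by DecimalFormat("00000000000"), whose fixed width makes lexicographic order agree with numeric order. A hedged sketch of the idea; the "asc" field name for the padded side is an assumption, since only "trie" is visible in the hunk above:

    DecimalFormat format = new DecimalFormat("00000000000",
        new DecimalFormatSymbols(Locale.US));
    int lower = 42, upper = 9000;
    // fixed-width decimal strings compare like the ints they encode:
    // "00000000042" < "00000009000" just as 42 < 9000
    Query byTerm = new TermRangeQuery("asc", format.format(lower),
        format.format(upper), true, true);       // "asc" is an assumed field name
    Query byTrie = NumericRangeQuery.newIntRange("trie", lower, upper, true, true);
    // over an index built this way, both queries must return the same totalHits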
Index: lucene/src/test/org/apache/lucene/search/TestTopDocsCollector.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestTopDocsCollector.java	(revision 963689)
+++ lucene/src/test/org/apache/lucene/search/TestTopDocsCollector.java	(working copy)
@@ -22,8 +22,8 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
@@ -93,10 +93,11 @@
   private static final float MAX_SCORE = 9.17561f;
 
   private Directory dir = new RAMDirectory();
+  private IndexReader reader;
 
   private TopDocsCollector<ScoreDoc> doSearch(int numResults) throws IOException {
     Query q = new MatchAllDocsQuery();
-    IndexSearcher searcher = new IndexSearcher(dir, true);
+    IndexSearcher searcher = new IndexSearcher(reader);
     TopDocsCollector<ScoreDoc> tdc = new MyTopsDocCollector(numResults);
     searcher.search(q, tdc);
     searcher.close();
@@ -109,15 +110,17 @@
 
     // populate an index with 30 documents, this should be enough for the test.
     // The documents have no content - the test uses MatchAllDocsQuery().
-    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+    RandomIndexWriter writer = new RandomIndexWriter(newRandom(), dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
     for (int i = 0; i < 30; i++) {
       writer.addDocument(new Document());
     }
+    reader = writer.getReader();
     writer.close();
   }
 
   @Override
   protected void tearDown() throws Exception {
+    reader.close();
    dir.close();
    dir = null;
    super.tearDown();
  }
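Both TestTopDocsCollector above and TestTermScorer below drive hits through the Collector API, where collect(int doc) receives segment-relative doc ids; the docBase passed to setNextReader() is what turns them back into global ids. A minimal sketch of that contract, using a hypothetical accumulating collector shaped like the anonymous one in TestTermScorer:

    final List<Integer> globalIds = new ArrayList<Integer>();
    Collector collector = new Collector() {
      private int base = 0;

      @Override
      public void setScorer(Scorer scorer) throws IOException {
        // keep the scorer here if scores are needed inside collect()
      }

      @Override
      public void collect(int doc) throws IOException {
        globalIds.add(doc + base);   // doc is relative to the current segment
      }

      @Override
      public void setNextReader(IndexReader reader, int docBase) {
        base = docBase;              // first global doc id of the new segment
      }

      @Override
      public boolean acceptsDocsOutOfOrder() {
        return true;                 // we only accumulate; order is irrelevant
      }
    };
    // searcher.search(new MatchAllDocsQuery(), collector);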
Index: lucene/src/test/org/apache/lucene/search/TestTermScorer.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestTermScorer.java	(revision 963689)
+++ lucene/src/test/org/apache/lucene/search/TestTermScorer.java	(working copy)
@@ -26,152 +26,155 @@
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.RAMDirectory;
 
-public class TestTermScorer extends LuceneTestCase
-{
-    protected RAMDirectory directory;
-    private static final String FIELD = "field";
+public class TestTermScorer extends LuceneTestCase {
+  protected RAMDirectory directory;
+  private static final String FIELD = "field";
+  
+  protected String[] values = new String[] {"all", "dogs dogs", "like",
+      "playing", "fetch", "all"};
+  protected IndexSearcher indexSearcher;
+  protected IndexReader indexReader;
+  
+  public TestTermScorer(String s) {
+    super(s);
+  }
+  
+  @Override
+  protected void setUp() throws Exception {
+    super.setUp();
+    directory = new RAMDirectory();
+    
+    RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory,
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+    for (int i = 0; i < values.length; i++) {
+      Document doc = new Document();
+      doc
+          .add(new Field(FIELD, values[i], Field.Store.YES,
+              Field.Index.ANALYZED));
+      writer.addDocument(doc);
+    }
+    indexReader = writer.getReader();
+    writer.close();
+    indexSearcher = new IndexSearcher(indexReader);
+  }
+  
+  @Override
+  protected void tearDown() throws Exception {
+    indexSearcher.close();
+    indexReader.close();
+    directory.close();
+  }
 
-    protected String[] values = new String[]{"all", "dogs dogs", "like", "playing", "fetch", "all"};
-    protected IndexSearcher indexSearcher;
-    protected IndexReader indexReader;
-
-
-    public TestTermScorer(String s)
-    {
-        super(s);
+  public void test() throws IOException {
+    
+    Term allTerm = new Term(FIELD, "all");
+    TermQuery termQuery = new TermQuery(allTerm);
+    
+    Weight weight = termQuery.weight(indexSearcher);
+    
+    Scorer ts = weight.scorer(indexSearcher.getIndexReader(), true, true);
+    // we have 2 documents with the term all in them, one document for all the
+    // other values
+    final List<TestHit> docs = new ArrayList<TestHit>();
+    // must call next first
+    
+    ts.score(new Collector() {
+      private int base = 0;
+      private Scorer scorer;
+      
+      @Override
+      public void setScorer(Scorer scorer) throws IOException {
+        this.scorer = scorer;
+      }
+      
+      @Override
+      public void collect(int doc) throws IOException {
+        float score = scorer.score();
+        doc = doc + base;
+        docs.add(new TestHit(doc, score));
+        assertTrue("score " + score + " is not greater than 0", score > 0);
+        assertTrue("Doc: " + doc + " does not equal 0 or doc does not equal 5",
+            doc == 0 || doc == 5);
+      }
+      
+      @Override
+      public void setNextReader(IndexReader reader, int docBase) {
+        base = docBase;
+      }
+      
+      @Override
+      public boolean acceptsDocsOutOfOrder() {
+        return true;
+      }
+    });
+    assertTrue("docs Size: " + docs.size() + " is not: " + 2, docs.size() == 2);
+    TestHit doc0 = docs.get(0);
+    TestHit doc5 = docs.get(1);
+    // The scores should be the same
+    assertTrue(doc0.score + " does not equal: " + doc5.score,
+        doc0.score == doc5.score);
+    /*
+     * Score should be (based on Default Sim.: All floats are approximate tf = 1
+     * numDocs = 6 docFreq(all) = 2 idf = ln(6/3) + 1 = 1.693147 idf ^ 2 =
+     * 2.8667 boost = 1 lengthNorm = 1 //there is 1 term in every document coord
+     * = 1 sumOfSquaredWeights = (idf * boost) ^ 2 = 1.693147 ^ 2 = 2.8667
+     * queryNorm = 1 / (sumOfSquaredWeights)^0.5 = 1 /(1.693147) = 0.590
+     * 
+     * score = 1 * 2.8667 * 1 * 1 * 0.590 = 1.69
+     */
+    assertTrue(doc0.score + " does not equal: " + 1.6931472f,
+        doc0.score == 1.6931472f);
+  }
+  
+  public void testNext() throws Exception {
+    
+    Term allTerm = new Term(FIELD, "all");
+    TermQuery termQuery = new TermQuery(allTerm);
+    
+    Weight weight = termQuery.weight(indexSearcher);
+    
+    Scorer ts = weight.scorer(indexSearcher.getIndexReader(), true, true);
+    assertTrue("next did not return a doc",
+        ts.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+    assertTrue("score is not correct", ts.score() == 1.6931472f);
+    assertTrue("next did not return a doc",
+        ts.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+    assertTrue("score is not correct", ts.score() == 1.6931472f);
+    assertTrue("next returned a doc and it should not have",
+        ts.nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
+  }
+  
+  public void testAdvance() throws Exception {
+    
+    Term allTerm = new Term(FIELD, "all");
+    TermQuery termQuery = new TermQuery(allTerm);
+    
+    Weight weight = termQuery.weight(indexSearcher);
+    
+    Scorer ts = weight.scorer(indexSearcher.getIndexReader(), true, true);
+    assertTrue("Didn't skip", ts.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
+    // The next doc should be doc 5
+    assertTrue("doc should be number 5", ts.docID() == 5);
+  }
+  
+  private class TestHit {
+    public int doc;
+    public float score;
+    
+    public TestHit(int doc, float score) {
+      this.doc = doc;
+      this.score = score;
    }
-    
+
    @Override
-    protected void setUp() throws Exception {
-        super.setUp();
-        directory = new RAMDirectory();
-
-        IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
-        for (int i = 0; i < values.length; i++) {
-            Document doc = new Document();
-            doc.add(new Field(FIELD, values[i], Field.Store.YES, Field.Index.ANALYZED));
-            writer.addDocument(doc);
-        }
-        writer.close();
-        indexSearcher = new IndexSearcher(directory, false);
-        indexReader = indexSearcher.getIndexReader();
-
-
+    public String toString() {
+      return "TestHit{" + "doc=" + doc + ", score=" + score + "}";
    }
-
-    public void test() throws IOException {
-
-        Term allTerm = new Term(FIELD, "all");
-        TermQuery termQuery = new TermQuery(allTerm);
-
-        Weight weight = termQuery.weight(indexSearcher);
-
-        Scorer ts = weight.scorer(indexSearcher.getIndexReader(),
-                                  true, true);
-        //we have 2 documents with the term all in them, one document for all the other values
-        final List<TestHit> docs = new ArrayList<TestHit>();
-        //must call next first
-
-
-        ts.score(new Collector() {
-            private int base = 0;
-            private Scorer scorer;
-            @Override
-            public void setScorer(Scorer scorer) throws IOException {
-              this.scorer = scorer;
-            }
-
-            @Override
-            public void collect(int doc) throws IOException {
-              float score = scorer.score();
-              doc = doc + base;
-              docs.add(new TestHit(doc, score));
-              assertTrue("score " + score + " is not greater than 0", score > 0);
-              assertTrue("Doc: " + doc + " does not equal 0 or doc does not equal 5",
-                      doc == 0 || doc == 5);
-            }
-            @Override
-            public void setNextReader(IndexReader reader, int docBase) {
-              base = docBase;
-            }
-            @Override
-            public boolean acceptsDocsOutOfOrder() {
-              return true;
-            }
-          });
-        assertTrue("docs Size: " + docs.size() + " is not: " + 2, docs.size() == 2);
-        TestHit doc0 = docs.get(0);
-        TestHit doc5 = docs.get(1);
-        //The scores should be the same
-        assertTrue(doc0.score + " does not equal: " + doc5.score, doc0.score == doc5.score);
-        /*
-        Score should be (based on Default Sim.:
-        All floats are approximate
-        tf = 1
-        numDocs = 6
-        docFreq(all) = 2
-        idf = ln(6/3) + 1 = 1.693147
-        idf ^ 2 = 2.8667
-        boost = 1
-        lengthNorm = 1 //there is 1 term in every document
-        coord = 1
-        sumOfSquaredWeights = (idf * boost) ^ 2 = 1.693147 ^ 2 = 2.8667
-        queryNorm = 1 / (sumOfSquaredWeights)^0.5 = 1 /(1.693147) = 0.590
-
-        score = 1 * 2.8667 * 1 * 1 * 0.590 = 1.69
-
-        */
-        assertTrue(doc0.score + " does not equal: " + 1.6931472f, doc0.score == 1.6931472f);
-    }
-
-    public void testNext() throws Exception {
-
-        Term allTerm = new Term(FIELD, "all");
-        TermQuery termQuery = new TermQuery(allTerm);
-
-        Weight weight = termQuery.weight(indexSearcher);
-
-        Scorer ts = weight.scorer(indexSearcher.getIndexReader(),
-                                  true, true);
-        assertTrue("next did not return a doc", ts.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
-        assertTrue("score is not correct", ts.score() == 1.6931472f);
-        assertTrue("next did not return a doc", ts.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
-        assertTrue("score is not correct", ts.score() == 1.6931472f);
-        assertTrue("next returned a doc and it should not have", ts.nextDoc() == DocIdSetIterator.NO_MORE_DOCS);
-    }
-
-    public void testAdvance() throws Exception {
-
-        Term allTerm = new Term(FIELD, "all");
-        TermQuery termQuery = new TermQuery(allTerm);
-
-        Weight weight = termQuery.weight(indexSearcher);
-
-        Scorer ts = weight.scorer(indexSearcher.getIndexReader(),
-                                  true, true);
-        assertTrue("Didn't skip", ts.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
-        //The next doc should be doc 5
-        assertTrue("doc should be number 5", ts.docID() == 5);
-    }
-
-    private class TestHit {
-        public int doc;
-        public float score;
-
-        public TestHit(int doc, float score) {
-            this.doc = doc;
-            this.score = score;
-        }
-
-        @Override
-        public String toString() {
-            return "TestHit{" + "doc=" + doc + ", score=" + score + "}";
-        }
-    }
-
+  }
+  
 }
Index: lucene/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java	(revision 963689)
+++ lucene/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java	(working copy)
@@ -21,9 +21,9 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.MultiFields;
@@ -40,66 +40,72 @@
   public TestPhrasePrefixQuery(String name) {
     super(name);
   }
-
-  /**
+  
+  /**
    *
    */
-  public void testPhrasePrefix()
-      throws IOException
-  {
-    RAMDirectory indexStore = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
-    Document doc1 = new Document();
-    Document doc2 = new Document();
-    Document doc3 = new Document();
-    Document doc4 = new Document();
-    Document doc5 = new Document();
-    doc1.add(new Field("body", "blueberry pie", Field.Store.YES, Field.Index.ANALYZED));
-    doc2.add(new Field("body", "blueberry strudel", Field.Store.YES, Field.Index.ANALYZED));
-    doc3.add(new Field("body", "blueberry pizza", Field.Store.YES, Field.Index.ANALYZED));
-    doc4.add(new Field("body", "blueberry chewing gum", Field.Store.YES, Field.Index.ANALYZED));
-    doc5.add(new Field("body", "piccadilly circus", Field.Store.YES, Field.Index.ANALYZED));
-    writer.addDocument(doc1);
-    writer.addDocument(doc2);
-    writer.addDocument(doc3);
-    writer.addDocument(doc4);
-    writer.addDocument(doc5);
-    writer.optimize();
-    writer.close();
-
-    IndexSearcher searcher = new IndexSearcher(indexStore, true);
-
-    //PhrasePrefixQuery query1 = new PhrasePrefixQuery();
-    MultiPhraseQuery query1 = new MultiPhraseQuery();
-    //PhrasePrefixQuery query2 = new PhrasePrefixQuery();
-    MultiPhraseQuery query2 = new MultiPhraseQuery();
-    query1.add(new Term("body", "blueberry"));
-    query2.add(new Term("body", "strawberry"));
-
-    LinkedList<Term> termsWithPrefix = new LinkedList<Term>();
-    IndexReader ir = IndexReader.open(indexStore, true);
-
-    // this TermEnum gives "piccadilly", "pie" and "pizza".
-    String prefix = "pi";
-    TermsEnum te = MultiFields.getFields(ir).terms("body").iterator();
-    te.seek(new BytesRef(prefix));
-    do {
-      String s = te.term().utf8ToString();
-      if (s.startsWith(prefix)) {
-        termsWithPrefix.add(new Term("body", s));
-      } else {
-        break;
-      }
-    } while (te.next() != null);
-
-    query1.add(termsWithPrefix.toArray(new Term[0]));
-    query2.add(termsWithPrefix.toArray(new Term[0]));
-
-    ScoreDoc[] result;
-    result = searcher.search(query1, null, 1000).scoreDocs;
-    assertEquals(2, result.length);
-
-    result = searcher.search(query2, null, 1000).scoreDocs;
-    assertEquals(0, result.length);
-  }
+  public void testPhrasePrefix() throws IOException {
+    RAMDirectory indexStore = new RAMDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(newRandom(), indexStore,
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+    Document doc1 = new Document();
+    Document doc2 = new Document();
+    Document doc3 = new Document();
+    Document doc4 = new Document();
+    Document doc5 = new Document();
+    doc1.add(new Field("body", "blueberry pie", Field.Store.YES,
+        Field.Index.ANALYZED));
+    doc2.add(new Field("body", "blueberry strudel", Field.Store.YES,
+        Field.Index.ANALYZED));
+    doc3.add(new Field("body", "blueberry pizza", Field.Store.YES,
+        Field.Index.ANALYZED));
+    doc4.add(new Field("body", "blueberry chewing gum", Field.Store.YES,
+        Field.Index.ANALYZED));
+    doc5.add(new Field("body", "piccadilly circus", Field.Store.YES,
+        Field.Index.ANALYZED));
+    writer.addDocument(doc1);
+    writer.addDocument(doc2);
+    writer.addDocument(doc3);
+    writer.addDocument(doc4);
+    writer.addDocument(doc5);
+    IndexReader reader = writer.getReader();
+    writer.close();
+    
+    IndexSearcher searcher = new IndexSearcher(reader);
+    
+    // PhrasePrefixQuery query1 = new PhrasePrefixQuery();
+    MultiPhraseQuery query1 = new MultiPhraseQuery();
+    // PhrasePrefixQuery query2 = new PhrasePrefixQuery();
+    MultiPhraseQuery query2 = new MultiPhraseQuery();
+    query1.add(new Term("body", "blueberry"));
+    query2.add(new Term("body", "strawberry"));
+    
+    LinkedList<Term> termsWithPrefix = new LinkedList<Term>();
+    
+    // this TermEnum gives "piccadilly", "pie" and "pizza".
+    String prefix = "pi";
+    TermsEnum te = MultiFields.getFields(reader).terms("body").iterator();
+    te.seek(new BytesRef(prefix));
+    do {
+      String s = te.term().utf8ToString();
+      if (s.startsWith(prefix)) {
+        termsWithPrefix.add(new Term("body", s));
+      } else {
+        break;
+      }
+    } while (te.next() != null);
+    
+    query1.add(termsWithPrefix.toArray(new Term[0]));
+    query2.add(termsWithPrefix.toArray(new Term[0]));
+    
+    ScoreDoc[] result;
+    result = searcher.search(query1, null, 1000).scoreDocs;
+    assertEquals(2, result.length);
+    
+    result = searcher.search(query2, null, 1000).scoreDocs;
+    assertEquals(0, result.length);
+    searcher.close();
+    reader.close();
+    indexStore.close();
+  }
 }
Index: lucene/src/test/org/apache/lucene/search/TestWildcard.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestWildcard.java	(revision 963689)
+++ lucene/src/test/org/apache/lucene/search/TestWildcard.java	(working copy)
@@ -23,19 +23,28 @@
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.Field.Store;
 import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queryParser.QueryParser;
 import org.apache.lucene.store.RAMDirectory;
 import java.io.IOException;
+import java.util.Random;
 
 /**
  * TestWildcard tests the '*' and '?' wildcard characters.
  */
 public class TestWildcard extends LuceneTestCase {
+  private Random random;
+
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    random = newRandom();
+  }
+
   public void testEquals() {
     WildcardQuery wq1 = new WildcardQuery(new Term("field", "b*a"));
     WildcardQuery wq2 = new WildcardQuery(new Term("field", "b*a"));
@@ -193,14 +202,13 @@
   private RAMDirectory getIndexStore(String field, String[] contents)
       throws IOException {
     RAMDirectory indexStore = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(
+    RandomIndexWriter writer = new RandomIndexWriter(random, indexStore, new IndexWriterConfig(
         TEST_VERSION_CURRENT, new MockAnalyzer()));
     for (int i = 0; i < contents.length; ++i) {
       Document doc = new Document();
       doc.add(new Field(field, contents[i], Field.Store.YES, Field.Index.ANALYZED));
       writer.addDocument(doc);
     }
-    writer.optimize();
     writer.close();
 
     return indexStore;
@@ -251,7 +259,8 @@
 
     // prepare the index
     RAMDirectory dir = new RAMDirectory();
-    IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+    RandomIndexWriter iw = new RandomIndexWriter(random, dir,
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
    for (int i = 0; i < docs.length; i++) {
      Document doc = new Document();
      doc.add(new Field(field,docs[i],Store.NO,Index.ANALYZED));
Index: lucene/src/test/org/apache/lucene/search/BaseTestRangeFilter.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/BaseTestRangeFilter.java	(revision 963689)
+++ lucene/src/test/org/apache/lucene/search/BaseTestRangeFilter.java	(working copy)
@@ -17,131 +17,134 @@
  * limitations under the License.
  */
 
+import java.io.IOException;
 import java.util.Random;
 
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.store.RAMDirectory;
 
 public class BaseTestRangeFilter extends LuceneTestCase {
-
-    public static final boolean F = false;
-    public static final boolean T = true;
+  
+  public static final boolean F = false;
+  public static final boolean T = true;
+  
+  protected Random rand;
+  
+  /**
+   * Collation interacts badly with hyphens -- collation produces different
+   * ordering than Unicode code-point ordering -- so two indexes are created:
+   * one which can't have negative random integers, for testing collated ranges,
+   * and the other which can have negative random integers, for all other tests.
+   */
+  class TestIndex {
+    int maxR;
+    int minR;
+    boolean allowNegativeRandomInts;
+    RAMDirectory index = new RAMDirectory();
     
-    protected Random rand;
-
-    /**
-     * Collation interacts badly with hyphens -- collation produces different
-     * ordering than Unicode code-point ordering -- so two indexes are created:
-     * one which can't have negative random integers, for testing collated
-     * ranges, and the other which can have negative random integers, for all
-     * other tests.
-     */
-    class TestIndex {
-        int maxR;
-        int minR;
-        boolean allowNegativeRandomInts;
-        RAMDirectory index = new RAMDirectory();
-
-        TestIndex(int minR, int maxR, boolean allowNegativeRandomInts) {
-            this.minR = minR;
-            this.maxR = maxR;
-            this.allowNegativeRandomInts = allowNegativeRandomInts;
-        }
+    TestIndex(int minR, int maxR, boolean allowNegativeRandomInts) {
+      this.minR = minR;
+      this.maxR = maxR;
+      this.allowNegativeRandomInts = allowNegativeRandomInts;
     }
-    TestIndex signedIndex = new TestIndex(Integer.MAX_VALUE, Integer.MIN_VALUE, true);
-    TestIndex unsignedIndex = new TestIndex(Integer.MAX_VALUE, 0, false);
+  }
+  
+  IndexReader signedIndexReader;
+  IndexReader unsignedIndexReader;
+  
+  TestIndex signedIndexDir = new TestIndex(Integer.MAX_VALUE, Integer.MIN_VALUE, true);
+  TestIndex unsignedIndexDir = new TestIndex(Integer.MAX_VALUE, 0, false);
+  
+  int minId = 0;
+  int maxId = 10000;
+  
+  static final int intLength = Integer.toString(Integer.MAX_VALUE).length();
+  
+  /**
+   * a simple padding function that should work with any int
+   */
+  public static String pad(int n) {
+    StringBuilder b = new StringBuilder(40);
+    String p = "0";
+    if (n < 0) {
+      p = "-";
+      n = Integer.MAX_VALUE + n + 1;
+    }
+    b.append(p);
+    String s = Integer.toString(n);
+    for (int i = s.length(); i <= intLength; i++) {
+      b.append("0");
+    }
+    b.append(s);
     
-    int minId = 0;
-    int maxId = 10000;
-
-    static final int intLength = Integer.toString(Integer.MAX_VALUE).length();
+    return b.toString();
+  }
+  
+  protected void setUp() throws Exception {
+    super.setUp();
+    rand = newRandom();
+    signedIndexReader = build(rand, signedIndexDir);
+    unsignedIndexReader = build(rand, unsignedIndexDir);
+  }
+  
+  protected void tearDown() throws Exception {
+    signedIndexReader.close();
+    unsignedIndexReader.close();
+    super.tearDown();
+  }
+  
+  private IndexReader build(Random random, TestIndex index) throws IOException {
+    /* build an index */
+    RandomIndexWriter writer = new RandomIndexWriter(random, index.index,
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
+            .setOpenMode(OpenMode.CREATE));
     
-    /**
-     * a simple padding function that should work with any int
-     */
-    public static String pad(int n) {
-        StringBuilder b = new StringBuilder(40);
-        String p = "0";
-        if (n < 0) {
-            p = "-";
-            n = Integer.MAX_VALUE + n + 1;
-        }
-        b.append(p);
-        String s = Integer.toString(n);
-        for (int i = s.length(); i <= intLength; i++) {
-            b.append("0");
-        }
-        b.append(s);
-
-        return b.toString();
+    for (int d = minId; d <= maxId; d++) {
+      Document doc = new Document();
+      doc.add(new Field("id", pad(d), Field.Store.YES,
+          Field.Index.NOT_ANALYZED));
+      int r = index.allowNegativeRandomInts ? rand.nextInt() : rand
+          .nextInt(Integer.MAX_VALUE);
+      if (index.maxR < r) {
+        index.maxR = r;
+      }
+      if (r < index.minR) {
+        index.minR = r;
+      }
+      doc.add(new Field("rand", pad(r), Field.Store.YES,
+          Field.Index.NOT_ANALYZED));
+      doc.add(new Field("body", "body", Field.Store.YES,
+          Field.Index.NOT_ANALYZED));
+      writer.addDocument(doc);
     }
     
-    public BaseTestRangeFilter(String name) {
-        super(name);
-        rand = newRandom();
-        build(signedIndex);
-        build(unsignedIndex);
+    IndexReader ir = writer.getReader();
+    writer.close();
+    return ir;
+  }
+  
+  public void testPad() {
+    
+    int[] tests = new int[] {-9999999, -99560, -100, -3, -1, 0, 3, 9, 10, 1000,
+        999999999};
+    for (int i = 0; i < tests.length - 1; i++) {
+      int a = tests[i];
+      int b = tests[i + 1];
+      String aa = pad(a);
+      String bb = pad(b);
+      String label = a + ":" + aa + " vs " + b + ":" + bb;
+      assertEquals("length of " + label, aa.length(), bb.length());
+      assertTrue("compare less than " + label, aa.compareTo(bb) < 0);
     }
-    public BaseTestRangeFilter() {
-        rand = newRandom();
-        build(signedIndex);
-        build(unsignedIndex);
-    }
-    
-    private void build(TestIndex index) {
-        try {
-
-            /* build an index */
-            IndexWriter writer = new IndexWriter(index.index, new IndexWriterConfig(
-                TEST_VERSION_CURRENT, new MockAnalyzer())
-                .setOpenMode(OpenMode.CREATE));
-
-            for (int d = minId; d <= maxId; d++) {
-                Document doc = new Document();
-                doc.add(new Field("id",pad(d), Field.Store.YES, Field.Index.NOT_ANALYZED));
-                int r= index.allowNegativeRandomInts
-                    ? rand.nextInt() : rand.nextInt(Integer.MAX_VALUE);
-                if (index.maxR < r) {
-                    index.maxR = r;
-                }
-                if (r < index.minR) {
-                    index.minR = r;
-                }
-                doc.add(new Field("rand",pad(r), Field.Store.YES, Field.Index.NOT_ANALYZED));
-                doc.add(new Field("body","body", Field.Store.YES, Field.Index.NOT_ANALYZED));
-                writer.addDocument(doc);
-            }
-
-            writer.optimize();
-            writer.close();
-
-        } catch (Exception e) {
-            throw new RuntimeException("can't build index", e);
-        }
-
-    }
-
-    public void testPad() {
-
-        int[] tests = new int[] {
-            -9999999, -99560, -100, -3, -1, 0, 3, 9, 10, 1000, 999999999
-        };
-        for (int i = 0; i < tests.length - 1; i++) {
-            int a = tests[i];
-            int b = tests[i+1];
-            String aa = pad(a);
-            String bb = pad(b);
-            String label = a + ":" + aa + " vs " + b + ":" + bb;
-            assertEquals("length of " + label, aa.length(), bb.length());
-            assertTrue("compare less than " + label, aa.compareTo(bb) < 0);
-        }
-
-    }
-    
+  }
+  
 }
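A short worked check of the pad() trick above: a negative n is shifted by Integer.MAX_VALUE + n + 1 into a non-negative payload and prefixed with '-', which sorts before the '0' prefix used for non-negatives, so String order tracks int order (values taken from testPad):

    String a = pad(-3);   // "-02147483645"  (-3 -> MAX_VALUE - 3 + 1 = 2147483645)
    String b = pad(-1);   // "-02147483647"
    String c = pad(0);    // "000000000000"
    String d = pad(9);    // "000000000009"
    // all four are the same length, and
    // a < b < c < d lexicographically, matching -3 < -1 < 0 < 9
    assert a.compareTo(b) < 0 && b.compareTo(c) < 0 && c.compareTo(d) < 0;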
Index: lucene/src/test/org/apache/lucene/search/TestAutomatonQuery.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestAutomatonQuery.java	(revision 963689)
+++ lucene/src/test/org/apache/lucene/search/TestAutomatonQuery.java	(working copy)
@@ -18,13 +18,17 @@
  */
 
 import java.io.IOException;
+import java.util.Random;
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.automaton.Automaton;
@@ -32,15 +36,18 @@
 import org.apache.lucene.util.automaton.BasicOperations;
 
 public class TestAutomatonQuery extends LuceneTestCase {
+  private Directory directory;
+  private IndexReader reader;
   private IndexSearcher searcher;
-  
+
   private final String FN = "field";
   
   public void setUp() throws Exception {
     super.setUp();
-    RAMDirectory directory = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(directory, new MockAnalyzer(), true,
-        IndexWriter.MaxFieldLength.LIMITED);
+    Random random = newRandom();
+    directory = new RAMDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
     Document doc = new Document();
     Field titleField = new Field("title", "some title", Field.Store.NO,
         Field.Index.ANALYZED);
@@ -57,13 +64,15 @@
     field.setValue("doc three has some different stuff"
         + " with numbers 1234 5678.9 and letter b");
     writer.addDocument(doc);
-    writer.optimize();
+    reader = writer.getReader();
+    searcher = new IndexSearcher(reader);
     writer.close();
-    searcher = new IndexSearcher(directory, true);
   }
   
   public void tearDown() throws Exception {
     searcher.close();
+    reader.close();
+    directory.close();
     super.tearDown();
   }
Index: lucene/src/test/org/apache/lucene/search/TestBoolean2.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestBoolean2.java	(revision 963689)
+++ lucene/src/test/org/apache/lucene/search/TestBoolean2.java	(working copy)
@@ -23,8 +23,8 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.queryParser.ParseException;
@@ -42,6 +42,7 @@
   private IndexSearcher searcher;
   private IndexSearcher bigSearcher;
   private IndexReader reader;
+  private Random rnd;
   private static int NUM_EXTRA_DOCS = 6000;
 
   public static final String field = "field";
@@ -51,8 +52,9 @@
   @Override
   protected void setUp() throws Exception {
     super.setUp();
+    rnd = newRandom();
     RAMDirectory directory = new RAMDirectory();
-    IndexWriter writer= new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+    RandomIndexWriter writer= new RandomIndexWriter(rnd, directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
     for (int i = 0; i < docFields.length; i++) {
       Document doc = new Document();
       doc.add(new Field(field, docFields[i], Field.Store.NO, Field.Index.ANALYZED));
@@ -69,14 +71,14 @@
     int docCount = 0;
     do {
       final Directory copy = new RAMDirectory(dir2);
-      IndexWriter w = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+      RandomIndexWriter w = new RandomIndexWriter(rnd, dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
       w.addIndexes(new Directory[] {copy});
       docCount = w.maxDoc();
       w.close();
       mulFactor *= 2;
     } while(docCount < 3000);
 
-    IndexWriter w = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+    RandomIndexWriter w = new RandomIndexWriter(rnd, dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
     Document doc = new Document();
     doc.add(new Field("field2", "xxx", Field.Store.NO, Field.Index.ANALYZED));
    for(int i=0;i
-   * same as TestRankingSimilarity in TestRanking.zip from
-   * http://issues.apache.org/jira/browse/LUCENE-323
-   * </p>
-   */
-  private static class TestSimilarity extends DefaultSimilarity {
-
-    public TestSimilarity() {
-    }
-    @Override
-    public float tf(float freq) {
-      if (freq > 0.0f) return 1.0f;
-      else return 0.0f;
-    }
-    @Override
-    public float lengthNorm(String fieldName, int numTerms) {
-      return 1.0f;
-    }
-    @Override
-    public float idf(int docFreq, int numDocs) {
-      return 1.0f;
-    }
+public class TestDisjunctionMaxQuery extends LuceneTestCase {
+  
+  /** threshold for comparing floats */
+  public static final float SCORE_COMP_THRESH = 0.0000f;
+  
+  /**
+   * Similarity to eliminate tf, idf and lengthNorm effects to isolate test
+   * case.
+   * 
+   * <p>
+   * same as TestRankingSimilarity in TestRanking.zip from
+   * http://issues.apache.org/jira/browse/LUCENE-323
+   * </p>
+   */
+  private static class TestSimilarity extends DefaultSimilarity {
+    
+    public TestSimilarity() {}
+    
+    @Override
+    public float tf(float freq) {
+      if (freq > 0.0f) return 1.0f;
+      else return 0.0f;
    }
-
-  public Similarity sim = new TestSimilarity();
-  public Directory index;
-  public IndexReader r;
-  public IndexSearcher s;
-
+    @Override
-  protected void setUp() throws Exception {
-    super.setUp();
-
-    index = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(index, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setSimilarity(sim));
-
-    // hed is the most important field, dek is secondary
-
-    // d1 is an "ok" match for: albino elephant
-    {
-      Document d1 = new Document();
-      d1.add(new Field("id", "d1", Field.Store.YES, Field.Index.NOT_ANALYZED));//Field.Keyword("id", "d1"));
-      d1.add(new Field("hed", "elephant", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("hed", "elephant"));
-      d1.add(new Field("dek", "elephant", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("dek", "elephant"));
-      writer.addDocument(d1);
-    }
-
-    // d2 is a "good" match for: albino elephant
-    {
-      Document d2 = new Document();
-      d2.add(new Field("id", "d2", Field.Store.YES, Field.Index.NOT_ANALYZED));//Field.Keyword("id", "d2"));
-      d2.add(new Field("hed", "elephant", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("hed", "elephant"));
-      d2.add(new Field("dek", "albino", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("dek", "albino"));
-      d2.add(new Field("dek", "elephant", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("dek", "elephant"));
-      writer.addDocument(d2);
-    }
-
-    // d3 is a "better" match for: albino elephant
-    {
-      Document d3 = new Document();
-      d3.add(new Field("id", "d3", Field.Store.YES, Field.Index.NOT_ANALYZED));//Field.Keyword("id", "d3"));
-      d3.add(new Field("hed", "albino", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("hed", "albino"));
-      d3.add(new Field("hed", "elephant", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("hed", "elephant"));
-      writer.addDocument(d3);
-    }
-
-    // d4 is the "best" match for: albino elephant
-    {
-      Document d4 = new Document();
-      d4.add(new Field("id", "d4", Field.Store.YES, Field.Index.NOT_ANALYZED));//Field.Keyword("id", "d4"));
-      d4.add(new Field("hed", "albino", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("hed", "albino"));
-      d4.add(new Field("hed", "elephant", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("hed", "elephant"));
-      d4.add(new Field("dek", "albino", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("dek", "albino"));
-      writer.addDocument(d4);
-    }
-
-    writer.close();
-
-    r = IndexReader.open(index, true);
-    s = new IndexSearcher(r);
-    s.setSimilarity(sim);
+    public float lengthNorm(String fieldName, int numTerms) {
+      return 1.0f;
   }
-
+    
+    @Override
+    public float idf(int docFreq, int numDocs) {
+      return 1.0f;
+    }
+  }
+  
+  public Similarity sim = new TestSimilarity();
+  public Directory index;
+  public IndexReader r;
+  public IndexSearcher s;
+  
+  @Override
+  protected void setUp() throws Exception {
+    super.setUp();
+    
+    index = new RAMDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(newRandom(), index,
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
+            .setSimilarity(sim));
+    
+    // hed is the most important field, dek is secondary
+    
+    // d1 is an "ok" match for: albino elephant
+    {
+      Document d1 = new Document();
+      d1.add(new Field("id", "d1", Field.Store.YES, Field.Index.NOT_ANALYZED));// Field.Keyword("id",
+                                                                               // "d1"));
+      d1
+          .add(new Field("hed", "elephant", Field.Store.YES,
+              Field.Index.ANALYZED));// Field.Text("hed", "elephant"));
+      d1
+          .add(new Field("dek", "elephant", Field.Store.YES,
+              Field.Index.ANALYZED));// Field.Text("dek", "elephant"));
+      writer.addDocument(d1);
+    }
+    
+    // d2 is a "good" match for: albino elephant
+    {
+      Document d2 = new Document();
+      d2.add(new Field("id", "d2", Field.Store.YES, Field.Index.NOT_ANALYZED));// Field.Keyword("id",
+                                                                               // "d2"));
+      d2
+          .add(new Field("hed", "elephant", Field.Store.YES,
+              Field.Index.ANALYZED));// Field.Text("hed", "elephant"));
+      d2.add(new Field("dek", "albino", Field.Store.YES, Field.Index.ANALYZED));// Field.Text("dek",
+                                                                                // "albino"));
+      d2
+          .add(new Field("dek", "elephant", Field.Store.YES,
+              Field.Index.ANALYZED));// Field.Text("dek", "elephant"));
+      writer.addDocument(d2);
+    }
+    
+    // d3 is a "better" match for: albino elephant
+    {
+      Document d3 = new Document();
+      d3.add(new Field("id", "d3", Field.Store.YES, Field.Index.NOT_ANALYZED));// Field.Keyword("id",
+                                                                               // "d3"));
+      d3.add(new Field("hed", "albino", Field.Store.YES, Field.Index.ANALYZED));// Field.Text("hed",
+                                                                                // "albino"));
+      d3
+          .add(new Field("hed", "elephant", Field.Store.YES,
+              Field.Index.ANALYZED));// Field.Text("hed", "elephant"));
+      writer.addDocument(d3);
+    }
+    
+    // d4 is the "best" match for: albino elephant
+    {
+      Document d4 = new Document();
+      d4.add(new Field("id", "d4", Field.Store.YES, Field.Index.NOT_ANALYZED));// Field.Keyword("id",
+                                                                               // "d4"));
+      d4.add(new Field("hed", "albino", Field.Store.YES, Field.Index.ANALYZED));// Field.Text("hed",
+                                                                                // "albino"));
+      d4
+          .add(new Field("hed", "elephant", Field.Store.YES,
+              Field.Index.ANALYZED));// Field.Text("hed", "elephant"));
+      d4.add(new Field("dek", "albino", Field.Store.YES, Field.Index.ANALYZED));// Field.Text("dek",
+                                                                                // "albino"));
+      writer.addDocument(d4);
+    }
+    
+    r = writer.getReader();
+    writer.close();
+    s = new IndexSearcher(r);
+    s.setSimilarity(sim);
+  }
+  
+  @Override
+  protected void tearDown() throws Exception {
+    s.close();
+    r.close();
+    index.close();
+    super.tearDown();
+  }
+  
   public void testSkipToFirsttimeMiss() throws IOException {
     final DisjunctionMaxQuery dq = new DisjunctionMaxQuery(0.0f);
-    dq.add(tq("id","d1"));
-    dq.add(tq("dek","DOES_NOT_EXIST"));
-
-    QueryUtils.check(dq,s);
-
+    dq.add(tq("id", "d1"));
+    dq.add(tq("dek", "DOES_NOT_EXIST"));
+    
+    QueryUtils.check(dq, s);
+    
     final Weight dw = dq.weight(s);
     final Scorer ds = dw.scorer(r, true, false);
     final boolean skipOk = ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS;
     if (skipOk) {
-      fail("firsttime skipTo found a match? ... " + r.document(ds.docID()).get("id"));
+      fail("firsttime skipTo found a match? ... "
+          + r.document(ds.docID()).get("id"));
     }
   }
-
+  
   public void testSkipToFirsttimeHit() throws IOException {
     final DisjunctionMaxQuery dq = new DisjunctionMaxQuery(0.0f);
-    dq.add(tq("dek","albino"));
-    dq.add(tq("dek","DOES_NOT_EXIST"));
-
-    QueryUtils.check(dq,s);
-
+    dq.add(tq("dek", "albino"));
+    dq.add(tq("dek", "DOES_NOT_EXIST"));
+    
+    QueryUtils.check(dq, s);
+    
     final Weight dw = dq.weight(s);
     final Scorer ds = dw.scorer(r, true, false);
-    assertTrue("firsttime skipTo found no match", ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
+    assertTrue("firsttime skipTo found no match",
+        ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
     assertEquals("found wrong docid", "d4", r.document(ds.docID()).get("id"));
   }
-
+  
   public void testSimpleEqualScores1() throws Exception {
-
+    
     DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.0f);
-    q.add(tq("hed","albino"));
-    q.add(tq("hed","elephant"));
-    QueryUtils.check(q,s);
-
+    q.add(tq("hed", "albino"));
+    q.add(tq("hed", "elephant"));
+    QueryUtils.check(q, s);
+    
     ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
-
+    
     try {
-      assertEquals("all docs should match " + q.toString(),
-                   4, h.length);
-
+      assertEquals("all docs should match " + q.toString(), 4, h.length);
+      
       float score = h[0].score;
       for (int i = 1; i < h.length; i++) {
-        assertEquals("score #" + i + " is not the same",
-                     score, h[i].score, SCORE_COMP_THRESH);
+        assertEquals("score #" + i + " is not the same", score, h[i].score,
+            SCORE_COMP_THRESH);
       }
     } catch (Error e) {
-      printHits("testSimpleEqualScores1",h,s);
+      printHits("testSimpleEqualScores1", h, s);
       throw e;
     }
-
+    
   }
-
-  public void testSimpleEqualScores2() throws Exception {
-
-    DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.0f);
-    q.add(tq("dek","albino"));
-    q.add(tq("dek","elephant"));
-    QueryUtils.check(q,s);
-
-
-    ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
-
-    try {
-      assertEquals("3 docs should match " + q.toString(),
-                   3, h.length);
-      float score = h[0].score;
-      for (int i = 1; i < h.length; i++) {
-        assertEquals("score #" + i + " is not the same",
-                     score, h[i].score, SCORE_COMP_THRESH);
-      }
-    } catch (Error e) {
-      printHits("testSimpleEqualScores2",h, s);
-      throw e;
-    }
-
+  
+  public void testSimpleEqualScores2() throws Exception {
+    
+    DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.0f);
+    q.add(tq("dek", "albino"));
+    q.add(tq("dek", "elephant"));
+    QueryUtils.check(q, s);
+    
+    ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+    
+    try {
+      assertEquals("3 docs should match " + q.toString(), 3, h.length);
+      float score = h[0].score;
+      for (int i = 1; i < h.length; i++) {
+        assertEquals("score #" + i + " is not the same", score, h[i].score,
+            SCORE_COMP_THRESH);
+      }
+    } catch (Error e) {
+      printHits("testSimpleEqualScores2", h, s);
+      throw e;
     }
-
-  public void testSimpleEqualScores3() throws Exception {
-
-    DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.0f);
-    q.add(tq("hed","albino"));
-    q.add(tq("hed","elephant"));
-    q.add(tq("dek","albino"));
-    q.add(tq("dek","elephant"));
-    QueryUtils.check(q,s);
-
-
-    ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
-
-    try {
-      assertEquals("all docs should match " + q.toString(),
-                   4, h.length);
-      float score = h[0].score;
-      for (int i = 1; i < h.length; i++) {
-        assertEquals("score #" + i + " is not the same",
-                     score, h[i].score, SCORE_COMP_THRESH);
-      }
-    } catch (Error e) {
-      printHits("testSimpleEqualScores3",h, s);
-      throw e;
-    }
-
+    
+  }
+  
+  public void testSimpleEqualScores3() throws Exception {
+    
+    DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.0f);
+    q.add(tq("hed", "albino"));
+    q.add(tq("hed", "elephant"));
+    q.add(tq("dek", "albino"));
+    q.add(tq("dek", "elephant"));
+    QueryUtils.check(q, s);
+    
+    ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+    
+    try {
+      assertEquals("all docs should match " + q.toString(), 4, h.length);
+      float score = h[0].score;
+      for (int i = 1; i < h.length; i++) {
+        assertEquals("score #" + i + " is not the same", score, h[i].score,
+            SCORE_COMP_THRESH);
+      }
+    } catch (Error e) {
+      printHits("testSimpleEqualScores3", h, s);
+      throw e;
    }
-
-  public void testSimpleTiebreaker() throws Exception {
-
-    DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.01f);
-    q.add(tq("dek","albino"));
-    q.add(tq("dek","elephant"));
-    QueryUtils.check(q,s);
-
-
-    ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
-
-    try {
-      assertEquals("3 docs should match " + q.toString(),
-                   3, h.length);
-      assertEquals("wrong first", "d2", s.doc(h[0].doc).get("id"));
-      float score0 = h[0].score;
-      float score1 = h[1].score;
-      float score2 = h[2].score;
-      assertTrue("d2 does not have better score then others: " +
-                 score0 + " >? " + score1,
-                 score0 > score1);
-      assertEquals("d4 and d1 don't have equal scores",
-                   score1, score2, SCORE_COMP_THRESH);
-    } catch (Error e) {
-      printHits("testSimpleTiebreaker",h, s);
-      throw e;
-    }
+    
+  }
+  
+  public void testSimpleTiebreaker() throws Exception {
+    
+    DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.01f);
+    q.add(tq("dek", "albino"));
+    q.add(tq("dek", "elephant"));
+    QueryUtils.check(q, s);
+    
+    ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+    
+    try {
+      assertEquals("3 docs should match " + q.toString(), 3, h.length);
+      assertEquals("wrong first", "d2", s.doc(h[0].doc).get("id"));
+      float score0 = h[0].score;
+      float score1 = h[1].score;
+      float score2 = h[2].score;
+      assertTrue("d2 does not have better score then others: " + score0
+          + " >? 
" + score1, score0 > score1); + assertEquals("d4 and d1 don't have equal scores", score1, score2, + SCORE_COMP_THRESH); + } catch (Error e) { + printHits("testSimpleTiebreaker", h, s); + throw e; } - - public void testBooleanRequiredEqualScores() throws Exception { - - BooleanQuery q = new BooleanQuery(); - { - DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.0f); - q1.add(tq("hed","albino")); - q1.add(tq("dek","albino")); - q.add(q1,BooleanClause.Occur.MUST);//true,false); - QueryUtils.check(q1,s); - - } - { - DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.0f); - q2.add(tq("hed","elephant")); - q2.add(tq("dek","elephant")); - q.add(q2, BooleanClause.Occur.MUST);//true,false); - QueryUtils.check(q2,s); - } - - QueryUtils.check(q,s); - - ScoreDoc[] h = s.search(q, null, 1000).scoreDocs; - - try { - assertEquals("3 docs should match " + q.toString(), - 3, h.length); - float score = h[0].score; - for (int i = 1; i < h.length; i++) { - assertEquals("score #" + i + " is not the same", - score, h[i].score, SCORE_COMP_THRESH); - } - } catch (Error e) { - printHits("testBooleanRequiredEqualScores1",h, s); - throw e; - } + } + + public void testBooleanRequiredEqualScores() throws Exception { + + BooleanQuery q = new BooleanQuery(); + { + DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.0f); + q1.add(tq("hed", "albino")); + q1.add(tq("dek", "albino")); + q.add(q1, BooleanClause.Occur.MUST);// true,false); + QueryUtils.check(q1, s); + } - - - public void testBooleanOptionalNoTiebreaker() throws Exception { - - BooleanQuery q = new BooleanQuery(); - { - DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.0f); - q1.add(tq("hed","albino")); - q1.add(tq("dek","albino")); - q.add(q1, BooleanClause.Occur.SHOULD);//false,false); - } - { - DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.0f); - q2.add(tq("hed","elephant")); - q2.add(tq("dek","elephant")); - q.add(q2, BooleanClause.Occur.SHOULD);//false,false); - } - QueryUtils.check(q,s); - - - ScoreDoc[] h = s.search(q, null, 1000).scoreDocs; - - try { - assertEquals("4 docs should match " + q.toString(), - 4, h.length); - float score = h[0].score; - for (int i = 1; i < h.length-1; i++) { /* note: -1 */ - assertEquals("score #" + i + " is not the same", - score, h[i].score, SCORE_COMP_THRESH); - } - assertEquals("wrong last", "d1", s.doc(h[h.length-1].doc).get("id")); - float score1 = h[h.length-1].score; - assertTrue("d1 does not have worse score then others: " + - score + " >? 
" + score1, - score > score1); - } catch (Error e) { - printHits("testBooleanOptionalNoTiebreaker",h, s); - throw e; - } + { + DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.0f); + q2.add(tq("hed", "elephant")); + q2.add(tq("dek", "elephant")); + q.add(q2, BooleanClause.Occur.MUST);// true,false); + QueryUtils.check(q2, s); } - - - public void testBooleanOptionalWithTiebreaker() throws Exception { - - BooleanQuery q = new BooleanQuery(); - { - DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.01f); - q1.add(tq("hed","albino")); - q1.add(tq("dek","albino")); - q.add(q1, BooleanClause.Occur.SHOULD);//false,false); - } - { - DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.01f); - q2.add(tq("hed","elephant")); - q2.add(tq("dek","elephant")); - q.add(q2, BooleanClause.Occur.SHOULD);//false,false); - } - QueryUtils.check(q,s); - - - ScoreDoc[] h = s.search(q, null, 1000).scoreDocs; - - try { - - assertEquals("4 docs should match " + q.toString(), - 4, h.length); - - float score0 = h[0].score; - float score1 = h[1].score; - float score2 = h[2].score; - float score3 = h[3].score; - - String doc0 = s.doc(h[0].doc).get("id"); - String doc1 = s.doc(h[1].doc).get("id"); - String doc2 = s.doc(h[2].doc).get("id"); - String doc3 = s.doc(h[3].doc).get("id"); - - assertTrue("doc0 should be d2 or d4: " + doc0, - doc0.equals("d2") || doc0.equals("d4")); - assertTrue("doc1 should be d2 or d4: " + doc0, - doc1.equals("d2") || doc1.equals("d4")); - assertEquals("score0 and score1 should match", - score0, score1, SCORE_COMP_THRESH); - assertEquals("wrong third", "d3", doc2); - assertTrue("d3 does not have worse score then d2 and d4: " + - score1 + " >? " + score2, - score1 > score2); - - assertEquals("wrong fourth", "d1", doc3); - assertTrue("d1 does not have worse score then d3: " + - score2 + " >? 
" + score3, - score2 > score3); - - } catch (Error e) { - printHits("testBooleanOptionalWithTiebreaker",h, s); - throw e; - } - + + QueryUtils.check(q, s); + + ScoreDoc[] h = s.search(q, null, 1000).scoreDocs; + + try { + assertEquals("3 docs should match " + q.toString(), 3, h.length); + float score = h[0].score; + for (int i = 1; i < h.length; i++) { + assertEquals("score #" + i + " is not the same", score, h[i].score, + SCORE_COMP_THRESH); + } + } catch (Error e) { + printHits("testBooleanRequiredEqualScores1", h, s); + throw e; } - - - public void testBooleanOptionalWithTiebreakerAndBoost() throws Exception { - - BooleanQuery q = new BooleanQuery(); - { - DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.01f); - q1.add(tq("hed","albino", 1.5f)); - q1.add(tq("dek","albino")); - q.add(q1, BooleanClause.Occur.SHOULD);//false,false); - } - { - DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.01f); - q2.add(tq("hed","elephant", 1.5f)); - q2.add(tq("dek","elephant")); - q.add(q2, BooleanClause.Occur.SHOULD);//false,false); - } - QueryUtils.check(q,s); - - - ScoreDoc[] h = s.search(q, null, 1000).scoreDocs; - - try { - - assertEquals("4 docs should match " + q.toString(), - 4, h.length); - - float score0 = h[0].score; - float score1 = h[1].score; - float score2 = h[2].score; - float score3 = h[3].score; - - String doc0 = s.doc(h[0].doc).get("id"); - String doc1 = s.doc(h[1].doc).get("id"); - String doc2 = s.doc(h[2].doc).get("id"); - String doc3 = s.doc(h[3].doc).get("id"); - - assertEquals("doc0 should be d4: ", "d4", doc0); - assertEquals("doc1 should be d3: ", "d3", doc1); - assertEquals("doc2 should be d2: ", "d2", doc2); - assertEquals("doc3 should be d1: ", "d1", doc3); - - assertTrue("d4 does not have a better score then d3: " + - score0 + " >? " + score1, - score0 > score1); - assertTrue("d3 does not have a better score then d2: " + - score1 + " >? " + score2, - score1 > score2); - assertTrue("d3 does not have a better score then d1: " + - score2 + " >? " + score3, - score2 > score3); - - } catch (Error e) { - printHits("testBooleanOptionalWithTiebreakerAndBoost",h, s); - throw e; - } + } + + public void testBooleanOptionalNoTiebreaker() throws Exception { + + BooleanQuery q = new BooleanQuery(); + { + DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.0f); + q1.add(tq("hed", "albino")); + q1.add(tq("dek", "albino")); + q.add(q1, BooleanClause.Occur.SHOULD);// false,false); } - - - - - - - - /** macro */ - protected Query tq(String f, String t) { - return new TermQuery(new Term(f, t)); + { + DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.0f); + q2.add(tq("hed", "elephant")); + q2.add(tq("dek", "elephant")); + q.add(q2, BooleanClause.Occur.SHOULD);// false,false); } - /** macro */ - protected Query tq(String f, String t, float b) { - Query q = tq(f,t); - q.setBoost(b); - return q; + QueryUtils.check(q, s); + + ScoreDoc[] h = s.search(q, null, 1000).scoreDocs; + + try { + assertEquals("4 docs should match " + q.toString(), 4, h.length); + float score = h[0].score; + for (int i = 1; i < h.length - 1; i++) { /* note: -1 */ + assertEquals("score #" + i + " is not the same", score, h[i].score, + SCORE_COMP_THRESH); + } + assertEquals("wrong last", "d1", s.doc(h[h.length - 1].doc).get("id")); + float score1 = h[h.length - 1].score; + assertTrue("d1 does not have worse score then others: " + score + " >? 
" + + score1, score > score1); + } catch (Error e) { + printHits("testBooleanOptionalNoTiebreaker", h, s); + throw e; } - - - protected void printHits(String test, ScoreDoc[] h, Searcher searcher) throws Exception { - - System.err.println("------- " + test + " -------"); - - DecimalFormat f = new DecimalFormat("0.000000000"); - - for (int i = 0; i < h.length; i++) { - Document d = searcher.doc(h[i].doc); - float score = h[i].score; - System.err.println("#" + i + ": " + f.format(score) + " - " + - d.get("id")); - } + } + + public void testBooleanOptionalWithTiebreaker() throws Exception { + + BooleanQuery q = new BooleanQuery(); + { + DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.01f); + q1.add(tq("hed", "albino")); + q1.add(tq("dek", "albino")); + q.add(q1, BooleanClause.Occur.SHOULD);// false,false); } - + { + DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.01f); + q2.add(tq("hed", "elephant")); + q2.add(tq("dek", "elephant")); + q.add(q2, BooleanClause.Occur.SHOULD);// false,false); + } + QueryUtils.check(q, s); + + ScoreDoc[] h = s.search(q, null, 1000).scoreDocs; + + try { + + assertEquals("4 docs should match " + q.toString(), 4, h.length); + + float score0 = h[0].score; + float score1 = h[1].score; + float score2 = h[2].score; + float score3 = h[3].score; + + String doc0 = s.doc(h[0].doc).get("id"); + String doc1 = s.doc(h[1].doc).get("id"); + String doc2 = s.doc(h[2].doc).get("id"); + String doc3 = s.doc(h[3].doc).get("id"); + + assertTrue("doc0 should be d2 or d4: " + doc0, doc0.equals("d2") + || doc0.equals("d4")); + assertTrue("doc1 should be d2 or d4: " + doc0, doc1.equals("d2") + || doc1.equals("d4")); + assertEquals("score0 and score1 should match", score0, score1, + SCORE_COMP_THRESH); + assertEquals("wrong third", "d3", doc2); + assertTrue("d3 does not have worse score then d2 and d4: " + score1 + + " >? " + score2, score1 > score2); + + assertEquals("wrong fourth", "d1", doc3); + assertTrue("d1 does not have worse score then d3: " + score2 + " >? " + + score3, score2 > score3); + + } catch (Error e) { + printHits("testBooleanOptionalWithTiebreaker", h, s); + throw e; + } + + } + + public void testBooleanOptionalWithTiebreakerAndBoost() throws Exception { + + BooleanQuery q = new BooleanQuery(); + { + DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.01f); + q1.add(tq("hed", "albino", 1.5f)); + q1.add(tq("dek", "albino")); + q.add(q1, BooleanClause.Occur.SHOULD);// false,false); + } + { + DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.01f); + q2.add(tq("hed", "elephant", 1.5f)); + q2.add(tq("dek", "elephant")); + q.add(q2, BooleanClause.Occur.SHOULD);// false,false); + } + QueryUtils.check(q, s); + + ScoreDoc[] h = s.search(q, null, 1000).scoreDocs; + + try { + + assertEquals("4 docs should match " + q.toString(), 4, h.length); + + float score0 = h[0].score; + float score1 = h[1].score; + float score2 = h[2].score; + float score3 = h[3].score; + + String doc0 = s.doc(h[0].doc).get("id"); + String doc1 = s.doc(h[1].doc).get("id"); + String doc2 = s.doc(h[2].doc).get("id"); + String doc3 = s.doc(h[3].doc).get("id"); + + assertEquals("doc0 should be d4: ", "d4", doc0); + assertEquals("doc1 should be d3: ", "d3", doc1); + assertEquals("doc2 should be d2: ", "d2", doc2); + assertEquals("doc3 should be d1: ", "d1", doc3); + + assertTrue("d4 does not have a better score then d3: " + score0 + " >? " + + score1, score0 > score1); + assertTrue("d3 does not have a better score then d2: " + score1 + " >? 
" + + score2, score1 > score2); + assertTrue("d3 does not have a better score then d1: " + score2 + " >? " + + score3, score2 > score3); + + } catch (Error e) { + printHits("testBooleanOptionalWithTiebreakerAndBoost", h, s); + throw e; + } + } + + /** macro */ + protected Query tq(String f, String t) { + return new TermQuery(new Term(f, t)); + } + + /** macro */ + protected Query tq(String f, String t, float b) { + Query q = tq(f, t); + q.setBoost(b); + return q; + } + + protected void printHits(String test, ScoreDoc[] h, Searcher searcher) + throws Exception { + + System.err.println("------- " + test + " -------"); + + DecimalFormat f = new DecimalFormat("0.000000000"); + + for (int i = 0; i < h.length; i++) { + Document d = searcher.doc(h[i].doc); + float score = h[i].score; + System.err + .println("#" + i + ": " + f.format(score) + " - " + d.get("id")); + } + } } Index: lucene/src/test/org/apache/lucene/search/TestSimilarity.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestSimilarity.java (revision 963689) +++ lucene/src/test/org/apache/lucene/search/TestSimilarity.java (working copy) @@ -23,6 +23,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.store.RAMDirectory; @@ -64,8 +65,9 @@ public void testSimilarity() throws Exception { RAMDirectory store = new RAMDirectory(); - IndexWriter writer = new IndexWriter(store, new IndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer()).setSimilarity(new SimpleSimilarity())); + RandomIndexWriter writer = new RandomIndexWriter(newRandom(), store, + new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()) + .setSimilarity(new SimpleSimilarity())); Document d1 = new Document(); d1.add(new Field("field", "a c", Field.Store.YES, Field.Index.ANALYZED)); @@ -75,10 +77,10 @@ writer.addDocument(d1); writer.addDocument(d2); - writer.optimize(); + IndexReader reader = writer.getReader(); writer.close(); - Searcher searcher = new IndexSearcher(store, true); + Searcher searcher = new IndexSearcher(reader); searcher.setSimilarity(new SimpleSimilarity()); Term a = new Term("field", "a"); @@ -173,5 +175,9 @@ return true; } }); + + searcher.close(); + reader.close(); + store.close(); } } Index: lucene/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java (revision 963689) +++ lucene/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java (working copy) @@ -40,16 +40,9 @@ */ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter { - public TestFieldCacheRangeFilter(String name) { - super(name); - } - public TestFieldCacheRangeFilter() { - super(); - } - public void testRangeFilterId() throws IOException { - IndexReader reader = IndexReader.open(signedIndex.index, true); + IndexReader reader = signedIndexReader; IndexSearcher search = new IndexSearcher(reader); int medId = ((maxId - minId) / 2); @@ -133,11 +126,11 @@ public void testFieldCacheRangeFilterRand() throws IOException { - IndexReader reader = IndexReader.open(signedIndex.index, true); + IndexReader reader = signedIndexReader; IndexSearcher search = new IndexSearcher(reader); - String minRP = pad(signedIndex.minR); - String maxRP = 
pad(signedIndex.maxR); + String minRP = pad(signedIndexDir.minR); + String maxRP = pad(signedIndexDir.maxR); int numDocs = reader.numDocs(); @@ -196,7 +189,7 @@ public void testFieldCacheRangeFilterShorts() throws IOException { - IndexReader reader = IndexReader.open(signedIndex.index, true); + IndexReader reader = signedIndexReader; IndexSearcher search = new IndexSearcher(reader); int numDocs = reader.numDocs(); @@ -285,7 +278,7 @@ public void testFieldCacheRangeFilterInts() throws IOException { - IndexReader reader = IndexReader.open(signedIndex.index, true); + IndexReader reader = signedIndexReader; IndexSearcher search = new IndexSearcher(reader); int numDocs = reader.numDocs(); @@ -375,7 +368,7 @@ public void testFieldCacheRangeFilterLongs() throws IOException { - IndexReader reader = IndexReader.open(signedIndex.index, true); + IndexReader reader = signedIndexReader; IndexSearcher search = new IndexSearcher(reader); int numDocs = reader.numDocs(); @@ -467,7 +460,7 @@ public void testFieldCacheRangeFilterFloats() throws IOException { - IndexReader reader = IndexReader.open(signedIndex.index, true); + IndexReader reader = signedIndexReader; IndexSearcher search = new IndexSearcher(reader); int numDocs = reader.numDocs(); @@ -495,7 +488,7 @@ public void testFieldCacheRangeFilterDoubles() throws IOException { - IndexReader reader = IndexReader.open(signedIndex.index, true); + IndexReader reader = signedIndexReader; IndexSearcher search = new IndexSearcher(reader); int numDocs = reader.numDocs(); Index: lucene/src/test/org/apache/lucene/search/TestTopScoreDocCollector.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestTopScoreDocCollector.java (revision 963689) +++ lucene/src/test/org/apache/lucene/search/TestTopScoreDocCollector.java (working copy) @@ -17,10 +17,13 @@ * limitations under the License. */ +import java.util.Random; + import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; -import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; @@ -38,12 +41,12 @@ public void testOutOfOrderCollection() throws Exception { Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); + Random random = newRandom(); + RandomIndexWriter writer = new RandomIndexWriter(random, dir, + new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); for (int i = 0; i < 10; i++) { writer.addDocument(new Document()); } - writer.commit(); - writer.close(); boolean[] inOrder = new boolean[] { false, true }; String[] actualTSDCClass = new String[] { @@ -58,7 +61,8 @@ // Set minNrShouldMatch to 1 so that BQ will not optimize rewrite to return // the clause instead of BQ. 
bq.setMinimumNumberShouldMatch(1); - IndexSearcher searcher = new IndexSearcher(dir, true); + IndexReader reader = writer.getReader(); + IndexSearcher searcher = new IndexSearcher(reader); for (int i = 0; i < inOrder.length; i++) { TopDocsCollector tdc = TopScoreDocCollector.create(3, inOrder[i]); assertEquals("org.apache.lucene.search.TopScoreDocCollector$" + actualTSDCClass[i], tdc.getClass().getName()); @@ -71,6 +75,10 @@ assertEquals("expected doc Id " + j + " found " + sd[j].doc, j, sd[j].doc); } } + writer.close(); + searcher.close(); + reader.close(); + dir.close(); } } Index: lucene/src/test/org/apache/lucene/search/TestRegexpRandom.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestRegexpRandom.java (revision 963689) +++ lucene/src/test/org/apache/lucene/search/TestRegexpRandom.java (working copy) @@ -26,8 +26,11 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; -import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; +import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util._TestUtil; @@ -39,13 +42,17 @@ */ public class TestRegexpRandom extends LuceneTestCase { private Searcher searcher; + private IndexReader reader; + private Directory dir; private Random random; @Override protected void setUp() throws Exception { super.setUp(); - RAMDirectory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new MockAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED); + random = newRandom(); + dir = new RAMDirectory(); + RandomIndexWriter writer = new RandomIndexWriter(random, dir, + new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); Document doc = new Document(); Field field = new Field("field", "", Field.Store.NO, Field.Index.ANALYZED); @@ -57,9 +64,9 @@ writer.addDocument(doc); } - writer.optimize(); + reader = writer.getReader(); writer.close(); - searcher = new IndexSearcher(dir); + searcher = new IndexSearcher(reader); } private char N() { @@ -89,11 +96,12 @@ @Override protected void tearDown() throws Exception { searcher.close(); + reader.close(); + dir.close(); super.tearDown(); } public void testRegexps() throws Exception { - random = newRandom(System.nanoTime()); for (int i = 0; i < 100*_TestUtil.getRandomMultiplier(); i++) { assertPatternHits("NNNN", 1); assertPatternHits(".NNN", 10); Index: lucene/src/test/org/apache/lucene/search/TestSpanQueryFilter.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestSpanQueryFilter.java (revision 963689) +++ lucene/src/test/org/apache/lucene/search/TestSpanQueryFilter.java (working copy) @@ -22,8 +22,8 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.search.spans.SpanTermQuery; import org.apache.lucene.store.Directory; @@ -40,17 +40,17 @@ public void testFilterWorks() throws Exception { Directory dir = new RAMDirectory(); - 
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); + RandomIndexWriter writer = new RandomIndexWriter(newRandom(), dir, + new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); for (int i = 0; i < 500; i++) { Document document = new Document(); document.add(new Field("field", English.intToEnglish(i) + " equals " + English.intToEnglish(i), Field.Store.NO, Field.Index.ANALYZED)); writer.addDocument(document); } + IndexReader reader = writer.getReader(); writer.close(); - IndexReader reader = IndexReader.open(dir, true); - SpanTermQuery query = new SpanTermQuery(new Term("field", English.intToEnglish(10).trim())); SpanQueryFilter filter = new SpanQueryFilter(query); SpanFilterResult result = filter.bitSpans(reader); @@ -69,6 +69,7 @@ assertTrue("info.getPositions() Size: " + info.getPositions().size() + " is not: " + 2, info.getPositions().size() == 2); } reader.close(); + dir.close(); } int getDocIdSetSize(DocIdSet docIdSet) throws Exception { Index: lucene/src/test/org/apache/lucene/search/TestCustomSearcherSort.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestCustomSearcherSort.java (revision 963689) +++ lucene/src/test/org/apache/lucene/search/TestCustomSearcherSort.java (working copy) @@ -24,17 +24,13 @@ import java.util.Random; import java.util.TreeMap; -import junit.framework.Test; -import junit.framework.TestSuite; -import junit.textui.TestRunner; - import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.DateTools; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; @@ -43,231 +39,236 @@ /** * Unit test for sorting code. - * + * */ -public class TestCustomSearcherSort -extends LuceneTestCase -implements Serializable { - - private Directory index = null; - private Query query = null; - // reduced from 20000 to 2000 to speed up test... 
-  private final static int INDEX_SIZE = 2000*_TestUtil.getRandomMultiplier();
-  
-  public TestCustomSearcherSort (String name) {
-    super (name);
-  }
-  
-  public static void main (String[] argv) {
-    TestRunner.run (suite());
-  }
-  
-  public static Test suite() {
-    return new TestSuite (TestCustomSearcherSort.class);
-  }
-  
-  
-  // create an index for testing
-  private Directory getIndex()
-  throws IOException {
-    RAMDirectory indexStore = new RAMDirectory ();
-    IndexWriter writer = new IndexWriter (indexStore, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
-    RandomGen random = new RandomGen(newRandom());
-    for (int i=0; i resultMap = new TreeMap();
-    // store hits in TreeMap - TreeMap does not allow duplicates; existing entries are silently overwritten
-    for(int hitid=0;hitid resultMap = new TreeMap();
+    // store hits in TreeMap - TreeMap does not allow duplicates; existing
+    // entries are silently overwritten
+    for (int hitid = 0; hitid < hitsByRank.length; ++hitid) {
+      resultMap.put(Integer.valueOf(hitsByRank[hitid].doc), // Key: Lucene
+                                                            // Document ID
+          Integer.valueOf(hitid)); // Value: index in the hits
+    }
+    
+    // now make a query using the sort criteria
+    ScoreDoc[] resultSort = searcher.search(query, null, Integer.MAX_VALUE,
+        sort).scoreDocs;
     checkHits(resultSort, "Sort by custom criteria: "); // check for duplicates
-    // besides the sorting both sets of hits must be identical
-    for(int hitid=0;hitid remove it from the Map.
-      // At the end the Map must be empty!
-      resultMap.remove(idHitDate);
-    }
-    if(resultMap.size()==0) {
-      // log("All hits matched");
-    } else {
-      log("Couldn't match "+resultMap.size()+" hits.");
-    }
-    assertEquals(resultMap.size(), 0);
+    // besides the sorting both sets of hits must be identical
+    for (int hitid = 0; hitid < resultSort.length; ++hitid) {
+      Integer idHitDate = Integer.valueOf(resultSort[hitid].doc); // document ID
+                                                                  // from sorted
+                                                                  // search
+      if (!resultMap.containsKey(idHitDate)) {
+        log("ID " + idHitDate + " not found. Possibly a duplicate.");
+      }
+      assertTrue(resultMap.containsKey(idHitDate)); // same ID must be in the
+                                                    // Map from the rank-sorted
+                                                    // search
+      // every hit must appear once in both result sets --> remove it from the
+      // Map.
+      // At the end the Map must be empty!
+      resultMap.remove(idHitDate);
+    }
+    if (resultMap.size() == 0) {
+      // log("All hits matched");
+    } else {
+      log("Couldn't match " + resultMap.size() + " hits.");
+    }
+    assertEquals(resultMap.size(), 0);
   }
-  
+  
  /**
   * Check the hits for duplicates.
+ * * @param hits */ - private void checkHits(ScoreDoc[] hits, String prefix) { - if(hits!=null) { - Map idMap = new TreeMap(); - for(int docnum=0;docnum0) { + if (i>0 && searcher.getIndexReader().getSequentialSubReaders().length == 1) { assertEquals("Distinct term number is equal for all query types", lastTerms, terms); } lastTerms = terms; @@ -372,7 +379,7 @@ termCountT += tq.getTotalNumberOfTerms(); termCountC += cq.getTotalNumberOfTerms(); } - if (precisionStep == Integer.MAX_VALUE) { + if (precisionStep == Integer.MAX_VALUE && searcher.getIndexReader().getSequentialSubReaders().length == 1) { assertEquals("Total number of terms should be equal for unlimited precStep", termCountT, termCountC); } else if (VERBOSE) { System.out.println("Average number of terms during random search on '" + field + "':"); Index: lucene/src/test/org/apache/lucene/search/TestDocBoost.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestDocBoost.java (revision 963689) +++ lucene/src/test/org/apache/lucene/search/TestDocBoost.java (working copy) @@ -23,8 +23,8 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.*; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.store.RAMDirectory; @@ -40,8 +40,8 @@ public void testDocBoost() throws Exception { RAMDirectory store = new RAMDirectory(); - IndexWriter writer = new IndexWriter(store, new IndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer())); + RandomIndexWriter writer = new RandomIndexWriter(newRandom(), store, + new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); Fieldable f1 = new Field("field", "word", Field.Store.YES, Field.Index.ANALYZED); Fieldable f2 = new Field("field", "word", Field.Store.YES, Field.Index.ANALYZED); @@ -63,12 +63,13 @@ writer.addDocument(d2); writer.addDocument(d3); writer.addDocument(d4); - writer.optimize(); + + IndexReader reader = writer.getReader(); writer.close(); final float[] scores = new float[4]; - new IndexSearcher(store, true).search + new IndexSearcher(reader).search (new TermQuery(new Term("field", "word")), new Collector() { private int base = 0; @@ -97,5 +98,8 @@ assertTrue(scores[i] > lastScore); lastScore = scores[i]; } + + reader.close(); + store.close(); } } Index: lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java (revision 963689) +++ lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java (working copy) @@ -23,8 +23,10 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.NumericField; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCaseJ4; @@ -45,12 +47,15 @@ private static final int noDocs = 10000*_TestUtil.getRandomMultiplier(); private static RAMDirectory directory = null; + private static IndexReader reader = null; private static IndexSearcher searcher = null; @BeforeClass public static 
void beforeClass() throws Exception { directory = new RAMDirectory(); - IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); + Random random = newStaticRandom(TestNumericRangeQuery64.class); + RandomIndexWriter writer = new RandomIndexWriter(random, directory, + new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); NumericField field8 = new NumericField("field8", 8, Field.Store.YES, true), @@ -86,15 +91,17 @@ writer.addDocument(doc); } - writer.optimize(); + reader = writer.getReader(); + searcher=new IndexSearcher(reader); writer.close(); - searcher=new IndexSearcher(directory, true); } @AfterClass public static void afterClass() throws Exception { searcher.close(); searcher = null; + reader.close(); + reader = null; directory.close(); directory = null; } @@ -150,7 +157,7 @@ assertEquals("First doc"+type, 2*distance+startOffset, Long.parseLong(doc.get(field)) ); doc=searcher.doc(sd[sd.length-1].doc); assertEquals("Last doc"+type, (1+count)*distance+startOffset, Long.parseLong(doc.get(field)) ); - if (i>0) { + if (i>0 && searcher.getIndexReader().getSequentialSubReaders().length == 1) { assertEquals("Distinct term number is equal for all query types", lastTerms, terms); } lastTerms = terms; @@ -391,7 +398,7 @@ termCountT += tq.getTotalNumberOfTerms(); termCountC += cq.getTotalNumberOfTerms(); } - if (precisionStep == Integer.MAX_VALUE) { + if (precisionStep == Integer.MAX_VALUE && searcher.getIndexReader().getSequentialSubReaders().length == 1) { assertEquals("Total number of terms should be equal for unlimited precStep", termCountT, termCountC); } else if (VERBOSE) { System.out.println("Average number of terms during random search on '" + field + "':"); Index: lucene/src/test/org/apache/lucene/search/TestPrefixQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestPrefixQuery.java (revision 963689) +++ lucene/src/test/org/apache/lucene/search/TestPrefixQuery.java (working copy) @@ -19,8 +19,9 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.store.RAMDirectory; -import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; @@ -37,16 +38,17 @@ String[] categories = new String[] {"/Computers", "/Computers/Mac", "/Computers/Windows"}; - IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); + RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory, + new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); for (int i = 0; i < categories.length; i++) { Document doc = new Document(); doc.add(new Field("category", categories[i], Field.Store.YES, Field.Index.NOT_ANALYZED)); writer.addDocument(doc); } - writer.close(); + IndexReader reader = writer.getReader(); PrefixQuery query = new PrefixQuery(new Term("category", "/Computers")); - IndexSearcher searcher = new IndexSearcher(directory, true); + IndexSearcher searcher = new IndexSearcher(reader); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("All documents in /Computers category and below", 3, hits.length); @@ -58,5 +60,9 @@ assertFalse(query.getTermsEnum(searcher.getIndexReader()) instanceof PrefixTermsEnum); 
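+    // whichever terms enum the rewritten query ends up using, it should still
+    // match all three indexed categories (checked below)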
hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("everything", 3, hits.length); + writer.close(); + searcher.close(); + reader.close(); + directory.close(); } } Index: lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java (revision 963689) +++ lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java (working copy) @@ -18,10 +18,10 @@ */ import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.MultiFields; import org.apache.lucene.util.BytesRef; import org.apache.lucene.store.MockRAMDirectory; @@ -36,236 +36,245 @@ /** * This class tests the MultiPhraseQuery class. - * - * + * + * */ -public class TestMultiPhraseQuery extends LuceneTestCase -{ - public TestMultiPhraseQuery(String name) { - super(name); - } - - public void testPhrasePrefix() throws IOException { - MockRAMDirectory indexStore = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); - add("blueberry pie", writer); - add("blueberry strudel", writer); - add("blueberry pizza", writer); - add("blueberry chewing gum", writer); - add("bluebird pizza", writer); - add("bluebird foobar pizza", writer); - add("piccadilly circus", writer); - writer.optimize(); - writer.close(); - - IndexSearcher searcher = new IndexSearcher(indexStore, true); - - // search for "blueberry pi*": - MultiPhraseQuery query1 = new MultiPhraseQuery(); - // search for "strawberry pi*": - MultiPhraseQuery query2 = new MultiPhraseQuery(); - query1.add(new Term("body", "blueberry")); - query2.add(new Term("body", "strawberry")); - - LinkedList termsWithPrefix = new LinkedList(); - IndexReader ir = IndexReader.open(indexStore, true); - - // this TermEnum gives "piccadilly", "pie" and "pizza". 
- String prefix = "pi"; - TermsEnum te = MultiFields.getFields(ir).terms("body").iterator(); - te.seek(new BytesRef(prefix)); - do { - String s = te.term().utf8ToString(); - if (s.startsWith(prefix)) { - termsWithPrefix.add(new Term("body", s)); - } else { - break; - } - } while (te.next() != null); - - query1.add(termsWithPrefix.toArray(new Term[0])); - assertEquals("body:\"blueberry (piccadilly pie pizza)\"", query1.toString()); - query2.add(termsWithPrefix.toArray(new Term[0])); - assertEquals("body:\"strawberry (piccadilly pie pizza)\"", query2.toString()); - - ScoreDoc[] result; - result = searcher.search(query1, null, 1000).scoreDocs; - assertEquals(2, result.length); - result = searcher.search(query2, null, 1000).scoreDocs; - assertEquals(0, result.length); - - // search for "blue* pizza": - MultiPhraseQuery query3 = new MultiPhraseQuery(); - termsWithPrefix.clear(); - prefix = "blue"; - te.seek(new BytesRef(prefix)); - - do { - if (te.term().utf8ToString().startsWith(prefix)) - { - termsWithPrefix.add(new Term("body", te.term().utf8ToString())); - } - } while (te.next() != null); - ir.close(); - query3.add(termsWithPrefix.toArray(new Term[0])); - query3.add(new Term("body", "pizza")); - - result = searcher.search(query3, null, 1000).scoreDocs; - assertEquals(2, result.length); // blueberry pizza, bluebird pizza - assertEquals("body:\"(blueberry bluebird) pizza\"", query3.toString()); - - // test slop: - query3.setSlop(1); - result = searcher.search(query3, null, 1000).scoreDocs; - - // just make sure no exc: - searcher.explain(query3, 0); - - assertEquals(3, result.length); // blueberry pizza, bluebird pizza, bluebird foobar pizza - - MultiPhraseQuery query4 = new MultiPhraseQuery(); - try { - query4.add(new Term("field1", "foo")); - query4.add(new Term("field2", "foobar")); - fail(); - } catch(IllegalArgumentException e) { - // okay, all terms must belong to the same field - } - - searcher.close(); - indexStore.close(); - - } +public class TestMultiPhraseQuery extends LuceneTestCase { + public TestMultiPhraseQuery(String name) { + super(name); + } + + public void testPhrasePrefix() throws IOException { + MockRAMDirectory indexStore = new MockRAMDirectory(); + RandomIndexWriter writer = new RandomIndexWriter(newRandom(), indexStore, + new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); + add("blueberry pie", writer); + add("blueberry strudel", writer); + add("blueberry pizza", writer); + add("blueberry chewing gum", writer); + add("bluebird pizza", writer); + add("bluebird foobar pizza", writer); + add("piccadilly circus", writer); - private void add(String s, IndexWriter writer) throws IOException { - Document doc = new Document(); - doc.add(new Field("body", s, Field.Store.YES, Field.Index.ANALYZED)); - writer.addDocument(doc); + IndexReader reader = writer.getReader(); + IndexSearcher searcher = new IndexSearcher(reader); + + // search for "blueberry pi*": + MultiPhraseQuery query1 = new MultiPhraseQuery(); + // search for "strawberry pi*": + MultiPhraseQuery query2 = new MultiPhraseQuery(); + query1.add(new Term("body", "blueberry")); + query2.add(new Term("body", "strawberry")); + + LinkedList termsWithPrefix = new LinkedList(); + + // this TermEnum gives "piccadilly", "pie" and "pizza". 
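+    // terms come back in sorted order, so the loop below can stop at the
+    // first term that no longer starts with the prefix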
+    String prefix = "pi";
+    TermsEnum te = MultiFields.getFields(reader).terms("body").iterator();
+    te.seek(new BytesRef(prefix));
+    do {
+      String s = te.term().utf8ToString();
+      if (s.startsWith(prefix)) {
+        termsWithPrefix.add(new Term("body", s));
+      } else {
+        break;
+      }
+    } while (te.next() != null);
+
+    query1.add(termsWithPrefix.toArray(new Term[0]));
+    assertEquals("body:\"blueberry (piccadilly pie pizza)\"", query1.toString());
+    query2.add(termsWithPrefix.toArray(new Term[0]));
+    assertEquals("body:\"strawberry (piccadilly pie pizza)\"", query2
+        .toString());
+
+    ScoreDoc[] result;
+    result = searcher.search(query1, null, 1000).scoreDocs;
+    assertEquals(2, result.length);
+    result = searcher.search(query2, null, 1000).scoreDocs;
+    assertEquals(0, result.length);
+
+    // search for "blue* pizza":
+    MultiPhraseQuery query3 = new MultiPhraseQuery();
+    termsWithPrefix.clear();
+    prefix = "blue";
+    te.seek(new BytesRef(prefix));
+
+    do {
+      if (te.term().utf8ToString().startsWith(prefix)) {
+        termsWithPrefix.add(new Term("body", te.term().utf8ToString()));
+      }
+    } while (te.next() != null);
+
+    query3.add(termsWithPrefix.toArray(new Term[0]));
+    query3.add(new Term("body", "pizza"));
+
+    result = searcher.search(query3, null, 1000).scoreDocs;
+    assertEquals(2, result.length); // blueberry pizza, bluebird pizza
+    assertEquals("body:\"(blueberry bluebird) pizza\"", query3.toString());
+
+    // test slop:
+    query3.setSlop(1);
+    result = searcher.search(query3, null, 1000).scoreDocs;
+
+    // just make sure no exc:
+    searcher.explain(query3, 0);
+
+    assertEquals(3, result.length); // blueberry pizza, bluebird pizza, bluebird
+                                    // foobar pizza
+
+    MultiPhraseQuery query4 = new MultiPhraseQuery();
+    try {
+      query4.add(new Term("field1", "foo"));
+      query4.add(new Term("field2", "foobar"));
+      fail();
+    } catch (IllegalArgumentException e) {
+      // okay, all terms must belong to the same field
     }
+    writer.close();
+    searcher.close();
+    reader.close();
+    indexStore.close();
+  }
+
+  private void add(String s, RandomIndexWriter writer) throws IOException {
+    Document doc = new Document();
+    doc.add(new Field("body", s, Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+  }
+
+  public void testBooleanQueryContainingSingleTermPrefixQuery()
+      throws IOException {
+    // this tests against bug 33161 (now fixed)
+    // In order to cause the bug, the outer query must have more than one term
+    // and all terms required.
+    // The contained PhraseMultiQuery must contain exactly one term array.
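+    // the query built below is logically +body:pie +body:(blueberry blue);
+    // without the fix, running it threw an exception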
- - MockRAMDirectory indexStore = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); - add("blueberry pie", writer); - add("blueberry chewing gum", writer); - add("blue raspberry pie", writer); - writer.optimize(); - writer.close(); - - IndexSearcher searcher = new IndexSearcher(indexStore, true); - // This query will be equivalent to +body:pie +body:"blue*" - BooleanQuery q = new BooleanQuery(); - q.add(new TermQuery(new Term("body", "pie")), BooleanClause.Occur.MUST); - - MultiPhraseQuery trouble = new MultiPhraseQuery(); - trouble.add(new Term[] { - new Term("body", "blueberry"), - new Term("body", "blue") - }); - q.add(trouble, BooleanClause.Occur.MUST); - - // exception will be thrown here without fix - ScoreDoc[] hits = searcher.search(q, null, 1000).scoreDocs; - - assertEquals("Wrong number of hits", 2, hits.length); - - // just make sure no exc: - searcher.explain(q, 0); - - searcher.close(); - indexStore.close(); + writer.close(); + searcher.close(); + reader.close(); + indexStore.close(); + } + + private void add(String s, RandomIndexWriter writer) throws IOException { + Document doc = new Document(); + doc.add(new Field("body", s, Field.Store.YES, Field.Index.ANALYZED)); + writer.addDocument(doc); + } + + public void testBooleanQueryContainingSingleTermPrefixQuery() + throws IOException { + // this tests against bug 33161 (now fixed) + // In order to cause the bug, the outer query must have more than one term + // and all terms required. + // The contained PhraseMultiQuery must contain exactly one term array. + MockRAMDirectory indexStore = new MockRAMDirectory(); + RandomIndexWriter writer = new RandomIndexWriter(newRandom(), indexStore, + new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); + add("blueberry pie", writer); + add("blueberry chewing gum", writer); + add("blue raspberry pie", writer); + + IndexReader reader = writer.getReader(); + IndexSearcher searcher = new IndexSearcher(reader); + // This query will be equivalent to +body:pie +body:"blue*" + BooleanQuery q = new BooleanQuery(); + q.add(new TermQuery(new Term("body", "pie")), BooleanClause.Occur.MUST); + + MultiPhraseQuery trouble = new MultiPhraseQuery(); + trouble.add(new Term[] {new Term("body", "blueberry"), + new Term("body", "blue")}); + q.add(trouble, BooleanClause.Occur.MUST); + + // exception will be thrown here without fix + ScoreDoc[] hits = searcher.search(q, null, 1000).scoreDocs; + + assertEquals("Wrong number of hits", 2, hits.length); + + // just make sure no exc: + searcher.explain(q, 0); + + writer.close(); + searcher.close(); + reader.close(); + indexStore.close(); + } + public void testPhrasePrefixWithBooleanQuery() throws IOException { MockRAMDirectory indexStore = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer())); + RandomIndexWriter writer = new RandomIndexWriter(newRandom(), indexStore, + new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); add("This is a test", "object", writer); add("a note", "note", writer); - writer.close(); - IndexSearcher searcher = new IndexSearcher(indexStore, true); - + IndexReader reader = writer.getReader(); + IndexSearcher searcher = new IndexSearcher(reader); + // This query will be equivalent to +type:note +body:"a t*" BooleanQuery q = new BooleanQuery(); q.add(new TermQuery(new Term("type", "note")), BooleanClause.Occur.MUST); - + MultiPhraseQuery 
trouble = new MultiPhraseQuery(); trouble.add(new Term("body", "a")); - trouble.add(new Term[] { new Term("body", "test"), new Term("body", "this") }); + trouble + .add(new Term[] {new Term("body", "test"), new Term("body", "this")}); q.add(trouble, BooleanClause.Occur.MUST); - + // exception will be thrown here without fix for #35626: ScoreDoc[] hits = searcher.search(q, null, 1000).scoreDocs; assertEquals("Wrong number of hits", 0, hits.length); + writer.close(); searcher.close(); + reader.close(); indexStore.close(); } - + public void testNoDocs() throws Exception { MockRAMDirectory indexStore = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(indexStore, new MockAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + RandomIndexWriter writer = new RandomIndexWriter(newRandom(), indexStore, + new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); add("a note", "note", writer); - writer.close(); - - IndexSearcher searcher = new IndexSearcher(indexStore, true); - + + IndexReader reader = writer.getReader(); + IndexSearcher searcher = new IndexSearcher(reader); + MultiPhraseQuery q = new MultiPhraseQuery(); q.add(new Term("body", "a")); - q.add(new Term[] { new Term("body", "nope"), new Term("body", "nope") }); - assertEquals("Wrong number of hits", 0, searcher.search(q, null, 1).totalHits); - + q.add(new Term[] {new Term("body", "nope"), new Term("body", "nope")}); + assertEquals("Wrong number of hits", 0, + searcher.search(q, null, 1).totalHits); + // just make sure no exc: searcher.explain(q, 0); - + + writer.close(); searcher.close(); + reader.close(); indexStore.close(); } - public void testHashCodeAndEquals(){ + public void testHashCodeAndEquals() { MultiPhraseQuery query1 = new MultiPhraseQuery(); MultiPhraseQuery query2 = new MultiPhraseQuery(); assertEquals(query1.hashCode(), query2.hashCode()); - assertEquals(query1,query2); + assertEquals(query1, query2); - Term term1= new Term("someField","someText"); + Term term1 = new Term("someField", "someText"); query1.add(term1); query2.add(term1); assertEquals(query1.hashCode(), query2.hashCode()); - assertEquals(query1,query2); + assertEquals(query1, query2); - Term term2= new Term("someField","someMoreText"); + Term term2 = new Term("someField", "someMoreText"); query1.add(term2); - assertFalse(query1.hashCode()==query2.hashCode()); + assertFalse(query1.hashCode() == query2.hashCode()); assertFalse(query1.equals(query2)); query2.add(term2); assertEquals(query1.hashCode(), query2.hashCode()); - assertEquals(query1,query2); + assertEquals(query1, query2); } - - private void add(String s, String type, IndexWriter writer) throws IOException { + private void add(String s, String type, RandomIndexWriter writer) + throws IOException { Document doc = new Document(); doc.add(new Field("body", s, Field.Store.YES, Field.Index.ANALYZED)); doc.add(new Field("type", type, Field.Store.YES, Field.Index.NOT_ANALYZED)); writer.addDocument(doc); } - + // LUCENE-2526 public void testEmptyToString() { new MultiPhraseQuery().toString(); } - + } Index: lucene/src/test/org/apache/lucene/search/TestBooleanPrefixQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestBooleanPrefixQuery.java (revision 963689) +++ lucene/src/test/org/apache/lucene/search/TestBooleanPrefixQuery.java (working copy) @@ -22,8 +22,8 @@ import junit.framework.TestSuite; import junit.textui.TestRunner; import org.apache.lucene.store.RAMDirectory; -import org.apache.lucene.index.IndexWriter; 
import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.index.IndexReader; import org.apache.lucene.analysis.MockAnalyzer; @@ -33,8 +33,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.BooleanQuery; -import java.io.IOException; - /** * **/ @@ -79,29 +77,27 @@ Query rw1 = null; Query rw2 = null; IndexReader reader = null; - try { - IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig( + RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory, new IndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())); - for (int i = 0; i < categories.length; i++) { - Document doc = new Document(); - doc.add(new Field("category", categories[i], Field.Store.YES, Field.Index.NOT_ANALYZED)); - writer.addDocument(doc); - } - writer.close(); + for (int i = 0; i < categories.length; i++) { + Document doc = new Document(); + doc.add(new Field("category", categories[i], Field.Store.YES, Field.Index.NOT_ANALYZED)); + writer.addDocument(doc); + } + reader = writer.getReader(); + writer.close(); - reader = IndexReader.open(directory, true); - PrefixQuery query = new PrefixQuery(new Term("category", "foo")); - rw1 = query.rewrite(reader); + PrefixQuery query = new PrefixQuery(new Term("category", "foo")); + rw1 = query.rewrite(reader); - BooleanQuery bq = new BooleanQuery(); - bq.add(query, BooleanClause.Occur.MUST); + BooleanQuery bq = new BooleanQuery(); + bq.add(query, BooleanClause.Occur.MUST); - rw2 = bq.rewrite(reader); - } catch (IOException e) { - fail(e.getMessage()); - } + rw2 = bq.rewrite(reader); assertEquals("Number of Clauses Mismatch", getCount(reader, rw1), getCount(reader, rw2)); + reader.close(); + directory.close(); } } Index: lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java (revision 963689) +++ lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java (working copy) @@ -20,9 +20,9 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; -import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.store.RAMDirectory; @@ -41,6 +41,7 @@ public class TestFilteredQuery extends LuceneTestCase { private IndexSearcher searcher; + private IndexReader reader; private RAMDirectory directory; private Query query; private Filter filter; @@ -49,7 +50,8 @@ protected void setUp() throws Exception { super.setUp(); directory = new RAMDirectory(); - IndexWriter writer = new IndexWriter (directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); + RandomIndexWriter writer = new RandomIndexWriter (newRandom(), directory, + new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); Document doc = new Document(); doc.add (new Field("field", "one two three four five", Field.Store.YES, Field.Index.ANALYZED)); @@ -71,10 +73,10 @@ doc.add (new Field("sorter", "c", Field.Store.YES, Field.Index.ANALYZED)); writer.addDocument (doc); - writer.optimize (); + reader = writer.getReader(); writer.close (); - searcher = new IndexSearcher (directory, true); + searcher = 
new IndexSearcher (reader); query = new TermQuery (new Term ("field", "three")); filter = newStaticFilterB(); } @@ -95,6 +97,7 @@ @Override protected void tearDown() throws Exception { searcher.close(); + reader.close(); directory.close(); super.tearDown(); } Index: lucene/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java (revision 963689) +++ lucene/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java (working copy) @@ -23,8 +23,8 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.store.MockRAMDirectory; import java.util.ArrayList; @@ -39,16 +39,17 @@ public void testMissingTerms() throws Exception { String fieldName = "field1"; MockRAMDirectory rd = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(rd, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); + RandomIndexWriter w = new RandomIndexWriter(newRandom(), rd, + new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); for (int i = 0; i < 100; i++) { Document doc = new Document(); int term = i * 10; //terms are units of 10; doc.add(new Field(fieldName, "" + term, Field.Store.YES, Field.Index.NOT_ANALYZED)); w.addDocument(doc); } + IndexReader reader = w.getReader(); w.close(); - IndexReader reader = IndexReader.open(rd, true); IndexSearcher searcher = new IndexSearcher(reader); int numDocs = reader.numDocs(); ScoreDoc[] results; Index: lucene/src/test/org/apache/lucene/search/TestTermRangeFilter.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestTermRangeFilter.java (revision 963689) +++ lucene/src/test/org/apache/lucene/search/TestTermRangeFilter.java (working copy) @@ -23,8 +23,8 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; -import org.apache.lucene.index.IndexWriter; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -32,385 +32,448 @@ /** * A basic 'positive' Unit test class for the TermRangeFilter class. - * + * *
* <p>
- * NOTE: at the moment, this class only tests for 'positive' results, - * it does not verify the results to ensure there are no 'false positives', - * nor does it adequately test 'negative' results. It also does not test - * that garbage in results in an Exception. + * NOTE: at the moment, this class only tests for 'positive' results, it does + * not verify the results to ensure there are no 'false positives', nor does it + * adequately test 'negative' results. It also does not test that garbage in + * results in an Exception. */ public class TestTermRangeFilter extends BaseTestRangeFilter { - - public TestTermRangeFilter(String name) { - super(name); - } - public TestTermRangeFilter() { - super(); - } - - public void testRangeFilterId() throws IOException { - - IndexReader reader = IndexReader.open(signedIndex.index, true); - IndexSearcher search = new IndexSearcher(reader); - - int medId = ((maxId - minId) / 2); - - String minIP = pad(minId); - String maxIP = pad(maxId); - String medIP = pad(medId); + + public void testRangeFilterId() throws IOException { - int numDocs = reader.numDocs(); - - assertEquals("num of docs", numDocs, 1+ maxId - minId); - - ScoreDoc[] result; - Query q = new TermQuery(new Term("body","body")); - - // test id, bounded on both ends - - result = search.search(q,new TermRangeFilter("id",minIP,maxIP,T,T), numDocs).scoreDocs; - assertEquals("find all", numDocs, result.length); - - result = search.search(q,new TermRangeFilter("id",minIP,maxIP,T,F), numDocs).scoreDocs; - assertEquals("all but last", numDocs-1, result.length); - - result = search.search(q,new TermRangeFilter("id",minIP,maxIP,F,T), numDocs).scoreDocs; - assertEquals("all but first", numDocs-1, result.length); - - result = search.search(q,new TermRangeFilter("id",minIP,maxIP,F,F), numDocs).scoreDocs; - assertEquals("all but ends", numDocs-2, result.length); + IndexReader reader = signedIndexReader; + IndexSearcher search = new IndexSearcher(reader); - result = search.search(q,new TermRangeFilter("id",medIP,maxIP,T,T), numDocs).scoreDocs; - assertEquals("med and up", 1+ maxId-medId, result.length); - - result = search.search(q,new TermRangeFilter("id",minIP,medIP,T,T), numDocs).scoreDocs; - assertEquals("up to med", 1+ medId-minId, result.length); - - // unbounded id - - result = search.search(q,new TermRangeFilter("id",minIP,null,T,F), numDocs).scoreDocs; - assertEquals("min and up", numDocs, result.length); - - result = search.search(q,new TermRangeFilter("id",null,maxIP,F,T), numDocs).scoreDocs; - assertEquals("max and down", numDocs, result.length); - - result = search.search(q,new TermRangeFilter("id",minIP,null,F,F), numDocs).scoreDocs; - assertEquals("not min, but up", numDocs-1, result.length); - - result = search.search(q,new TermRangeFilter("id",null,maxIP,F,F), numDocs).scoreDocs; - assertEquals("not max, but down", numDocs-1, result.length); - - result = search.search(q,new TermRangeFilter("id",medIP,maxIP,T,F), numDocs).scoreDocs; - assertEquals("med and up, not max", maxId-medId, result.length); - - result = search.search(q,new TermRangeFilter("id",minIP,medIP,F,T), numDocs).scoreDocs; - assertEquals("not min, up to med", medId-minId, result.length); - - // very small sets - - result = search.search(q,new TermRangeFilter("id",minIP,minIP,F,F), numDocs).scoreDocs; - assertEquals("min,min,F,F", 0, result.length); - result = search.search(q,new TermRangeFilter("id",medIP,medIP,F,F), numDocs).scoreDocs; - assertEquals("med,med,F,F", 0, result.length); - result = search.search(q,new 
TermRangeFilter("id",maxIP,maxIP,F,F), numDocs).scoreDocs; - assertEquals("max,max,F,F", 0, result.length); - - result = search.search(q,new TermRangeFilter("id",minIP,minIP,T,T), numDocs).scoreDocs; - assertEquals("min,min,T,T", 1, result.length); - result = search.search(q,new TermRangeFilter("id",null,minIP,F,T), numDocs).scoreDocs; - assertEquals("nul,min,F,T", 1, result.length); - - result = search.search(q,new TermRangeFilter("id",maxIP,maxIP,T,T), numDocs).scoreDocs; - assertEquals("max,max,T,T", 1, result.length); - result = search.search(q,new TermRangeFilter("id",maxIP,null,T,F), numDocs).scoreDocs; - assertEquals("max,nul,T,T", 1, result.length); - - result = search.search(q,new TermRangeFilter("id",medIP,medIP,T,T), numDocs).scoreDocs; - assertEquals("med,med,T,T", 1, result.length); - - } - - public void testRangeFilterIdCollating() throws IOException { - - IndexReader reader = IndexReader.open(signedIndex.index, true); - IndexSearcher search = new IndexSearcher(reader); - - Collator c = Collator.getInstance(Locale.ENGLISH); - - int medId = ((maxId - minId) / 2); - - String minIP = pad(minId); - String maxIP = pad(maxId); - String medIP = pad(medId); - - int numDocs = reader.numDocs(); - - assertEquals("num of docs", numDocs, 1+ maxId - minId); - - Query q = new TermQuery(new Term("body","body")); - - // test id, bounded on both ends - int numHits = search.search(q,new TermRangeFilter("id",minIP,maxIP,T,T,c), 1000).totalHits; - assertEquals("find all", numDocs, numHits); - - numHits = search.search(q,new TermRangeFilter("id",minIP,maxIP,T,F,c), 1000).totalHits; - assertEquals("all but last", numDocs-1, numHits); - - numHits = search.search(q,new TermRangeFilter("id",minIP,maxIP,F,T,c), 1000).totalHits; - assertEquals("all but first", numDocs-1, numHits); - - numHits = search.search(q,new TermRangeFilter("id",minIP,maxIP,F,F,c), 1000).totalHits; - assertEquals("all but ends", numDocs-2, numHits); - - numHits = search.search(q,new TermRangeFilter("id",medIP,maxIP,T,T,c), 1000).totalHits; - assertEquals("med and up", 1+ maxId-medId, numHits); - - numHits = search.search(q,new TermRangeFilter("id",minIP,medIP,T,T,c), 1000).totalHits; - assertEquals("up to med", 1+ medId-minId, numHits); - - // unbounded id - - numHits = search.search(q,new TermRangeFilter("id",minIP,null,T,F,c), 1000).totalHits; - assertEquals("min and up", numDocs, numHits); - - numHits = search.search(q,new TermRangeFilter("id",null,maxIP,F,T,c), 1000).totalHits; - assertEquals("max and down", numDocs, numHits); - - numHits = search.search(q,new TermRangeFilter("id",minIP,null,F,F,c), 1000).totalHits; - assertEquals("not min, but up", numDocs-1, numHits); - - numHits = search.search(q,new TermRangeFilter("id",null,maxIP,F,F,c), 1000).totalHits; - assertEquals("not max, but down", numDocs-1, numHits); - - numHits = search.search(q,new TermRangeFilter("id",medIP,maxIP,T,F,c), 1000).totalHits; - assertEquals("med and up, not max", maxId-medId, numHits); - - numHits = search.search(q,new TermRangeFilter("id",minIP,medIP,F,T,c), 1000).totalHits; - assertEquals("not min, up to med", medId-minId, numHits); - - // very small sets - - numHits = search.search(q,new TermRangeFilter("id",minIP,minIP,F,F,c), 1000).totalHits; - assertEquals("min,min,F,F", 0, numHits); - numHits = search.search(q,new TermRangeFilter("id",medIP,medIP,F,F,c), 1000).totalHits; - assertEquals("med,med,F,F", 0, numHits); - numHits = search.search(q,new TermRangeFilter("id",maxIP,maxIP,F,F,c), 1000).totalHits; - assertEquals("max,max,F,F", 0, 
numHits); - - numHits = search.search(q,new TermRangeFilter("id",minIP,minIP,T,T,c), 1000).totalHits; - assertEquals("min,min,T,T", 1, numHits); - numHits = search.search(q,new TermRangeFilter("id",null,minIP,F,T,c), 1000).totalHits; - assertEquals("nul,min,F,T", 1, numHits); - - numHits = search.search(q,new TermRangeFilter("id",maxIP,maxIP,T,T,c), 1000).totalHits; - assertEquals("max,max,T,T", 1, numHits); - numHits = search.search(q,new TermRangeFilter("id",maxIP,null,T,F,c), 1000).totalHits; - assertEquals("max,nul,T,T", 1, numHits); - - numHits = search.search(q,new TermRangeFilter("id",medIP,medIP,T,T,c), 1000).totalHits; - assertEquals("med,med,T,T", 1, numHits); - } - - public void testRangeFilterRand() throws IOException { - - IndexReader reader = IndexReader.open(signedIndex.index, true); - IndexSearcher search = new IndexSearcher(reader); - - String minRP = pad(signedIndex.minR); - String maxRP = pad(signedIndex.maxR); + int medId = ((maxId - minId) / 2); - int numDocs = reader.numDocs(); - - assertEquals("num of docs", numDocs, 1+ maxId - minId); - - ScoreDoc[] result; - Query q = new TermQuery(new Term("body","body")); - - // test extremes, bounded on both ends - - result = search.search(q,new TermRangeFilter("rand",minRP,maxRP,T,T), numDocs).scoreDocs; - assertEquals("find all", numDocs, result.length); - - result = search.search(q,new TermRangeFilter("rand",minRP,maxRP,T,F), numDocs).scoreDocs; - assertEquals("all but biggest", numDocs-1, result.length); - - result = search.search(q,new TermRangeFilter("rand",minRP,maxRP,F,T), numDocs).scoreDocs; - assertEquals("all but smallest", numDocs-1, result.length); - - result = search.search(q,new TermRangeFilter("rand",minRP,maxRP,F,F), numDocs).scoreDocs; - assertEquals("all but extremes", numDocs-2, result.length); + String minIP = pad(minId); + String maxIP = pad(maxId); + String medIP = pad(medId); - // unbounded - - result = search.search(q,new TermRangeFilter("rand",minRP,null,T,F), numDocs).scoreDocs; - assertEquals("smallest and up", numDocs, result.length); - - result = search.search(q,new TermRangeFilter("rand",null,maxRP,F,T), numDocs).scoreDocs; - assertEquals("biggest and down", numDocs, result.length); - - result = search.search(q,new TermRangeFilter("rand",minRP,null,F,F), numDocs).scoreDocs; - assertEquals("not smallest, but up", numDocs-1, result.length); - - result = search.search(q,new TermRangeFilter("rand",null,maxRP,F,F), numDocs).scoreDocs; - assertEquals("not biggest, but down", numDocs-1, result.length); - - // very small sets - - result = search.search(q,new TermRangeFilter("rand",minRP,minRP,F,F), numDocs).scoreDocs; - assertEquals("min,min,F,F", 0, result.length); - result = search.search(q,new TermRangeFilter("rand",maxRP,maxRP,F,F), numDocs).scoreDocs; - assertEquals("max,max,F,F", 0, result.length); - - result = search.search(q,new TermRangeFilter("rand",minRP,minRP,T,T), numDocs).scoreDocs; - assertEquals("min,min,T,T", 1, result.length); - result = search.search(q,new TermRangeFilter("rand",null,minRP,F,T), numDocs).scoreDocs; - assertEquals("nul,min,F,T", 1, result.length); - - result = search.search(q,new TermRangeFilter("rand",maxRP,maxRP,T,T), numDocs).scoreDocs; - assertEquals("max,max,T,T", 1, result.length); - result = search.search(q,new TermRangeFilter("rand",maxRP,null,T,F), numDocs).scoreDocs; - assertEquals("max,nul,T,T", 1, result.length); - + int numDocs = reader.numDocs(); + + assertEquals("num of docs", numDocs, 1 + maxId - minId); + + ScoreDoc[] result; + Query q = new TermQuery(new 
Term("body", "body")); + + // test id, bounded on both ends + + result = search.search(q, new TermRangeFilter("id", minIP, maxIP, T, T), + numDocs).scoreDocs; + assertEquals("find all", numDocs, result.length); + + result = search.search(q, new TermRangeFilter("id", minIP, maxIP, T, F), + numDocs).scoreDocs; + assertEquals("all but last", numDocs - 1, result.length); + + result = search.search(q, new TermRangeFilter("id", minIP, maxIP, F, T), + numDocs).scoreDocs; + assertEquals("all but first", numDocs - 1, result.length); + + result = search.search(q, new TermRangeFilter("id", minIP, maxIP, F, F), + numDocs).scoreDocs; + assertEquals("all but ends", numDocs - 2, result.length); + + result = search.search(q, new TermRangeFilter("id", medIP, maxIP, T, T), + numDocs).scoreDocs; + assertEquals("med and up", 1 + maxId - medId, result.length); + + result = search.search(q, new TermRangeFilter("id", minIP, medIP, T, T), + numDocs).scoreDocs; + assertEquals("up to med", 1 + medId - minId, result.length); + + // unbounded id + + result = search.search(q, new TermRangeFilter("id", minIP, null, T, F), + numDocs).scoreDocs; + assertEquals("min and up", numDocs, result.length); + + result = search.search(q, new TermRangeFilter("id", null, maxIP, F, T), + numDocs).scoreDocs; + assertEquals("max and down", numDocs, result.length); + + result = search.search(q, new TermRangeFilter("id", minIP, null, F, F), + numDocs).scoreDocs; + assertEquals("not min, but up", numDocs - 1, result.length); + + result = search.search(q, new TermRangeFilter("id", null, maxIP, F, F), + numDocs).scoreDocs; + assertEquals("not max, but down", numDocs - 1, result.length); + + result = search.search(q, new TermRangeFilter("id", medIP, maxIP, T, F), + numDocs).scoreDocs; + assertEquals("med and up, not max", maxId - medId, result.length); + + result = search.search(q, new TermRangeFilter("id", minIP, medIP, F, T), + numDocs).scoreDocs; + assertEquals("not min, up to med", medId - minId, result.length); + + // very small sets + + result = search.search(q, new TermRangeFilter("id", minIP, minIP, F, F), + numDocs).scoreDocs; + assertEquals("min,min,F,F", 0, result.length); + result = search.search(q, new TermRangeFilter("id", medIP, medIP, F, F), + numDocs).scoreDocs; + assertEquals("med,med,F,F", 0, result.length); + result = search.search(q, new TermRangeFilter("id", maxIP, maxIP, F, F), + numDocs).scoreDocs; + assertEquals("max,max,F,F", 0, result.length); + + result = search.search(q, new TermRangeFilter("id", minIP, minIP, T, T), + numDocs).scoreDocs; + assertEquals("min,min,T,T", 1, result.length); + result = search.search(q, new TermRangeFilter("id", null, minIP, F, T), + numDocs).scoreDocs; + assertEquals("nul,min,F,T", 1, result.length); + + result = search.search(q, new TermRangeFilter("id", maxIP, maxIP, T, T), + numDocs).scoreDocs; + assertEquals("max,max,T,T", 1, result.length); + result = search.search(q, new TermRangeFilter("id", maxIP, null, T, F), + numDocs).scoreDocs; + assertEquals("max,nul,T,T", 1, result.length); + + result = search.search(q, new TermRangeFilter("id", medIP, medIP, T, T), + numDocs).scoreDocs; + assertEquals("med,med,T,T", 1, result.length); + + } + + public void testRangeFilterIdCollating() throws IOException { + + IndexReader reader = signedIndexReader; + IndexSearcher search = new IndexSearcher(reader); + + Collator c = Collator.getInstance(Locale.ENGLISH); + + int medId = ((maxId - minId) / 2); + + String minIP = pad(minId); + String maxIP = pad(maxId); + String medIP = pad(medId); + + int 
numDocs = reader.numDocs(); + + assertEquals("num of docs", numDocs, 1 + maxId - minId); + + Query q = new TermQuery(new Term("body", "body")); + + // test id, bounded on both ends + int numHits = search.search(q, new TermRangeFilter("id", minIP, maxIP, T, + T, c), 1000).totalHits; + assertEquals("find all", numDocs, numHits); + + numHits = search.search(q, + new TermRangeFilter("id", minIP, maxIP, T, F, c), 1000).totalHits; + assertEquals("all but last", numDocs - 1, numHits); + + numHits = search.search(q, + new TermRangeFilter("id", minIP, maxIP, F, T, c), 1000).totalHits; + assertEquals("all but first", numDocs - 1, numHits); + + numHits = search.search(q, + new TermRangeFilter("id", minIP, maxIP, F, F, c), 1000).totalHits; + assertEquals("all but ends", numDocs - 2, numHits); + + numHits = search.search(q, + new TermRangeFilter("id", medIP, maxIP, T, T, c), 1000).totalHits; + assertEquals("med and up", 1 + maxId - medId, numHits); + + numHits = search.search(q, + new TermRangeFilter("id", minIP, medIP, T, T, c), 1000).totalHits; + assertEquals("up to med", 1 + medId - minId, numHits); + + // unbounded id + + numHits = search.search(q, new TermRangeFilter("id", minIP, null, T, F, c), + 1000).totalHits; + assertEquals("min and up", numDocs, numHits); + + numHits = search.search(q, new TermRangeFilter("id", null, maxIP, F, T, c), + 1000).totalHits; + assertEquals("max and down", numDocs, numHits); + + numHits = search.search(q, new TermRangeFilter("id", minIP, null, F, F, c), + 1000).totalHits; + assertEquals("not min, but up", numDocs - 1, numHits); + + numHits = search.search(q, new TermRangeFilter("id", null, maxIP, F, F, c), + 1000).totalHits; + assertEquals("not max, but down", numDocs - 1, numHits); + + numHits = search.search(q, + new TermRangeFilter("id", medIP, maxIP, T, F, c), 1000).totalHits; + assertEquals("med and up, not max", maxId - medId, numHits); + + numHits = search.search(q, + new TermRangeFilter("id", minIP, medIP, F, T, c), 1000).totalHits; + assertEquals("not min, up to med", medId - minId, numHits); + + // very small sets + + numHits = search.search(q, + new TermRangeFilter("id", minIP, minIP, F, F, c), 1000).totalHits; + assertEquals("min,min,F,F", 0, numHits); + numHits = search.search(q, + new TermRangeFilter("id", medIP, medIP, F, F, c), 1000).totalHits; + assertEquals("med,med,F,F", 0, numHits); + numHits = search.search(q, + new TermRangeFilter("id", maxIP, maxIP, F, F, c), 1000).totalHits; + assertEquals("max,max,F,F", 0, numHits); + + numHits = search.search(q, + new TermRangeFilter("id", minIP, minIP, T, T, c), 1000).totalHits; + assertEquals("min,min,T,T", 1, numHits); + numHits = search.search(q, new TermRangeFilter("id", null, minIP, F, T, c), + 1000).totalHits; + assertEquals("nul,min,F,T", 1, numHits); + + numHits = search.search(q, + new TermRangeFilter("id", maxIP, maxIP, T, T, c), 1000).totalHits; + assertEquals("max,max,T,T", 1, numHits); + numHits = search.search(q, new TermRangeFilter("id", maxIP, null, T, F, c), + 1000).totalHits; + assertEquals("max,nul,T,T", 1, numHits); + + numHits = search.search(q, + new TermRangeFilter("id", medIP, medIP, T, T, c), 1000).totalHits; + assertEquals("med,med,T,T", 1, numHits); + } + + public void testRangeFilterRand() throws IOException { + + IndexReader reader = signedIndexReader; + IndexSearcher search = new IndexSearcher(reader); + + String minRP = pad(signedIndexDir.minR); + String maxRP = pad(signedIndexDir.maxR); + + int numDocs = reader.numDocs(); + + assertEquals("num of docs", numDocs, 1 + maxId - 
minId); + + ScoreDoc[] result; + Query q = new TermQuery(new Term("body", "body")); + + // test extremes, bounded on both ends + + result = search.search(q, new TermRangeFilter("rand", minRP, maxRP, T, T), + numDocs).scoreDocs; + assertEquals("find all", numDocs, result.length); + + result = search.search(q, new TermRangeFilter("rand", minRP, maxRP, T, F), + numDocs).scoreDocs; + assertEquals("all but biggest", numDocs - 1, result.length); + + result = search.search(q, new TermRangeFilter("rand", minRP, maxRP, F, T), + numDocs).scoreDocs; + assertEquals("all but smallest", numDocs - 1, result.length); + + result = search.search(q, new TermRangeFilter("rand", minRP, maxRP, F, F), + numDocs).scoreDocs; + assertEquals("all but extremes", numDocs - 2, result.length); + + // unbounded + + result = search.search(q, new TermRangeFilter("rand", minRP, null, T, F), + numDocs).scoreDocs; + assertEquals("smallest and up", numDocs, result.length); + + result = search.search(q, new TermRangeFilter("rand", null, maxRP, F, T), + numDocs).scoreDocs; + assertEquals("biggest and down", numDocs, result.length); + + result = search.search(q, new TermRangeFilter("rand", minRP, null, F, F), + numDocs).scoreDocs; + assertEquals("not smallest, but up", numDocs - 1, result.length); + + result = search.search(q, new TermRangeFilter("rand", null, maxRP, F, F), + numDocs).scoreDocs; + assertEquals("not biggest, but down", numDocs - 1, result.length); + + // very small sets + + result = search.search(q, new TermRangeFilter("rand", minRP, minRP, F, F), + numDocs).scoreDocs; + assertEquals("min,min,F,F", 0, result.length); + result = search.search(q, new TermRangeFilter("rand", maxRP, maxRP, F, F), + numDocs).scoreDocs; + assertEquals("max,max,F,F", 0, result.length); + + result = search.search(q, new TermRangeFilter("rand", minRP, minRP, T, T), + numDocs).scoreDocs; + assertEquals("min,min,T,T", 1, result.length); + result = search.search(q, new TermRangeFilter("rand", null, minRP, F, T), + numDocs).scoreDocs; + assertEquals("nul,min,F,T", 1, result.length); + + result = search.search(q, new TermRangeFilter("rand", maxRP, maxRP, T, T), + numDocs).scoreDocs; + assertEquals("max,max,T,T", 1, result.length); + result = search.search(q, new TermRangeFilter("rand", maxRP, null, T, F), + numDocs).scoreDocs; + assertEquals("max,nul,T,T", 1, result.length); + + } + + public void testRangeFilterRandCollating() throws IOException { + + // using the unsigned index because collation seems to ignore hyphens + IndexReader reader = unsignedIndexReader; + IndexSearcher search = new IndexSearcher(reader); + + Collator c = Collator.getInstance(Locale.ENGLISH); + + String minRP = pad(unsignedIndexDir.minR); + String maxRP = pad(unsignedIndexDir.maxR); + + int numDocs = reader.numDocs(); + + assertEquals("num of docs", numDocs, 1 + maxId - minId); + + Query q = new TermQuery(new Term("body", "body")); + + // test extremes, bounded on both ends + + int numHits = search.search(q, new TermRangeFilter("rand", minRP, maxRP, T, + T, c), 1000).totalHits; + assertEquals("find all", numDocs, numHits); + + numHits = search.search(q, new TermRangeFilter("rand", minRP, maxRP, T, F, + c), 1000).totalHits; + assertEquals("all but biggest", numDocs - 1, numHits); + + numHits = search.search(q, new TermRangeFilter("rand", minRP, maxRP, F, T, + c), 1000).totalHits; + assertEquals("all but smallest", numDocs - 1, numHits); + + numHits = search.search(q, new TermRangeFilter("rand", minRP, maxRP, F, F, + c), 1000).totalHits; + assertEquals("all but 
extremes", numDocs - 2, numHits); + + // unbounded + + numHits = search.search(q, + new TermRangeFilter("rand", minRP, null, T, F, c), 1000).totalHits; + assertEquals("smallest and up", numDocs, numHits); + + numHits = search.search(q, + new TermRangeFilter("rand", null, maxRP, F, T, c), 1000).totalHits; + assertEquals("biggest and down", numDocs, numHits); + + numHits = search.search(q, + new TermRangeFilter("rand", minRP, null, F, F, c), 1000).totalHits; + assertEquals("not smallest, but up", numDocs - 1, numHits); + + numHits = search.search(q, + new TermRangeFilter("rand", null, maxRP, F, F, c), 1000).totalHits; + assertEquals("not biggest, but down", numDocs - 1, numHits); + + // very small sets + + numHits = search.search(q, new TermRangeFilter("rand", minRP, minRP, F, F, + c), 1000).totalHits; + assertEquals("min,min,F,F", 0, numHits); + numHits = search.search(q, new TermRangeFilter("rand", maxRP, maxRP, F, F, + c), 1000).totalHits; + assertEquals("max,max,F,F", 0, numHits); + + numHits = search.search(q, new TermRangeFilter("rand", minRP, minRP, T, T, + c), 1000).totalHits; + assertEquals("min,min,T,T", 1, numHits); + numHits = search.search(q, + new TermRangeFilter("rand", null, minRP, F, T, c), 1000).totalHits; + assertEquals("nul,min,F,T", 1, numHits); + + numHits = search.search(q, new TermRangeFilter("rand", maxRP, maxRP, T, T, + c), 1000).totalHits; + assertEquals("max,max,T,T", 1, numHits); + numHits = search.search(q, + new TermRangeFilter("rand", maxRP, null, T, F, c), 1000).totalHits; + assertEquals("max,nul,T,T", 1, numHits); + } + + public void testFarsi() throws Exception { + + /* build an index */ + RAMDirectory farsiIndex = new RAMDirectory(); + RandomIndexWriter writer = new RandomIndexWriter(rand, farsiIndex, + new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); + Document doc = new Document(); + doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES, + Field.Index.NOT_ANALYZED)); + doc + .add(new Field("body", "body", Field.Store.YES, + Field.Index.NOT_ANALYZED)); + writer.addDocument(doc); + + IndexReader reader = writer.getReader(); + writer.close(); + + IndexSearcher search = new IndexSearcher(reader); + Query q = new TermQuery(new Term("body", "body")); + + // Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in + // RuleBasedCollator. However, the Arabic Locale seems to order the Farsi + // characters properly. + Collator collator = Collator.getInstance(new Locale("ar")); + + // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi + // orders the U+0698 character before the U+0633 character, so the single + // index Term below should NOT be returned by a TermRangeFilter with a Farsi + // Collator (or an Arabic one for the case when Farsi is not supported). 
+ int numHits = search.search(q, new TermRangeFilter("content", "\u062F", + "\u0698", T, T, collator), 1000).totalHits; + assertEquals("The index Term should not be included.", 0, numHits); + + numHits = search.search(q, new TermRangeFilter("content", "\u0633", + "\u0638", T, T, collator), 1000).totalHits; + assertEquals("The index Term should be included.", 1, numHits); + search.close(); + reader.close(); + farsiIndex.close(); + } + + public void testDanish() throws Exception { + + /* build an index */ + RAMDirectory danishIndex = new RAMDirectory(); + RandomIndexWriter writer = new RandomIndexWriter(rand, danishIndex, + new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); + // Danish collation orders the words below in the given order + // (example taken from TestSort.testInternationalSort() ). + String[] words = {"H\u00D8T", "H\u00C5T", "MAND"}; + for (int docnum = 0; docnum < words.length; ++docnum) { + Document doc = new Document(); + doc.add(new Field("content", words[docnum], Field.Store.YES, + Field.Index.NOT_ANALYZED)); + doc.add(new Field("body", "body", Field.Store.YES, + Field.Index.NOT_ANALYZED)); + writer.addDocument(doc); } - - public void testRangeFilterRandCollating() throws IOException { - - // using the unsigned index because collation seems to ignore hyphens - IndexReader reader = IndexReader.open(unsignedIndex.index, true); - IndexSearcher search = new IndexSearcher(reader); - - Collator c = Collator.getInstance(Locale.ENGLISH); - - String minRP = pad(unsignedIndex.minR); - String maxRP = pad(unsignedIndex.maxR); - - int numDocs = reader.numDocs(); - - assertEquals("num of docs", numDocs, 1+ maxId - minId); - - Query q = new TermQuery(new Term("body","body")); - - // test extremes, bounded on both ends - - int numHits = search.search(q,new TermRangeFilter("rand",minRP,maxRP,T,T,c), 1000).totalHits; - assertEquals("find all", numDocs, numHits); - - numHits = search.search(q,new TermRangeFilter("rand",minRP,maxRP,T,F,c), 1000).totalHits; - assertEquals("all but biggest", numDocs-1, numHits); - - numHits = search.search(q,new TermRangeFilter("rand",minRP,maxRP,F,T,c), 1000).totalHits; - assertEquals("all but smallest", numDocs-1, numHits); - - numHits = search.search(q,new TermRangeFilter("rand",minRP,maxRP,F,F,c), 1000).totalHits; - assertEquals("all but extremes", numDocs-2, numHits); - - // unbounded - - numHits = search.search(q,new TermRangeFilter("rand",minRP,null,T,F,c), 1000).totalHits; - assertEquals("smallest and up", numDocs, numHits); - - numHits = search.search(q,new TermRangeFilter("rand",null,maxRP,F,T,c), 1000).totalHits; - assertEquals("biggest and down", numDocs, numHits); - - numHits = search.search(q,new TermRangeFilter("rand",minRP,null,F,F,c), 1000).totalHits; - assertEquals("not smallest, but up", numDocs-1, numHits); - - numHits = search.search(q,new TermRangeFilter("rand",null,maxRP,F,F,c), 1000).totalHits; - assertEquals("not biggest, but down", numDocs-1, numHits); - - // very small sets - - numHits = search.search(q,new TermRangeFilter("rand",minRP,minRP,F,F,c), 1000).totalHits; - assertEquals("min,min,F,F", 0, numHits); - numHits = search.search(q,new TermRangeFilter("rand",maxRP,maxRP,F,F,c), 1000).totalHits; - assertEquals("max,max,F,F", 0, numHits); - - numHits = search.search(q,new TermRangeFilter("rand",minRP,minRP,T,T,c), 1000).totalHits; - assertEquals("min,min,T,T", 1, numHits); - numHits = search.search(q,new TermRangeFilter("rand",null,minRP,F,T,c), 1000).totalHits; - assertEquals("nul,min,F,T", 1, numHits); - - numHits 
= search.search(q,new TermRangeFilter("rand",maxRP,maxRP,T,T,c), 1000).totalHits; - assertEquals("max,max,T,T", 1, numHits); - numHits = search.search(q,new TermRangeFilter("rand",maxRP,null,T,F,c), 1000).totalHits; - assertEquals("max,nul,T,T", 1, numHits); - } + IndexReader reader = writer.getReader(); + writer.close(); - public void testFarsi() throws Exception { - - /* build an index */ - RAMDirectory farsiIndex = new RAMDirectory(); - IndexWriter writer = new IndexWriter(farsiIndex, new IndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer())); - Document doc = new Document(); - doc.add(new Field("content","\u0633\u0627\u0628", - Field.Store.YES, Field.Index.NOT_ANALYZED)); - doc.add(new Field("body", "body", - Field.Store.YES, Field.Index.NOT_ANALYZED)); - writer.addDocument(doc); - - writer.optimize(); - writer.close(); - - IndexReader reader = IndexReader.open(farsiIndex, true); - IndexSearcher search = new IndexSearcher(reader); - Query q = new TermQuery(new Term("body","body")); - - // Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in - // RuleBasedCollator. However, the Arabic Locale seems to order the Farsi - // characters properly. - Collator collator = Collator.getInstance(new Locale("ar")); - - // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi - // orders the U+0698 character before the U+0633 character, so the single - // index Term below should NOT be returned by a TermRangeFilter with a Farsi - // Collator (or an Arabic one for the case when Farsi is not supported). - int numHits = search.search - (q, new TermRangeFilter("content", "\u062F", "\u0698", T, T, collator), 1000).totalHits; - assertEquals("The index Term should not be included.", 0, numHits); - - numHits = search.search - (q, new TermRangeFilter("content", "\u0633", "\u0638", T, T, collator), 1000).totalHits; - assertEquals("The index Term should be included.", 1, numHits); - search.close(); - } - - public void testDanish() throws Exception { - - /* build an index */ - RAMDirectory danishIndex = new RAMDirectory(); - IndexWriter writer = new IndexWriter(danishIndex, new IndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer())); - // Danish collation orders the words below in the given order - // (example taken from TestSort.testInternationalSort() ). - String[] words = { "H\u00D8T", "H\u00C5T", "MAND" }; - for (int docnum = 0 ; docnum < words.length ; ++docnum) { - Document doc = new Document(); - doc.add(new Field("content", words[docnum], - Field.Store.YES, Field.Index.NOT_ANALYZED)); - doc.add(new Field("body", "body", - Field.Store.YES, Field.Index.NOT_ANALYZED)); - writer.addDocument(doc); - } - writer.optimize(); - writer.close(); - - IndexReader reader = IndexReader.open(danishIndex, true); - IndexSearcher search = new IndexSearcher(reader); - Query q = new TermQuery(new Term("body","body")); - - Collator collator = Collator.getInstance(new Locale("da", "dk")); - - // Unicode order would not include "H\u00C5T" in [ "H\u00D8T", "MAND" ], - // but Danish collation does. 
- int numHits = search.search - (q, new TermRangeFilter("content", "H\u00D8T", "MAND", F, F, collator), 1000).totalHits; - assertEquals("The index Term should be included.", 1, numHits); - - numHits = search.search - (q, new TermRangeFilter("content", "H\u00C5T", "MAND", F, F, collator), 1000).totalHits; - assertEquals - ("The index Term should not be included.", 0, numHits); - search.close(); - } + IndexSearcher search = new IndexSearcher(reader); + Query q = new TermQuery(new Term("body", "body")); + + Collator collator = Collator.getInstance(new Locale("da", "dk")); + + // Unicode order would not include "H\u00C5T" in [ "H\u00D8T", "MAND" ], + // but Danish collation does. + int numHits = search.search(q, new TermRangeFilter("content", "H\u00D8T", + "MAND", F, F, collator), 1000).totalHits; + assertEquals("The index Term should be included.", 1, numHits); + + numHits = search.search(q, new TermRangeFilter("content", "H\u00C5T", + "MAND", F, F, collator), 1000).totalHits; + assertEquals("The index Term should not be included.", 0, numHits); + search.close(); + reader.close(); + danishIndex.close(); + } } Index: lucene/src/test/org/apache/lucene/index/RandomIndexWriter.java =================================================================== --- lucene/src/test/org/apache/lucene/index/RandomIndexWriter.java (revision 0) +++ lucene/src/test/org/apache/lucene/index/RandomIndexWriter.java (revision 0) @@ -0,0 +1,129 @@ +package org.apache.lucene.index; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.util.Random; +import java.io.Closeable; +import java.io.IOException; + +import org.apache.lucene.util._TestUtil; +import org.apache.lucene.store.Directory; +import org.apache.lucene.document.Document; +import org.apache.lucene.index.codecs.Codec; +import org.apache.lucene.index.codecs.CodecProvider; +import org.apache.lucene.index.codecs.intblock.IntBlockCodec; +import org.apache.lucene.index.codecs.preflex.PreFlexCodec; +import org.apache.lucene.index.codecs.pulsing.PulsingCodec; +import org.apache.lucene.index.codecs.sep.SepCodec; +import org.apache.lucene.index.codecs.standard.StandardCodec; + +/** Silly class that randomizes the indexing experience. EG + * it may swap in a different merge policy/scheduler; may + * commit periodically; may or may not optimize in the end, + * may flush by doc count instead of RAM, etc. 
+ */ + +public class RandomIndexWriter implements Closeable { + + public IndexWriter w; + private final Random r; + int docCount; + int flushAt; + + public RandomIndexWriter(Random r, Directory dir, IndexWriterConfig c) throws IOException { + this.r = r; + if (r.nextBoolean()) { + c.setMergePolicy(new LogDocMergePolicy()); + } + if (r.nextBoolean()) { + c.setMergeScheduler(new SerialMergeScheduler()); + } + if (r.nextBoolean()) { + c.setMaxBufferedDocs(_TestUtil.nextInt(r, 2, 1000)); + } + if (r.nextBoolean()) { + c.setTermIndexInterval(_TestUtil.nextInt(r, 1, 1000)); + } + + if (c.getMergePolicy() instanceof LogMergePolicy) { + LogMergePolicy logmp = (LogMergePolicy) c.getMergePolicy(); + logmp.setUseCompoundDocStore(r.nextBoolean()); + logmp.setUseCompoundFile(r.nextBoolean()); + logmp.setCalibrateSizeByDeletes(r.nextBoolean()); + } + + c.setReaderPooling(r.nextBoolean()); + c.setCodecProvider(new RandomCodecProvider(r)); + w = new IndexWriter(dir, c); + flushAt = _TestUtil.nextInt(r, 10, 1000); + } + + public void addDocument(Document doc) throws IOException { + w.addDocument(doc); + if (docCount++ == flushAt) { + w.commit(); + flushAt += _TestUtil.nextInt(r, 10, 1000); + } + } + + public void addIndexes(Directory... dirs) throws CorruptIndexException, IOException { + w.addIndexes(dirs); + } + + public void deleteDocuments(Term term) throws CorruptIndexException, IOException { + w.deleteDocuments(term); + } + + public int maxDoc() { + return w.maxDoc(); + } + + public IndexReader getReader() throws IOException { + if (r.nextBoolean()) { + return w.getReader(); + } else { + w.commit(); + return IndexReader.open(w.getDirectory(), new KeepOnlyLastCommitDeletionPolicy(), r.nextBoolean(), _TestUtil.nextInt(r, 1, 10)); + } + } + + public void close() throws IOException { + if (r.nextInt(4) == 2) { + w.optimize(); + } + w.close(); + } + + class RandomCodecProvider extends CodecProvider { + final String codec; + + RandomCodecProvider(Random random) { + register(new StandardCodec()); + register(new IntBlockCodec()); + register(new PreFlexCodec()); + register(new PulsingCodec()); + register(new SepCodec()); + codec = CodecProvider.CORE_CODECS[random.nextInt(CodecProvider.CORE_CODECS.length)]; + } + + @Override + public Codec getWriter(SegmentWriteState state) { + return lookup(codec); + } + } +} Property changes on: lucene\src\test\org\apache\lucene\index\RandomIndexWriter.java ___________________________________________________________________ Added: svn:eol-style + native Index: lucene/src/test/org/apache/lucene/util/LuceneTestCaseJ4.java =================================================================== --- lucene/src/test/org/apache/lucene/util/LuceneTestCaseJ4.java (revision 963689) +++ lucene/src/test/org/apache/lucene/util/LuceneTestCaseJ4.java (working copy) @@ -33,6 +33,7 @@ import java.io.PrintStream; import java.io.IOException; import java.util.Arrays; +import java.util.Hashtable; import java.util.Iterator; import java.util.Random; import java.util.ArrayList; @@ -330,6 +331,30 @@ return new Random(seed); } + private static Hashtable,Long> staticSeeds = new Hashtable,Long>(); + + /** + * Returns a {@link Random} instance for generating random numbers from a beforeclass + * annotated method. + * The random seed is logged during test execution and printed to System.out on any failure + * for reproducing the test using {@link #newStaticRandom(Class, long)} with the recorded seed + * . 
+ */ + public static Random newStaticRandom(Class<?> clazz) { + return newStaticRandom(clazz, seedRnd.nextLong()); + } + + /** + * Returns a {@link Random} instance for generating random numbers from a beforeclass + * annotated method. + * If an error occurs in the test that is not reproducible, you can use this method to + * initialize the number generator with the seed that was printed out during the failing test. + */ + public static Random newStaticRandom(Class<?> clazz, long seed) { + staticSeeds.put(clazz, Long.valueOf(seed)); + return new Random(seed); + } + public String getName() { return this.name; } @@ -348,6 +373,11 @@ // We get here from InterceptTestCaseEvents on the 'failed' event.... public void reportAdditionalFailureInfo() { + Long staticSeed = staticSeeds.get(getClass()); + if (staticSeed != null) { + System.out.println("NOTE: random static seed of testclass '" + getName() + "' was: " + staticSeed); + } + if (seed != null) { System.out.println("NOTE: random seed of testcase '" + getName() + "' was: " + seed); } Index: lucene/src/test/org/apache/lucene/document/TestBinaryDocument.java =================================================================== --- lucene/src/test/org/apache/lucene/document/TestBinaryDocument.java (revision 963689) +++ lucene/src/test/org/apache/lucene/document/TestBinaryDocument.java (working copy) @@ -4,8 +4,8 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.store.MockRAMDirectory; /** @@ -58,13 +58,12 @@ /** add the doc to a ram index */ MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer())); + RandomIndexWriter writer = new RandomIndexWriter(newRandom(), dir, + new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); writer.addDocument(doc); - writer.close(); /** open a reader and fetch the document */ - IndexReader reader = IndexReader.open(dir, false); + IndexReader reader = writer.getReader(); Document docFromReader = reader.document(0); assertTrue(docFromReader != null); @@ -76,6 +75,10 @@ String stringFldStoredTest = docFromReader.get("stringStored"); assertTrue(stringFldStoredTest.equals(binaryValStored)); + writer.close(); + reader.close(); + + reader = IndexReader.open(dir, false); /** delete the document from index */ reader.deleteDocument(0); assertEquals(0, reader.numDocs()); @@ -95,13 +98,12 @@ /** add the doc to a ram index */ MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer())); + RandomIndexWriter writer = new RandomIndexWriter(newRandom(), dir, + new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); writer.addDocument(doc); - writer.close(); /** open a reader and fetch the document */ - IndexReader reader = IndexReader.open(dir, false); + IndexReader reader = writer.getReader(); Document docFromReader = reader.document(0); assertTrue(docFromReader != null); @@ -110,6 +112,7 @@ assertTrue(binaryFldCompressedTest.equals(binaryValCompressed)); assertTrue(CompressionTools.decompressString(docFromReader.getBinaryValue("stringCompressed")).equals(binaryValCompressed)); + writer.close(); reader.close(); dir.close(); } Index: lucene/src/test/org/apache/lucene/document/TestDocument.java 
=================================================================== --- lucene/src/test/org/apache/lucene/document/TestDocument.java (revision 963689) +++ lucene/src/test/org/apache/lucene/document/TestDocument.java (working copy) @@ -1,8 +1,9 @@ package org.apache.lucene.document; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; @@ -32,17 +33,15 @@ /** * Tests {@link Document} class. */ -public class TestDocument extends LuceneTestCase -{ - +public class TestDocument extends LuceneTestCase { + String binaryVal = "this text will be stored as a byte array in the index"; String binaryVal2 = "this text will be also stored as a byte array in the index"; - public void testBinaryField() - throws Exception - { + public void testBinaryField() throws Exception { Document doc = new Document(); - Fieldable stringFld = new Field("string", binaryVal, Field.Store.YES, Field.Index.NO); + Fieldable stringFld = new Field("string", binaryVal, Field.Store.YES, + Field.Index.NO); Fieldable binaryFld = new Field("binary", binaryVal.getBytes()); Fieldable binaryFld2 = new Field("binary", binaryVal2.getBytes()); @@ -67,7 +66,7 @@ assertEquals(3, doc.fields.size()); byte[][] binaryTests = doc.getBinaryValues("binary"); - + assertEquals(2, binaryTests.length); binaryTest = new String(binaryTests[0]); @@ -88,17 +87,17 @@ /** * Tests {@link Document#removeField(String)} method for a brand new Document * that has not been indexed yet. - * + * * @throws Exception on error */ - public void testRemoveForNewDocument() throws Exception - { + public void testRemoveForNewDocument() throws Exception { Document doc = makeDocumentWithFields(); assertEquals(8, doc.fields.size()); doc.removeFields("keyword"); assertEquals(6, doc.fields.size()); - doc.removeFields("doesnotexists"); // removing non-existing fields is siltenlty ignored - doc.removeFields("keyword"); // removing a field more than once + doc.removeFields("doesnotexists"); // removing non-existing fields is + // silently ignored + doc.removeFields("keyword"); // removing a field more than once assertEquals(6, doc.fields.size()); doc.removeField("text"); assertEquals(5, doc.fields.size()); @@ -106,164 +105,171 @@ assertEquals(4, doc.fields.size()); doc.removeField("text"); assertEquals(4, doc.fields.size()); - doc.removeField("doesnotexists"); // removing non-existing fields is siltenlty ignored + doc.removeField("doesnotexists"); // removing non-existing fields is + // silently ignored assertEquals(4, doc.fields.size()); doc.removeFields("unindexed"); assertEquals(2, doc.fields.size()); doc.removeFields("unstored"); assertEquals(0, doc.fields.size()); - doc.removeFields("doesnotexists"); // removing non-existing fields is siltenlty ignored + doc.removeFields("doesnotexists"); // removing non-existing fields is + // silently ignored assertEquals(0, doc.fields.size()); } - - public void testConstructorExceptions() - { - new Field("name", "value", Field.Store.YES, Field.Index.NO); // okay - new Field("name", "value", Field.Store.NO, Field.Index.NOT_ANALYZED); // okay + + public void testConstructorExceptions() { + new Field("name", "value", Field.Store.YES, Field.Index.NO); // okay + new Field("name", "value", Field.Store.NO, Field.Index.NOT_ANALYZED); // okay 
try { new Field("name", "value", Field.Store.NO, Field.Index.NO); fail(); - } catch(IllegalArgumentException e) { + } catch (IllegalArgumentException e) { // expected exception } - new Field("name", "value", Field.Store.YES, Field.Index.NO, Field.TermVector.NO); // okay + new Field("name", "value", Field.Store.YES, Field.Index.NO, + Field.TermVector.NO); // okay try { - new Field("name", "value", Field.Store.YES, Field.Index.NO, Field.TermVector.YES); + new Field("name", "value", Field.Store.YES, Field.Index.NO, + Field.TermVector.YES); fail(); - } catch(IllegalArgumentException e) { + } catch (IllegalArgumentException e) { // expected exception } } - /** - * Tests {@link Document#getValues(String)} method for a brand new Document - * that has not been indexed yet. - * - * @throws Exception on error - */ - public void testGetValuesForNewDocument() throws Exception - { - doAssert(makeDocumentWithFields(), false); + /** + * Tests {@link Document#getValues(String)} method for a brand new Document + * that has not been indexed yet. + * + * @throws Exception on error + */ + public void testGetValuesForNewDocument() throws Exception { + doAssert(makeDocumentWithFields(), false); + } + + /** + * Tests {@link Document#getValues(String)} method for a Document retrieved + * from an index. + * + * @throws Exception on error + */ + public void testGetValuesForIndexedDocument() throws Exception { + RAMDirectory dir = new RAMDirectory(); + RandomIndexWriter writer = new RandomIndexWriter(newRandom(), dir, + new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); + writer.addDocument(makeDocumentWithFields()); + IndexReader reader = writer.getReader(); + + Searcher searcher = new IndexSearcher(reader); + + // search for something that does exists + Query query = new TermQuery(new Term("keyword", "test1")); + + // ensure that queries return expected results without DateFilter first + ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; + assertEquals(1, hits.length); + + doAssert(searcher.doc(hits[0].doc), true); + writer.close(); + searcher.close(); + reader.close(); + dir.close(); + } + + private Document makeDocumentWithFields() { + Document doc = new Document(); + doc.add(new Field("keyword", "test1", Field.Store.YES, + Field.Index.NOT_ANALYZED)); + doc.add(new Field("keyword", "test2", Field.Store.YES, + Field.Index.NOT_ANALYZED)); + doc.add(new Field("text", "test1", Field.Store.YES, Field.Index.ANALYZED)); + doc.add(new Field("text", "test2", Field.Store.YES, Field.Index.ANALYZED)); + doc.add(new Field("unindexed", "test1", Field.Store.YES, Field.Index.NO)); + doc.add(new Field("unindexed", "test2", Field.Store.YES, Field.Index.NO)); + doc + .add(new Field("unstored", "test1", Field.Store.NO, + Field.Index.ANALYZED)); + doc + .add(new Field("unstored", "test2", Field.Store.NO, + Field.Index.ANALYZED)); + return doc; + } + + private void doAssert(Document doc, boolean fromIndex) { + String[] keywordFieldValues = doc.getValues("keyword"); + String[] textFieldValues = doc.getValues("text"); + String[] unindexedFieldValues = doc.getValues("unindexed"); + String[] unstoredFieldValues = doc.getValues("unstored"); + + assertTrue(keywordFieldValues.length == 2); + assertTrue(textFieldValues.length == 2); + assertTrue(unindexedFieldValues.length == 2); + // this test cannot work for documents retrieved from the index + // since unstored fields will obviously not be returned + if (!fromIndex) { + assertTrue(unstoredFieldValues.length == 2); } - - /** - * Tests {@link 
Document#getValues(String)} method for a Document retrieved from - * an index. - * - * @throws Exception on error - */ - public void testGetValuesForIndexedDocument() throws Exception { - RAMDirectory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer())); - writer.addDocument(makeDocumentWithFields()); - writer.close(); - - Searcher searcher = new IndexSearcher(dir, true); - - // search for something that does exists - Query query = new TermQuery(new Term("keyword", "test1")); - - // ensure that queries return expected results without DateFilter first - ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; - assertEquals(1, hits.length); - - doAssert(searcher.doc(hits[0].doc), true); - searcher.close(); + + assertTrue(keywordFieldValues[0].equals("test1")); + assertTrue(keywordFieldValues[1].equals("test2")); + assertTrue(textFieldValues[0].equals("test1")); + assertTrue(textFieldValues[1].equals("test2")); + assertTrue(unindexedFieldValues[0].equals("test1")); + assertTrue(unindexedFieldValues[1].equals("test2")); + // this test cannot work for documents retrieved from the index + // since unstored fields will obviously not be returned + if (!fromIndex) { + assertTrue(unstoredFieldValues[0].equals("test1")); + assertTrue(unstoredFieldValues[1].equals("test2")); } - - private Document makeDocumentWithFields() - { - Document doc = new Document(); - doc.add(new Field( "keyword", "test1", Field.Store.YES, Field.Index.NOT_ANALYZED)); - doc.add(new Field( "keyword", "test2", Field.Store.YES, Field.Index.NOT_ANALYZED)); - doc.add(new Field( "text", "test1", Field.Store.YES, Field.Index.ANALYZED)); - doc.add(new Field( "text", "test2", Field.Store.YES, Field.Index.ANALYZED)); - doc.add(new Field("unindexed", "test1", Field.Store.YES, Field.Index.NO)); - doc.add(new Field("unindexed", "test2", Field.Store.YES, Field.Index.NO)); - doc.add(new Field( "unstored", "test1", Field.Store.NO, Field.Index.ANALYZED)); - doc.add(new Field( "unstored", "test2", Field.Store.NO, Field.Index.ANALYZED)); - return doc; + } + + public void testFieldSetValue() throws Exception { + + Field field = new Field("id", "id1", Field.Store.YES, + Field.Index.NOT_ANALYZED); + Document doc = new Document(); + doc.add(field); + doc.add(new Field("keyword", "test", Field.Store.YES, + Field.Index.NOT_ANALYZED)); + + RAMDirectory dir = new RAMDirectory(); + RandomIndexWriter writer = new RandomIndexWriter(newRandom(), dir, + new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); + writer.addDocument(doc); + field.setValue("id2"); + writer.addDocument(doc); + field.setValue("id3"); + writer.addDocument(doc); + + IndexReader reader = writer.getReader(); + Searcher searcher = new IndexSearcher(reader); + + Query query = new TermQuery(new Term("keyword", "test")); + + // ensure that queries return expected results without DateFilter first + ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; + assertEquals(3, hits.length); + int result = 0; + for (int i = 0; i < 3; i++) { + Document doc2 = searcher.doc(hits[i].doc); + Field f = doc2.getField("id"); + if (f.stringValue().equals("id1")) result |= 1; + else if (f.stringValue().equals("id2")) result |= 2; + else if (f.stringValue().equals("id3")) result |= 4; + else fail("unexpected id field"); } - - private void doAssert(Document doc, boolean fromIndex) - { - String[] keywordFieldValues = doc.getValues("keyword"); - String[] textFieldValues = doc.getValues("text"); - 
String[] unindexedFieldValues = doc.getValues("unindexed"); - String[] unstoredFieldValues = doc.getValues("unstored"); - - assertTrue(keywordFieldValues.length == 2); - assertTrue(textFieldValues.length == 2); - assertTrue(unindexedFieldValues.length == 2); - // this test cannot work for documents retrieved from the index - // since unstored fields will obviously not be returned - if (! fromIndex) - { - assertTrue(unstoredFieldValues.length == 2); - } - - assertTrue(keywordFieldValues[0].equals("test1")); - assertTrue(keywordFieldValues[1].equals("test2")); - assertTrue(textFieldValues[0].equals("test1")); - assertTrue(textFieldValues[1].equals("test2")); - assertTrue(unindexedFieldValues[0].equals("test1")); - assertTrue(unindexedFieldValues[1].equals("test2")); - // this test cannot work for documents retrieved from the index - // since unstored fields will obviously not be returned - if (! fromIndex) - { - assertTrue(unstoredFieldValues[0].equals("test1")); - assertTrue(unstoredFieldValues[1].equals("test2")); - } - } - - public void testFieldSetValue() throws Exception { - - Field field = new Field("id", "id1", Field.Store.YES, Field.Index.NOT_ANALYZED); - Document doc = new Document(); - doc.add(field); - doc.add(new Field("keyword", "test", Field.Store.YES, Field.Index.NOT_ANALYZED)); - - RAMDirectory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer())); - writer.addDocument(doc); - field.setValue("id2"); - writer.addDocument(doc); - field.setValue("id3"); - writer.addDocument(doc); - writer.close(); - - Searcher searcher = new IndexSearcher(dir, true); - - Query query = new TermQuery(new Term("keyword", "test")); - - // ensure that queries return expected results without DateFilter first - ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; - assertEquals(3, hits.length); - int result = 0; - for(int i=0;i<3;i++) { - Document doc2 = searcher.doc(hits[i].doc); - Field f = doc2.getField("id"); - if (f.stringValue().equals("id1")) - result |= 1; - else if (f.stringValue().equals("id2")) - result |= 2; - else if (f.stringValue().equals("id3")) - result |= 4; - else - fail("unexpected id field"); - } - searcher.close(); - dir.close(); - assertEquals("did not see all IDs", 7, result); - } - + writer.close(); + searcher.close(); + reader.close(); + dir.close(); + assertEquals("did not see all IDs", 7, result); + } + public void testFieldSetValueChangeBinary() { Field field1 = new Field("field1", new byte[0]); - Field field2 = new Field("field2", "", - Field.Store.YES, Field.Index.ANALYZED); + Field field2 = new Field("field2", "", Field.Store.YES, + Field.Index.ANALYZED); try { field1.setValue("abc"); fail("did not hit expected exception"); Index: lucene/src/java/org/apache/lucene/index/codecs/pulsing/PulsingPostingsWriterImpl.java =================================================================== --- lucene/src/java/org/apache/lucene/index/codecs/pulsing/PulsingPostingsWriterImpl.java (revision 963689) +++ lucene/src/java/org/apache/lucene/index/codecs/pulsing/PulsingPostingsWriterImpl.java (working copy) @@ -229,7 +229,7 @@ @Override public void finishDoc() { - assert currentDoc.numPositions == currentDoc.termDocFreq; + assert omitTF || currentDoc.numPositions == currentDoc.termDocFreq; } boolean pendingIsIndexTerm; Index: lucene/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java =================================================================== --- 
Index: lucene/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java
===================================================================
--- lucene/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java	(revision 963689)
+++ lucene/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java	(working copy)
@@ -28,8 +28,8 @@
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.IndexSearcher;
@@ -38,95 +38,94 @@
 import org.apache.lucene.util.LuceneTestCase;
 
 public class TestMoreLikeThis extends LuceneTestCase {
-    private RAMDirectory directory;
-    private IndexReader reader;
-    private IndexSearcher searcher;
+  private RAMDirectory directory;
+  private IndexReader reader;
+  private IndexSearcher searcher;
+
+  @Override
+  protected void setUp() throws Exception {
+    super.setUp();
+    directory = new RAMDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory,
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+
+    // Add series of docs with specific information for MoreLikeThis
+    addDoc(writer, "lucene");
+    addDoc(writer, "lucene release");
 
-    @Override
-    protected void setUp() throws Exception {
-        super.setUp();
-        directory = new RAMDirectory();
-        IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
-
-        // Add series of docs with specific information for MoreLikeThis
-        addDoc(writer, "lucene");
-        addDoc(writer, "lucene release");
-
-        writer.close();
-        reader = IndexReader.open(directory, true);
-        searcher = new IndexSearcher(reader);
-
+    reader = writer.getReader();
+    writer.close();
+    searcher = new IndexSearcher(reader);
+  }
+
+  @Override
+  protected void tearDown() throws Exception {
+    reader.close();
+    searcher.close();
+    directory.close();
+    super.tearDown();
+  }
+
+  private void addDoc(RandomIndexWriter writer, String text) throws IOException {
+    Document doc = new Document();
+    doc.add(new Field("text", text, Field.Store.YES, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+  }
+
+  public void testBoostFactor() throws Throwable {
+    Map<String,Float> originalValues = getOriginalValues();
+
+    MoreLikeThis mlt = new MoreLikeThis(reader);
+    mlt.setAnalyzer(new MockAnalyzer(MockTokenizer.WHITESPACE, false));
+    mlt.setMinDocFreq(1);
+    mlt.setMinTermFreq(1);
+    mlt.setMinWordLen(1);
+    mlt.setFieldNames(new String[] {"text"});
+    mlt.setBoost(true);
+
+    // this mean that every term boost factor will be multiplied by this
+    // number
+    float boostFactor = 5;
+    mlt.setBoostFactor(boostFactor);
+
+    BooleanQuery query = (BooleanQuery) mlt.like(new StringReader(
+        "lucene release"));
+    List<BooleanClause> clauses = query.clauses();
+
+    assertEquals("Expected " + originalValues.size() + " clauses.",
+        originalValues.size(), clauses.size());
+
+    for (int i = 0; i < clauses.size(); i++) {
+      BooleanClause clause = clauses.get(i);
+      TermQuery tq = (TermQuery) clause.getQuery();
+      Float termBoost = originalValues.get(tq.getTerm().text());
+      assertNotNull("Expected term " + tq.getTerm().text(), termBoost);
+
+      float totalBoost = termBoost.floatValue() * boostFactor;
+      assertEquals("Expected boost of " + totalBoost + " for term '"
+          + tq.getTerm().text() + "' got " + tq.getBoost(), totalBoost, tq
+          .getBoost(), 0.0001);
     }
-
-    @Override
-    protected void tearDown() throws Exception {
-        reader.close();
-        searcher.close();
-        directory.close();
-        super.tearDown();
+  }
+
+  private Map<String,Float> getOriginalValues() throws IOException {
+    Map<String,Float> originalValues = new HashMap<String,Float>();
+    MoreLikeThis mlt = new MoreLikeThis(reader);
+    mlt.setAnalyzer(new MockAnalyzer(MockTokenizer.WHITESPACE, false));
+    mlt.setMinDocFreq(1);
+    mlt.setMinTermFreq(1);
+    mlt.setMinWordLen(1);
+    mlt.setFieldNames(new String[] {"text"});
+    mlt.setBoost(true);
+    BooleanQuery query = (BooleanQuery) mlt.like(new StringReader(
+        "lucene release"));
+    List<BooleanClause> clauses = query.clauses();
+
+    for (int i = 0; i < clauses.size(); i++) {
+      BooleanClause clause = clauses.get(i);
+      TermQuery tq = (TermQuery) clause.getQuery();
+      originalValues.put(tq.getTerm().text(), Float.valueOf(tq.getBoost()));
    }
-
-    private void addDoc(IndexWriter writer, String text) throws IOException {
-        Document doc = new Document();
-        doc.add(new Field("text", text, Field.Store.YES, Field.Index.ANALYZED));
-        writer.addDocument(doc);
-    }
-
-    public void testBoostFactor() throws Throwable {
-        Map<String,Float> originalValues = getOriginalValues();
-
-        MoreLikeThis mlt = new MoreLikeThis(
-                reader);
-        mlt.setAnalyzer(new MockAnalyzer(MockTokenizer.WHITESPACE, false));
-        mlt.setMinDocFreq(1);
-        mlt.setMinTermFreq(1);
-        mlt.setMinWordLen(1);
-        mlt.setFieldNames(new String[] { "text" });
-        mlt.setBoost(true);
-
-        // this mean that every term boost factor will be multiplied by this
-        // number
-        float boostFactor = 5;
-        mlt.setBoostFactor(boostFactor);
-
-        BooleanQuery query = (BooleanQuery) mlt.like(new StringReader(
-                "lucene release"));
-        List<BooleanClause> clauses = query.clauses();
-
-        assertEquals("Expected " + originalValues.size() + " clauses.",
-                originalValues.size(), clauses.size());
-
-        for (int i = 0; i < clauses.size(); i++) {
-            BooleanClause clause = clauses.get(i);
-            TermQuery tq = (TermQuery) clause.getQuery();
-            Float termBoost = originalValues.get(tq.getTerm().text());
-            assertNotNull("Expected term " + tq.getTerm().text(), termBoost);
-
-            float totalBoost = termBoost.floatValue() * boostFactor;
-            assertEquals("Expected boost of " + totalBoost + " for term '"
-                    + tq.getTerm().text() + "' got " + tq.getBoost(),
-                    totalBoost, tq.getBoost(), 0.0001);
-        }
-    }
-
-    private Map<String,Float> getOriginalValues() throws IOException {
-        Map<String,Float> originalValues = new HashMap<String,Float>();
-        MoreLikeThis mlt = new MoreLikeThis(reader);
-        mlt.setAnalyzer(new MockAnalyzer(MockTokenizer.WHITESPACE, false));
-        mlt.setMinDocFreq(1);
-        mlt.setMinTermFreq(1);
-        mlt.setMinWordLen(1);
-        mlt.setFieldNames(new String[] { "text" });
-        mlt.setBoost(true);
-        BooleanQuery query = (BooleanQuery) mlt.like(new StringReader(
-                "lucene release"));
-        List<BooleanClause> clauses = query.clauses();
-
-        for (int i = 0; i < clauses.size(); i++) {
-            BooleanClause clause = clauses.get(i);
-            TermQuery tq = (TermQuery) clause.getQuery();
-            originalValues.put(tq.getTerm().text(), Float.valueOf(tq.getBoost()));
-        }
-        return originalValues;
-    }
+    return originalValues;
+  }
 }
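As context for the MoreLikeThis conversion above: both helpers lower MoreLikeThis's pruning thresholds, and that is what makes a two-document index usable at all; with the defaults (minTermFreq=2, minDocFreq=5) every candidate term would be filtered out. A sketch of the minimal configuration, mirroring the calls in the hunks above (the reader is the one built in setUp()):

  MoreLikeThis mlt = new MoreLikeThis(reader);
  mlt.setAnalyzer(new MockAnalyzer(MockTokenizer.WHITESPACE, false));
  mlt.setMinTermFreq(1); // keep terms occurring only once in the source text
  mlt.setMinDocFreq(1);  // keep terms occurring in only one document
  mlt.setMinWordLen(1);
  mlt.setFieldNames(new String[] {"text"});
  Query like = mlt.like(new StringReader("lucene release"));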
Index: lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java
===================================================================
--- lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java	(revision 963689)
+++ lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java	(working copy)
@@ -26,6 +26,7 @@
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.MultiFields;
@@ -44,8 +45,8 @@
   protected void setUp() throws Exception {
     super.setUp();
     directory = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer()));
+    RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory,
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
 
     //Add series of docs with filterable fields : url, text and dates flags
     addDoc(writer, "http://lucene.apache.org", "lucene 1.4.3 available", "20040101");
@@ -56,9 +57,8 @@
     addDoc(writer, "http://www.bar.com", "Dog uses Lucene", "20050101");
     addDoc(writer, "http://lucene.apache.org", "Lucene 2.0 out", "20050101");
     addDoc(writer, "http://lucene.apache.org", "Oops. Lucene 2.1 out", "20050102");
-
-    writer.close();
-    reader=IndexReader.open(directory, true);
+    reader = writer.getReader();
+    writer.close();
     searcher =new IndexSearcher(reader);
   }
 
@@ -71,7 +71,7 @@
     super.tearDown();
   }
 
-  private void addDoc(IndexWriter writer, String url, String text, String date) throws IOException
+  private void addDoc(RandomIndexWriter writer, String url, String text, String date) throws IOException
   {
     Document doc=new Document();
     doc.add(new Field(KEY_FIELD,url,Field.Store.YES,Field.Index.NOT_ANALYZED));
Index: lucene/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java
===================================================================
--- lucene/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java	(revision 963689)
+++ lucene/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java	(working copy)
@@ -23,8 +23,8 @@
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
@@ -53,16 +53,16 @@
   public void testMissingTerms() throws Exception {
     String fieldName="field1";
     RAMDirectory rd=new RAMDirectory();
-    IndexWriter w = new IndexWriter(rd, new IndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer()));
+    RandomIndexWriter w = new RandomIndexWriter(newRandom(), rd,
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
     for (int i = 0; i < 100; i++) {
       Document doc=new Document();
       int term=i*10; //terms are units of 10;
       doc.add(new Field(fieldName,""+term,Field.Store.YES,Field.Index.NOT_ANALYZED));
       w.addDocument(doc);
     }
+    IndexReader reader = w.getReader();
     w.close();
-    IndexReader reader = IndexReader.open(rd, true);
 
     TermsFilter tf=new TermsFilter();
     tf.addTerm(new Term(fieldName,"19"));
@@ -80,6 +80,8 @@
     tf.addTerm(new Term(fieldName,"00"));
     bits = (OpenBitSet)tf.getDocIdSet(reader);
     assertEquals("Must match 2", 2, bits.cardinality());
-
+
+    reader.close();
+    rd.close();
   }
 }
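TermsFilter, as exercised above, is a plain OR over exact terms, and on this branch its getDocIdSet(IndexReader) concretely returns an OpenBitSet, which is why the test can cast it and use cardinality() as a hit count. An illustrative fragment in the test's own terms (the docs indexed above carry values that are multiples of ten):

  TermsFilter tf = new TermsFilter();
  tf.addTerm(new Term("field1", "19")); // no doc carries this value
  assertEquals(0, ((OpenBitSet) tf.getDocIdSet(reader)).cardinality());
  tf.addTerm(new Term("field1", "20")); // exactly one doc carries this value
  assertEquals(1, ((OpenBitSet) tf.getDocIdSet(reader)).cardinality());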
Index: lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java
===================================================================
--- lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java	(revision 963689)
+++ lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java	(working copy)
@@ -19,12 +19,14 @@
 
 import java.util.Calendar;
 import java.util.GregorianCalendar;
+import java.util.Random;
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
@@ -47,18 +49,22 @@
 
   private RAMDirectory directory;
   private IndexSearcher searcher;
+  private IndexReader reader;
   private Query query;
   // private DateFilter dateFilter;  DateFilter was deprecated and removed
   private TermRangeFilter dateFilter;
   private QueryWrapperFilter bobFilter;
   private QueryWrapperFilter sueFilter;
 
+  private Random random;
+
   @Override
   protected void setUp() throws Exception {
     super.setUp();
+    random = newRandom();
     directory = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer()));
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
 
     Calendar cal = new GregorianCalendar();
     cal.clear();
@@ -73,10 +79,10 @@
 
       cal.add(Calendar.DATE, 1);
     }
-
+    reader = writer.getReader();
     writer.close();
 
-    searcher = new IndexSearcher(directory, true);
+    searcher = new IndexSearcher(reader);
 
     // query for everything to make life easier
     BooleanQuery bq = new BooleanQuery();
@@ -96,6 +102,14 @@
         new TermQuery(new Term("owner", "sue")));
   }
 
+  @Override
+  public void tearDown() throws Exception {
+    searcher.close();
+    reader.close();
+    directory.close();
+    super.tearDown();
+  }
+
   private ChainedFilter getChainedFilter(Filter[] chain, int[] logic) {
     if (logic == null) {
       return new ChainedFilter(chain);
@@ -186,10 +200,12 @@
 
   public void testWithCachingFilter() throws Exception {
     Directory dir = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir,
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
 
+    IndexReader reader = writer.getReader();
     writer.close();
 
-    Searcher searcher = new IndexSearcher(dir, true);
+    Searcher searcher = new IndexSearcher(reader);
 
     Query query = new TermQuery(new Term("none", "none"));
 
@@ -206,6 +222,9 @@
     // throws java.lang.ClassCastException: org.apache.lucene.util.OpenBitSet cannot be cast to java.util.BitSet
     searcher.search(new MatchAllDocsQuery(), cf, 1);
 
+    searcher.close();
+    reader.close();
+    dir.close();
   }
 }
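ChainedFilterTest's helper shows ChainedFilter's two modes: with no logic array the chain is OR-ed together, while an int[] supplies one operator per filter as the chain is folded left to right. A hedged sketch reusing the test's own fields and helper (the AND constant is contrib ChainedFilter's; this particular combination is illustrative, not one of the test's cases):

  // union: documents owned by bob or sue (null logic defaults to OR)
  ChainedFilter anyOwner = getChainedFilter(
      new Filter[] {bobFilter, sueFilter}, null);

  // intersection: documents in the date range that are also bob's
  ChainedFilter bobInRange = getChainedFilter(
      new Filter[] {dateFilter, bobFilter},
      new int[] {ChainedFilter.AND, ChainedFilter.AND});
  ScoreDoc[] hits = searcher.search(query, bobInRange, 1000).scoreDocs;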
Index: lucene/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java
===================================================================
--- lucene/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java	(revision 963689)
+++ lucene/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java	(working copy)
@@ -24,8 +24,8 @@
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
@@ -38,7 +38,7 @@
   protected void setUp() throws Exception {
     super.setUp();
     directory = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
+    RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory, new IndexWriterConfig(
         TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));
 
     //Add series of docs with filterable fields : acces rights, prices, dates and "in-stock" flags
@@ -47,12 +47,18 @@
     addDoc(writer, "guest", "020", "20050101","Y");
     addDoc(writer, "admin", "020", "20050101","Maybe");
     addDoc(writer, "admin guest", "030", "20050101","N");
-
-    writer.close();
-    reader=IndexReader.open(directory, true);
+    reader = writer.getReader();
+    writer.close();
   }
 
-  private void addDoc(IndexWriter writer, String accessRights, String price, String date, String inStock) throws IOException
+  @Override
+  protected void tearDown() throws Exception {
+    reader.close();
+    directory.close();
+    super.tearDown();
+  }
+
+  private void addDoc(RandomIndexWriter writer, String accessRights, String price, String date, String inStock) throws IOException
   {
     Document doc=new Document();
     doc.add(new Field("accessRights",accessRights,Field.Store.YES,Field.Index.ANALYZED));
Index: lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestRegexQuery.java
===================================================================
--- lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestRegexQuery.java	(revision 963689)
+++ lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestRegexQuery.java	(working copy)
@@ -17,9 +17,11 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
@@ -33,30 +35,30 @@
 
 public class TestRegexQuery extends LuceneTestCase {
   private IndexSearcher searcher;
+  private IndexReader reader;
+  private Directory directory;
   private final String FN = "field";
 
   @Override
   protected void setUp() throws Exception {
     super.setUp();
-    RAMDirectory directory = new RAMDirectory();
-    try {
-      IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
-          TEST_VERSION_CURRENT, new MockAnalyzer()));
-      Document doc = new Document();
-      doc.add(new Field(FN, "the quick brown fox jumps over the lazy dog", Field.Store.NO, Field.Index.ANALYZED));
-      writer.addDocument(doc);
-      writer.optimize();
-      writer.close();
-      searcher = new IndexSearcher(directory, true);
-    } catch (Exception e) {
-      fail(e.toString());
-    }
+    directory = new RAMDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory,
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+    Document doc = new Document();
+    doc.add(new Field(FN, "the quick brown fox jumps over the lazy dog", Field.Store.NO, Field.Index.ANALYZED));
+    writer.addDocument(doc);
+    reader = writer.getReader();
+    writer.close();
+    searcher = new IndexSearcher(reader);
   }
 
   @Override
   protected void tearDown() throws Exception {
     searcher.close();
+    reader.close();
+    directory.close();
     super.tearDown();
   }
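TestRegexQuery's fixture is a single fox-and-dog document, now indexed through RandomIndexWriter so the regex term expansion runs against a reader that need not be optimized. For orientation, a typical assertion in this test looks roughly like the following (the pattern is an illustrative one, not taken from the file; RegexQuery is the contrib MultiTermQuery that expands a regex against the term dictionary):

  RegexQuery rq = new RegexQuery(new Term(FN, "q.[aeiou]c.*")); // matches "quick"
  assertEquals(1, searcher.search(rq, null, 1000).totalHits);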
Index: lucene/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java
===================================================================
--- lucene/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java	(revision 963689)
+++ lucene/contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java	(working copy)
@@ -24,8 +24,9 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
@@ -33,13 +34,15 @@
 public class FuzzyLikeThisQueryTest extends LuceneTestCase {
   private RAMDirectory directory;
   private IndexSearcher searcher;
+  private IndexReader reader;
   private Analyzer analyzer=new MockAnalyzer();
 
   @Override
   protected void setUp() throws Exception {
     super.setUp();
     directory = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
+    RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory,
+        new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
 
     //Add series of docs with misspelt names
     addDoc(writer, "jonathon smythe","1");
@@ -48,12 +51,20 @@
     addDoc(writer, "johnny smith","4" );
     addDoc(writer, "jonny smith","5" );
     addDoc(writer, "johnathon smythe","6");
-
+    reader = writer.getReader();
     writer.close();
-    searcher=new IndexSearcher(directory, true);
+    searcher=new IndexSearcher(reader);
   }
 
-  private void addDoc(IndexWriter writer, String name, String id) throws IOException
+  @Override
+  protected void tearDown() throws Exception {
+    searcher.close();
+    reader.close();
+    directory.close();
+    super.tearDown();
+  }
+
+  private void addDoc(RandomIndexWriter writer, String name, String id) throws IOException
   {
     Document doc=new Document();
     doc.add(new Field("name",name,Field.Store.YES,Field.Index.ANALYZED));