Index: lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java (revision 963263)
+++ lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java (working copy)
@@ -20,12 +20,15 @@
import java.io.BufferedReader;
import java.io.InputStream;
import java.io.InputStreamReader;
+import java.util.Random;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
@@ -55,6 +58,13 @@
public class TestFuzzyQuery2 extends LuceneTestCase {
/** epsilon for score comparisons */
static final float epsilon = 0.00001f;
+ private Random random;
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ random = newRandom();
+ }
public void testFromTestData() throws Exception {
// TODO: randomize!
@@ -78,8 +88,8 @@
int terms = (int) Math.pow(2, bits);
RAMDirectory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new MockAnalyzer(MockTokenizer.KEYWORD, false),
- IndexWriter.MaxFieldLength.UNLIMITED);
+ RandomIndexWriter writer = new RandomIndexWriter(random, dir,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.KEYWORD, false)));
Document doc = new Document();
Field field = new Field("field", "", Field.Store.NO, Field.Index.ANALYZED);
@@ -88,12 +98,11 @@
for (int i = 0; i < terms; i++) {
field.setValue(mapInt(codePointTable, i));
writer.addDocument(doc);
- }
+ }
- writer.optimize();
- writer.close();
-
- IndexSearcher searcher = new IndexSearcher(dir);
+ IndexReader r = writer.getReader();
+ IndexSearcher searcher = new IndexSearcher(r);
+ writer.close();
String line;
while ((line = reader.readLine()) != null) {
String params[] = line.split(",");
@@ -113,6 +122,7 @@
}
}
searcher.close();
+ r.close();
dir.close();
}
Index: lucene/src/test/org/apache/lucene/search/TestNot.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestNot.java (revision 963263)
+++ lucene/src/test/org/apache/lucene/search/TestNot.java (working copy)
@@ -19,8 +19,9 @@
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.analysis.MockAnalyzer;
@@ -39,21 +40,24 @@
public void testNot() throws Exception {
RAMDirectory store = new RAMDirectory();
- IndexWriter writer = new IndexWriter(store, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), store,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document d1 = new Document();
d1.add(new Field("field", "a b", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(d1);
- writer.optimize();
- writer.close();
+ IndexReader reader = writer.getReader();
- Searcher searcher = new IndexSearcher(store, true);
+ Searcher searcher = new IndexSearcher(reader);
QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer());
Query query = parser.parse("a NOT b");
//System.out.println(query);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals(0, hits.length);
+ writer.close();
+ searcher.close();
+ reader.close();
+ store.close();
}
}
Index: lucene/src/test/org/apache/lucene/search/TestPrefixFilter.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestPrefixFilter.java (revision 963263)
+++ lucene/src/test/org/apache/lucene/search/TestPrefixFilter.java (working copy)
@@ -19,8 +19,9 @@
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
@@ -38,18 +39,19 @@
"/Computers/Mac/One",
"/Computers/Mac/Two",
"/Computers/Windows"};
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < categories.length; i++) {
Document doc = new Document();
doc.add(new Field("category", categories[i], Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
}
- writer.close();
+ IndexReader reader = writer.getReader();
// PrefixFilter combined with ConstantScoreQuery
PrefixFilter filter = new PrefixFilter(new Term("category", "/Computers"));
Query query = new ConstantScoreQuery(filter);
- IndexSearcher searcher = new IndexSearcher(directory, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals(4, hits.length);
@@ -100,5 +102,10 @@
query = new ConstantScoreQuery(filter);
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals(0, hits.length);
+
+ writer.close();
+ searcher.close();
+ reader.close();
+ directory.close();
}
}
Index: lucene/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java (revision 963263)
+++ lucene/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java (working copy)
@@ -18,12 +18,16 @@
*/
import java.io.IOException;
+import java.util.Random;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.automaton.Automaton;
@@ -35,15 +39,18 @@
* and the differences between UTF-8/UTF-32 and UTF-16 binary sort order.
*/
public class TestAutomatonQueryUnicode extends LuceneTestCase {
+ private IndexReader reader;
private IndexSearcher searcher;
+ private Directory directory;
private final String FN = "field";
public void setUp() throws Exception {
super.setUp();
- RAMDirectory directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new MockAnalyzer(), true,
- IndexWriter.MaxFieldLength.LIMITED);
+ Random random = newRandom();
+ directory = new RAMDirectory();
+ RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
Field titleField = new Field("title", "some title", Field.Store.NO,
Field.Index.ANALYZED);
@@ -79,13 +86,15 @@
writer.addDocument(doc);
field.setValue("\uFFFD\uFFFD");
writer.addDocument(doc);
- writer.optimize();
+ reader = writer.getReader();
+ searcher = new IndexSearcher(reader);
writer.close();
- searcher = new IndexSearcher(directory, true);
}
public void tearDown() throws Exception {
searcher.close();
+ reader.close();
+ directory.close();
super.tearDown();
}
Index: lucene/src/test/org/apache/lucene/search/TestBooleanQuery.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestBooleanQuery.java (revision 963263)
+++ lucene/src/test/org/apache/lucene/search/TestBooleanQuery.java (working copy)
@@ -16,9 +16,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-import org.apache.lucene.index.IndexWriter;
+
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -61,7 +62,7 @@
// LUCENE-1630
public void testNullOrSubScorer() throws Throwable {
Directory dir = new MockRAMDirectory();
- IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(
+ RandomIndexWriter w = new RandomIndexWriter(newRandom(), dir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "a b c d", Field.Store.NO, Field.Index.ANALYZED));
Index: lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java (revision 963263)
+++ lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java (working copy)
@@ -46,12 +46,15 @@
public static final float SCORE_COMP_THRESH = 1e-6f;
private IndexSearcher searcher;
+ private IndexReader reader;
private PhraseQuery query;
private RAMDirectory directory;
+ private Random random;
@Override
public void setUp() throws Exception {
super.setUp();
+ random = newRandom();
directory = new RAMDirectory();
Analyzer analyzer = new Analyzer() {
@Override
@@ -64,7 +67,8 @@
return 100;
}
};
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
+ RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
Document doc = new Document();
doc.add(new Field("field", "one two three four five", Field.Store.YES, Field.Index.ANALYZED));
@@ -82,16 +86,17 @@
doc.add(new Field("nonexist", "phrase exist notexist exist found", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
- writer.optimize();
+ reader = writer.getReader();
writer.close();
- searcher = new IndexSearcher(directory, true);
+ searcher = new IndexSearcher(reader);
query = new PhraseQuery();
}
@Override
protected void tearDown() throws Exception {
searcher.close();
+ reader.close();
directory.close();
super.tearDown();
}
@@ -211,14 +216,15 @@
public void testPhraseQueryWithStopAnalyzer() throws Exception {
RAMDirectory directory = new RAMDirectory();
Analyzer stopAnalyzer = new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, false);
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
- Version.LUCENE_24, stopAnalyzer));
+ RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+ new IndexWriterConfig(Version.LUCENE_24, stopAnalyzer));
Document doc = new Document();
doc.add(new Field("field", "the stop words are here", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
+ IndexReader reader = writer.getReader();
writer.close();
- IndexSearcher searcher = new IndexSearcher(directory, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
// valid exact phrase query
PhraseQuery query = new PhraseQuery();
@@ -239,11 +245,14 @@
searcher.close();
+ reader.close();
+ directory.close();
}
public void testPhraseQueryInConjunctionScorer() throws Exception {
RAMDirectory directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("source", "marketing info", Field.Store.YES, Field.Index.ANALYZED));
@@ -254,10 +263,10 @@
doc.add(new Field("source", "marketing info", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
- writer.optimize();
+ IndexReader reader = writer.getReader();
writer.close();
- IndexSearcher searcher = new IndexSearcher(directory, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
PhraseQuery phraseQuery = new PhraseQuery();
phraseQuery.add(new Term("source", "marketing"));
@@ -277,8 +286,10 @@
searcher.close();
+ reader.close();
- writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
+ writer = new RandomIndexWriter(random, directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
doc = new Document();
doc.add(new Field("contents", "map entry woo", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
@@ -291,10 +302,10 @@
doc.add(new Field("contents", "map foobarword entry woo", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
- writer.optimize();
+ reader = writer.getReader();
writer.close();
- searcher = new IndexSearcher(directory, true);
+ searcher = new IndexSearcher(reader);
termQuery = new TermQuery(new Term("contents","woo"));
phraseQuery = new PhraseQuery();
@@ -322,12 +333,14 @@
searcher.close();
+ reader.close();
directory.close();
}
public void testSlopScoring() throws IOException {
Directory directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field", "foo firstname lastname foo", Field.Store.YES, Field.Index.ANALYZED));
@@ -341,10 +354,10 @@
doc3.add(new Field("field", "foo firstname zzz yyy lastname foo", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc3);
- writer.optimize();
+ IndexReader reader = writer.getReader();
writer.close();
- Searcher searcher = new IndexSearcher(directory, true);
+ Searcher searcher = new IndexSearcher(reader);
PhraseQuery query = new PhraseQuery();
query.add(new Term("field", "firstname"));
query.add(new Term("field", "lastname"));
@@ -359,7 +372,10 @@
assertEquals(1, hits[1].doc);
assertEquals(0.31, hits[2].score, 0.01);
assertEquals(2, hits[2].doc);
- QueryUtils.check(query,searcher);
+ QueryUtils.check(query,searcher);
+ searcher.close();
+ reader.close();
+ directory.close();
}
public void testToString() throws Exception {
@@ -587,13 +603,14 @@
Directory dir = new MockRAMDirectory();
Analyzer analyzer = new MockAnalyzer();
- IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
+ RandomIndexWriter w = new RandomIndexWriter(random, dir,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
List<List<String>> docs = new ArrayList<List<String>>();
Document d = new Document();
Field f = new Field("f", "", Field.Store.NO, Field.Index.ANALYZED);
d.add(f);
- Random r = newRandom();
+ Random r = random;
int NUM_DOCS = 10*_TestUtil.getRandomMultiplier();
for(int i=0;i doSearch(int numResults) throws IOException {
Query q = new MatchAllDocsQuery();
- IndexSearcher searcher = new IndexSearcher(dir, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
TopDocsCollector<ScoreDoc> tdc = new MyTopsDocCollector(numResults);
searcher.search(q, tdc);
searcher.close();
@@ -109,15 +110,17 @@
// populate an index with 30 documents, this should be enough for the test.
// The documents have no content - the test uses MatchAllDocsQuery().
- IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 30; i++) {
writer.addDocument(new Document());
}
+ reader = writer.getReader();
writer.close();
}
@Override
protected void tearDown() throws Exception {
+ reader.close();
dir.close();
dir = null;
super.tearDown();
Index: lucene/src/test/org/apache/lucene/search/TestWildcard.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestWildcard.java (revision 963263)
+++ lucene/src/test/org/apache/lucene/search/TestWildcard.java (working copy)
@@ -23,19 +23,28 @@
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.store.RAMDirectory;
import java.io.IOException;
+import java.util.Random;
/**
* TestWildcard tests the '*' and '?' wildcard characters.
*/
public class TestWildcard
extends LuceneTestCase {
+ private Random random;
+
+ @Override
+ public void setUp() throws Exception {
+ super.setUp();
+ random = newRandom();
+ }
+
public void testEquals() {
WildcardQuery wq1 = new WildcardQuery(new Term("field", "b*a"));
WildcardQuery wq2 = new WildcardQuery(new Term("field", "b*a"));
@@ -193,14 +202,13 @@
private RAMDirectory getIndexStore(String field, String[] contents)
throws IOException {
RAMDirectory indexStore = new RAMDirectory();
- IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(
+ RandomIndexWriter writer = new RandomIndexWriter(random, indexStore, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < contents.length; ++i) {
Document doc = new Document();
doc.add(new Field(field, contents[i], Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
- writer.optimize();
writer.close();
return indexStore;
@@ -251,7 +259,8 @@
// prepare the index
RAMDirectory dir = new RAMDirectory();
- IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter iw = new RandomIndexWriter(random, dir,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < docs.length; i++) {
Document doc = new Document();
doc.add(new Field(field,docs[i],Store.NO,Index.ANALYZED));
Index: lucene/src/test/org/apache/lucene/search/TestAutomatonQuery.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestAutomatonQuery.java (revision 963263)
+++ lucene/src/test/org/apache/lucene/search/TestAutomatonQuery.java (working copy)
@@ -18,13 +18,17 @@
*/
import java.io.IOException;
+import java.util.Random;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.automaton.Automaton;
@@ -32,15 +36,18 @@
import org.apache.lucene.util.automaton.BasicOperations;
public class TestAutomatonQuery extends LuceneTestCase {
+ private Directory directory;
+ private IndexReader reader;
private IndexSearcher searcher;
-
+
private final String FN = "field";
public void setUp() throws Exception {
super.setUp();
- RAMDirectory directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new MockAnalyzer(), true,
- IndexWriter.MaxFieldLength.LIMITED);
+ Random random = newRandom();
+ directory = new RAMDirectory();
+ RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
Field titleField = new Field("title", "some title", Field.Store.NO,
Field.Index.ANALYZED);
@@ -57,13 +64,15 @@
field.setValue("doc three has some different stuff"
+ " with numbers 1234 5678.9 and letter b");
writer.addDocument(doc);
- writer.optimize();
+ reader = writer.getReader();
+ searcher = new IndexSearcher(reader);
writer.close();
- searcher = new IndexSearcher(directory, true);
}
public void tearDown() throws Exception {
searcher.close();
+ reader.close();
+ directory.close();
super.tearDown();
}
Index: lucene/src/test/org/apache/lucene/search/TestBoolean2.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestBoolean2.java (revision 963263)
+++ lucene/src/test/org/apache/lucene/search/TestBoolean2.java (working copy)
@@ -23,8 +23,8 @@
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.queryParser.ParseException;
@@ -42,6 +42,7 @@
private IndexSearcher searcher;
private IndexSearcher bigSearcher;
private IndexReader reader;
+ private Random rnd;
private static int NUM_EXTRA_DOCS = 6000;
public static final String field = "field";
@@ -51,8 +52,9 @@
@Override
protected void setUp() throws Exception {
super.setUp();
+ rnd = newRandom();
RAMDirectory directory = new RAMDirectory();
- IndexWriter writer= new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter writer= new RandomIndexWriter(rnd, directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < docFields.length; i++) {
Document doc = new Document();
doc.add(new Field(field, docFields[i], Field.Store.NO, Field.Index.ANALYZED));
@@ -69,14 +71,14 @@
int docCount = 0;
do {
final Directory copy = new RAMDirectory(dir2);
- IndexWriter w = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter w = new RandomIndexWriter(rnd, dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
w.addIndexes(new Directory[] {copy});
docCount = w.maxDoc();
w.close();
mulFactor *= 2;
} while(docCount < 3000);
- IndexWriter w = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter w = new RandomIndexWriter(rnd, dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add(new Field("field2", "xxx", Field.Store.NO, Field.Index.ANALYZED));
for(int i=0;i
- * same as TestRankingSimilarity in TestRanking.zip from
- * http://issues.apache.org/jira/browse/LUCENE-323
- * </p>
- */
- private static class TestSimilarity extends DefaultSimilarity {
-
- public TestSimilarity() {
- }
- @Override
- public float tf(float freq) {
- if (freq > 0.0f) return 1.0f;
- else return 0.0f;
- }
- @Override
- public float lengthNorm(String fieldName, int numTerms) {
- return 1.0f;
- }
- @Override
- public float idf(int docFreq, int numDocs) {
- return 1.0f;
- }
+public class TestDisjunctionMaxQuery extends LuceneTestCase {
+
+ /** threshold for comparing floats */
+ public static final float SCORE_COMP_THRESH = 0.0000f;
+
+ /**
+ * Similarity to eliminate tf, idf and lengthNorm effects to isolate test
+ * case.
+ *
+   * <p>
+ * same as TestRankingSimilarity in TestRanking.zip from
+ * http://issues.apache.org/jira/browse/LUCENE-323
+   * </p>
+ */
+ private static class TestSimilarity extends DefaultSimilarity {
+
+ public TestSimilarity() {}
+
+ @Override
+ public float tf(float freq) {
+ if (freq > 0.0f) return 1.0f;
+ else return 0.0f;
}
-
- public Similarity sim = new TestSimilarity();
- public Directory index;
- public IndexReader r;
- public IndexSearcher s;
-
+
@Override
- protected void setUp() throws Exception {
- super.setUp();
-
- index = new RAMDirectory();
- IndexWriter writer = new IndexWriter(index, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setSimilarity(sim));
-
- // hed is the most important field, dek is secondary
-
- // d1 is an "ok" match for: albino elephant
- {
- Document d1 = new Document();
- d1.add(new Field("id", "d1", Field.Store.YES, Field.Index.NOT_ANALYZED));//Field.Keyword("id", "d1"));
- d1.add(new Field("hed", "elephant", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("hed", "elephant"));
- d1.add(new Field("dek", "elephant", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("dek", "elephant"));
- writer.addDocument(d1);
- }
-
- // d2 is a "good" match for: albino elephant
- {
- Document d2 = new Document();
- d2.add(new Field("id", "d2", Field.Store.YES, Field.Index.NOT_ANALYZED));//Field.Keyword("id", "d2"));
- d2.add(new Field("hed", "elephant", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("hed", "elephant"));
- d2.add(new Field("dek", "albino", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("dek", "albino"));
- d2.add(new Field("dek", "elephant", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("dek", "elephant"));
- writer.addDocument(d2);
- }
-
- // d3 is a "better" match for: albino elephant
- {
- Document d3 = new Document();
- d3.add(new Field("id", "d3", Field.Store.YES, Field.Index.NOT_ANALYZED));//Field.Keyword("id", "d3"));
- d3.add(new Field("hed", "albino", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("hed", "albino"));
- d3.add(new Field("hed", "elephant", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("hed", "elephant"));
- writer.addDocument(d3);
- }
-
- // d4 is the "best" match for: albino elephant
- {
- Document d4 = new Document();
- d4.add(new Field("id", "d4", Field.Store.YES, Field.Index.NOT_ANALYZED));//Field.Keyword("id", "d4"));
- d4.add(new Field("hed", "albino", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("hed", "albino"));
- d4.add(new Field("hed", "elephant", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("hed", "elephant"));
- d4.add(new Field("dek", "albino", Field.Store.YES, Field.Index.ANALYZED));//Field.Text("dek", "albino"));
- writer.addDocument(d4);
- }
-
- writer.close();
-
- r = IndexReader.open(index, true);
- s = new IndexSearcher(r);
- s.setSimilarity(sim);
+ public float lengthNorm(String fieldName, int numTerms) {
+ return 1.0f;
}
-
+
+ @Override
+ public float idf(int docFreq, int numDocs) {
+ return 1.0f;
+ }
+ }
+
+ public Similarity sim = new TestSimilarity();
+ public Directory index;
+ public IndexReader r;
+ public IndexSearcher s;
+
+ @Override
+ protected void setUp() throws Exception {
+ super.setUp();
+
+ index = new RAMDirectory();
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), index,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
+ .setSimilarity(sim));
+
+ // hed is the most important field, dek is secondary
+
+ // d1 is an "ok" match for: albino elephant
+ {
+ Document d1 = new Document();
+ d1.add(new Field("id", "d1", Field.Store.YES, Field.Index.NOT_ANALYZED));// Field.Keyword("id",
+ // "d1"));
+ d1
+ .add(new Field("hed", "elephant", Field.Store.YES,
+ Field.Index.ANALYZED));// Field.Text("hed", "elephant"));
+ d1
+ .add(new Field("dek", "elephant", Field.Store.YES,
+ Field.Index.ANALYZED));// Field.Text("dek", "elephant"));
+ writer.addDocument(d1);
+ }
+
+ // d2 is a "good" match for: albino elephant
+ {
+ Document d2 = new Document();
+ d2.add(new Field("id", "d2", Field.Store.YES, Field.Index.NOT_ANALYZED));// Field.Keyword("id",
+ // "d2"));
+ d2
+ .add(new Field("hed", "elephant", Field.Store.YES,
+ Field.Index.ANALYZED));// Field.Text("hed", "elephant"));
+ d2.add(new Field("dek", "albino", Field.Store.YES, Field.Index.ANALYZED));// Field.Text("dek",
+ // "albino"));
+ d2
+ .add(new Field("dek", "elephant", Field.Store.YES,
+ Field.Index.ANALYZED));// Field.Text("dek", "elephant"));
+ writer.addDocument(d2);
+ }
+
+ // d3 is a "better" match for: albino elephant
+ {
+ Document d3 = new Document();
+ d3.add(new Field("id", "d3", Field.Store.YES, Field.Index.NOT_ANALYZED));// Field.Keyword("id",
+ // "d3"));
+ d3.add(new Field("hed", "albino", Field.Store.YES, Field.Index.ANALYZED));// Field.Text("hed",
+ // "albino"));
+ d3
+ .add(new Field("hed", "elephant", Field.Store.YES,
+ Field.Index.ANALYZED));// Field.Text("hed", "elephant"));
+ writer.addDocument(d3);
+ }
+
+ // d4 is the "best" match for: albino elephant
+ {
+ Document d4 = new Document();
+ d4.add(new Field("id", "d4", Field.Store.YES, Field.Index.NOT_ANALYZED));// Field.Keyword("id",
+ // "d4"));
+ d4.add(new Field("hed", "albino", Field.Store.YES, Field.Index.ANALYZED));// Field.Text("hed",
+ // "albino"));
+ d4
+ .add(new Field("hed", "elephant", Field.Store.YES,
+ Field.Index.ANALYZED));// Field.Text("hed", "elephant"));
+ d4.add(new Field("dek", "albino", Field.Store.YES, Field.Index.ANALYZED));// Field.Text("dek",
+ // "albino"));
+ writer.addDocument(d4);
+ }
+
+ r = writer.getReader();
+ writer.close();
+ s = new IndexSearcher(r);
+ s.setSimilarity(sim);
+ }
+
+ @Override
+ protected void tearDown() throws Exception {
+ s.close();
+ r.close();
+ index.close();
+ super.tearDown();
+ }
+
public void testSkipToFirsttimeMiss() throws IOException {
final DisjunctionMaxQuery dq = new DisjunctionMaxQuery(0.0f);
- dq.add(tq("id","d1"));
- dq.add(tq("dek","DOES_NOT_EXIST"));
-
- QueryUtils.check(dq,s);
-
+ dq.add(tq("id", "d1"));
+ dq.add(tq("dek", "DOES_NOT_EXIST"));
+
+ QueryUtils.check(dq, s);
+
final Weight dw = dq.weight(s);
final Scorer ds = dw.scorer(r, true, false);
final boolean skipOk = ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS;
if (skipOk) {
- fail("firsttime skipTo found a match? ... " + r.document(ds.docID()).get("id"));
+ fail("firsttime skipTo found a match? ... "
+ + r.document(ds.docID()).get("id"));
}
}
-
+
public void testSkipToFirsttimeHit() throws IOException {
final DisjunctionMaxQuery dq = new DisjunctionMaxQuery(0.0f);
- dq.add(tq("dek","albino"));
- dq.add(tq("dek","DOES_NOT_EXIST"));
-
- QueryUtils.check(dq,s);
-
+ dq.add(tq("dek", "albino"));
+ dq.add(tq("dek", "DOES_NOT_EXIST"));
+
+ QueryUtils.check(dq, s);
+
final Weight dw = dq.weight(s);
final Scorer ds = dw.scorer(r, true, false);
- assertTrue("firsttime skipTo found no match", ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
+ assertTrue("firsttime skipTo found no match",
+ ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
assertEquals("found wrong docid", "d4", r.document(ds.docID()).get("id"));
}
-
+
public void testSimpleEqualScores1() throws Exception {
-
+
DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.0f);
- q.add(tq("hed","albino"));
- q.add(tq("hed","elephant"));
- QueryUtils.check(q,s);
-
+ q.add(tq("hed", "albino"));
+ q.add(tq("hed", "elephant"));
+ QueryUtils.check(q, s);
+
ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
-
+
try {
- assertEquals("all docs should match " + q.toString(),
- 4, h.length);
-
+ assertEquals("all docs should match " + q.toString(), 4, h.length);
+
float score = h[0].score;
for (int i = 1; i < h.length; i++) {
- assertEquals("score #" + i + " is not the same",
- score, h[i].score, SCORE_COMP_THRESH);
+ assertEquals("score #" + i + " is not the same", score, h[i].score,
+ SCORE_COMP_THRESH);
}
} catch (Error e) {
- printHits("testSimpleEqualScores1",h,s);
+ printHits("testSimpleEqualScores1", h, s);
throw e;
}
-
+
}
-
- public void testSimpleEqualScores2() throws Exception {
-
- DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.0f);
- q.add(tq("dek","albino"));
- q.add(tq("dek","elephant"));
- QueryUtils.check(q,s);
-
-
- ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
-
- try {
- assertEquals("3 docs should match " + q.toString(),
- 3, h.length);
- float score = h[0].score;
- for (int i = 1; i < h.length; i++) {
- assertEquals("score #" + i + " is not the same",
- score, h[i].score, SCORE_COMP_THRESH);
- }
- } catch (Error e) {
- printHits("testSimpleEqualScores2",h, s);
- throw e;
- }
-
+
+ public void testSimpleEqualScores2() throws Exception {
+
+ DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.0f);
+ q.add(tq("dek", "albino"));
+ q.add(tq("dek", "elephant"));
+ QueryUtils.check(q, s);
+
+ ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+
+ try {
+ assertEquals("3 docs should match " + q.toString(), 3, h.length);
+ float score = h[0].score;
+ for (int i = 1; i < h.length; i++) {
+ assertEquals("score #" + i + " is not the same", score, h[i].score,
+ SCORE_COMP_THRESH);
+ }
+ } catch (Error e) {
+ printHits("testSimpleEqualScores2", h, s);
+ throw e;
}
-
- public void testSimpleEqualScores3() throws Exception {
-
- DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.0f);
- q.add(tq("hed","albino"));
- q.add(tq("hed","elephant"));
- q.add(tq("dek","albino"));
- q.add(tq("dek","elephant"));
- QueryUtils.check(q,s);
-
-
- ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
-
- try {
- assertEquals("all docs should match " + q.toString(),
- 4, h.length);
- float score = h[0].score;
- for (int i = 1; i < h.length; i++) {
- assertEquals("score #" + i + " is not the same",
- score, h[i].score, SCORE_COMP_THRESH);
- }
- } catch (Error e) {
- printHits("testSimpleEqualScores3",h, s);
- throw e;
- }
-
+
+ }
+
+ public void testSimpleEqualScores3() throws Exception {
+
+ DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.0f);
+ q.add(tq("hed", "albino"));
+ q.add(tq("hed", "elephant"));
+ q.add(tq("dek", "albino"));
+ q.add(tq("dek", "elephant"));
+ QueryUtils.check(q, s);
+
+ ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+
+ try {
+ assertEquals("all docs should match " + q.toString(), 4, h.length);
+ float score = h[0].score;
+ for (int i = 1; i < h.length; i++) {
+ assertEquals("score #" + i + " is not the same", score, h[i].score,
+ SCORE_COMP_THRESH);
+ }
+ } catch (Error e) {
+ printHits("testSimpleEqualScores3", h, s);
+ throw e;
}
-
- public void testSimpleTiebreaker() throws Exception {
-
- DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.01f);
- q.add(tq("dek","albino"));
- q.add(tq("dek","elephant"));
- QueryUtils.check(q,s);
-
-
- ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
-
- try {
- assertEquals("3 docs should match " + q.toString(),
- 3, h.length);
- assertEquals("wrong first", "d2", s.doc(h[0].doc).get("id"));
- float score0 = h[0].score;
- float score1 = h[1].score;
- float score2 = h[2].score;
- assertTrue("d2 does not have better score then others: " +
- score0 + " >? " + score1,
- score0 > score1);
- assertEquals("d4 and d1 don't have equal scores",
- score1, score2, SCORE_COMP_THRESH);
- } catch (Error e) {
- printHits("testSimpleTiebreaker",h, s);
- throw e;
- }
+
+ }
+
+ public void testSimpleTiebreaker() throws Exception {
+
+ DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.01f);
+ q.add(tq("dek", "albino"));
+ q.add(tq("dek", "elephant"));
+ QueryUtils.check(q, s);
+
+ ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+
+ try {
+ assertEquals("3 docs should match " + q.toString(), 3, h.length);
+ assertEquals("wrong first", "d2", s.doc(h[0].doc).get("id"));
+ float score0 = h[0].score;
+ float score1 = h[1].score;
+ float score2 = h[2].score;
+ assertTrue("d2 does not have better score then others: " + score0
+ + " >? " + score1, score0 > score1);
+ assertEquals("d4 and d1 don't have equal scores", score1, score2,
+ SCORE_COMP_THRESH);
+ } catch (Error e) {
+ printHits("testSimpleTiebreaker", h, s);
+ throw e;
}
-
- public void testBooleanRequiredEqualScores() throws Exception {
-
- BooleanQuery q = new BooleanQuery();
- {
- DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.0f);
- q1.add(tq("hed","albino"));
- q1.add(tq("dek","albino"));
- q.add(q1,BooleanClause.Occur.MUST);//true,false);
- QueryUtils.check(q1,s);
-
- }
- {
- DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.0f);
- q2.add(tq("hed","elephant"));
- q2.add(tq("dek","elephant"));
- q.add(q2, BooleanClause.Occur.MUST);//true,false);
- QueryUtils.check(q2,s);
- }
-
- QueryUtils.check(q,s);
-
- ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
-
- try {
- assertEquals("3 docs should match " + q.toString(),
- 3, h.length);
- float score = h[0].score;
- for (int i = 1; i < h.length; i++) {
- assertEquals("score #" + i + " is not the same",
- score, h[i].score, SCORE_COMP_THRESH);
- }
- } catch (Error e) {
- printHits("testBooleanRequiredEqualScores1",h, s);
- throw e;
- }
+ }
+
+ public void testBooleanRequiredEqualScores() throws Exception {
+
+ BooleanQuery q = new BooleanQuery();
+ {
+ DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.0f);
+ q1.add(tq("hed", "albino"));
+ q1.add(tq("dek", "albino"));
+ q.add(q1, BooleanClause.Occur.MUST);// true,false);
+ QueryUtils.check(q1, s);
+
}
-
-
- public void testBooleanOptionalNoTiebreaker() throws Exception {
-
- BooleanQuery q = new BooleanQuery();
- {
- DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.0f);
- q1.add(tq("hed","albino"));
- q1.add(tq("dek","albino"));
- q.add(q1, BooleanClause.Occur.SHOULD);//false,false);
- }
- {
- DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.0f);
- q2.add(tq("hed","elephant"));
- q2.add(tq("dek","elephant"));
- q.add(q2, BooleanClause.Occur.SHOULD);//false,false);
- }
- QueryUtils.check(q,s);
-
-
- ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
-
- try {
- assertEquals("4 docs should match " + q.toString(),
- 4, h.length);
- float score = h[0].score;
- for (int i = 1; i < h.length-1; i++) { /* note: -1 */
- assertEquals("score #" + i + " is not the same",
- score, h[i].score, SCORE_COMP_THRESH);
- }
- assertEquals("wrong last", "d1", s.doc(h[h.length-1].doc).get("id"));
- float score1 = h[h.length-1].score;
- assertTrue("d1 does not have worse score then others: " +
- score + " >? " + score1,
- score > score1);
- } catch (Error e) {
- printHits("testBooleanOptionalNoTiebreaker",h, s);
- throw e;
- }
+ {
+ DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.0f);
+ q2.add(tq("hed", "elephant"));
+ q2.add(tq("dek", "elephant"));
+ q.add(q2, BooleanClause.Occur.MUST);// true,false);
+ QueryUtils.check(q2, s);
}
-
-
- public void testBooleanOptionalWithTiebreaker() throws Exception {
-
- BooleanQuery q = new BooleanQuery();
- {
- DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.01f);
- q1.add(tq("hed","albino"));
- q1.add(tq("dek","albino"));
- q.add(q1, BooleanClause.Occur.SHOULD);//false,false);
- }
- {
- DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.01f);
- q2.add(tq("hed","elephant"));
- q2.add(tq("dek","elephant"));
- q.add(q2, BooleanClause.Occur.SHOULD);//false,false);
- }
- QueryUtils.check(q,s);
-
-
- ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
-
- try {
-
- assertEquals("4 docs should match " + q.toString(),
- 4, h.length);
-
- float score0 = h[0].score;
- float score1 = h[1].score;
- float score2 = h[2].score;
- float score3 = h[3].score;
-
- String doc0 = s.doc(h[0].doc).get("id");
- String doc1 = s.doc(h[1].doc).get("id");
- String doc2 = s.doc(h[2].doc).get("id");
- String doc3 = s.doc(h[3].doc).get("id");
-
- assertTrue("doc0 should be d2 or d4: " + doc0,
- doc0.equals("d2") || doc0.equals("d4"));
- assertTrue("doc1 should be d2 or d4: " + doc0,
- doc1.equals("d2") || doc1.equals("d4"));
- assertEquals("score0 and score1 should match",
- score0, score1, SCORE_COMP_THRESH);
- assertEquals("wrong third", "d3", doc2);
- assertTrue("d3 does not have worse score then d2 and d4: " +
- score1 + " >? " + score2,
- score1 > score2);
-
- assertEquals("wrong fourth", "d1", doc3);
- assertTrue("d1 does not have worse score then d3: " +
- score2 + " >? " + score3,
- score2 > score3);
-
- } catch (Error e) {
- printHits("testBooleanOptionalWithTiebreaker",h, s);
- throw e;
- }
-
+
+ QueryUtils.check(q, s);
+
+ ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+
+ try {
+ assertEquals("3 docs should match " + q.toString(), 3, h.length);
+ float score = h[0].score;
+ for (int i = 1; i < h.length; i++) {
+ assertEquals("score #" + i + " is not the same", score, h[i].score,
+ SCORE_COMP_THRESH);
+ }
+ } catch (Error e) {
+ printHits("testBooleanRequiredEqualScores1", h, s);
+ throw e;
}
-
-
- public void testBooleanOptionalWithTiebreakerAndBoost() throws Exception {
-
- BooleanQuery q = new BooleanQuery();
- {
- DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.01f);
- q1.add(tq("hed","albino", 1.5f));
- q1.add(tq("dek","albino"));
- q.add(q1, BooleanClause.Occur.SHOULD);//false,false);
- }
- {
- DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.01f);
- q2.add(tq("hed","elephant", 1.5f));
- q2.add(tq("dek","elephant"));
- q.add(q2, BooleanClause.Occur.SHOULD);//false,false);
- }
- QueryUtils.check(q,s);
-
-
- ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
-
- try {
-
- assertEquals("4 docs should match " + q.toString(),
- 4, h.length);
-
- float score0 = h[0].score;
- float score1 = h[1].score;
- float score2 = h[2].score;
- float score3 = h[3].score;
-
- String doc0 = s.doc(h[0].doc).get("id");
- String doc1 = s.doc(h[1].doc).get("id");
- String doc2 = s.doc(h[2].doc).get("id");
- String doc3 = s.doc(h[3].doc).get("id");
-
- assertEquals("doc0 should be d4: ", "d4", doc0);
- assertEquals("doc1 should be d3: ", "d3", doc1);
- assertEquals("doc2 should be d2: ", "d2", doc2);
- assertEquals("doc3 should be d1: ", "d1", doc3);
-
- assertTrue("d4 does not have a better score then d3: " +
- score0 + " >? " + score1,
- score0 > score1);
- assertTrue("d3 does not have a better score then d2: " +
- score1 + " >? " + score2,
- score1 > score2);
- assertTrue("d3 does not have a better score then d1: " +
- score2 + " >? " + score3,
- score2 > score3);
-
- } catch (Error e) {
- printHits("testBooleanOptionalWithTiebreakerAndBoost",h, s);
- throw e;
- }
+ }
+
+ public void testBooleanOptionalNoTiebreaker() throws Exception {
+
+ BooleanQuery q = new BooleanQuery();
+ {
+ DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.0f);
+ q1.add(tq("hed", "albino"));
+ q1.add(tq("dek", "albino"));
+ q.add(q1, BooleanClause.Occur.SHOULD);// false,false);
}
-
-
-
-
-
-
-
- /** macro */
- protected Query tq(String f, String t) {
- return new TermQuery(new Term(f, t));
+ {
+ DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.0f);
+ q2.add(tq("hed", "elephant"));
+ q2.add(tq("dek", "elephant"));
+ q.add(q2, BooleanClause.Occur.SHOULD);// false,false);
}
- /** macro */
- protected Query tq(String f, String t, float b) {
- Query q = tq(f,t);
- q.setBoost(b);
- return q;
+ QueryUtils.check(q, s);
+
+ ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+
+ try {
+ assertEquals("4 docs should match " + q.toString(), 4, h.length);
+ float score = h[0].score;
+ for (int i = 1; i < h.length - 1; i++) { /* note: -1 */
+ assertEquals("score #" + i + " is not the same", score, h[i].score,
+ SCORE_COMP_THRESH);
+ }
+ assertEquals("wrong last", "d1", s.doc(h[h.length - 1].doc).get("id"));
+ float score1 = h[h.length - 1].score;
+ assertTrue("d1 does not have worse score then others: " + score + " >? "
+ + score1, score > score1);
+ } catch (Error e) {
+ printHits("testBooleanOptionalNoTiebreaker", h, s);
+ throw e;
}
-
-
- protected void printHits(String test, ScoreDoc[] h, Searcher searcher) throws Exception {
-
- System.err.println("------- " + test + " -------");
-
- DecimalFormat f = new DecimalFormat("0.000000000");
-
- for (int i = 0; i < h.length; i++) {
- Document d = searcher.doc(h[i].doc);
- float score = h[i].score;
- System.err.println("#" + i + ": " + f.format(score) + " - " +
- d.get("id"));
- }
+ }
+
+ public void testBooleanOptionalWithTiebreaker() throws Exception {
+
+ BooleanQuery q = new BooleanQuery();
+ {
+ DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.01f);
+ q1.add(tq("hed", "albino"));
+ q1.add(tq("dek", "albino"));
+ q.add(q1, BooleanClause.Occur.SHOULD);// false,false);
}
-
+ {
+ DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.01f);
+ q2.add(tq("hed", "elephant"));
+ q2.add(tq("dek", "elephant"));
+ q.add(q2, BooleanClause.Occur.SHOULD);// false,false);
+ }
+ QueryUtils.check(q, s);
+
+ ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+
+ try {
+
+ assertEquals("4 docs should match " + q.toString(), 4, h.length);
+
+ float score0 = h[0].score;
+ float score1 = h[1].score;
+ float score2 = h[2].score;
+ float score3 = h[3].score;
+
+ String doc0 = s.doc(h[0].doc).get("id");
+ String doc1 = s.doc(h[1].doc).get("id");
+ String doc2 = s.doc(h[2].doc).get("id");
+ String doc3 = s.doc(h[3].doc).get("id");
+
+ assertTrue("doc0 should be d2 or d4: " + doc0, doc0.equals("d2")
+ || doc0.equals("d4"));
+ assertTrue("doc1 should be d2 or d4: " + doc0, doc1.equals("d2")
+ || doc1.equals("d4"));
+ assertEquals("score0 and score1 should match", score0, score1,
+ SCORE_COMP_THRESH);
+ assertEquals("wrong third", "d3", doc2);
+ assertTrue("d3 does not have worse score then d2 and d4: " + score1
+ + " >? " + score2, score1 > score2);
+
+ assertEquals("wrong fourth", "d1", doc3);
+ assertTrue("d1 does not have worse score then d3: " + score2 + " >? "
+ + score3, score2 > score3);
+
+ } catch (Error e) {
+ printHits("testBooleanOptionalWithTiebreaker", h, s);
+ throw e;
+ }
+
+ }
+
+ public void testBooleanOptionalWithTiebreakerAndBoost() throws Exception {
+
+ BooleanQuery q = new BooleanQuery();
+ {
+ DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.01f);
+ q1.add(tq("hed", "albino", 1.5f));
+ q1.add(tq("dek", "albino"));
+ q.add(q1, BooleanClause.Occur.SHOULD);// false,false);
+ }
+ {
+ DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.01f);
+ q2.add(tq("hed", "elephant", 1.5f));
+ q2.add(tq("dek", "elephant"));
+ q.add(q2, BooleanClause.Occur.SHOULD);// false,false);
+ }
+ QueryUtils.check(q, s);
+
+ ScoreDoc[] h = s.search(q, null, 1000).scoreDocs;
+
+ try {
+
+ assertEquals("4 docs should match " + q.toString(), 4, h.length);
+
+ float score0 = h[0].score;
+ float score1 = h[1].score;
+ float score2 = h[2].score;
+ float score3 = h[3].score;
+
+ String doc0 = s.doc(h[0].doc).get("id");
+ String doc1 = s.doc(h[1].doc).get("id");
+ String doc2 = s.doc(h[2].doc).get("id");
+ String doc3 = s.doc(h[3].doc).get("id");
+
+ assertEquals("doc0 should be d4: ", "d4", doc0);
+ assertEquals("doc1 should be d3: ", "d3", doc1);
+ assertEquals("doc2 should be d2: ", "d2", doc2);
+ assertEquals("doc3 should be d1: ", "d1", doc3);
+
+ assertTrue("d4 does not have a better score then d3: " + score0 + " >? "
+ + score1, score0 > score1);
+ assertTrue("d3 does not have a better score then d2: " + score1 + " >? "
+ + score2, score1 > score2);
+ assertTrue("d3 does not have a better score then d1: " + score2 + " >? "
+ + score3, score2 > score3);
+
+ } catch (Error e) {
+ printHits("testBooleanOptionalWithTiebreakerAndBoost", h, s);
+ throw e;
+ }
+ }
+
+ /** macro */
+ protected Query tq(String f, String t) {
+ return new TermQuery(new Term(f, t));
+ }
+
+ /** macro */
+ protected Query tq(String f, String t, float b) {
+ Query q = tq(f, t);
+ q.setBoost(b);
+ return q;
+ }
+
+ protected void printHits(String test, ScoreDoc[] h, Searcher searcher)
+ throws Exception {
+
+ System.err.println("------- " + test + " -------");
+
+ DecimalFormat f = new DecimalFormat("0.000000000");
+
+ for (int i = 0; i < h.length; i++) {
+ Document d = searcher.doc(h[i].doc);
+ float score = h[i].score;
+ System.err
+ .println("#" + i + ": " + f.format(score) + " - " + d.get("id"));
+ }
+ }
}
Index: lucene/src/test/org/apache/lucene/search/TestTopScoreDocCollector.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestTopScoreDocCollector.java (revision 963263)
+++ lucene/src/test/org/apache/lucene/search/TestTopScoreDocCollector.java (working copy)
@@ -17,10 +17,13 @@
* limitations under the License.
*/
+import java.util.Random;
+
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
@@ -38,12 +41,12 @@
public void testOutOfOrderCollection() throws Exception {
Directory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ Random random = newRandom();
+ RandomIndexWriter writer = new RandomIndexWriter(random, dir,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 10; i++) {
writer.addDocument(new Document());
}
- writer.commit();
- writer.close();
boolean[] inOrder = new boolean[] { false, true };
String[] actualTSDCClass = new String[] {
@@ -58,7 +61,8 @@
// Set minNrShouldMatch to 1 so that BQ will not optimize rewrite to return
// the clause instead of BQ.
bq.setMinimumNumberShouldMatch(1);
- IndexSearcher searcher = new IndexSearcher(dir, true);
+ IndexReader reader = writer.getReader();
+ IndexSearcher searcher = new IndexSearcher(reader);
for (int i = 0; i < inOrder.length; i++) {
TopDocsCollector tdc = TopScoreDocCollector.create(3, inOrder[i]);
assertEquals("org.apache.lucene.search.TopScoreDocCollector$" + actualTSDCClass[i], tdc.getClass().getName());
@@ -71,6 +75,10 @@
assertEquals("expected doc Id " + j + " found " + sd[j].doc, j, sd[j].doc);
}
}
+ writer.close();
+ searcher.close();
+ reader.close();
+ dir.close();
}
}
Index: lucene/src/test/org/apache/lucene/search/TestRegexpRandom.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestRegexpRandom.java (revision 963263)
+++ lucene/src/test/org/apache/lucene/search/TestRegexpRandom.java (working copy)
@@ -26,8 +26,11 @@
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
@@ -39,13 +42,17 @@
*/
public class TestRegexpRandom extends LuceneTestCase {
private Searcher searcher;
+ private IndexReader reader;
+ private Directory dir;
private Random random;
@Override
protected void setUp() throws Exception {
super.setUp();
- RAMDirectory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new MockAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
+ random = newRandom();
+ dir = new RAMDirectory();
+ RandomIndexWriter writer = new RandomIndexWriter(random, dir,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
Field field = new Field("field", "", Field.Store.NO, Field.Index.ANALYZED);
@@ -57,9 +64,9 @@
writer.addDocument(doc);
}
- writer.optimize();
+ reader = writer.getReader();
writer.close();
- searcher = new IndexSearcher(dir);
+ searcher = new IndexSearcher(reader);
}
private char N() {
@@ -89,11 +96,12 @@
@Override
protected void tearDown() throws Exception {
searcher.close();
+ reader.close();
+ dir.close();
super.tearDown();
}
public void testRegexps() throws Exception {
- random = newRandom(System.nanoTime());
for (int i = 0; i < 100*_TestUtil.getRandomMultiplier(); i++) {
assertPatternHits("NNNN", 1);
assertPatternHits(".NNN", 10);
Index: lucene/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java (revision 963263)
+++ lucene/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java (working copy)
@@ -21,8 +21,9 @@
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.RAMDirectory;
@@ -40,12 +41,15 @@
private static final String FIELD = "name";
private RAMDirectory directory = new RAMDirectory();
+ private IndexReader reader;
+ private IndexSearcher searcher;
@Override
protected void setUp() throws Exception {
super.setUp();
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < 5137; ++i) {
Document doc = new Document();
@@ -73,40 +77,46 @@
writer.addDocument(doc);
}
+ reader = writer.getReader();
+ searcher = new IndexSearcher(reader);
writer.close();
}
+ @Override
+ public void tearDown() throws Exception {
+ searcher.close();
+ reader.close();
+ directory.close();
+ super.tearDown();
+ }
+
public void testPrefixQuery() throws Exception {
- IndexSearcher indexSearcher = new IndexSearcher(directory, true);
Query query = new PrefixQuery(new Term(FIELD, "tang"));
assertEquals("Number of matched documents", 2,
- indexSearcher.search(query, null, 1000).totalHits);
+ searcher.search(query, null, 1000).totalHits);
}
public void testTermQuery() throws Exception {
- IndexSearcher indexSearcher = new IndexSearcher(directory, true);
Query query = new TermQuery(new Term(FIELD, "tangfulin"));
assertEquals("Number of matched documents", 2,
- indexSearcher.search(query, null, 1000).totalHits);
+ searcher.search(query, null, 1000).totalHits);
}
public void testTermBooleanQuery() throws Exception {
- IndexSearcher indexSearcher = new IndexSearcher(directory, true);
BooleanQuery query = new BooleanQuery();
query.add(new TermQuery(new Term(FIELD, "tangfulin")),
BooleanClause.Occur.SHOULD);
query.add(new TermQuery(new Term(FIELD, "notexistnames")),
BooleanClause.Occur.SHOULD);
assertEquals("Number of matched documents", 2,
- indexSearcher.search(query, null, 1000).totalHits);
+ searcher.search(query, null, 1000).totalHits);
}
public void testPrefixBooleanQuery() throws Exception {
- IndexSearcher indexSearcher = new IndexSearcher(directory, true);
BooleanQuery query = new BooleanQuery();
query.add(new PrefixQuery(new Term(FIELD, "tang")),
BooleanClause.Occur.SHOULD);
query.add(new TermQuery(new Term(FIELD, "notexistnames")),
BooleanClause.Occur.SHOULD);
assertEquals("Number of matched documents", 2,
- indexSearcher.search(query, null, 1000).totalHits);
+ searcher.search(query, null, 1000).totalHits);
}
}
Index: lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java (revision 963263)
+++ lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java (working copy)
@@ -23,8 +23,10 @@
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericField;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.BytesRef;
@@ -46,12 +48,15 @@
private static final int noDocs = 10000*_TestUtil.getRandomMultiplier();
private static RAMDirectory directory = null;
+ private static IndexReader reader = null;
private static IndexSearcher searcher = null;
-
+
@BeforeClass
public static void beforeClass() throws Exception {
directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ Random random = newStaticRandom(TestNumericRangeQuery32.class);
+ RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
NumericField
field8 = new NumericField("field8", 8, Field.Store.YES, true),
@@ -83,15 +88,17 @@
writer.addDocument(doc);
}
- writer.optimize();
+ reader = writer.getReader();
+ searcher=new IndexSearcher(reader);
writer.close();
- searcher=new IndexSearcher(directory, true);
}
@AfterClass
public static void afterClass() throws Exception {
searcher.close();
searcher = null;
+ reader.close();
+ reader = null;
directory.close();
directory = null;
}
@@ -147,7 +154,7 @@
assertEquals("First doc"+type, 2*distance+startOffset, Integer.parseInt(doc.get(field)) );
doc=searcher.doc(sd[sd.length-1].doc);
assertEquals("Last doc"+type, (1+count)*distance+startOffset, Integer.parseInt(doc.get(field)) );
- if (i>0) {
+ if (i>0 && searcher.getIndexReader().getSequentialSubReaders().length == 1) {
assertEquals("Distinct term number is equal for all query types", lastTerms, terms);
}
lastTerms = terms;
@@ -372,7 +379,7 @@
termCountT += tq.getTotalNumberOfTerms();
termCountC += cq.getTotalNumberOfTerms();
}
- if (precisionStep == Integer.MAX_VALUE) {
+ if (precisionStep == Integer.MAX_VALUE && searcher.getIndexReader().getSequentialSubReaders().length == 1) {
assertEquals("Total number of terms should be equal for unlimited precStep", termCountT, termCountC);
} else if (VERBOSE) {
System.out.println("Average number of terms during random search on '" + field + "':");
Index: lucene/src/test/org/apache/lucene/search/TestDocBoost.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestDocBoost.java (revision 963263)
+++ lucene/src/test/org/apache/lucene/search/TestDocBoost.java (working copy)
@@ -23,8 +23,8 @@
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.*;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.RAMDirectory;
@@ -40,8 +40,8 @@
public void testDocBoost() throws Exception {
RAMDirectory store = new RAMDirectory();
- IndexWriter writer = new IndexWriter(store, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), store,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Fieldable f1 = new Field("field", "word", Field.Store.YES, Field.Index.ANALYZED);
Fieldable f2 = new Field("field", "word", Field.Store.YES, Field.Index.ANALYZED);
@@ -63,12 +63,13 @@
writer.addDocument(d2);
writer.addDocument(d3);
writer.addDocument(d4);
- writer.optimize();
+
+ IndexReader reader = writer.getReader();
writer.close();
final float[] scores = new float[4];
- new IndexSearcher(store, true).search
+ new IndexSearcher(reader).search
(new TermQuery(new Term("field", "word")),
new Collector() {
private int base = 0;
@@ -97,5 +98,8 @@
assertTrue(scores[i] > lastScore);
lastScore = scores[i];
}
+
+ reader.close();
+ store.close();
}
}
Index: lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java (revision 963263)
+++ lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java (working copy)
@@ -23,8 +23,10 @@
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericField;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCaseJ4;
@@ -45,12 +47,15 @@
private static final int noDocs = 10000*_TestUtil.getRandomMultiplier();
private static RAMDirectory directory = null;
+ private static IndexReader reader = null;
private static IndexSearcher searcher = null;
@BeforeClass
public static void beforeClass() throws Exception {
directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ Random random = newStaticRandom(TestNumericRangeQuery64.class);
+ RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
NumericField
field8 = new NumericField("field8", 8, Field.Store.YES, true),
@@ -86,15 +91,17 @@
writer.addDocument(doc);
}
- writer.optimize();
+ reader = writer.getReader();
+ searcher=new IndexSearcher(reader);
writer.close();
- searcher=new IndexSearcher(directory, true);
}
@AfterClass
public static void afterClass() throws Exception {
searcher.close();
searcher = null;
+ reader.close();
+ reader = null;
directory.close();
directory = null;
}
@@ -150,7 +157,7 @@
assertEquals("First doc"+type, 2*distance+startOffset, Long.parseLong(doc.get(field)) );
doc=searcher.doc(sd[sd.length-1].doc);
assertEquals("Last doc"+type, (1+count)*distance+startOffset, Long.parseLong(doc.get(field)) );
- if (i>0) {
+ if (i>0 && searcher.getIndexReader().getSequentialSubReaders().length == 1) {
assertEquals("Distinct term number is equal for all query types", lastTerms, terms);
}
lastTerms = terms;
@@ -391,7 +398,7 @@
termCountT += tq.getTotalNumberOfTerms();
termCountC += cq.getTotalNumberOfTerms();
}
- if (precisionStep == Integer.MAX_VALUE) {
+ if (precisionStep == Integer.MAX_VALUE && searcher.getIndexReader().getSequentialSubReaders().length == 1) {
assertEquals("Total number of terms should be equal for unlimited precStep", termCountT, termCountC);
} else if (VERBOSE) {
System.out.println("Average number of terms during random search on '" + field + "':");
Index: lucene/src/test/org/apache/lucene/search/TestPrefixQuery.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestPrefixQuery.java (revision 963263)
+++ lucene/src/test/org/apache/lucene/search/TestPrefixQuery.java (working copy)
@@ -19,8 +19,9 @@
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
@@ -37,16 +38,17 @@
String[] categories = new String[] {"/Computers",
"/Computers/Mac",
"/Computers/Windows"};
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
for (int i = 0; i < categories.length; i++) {
Document doc = new Document();
doc.add(new Field("category", categories[i], Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
}
- writer.close();
+ IndexReader reader = writer.getReader();
PrefixQuery query = new PrefixQuery(new Term("category", "/Computers"));
- IndexSearcher searcher = new IndexSearcher(directory, true);
+ IndexSearcher searcher = new IndexSearcher(reader);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("All documents in /Computers category and below", 3, hits.length);
@@ -58,5 +60,9 @@
assertFalse(query.getTermsEnum(searcher.getIndexReader()) instanceof PrefixTermsEnum);
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("everything", 3, hits.length);
+ writer.close();
+ searcher.close();
+ reader.close();
+ directory.close();
}
}
Index: lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java (revision 963263)
+++ lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java (working copy)
@@ -18,10 +18,10 @@
*/
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.store.MockRAMDirectory;
@@ -36,236 +36,245 @@
/**
* This class tests the MultiPhraseQuery class.
- *
- *
+ *
+ *
*/
-public class TestMultiPhraseQuery extends LuceneTestCase
-{
- public TestMultiPhraseQuery(String name) {
- super(name);
- }
-
- public void testPhrasePrefix() throws IOException {
- MockRAMDirectory indexStore = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
- add("blueberry pie", writer);
- add("blueberry strudel", writer);
- add("blueberry pizza", writer);
- add("blueberry chewing gum", writer);
- add("bluebird pizza", writer);
- add("bluebird foobar pizza", writer);
- add("piccadilly circus", writer);
- writer.optimize();
- writer.close();
-
- IndexSearcher searcher = new IndexSearcher(indexStore, true);
-
- // search for "blueberry pi*":
- MultiPhraseQuery query1 = new MultiPhraseQuery();
- // search for "strawberry pi*":
- MultiPhraseQuery query2 = new MultiPhraseQuery();
- query1.add(new Term("body", "blueberry"));
- query2.add(new Term("body", "strawberry"));
-
- LinkedList termsWithPrefix = new LinkedList();
- IndexReader ir = IndexReader.open(indexStore, true);
-
- // this TermEnum gives "piccadilly", "pie" and "pizza".
- String prefix = "pi";
- TermsEnum te = MultiFields.getFields(ir).terms("body").iterator();
- te.seek(new BytesRef(prefix));
- do {
- String s = te.term().utf8ToString();
- if (s.startsWith(prefix)) {
- termsWithPrefix.add(new Term("body", s));
- } else {
- break;
- }
- } while (te.next() != null);
-
- query1.add(termsWithPrefix.toArray(new Term[0]));
- assertEquals("body:\"blueberry (piccadilly pie pizza)\"", query1.toString());
- query2.add(termsWithPrefix.toArray(new Term[0]));
- assertEquals("body:\"strawberry (piccadilly pie pizza)\"", query2.toString());
-
- ScoreDoc[] result;
- result = searcher.search(query1, null, 1000).scoreDocs;
- assertEquals(2, result.length);
- result = searcher.search(query2, null, 1000).scoreDocs;
- assertEquals(0, result.length);
-
- // search for "blue* pizza":
- MultiPhraseQuery query3 = new MultiPhraseQuery();
- termsWithPrefix.clear();
- prefix = "blue";
- te.seek(new BytesRef(prefix));
-
- do {
- if (te.term().utf8ToString().startsWith(prefix))
- {
- termsWithPrefix.add(new Term("body", te.term().utf8ToString()));
- }
- } while (te.next() != null);
- ir.close();
- query3.add(termsWithPrefix.toArray(new Term[0]));
- query3.add(new Term("body", "pizza"));
-
- result = searcher.search(query3, null, 1000).scoreDocs;
- assertEquals(2, result.length); // blueberry pizza, bluebird pizza
- assertEquals("body:\"(blueberry bluebird) pizza\"", query3.toString());
-
- // test slop:
- query3.setSlop(1);
- result = searcher.search(query3, null, 1000).scoreDocs;
-
- // just make sure no exc:
- searcher.explain(query3, 0);
-
- assertEquals(3, result.length); // blueberry pizza, bluebird pizza, bluebird foobar pizza
-
- MultiPhraseQuery query4 = new MultiPhraseQuery();
- try {
- query4.add(new Term("field1", "foo"));
- query4.add(new Term("field2", "foobar"));
- fail();
- } catch(IllegalArgumentException e) {
- // okay, all terms must belong to the same field
- }
-
- searcher.close();
- indexStore.close();
-
- }
+public class TestMultiPhraseQuery extends LuceneTestCase {
+ public TestMultiPhraseQuery(String name) {
+ super(name);
+ }
+
+ public void testPhrasePrefix() throws IOException {
+ MockRAMDirectory indexStore = new MockRAMDirectory();
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), indexStore,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ add("blueberry pie", writer);
+ add("blueberry strudel", writer);
+ add("blueberry pizza", writer);
+ add("blueberry chewing gum", writer);
+ add("bluebird pizza", writer);
+ add("bluebird foobar pizza", writer);
+ add("piccadilly circus", writer);
- private void add(String s, IndexWriter writer) throws IOException {
- Document doc = new Document();
- doc.add(new Field("body", s, Field.Store.YES, Field.Index.ANALYZED));
- writer.addDocument(doc);
+ IndexReader reader = writer.getReader();
+ IndexSearcher searcher = new IndexSearcher(reader);
+
+ // search for "blueberry pi*":
+ MultiPhraseQuery query1 = new MultiPhraseQuery();
+ // search for "strawberry pi*":
+ MultiPhraseQuery query2 = new MultiPhraseQuery();
+ query1.add(new Term("body", "blueberry"));
+ query2.add(new Term("body", "strawberry"));
+
+ LinkedList termsWithPrefix = new LinkedList();
+
+ // this TermEnum gives "piccadilly", "pie" and "pizza".
+ String prefix = "pi";
+ TermsEnum te = MultiFields.getFields(reader).terms("body").iterator();
+ te.seek(new BytesRef(prefix));
+ do {
+ String s = te.term().utf8ToString();
+ if (s.startsWith(prefix)) {
+ termsWithPrefix.add(new Term("body", s));
+ } else {
+ break;
+ }
+ } while (te.next() != null);
+
+ query1.add(termsWithPrefix.toArray(new Term[0]));
+ assertEquals("body:\"blueberry (piccadilly pie pizza)\"", query1.toString());
+ query2.add(termsWithPrefix.toArray(new Term[0]));
+ assertEquals("body:\"strawberry (piccadilly pie pizza)\"", query2
+ .toString());
+
+ ScoreDoc[] result;
+ result = searcher.search(query1, null, 1000).scoreDocs;
+ assertEquals(2, result.length);
+ result = searcher.search(query2, null, 1000).scoreDocs;
+ assertEquals(0, result.length);
+
+ // search for "blue* pizza":
+ MultiPhraseQuery query3 = new MultiPhraseQuery();
+ termsWithPrefix.clear();
+ prefix = "blue";
+ te.seek(new BytesRef(prefix));
+
+ do {
+ if (te.term().utf8ToString().startsWith(prefix)) {
+ termsWithPrefix.add(new Term("body", te.term().utf8ToString()));
+ }
+ } while (te.next() != null);
+
+ query3.add(termsWithPrefix.toArray(new Term[0]));
+ query3.add(new Term("body", "pizza"));
+
+ result = searcher.search(query3, null, 1000).scoreDocs;
+ assertEquals(2, result.length); // blueberry pizza, bluebird pizza
+ assertEquals("body:\"(blueberry bluebird) pizza\"", query3.toString());
+
+ // test slop:
+ query3.setSlop(1);
+ result = searcher.search(query3, null, 1000).scoreDocs;
+
+ // just make sure no exc:
+ searcher.explain(query3, 0);
+
+ assertEquals(3, result.length); // blueberry pizza, bluebird pizza, bluebird
+ // foobar pizza
+
+ MultiPhraseQuery query4 = new MultiPhraseQuery();
+ try {
+ query4.add(new Term("field1", "foo"));
+ query4.add(new Term("field2", "foobar"));
+ fail();
+ } catch (IllegalArgumentException e) {
+ // okay, all terms must belong to the same field
}
- public void testBooleanQueryContainingSingleTermPrefixQuery() throws IOException {
- // this tests against bug 33161 (now fixed)
- // In order to cause the bug, the outer query must have more than one term
- // and all terms required.
- // The contained PhraseMultiQuery must contain exactly one term array.
-
- MockRAMDirectory indexStore = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
- add("blueberry pie", writer);
- add("blueberry chewing gum", writer);
- add("blue raspberry pie", writer);
- writer.optimize();
- writer.close();
-
- IndexSearcher searcher = new IndexSearcher(indexStore, true);
- // This query will be equivalent to +body:pie +body:"blue*"
- BooleanQuery q = new BooleanQuery();
- q.add(new TermQuery(new Term("body", "pie")), BooleanClause.Occur.MUST);
-
- MultiPhraseQuery trouble = new MultiPhraseQuery();
- trouble.add(new Term[] {
- new Term("body", "blueberry"),
- new Term("body", "blue")
- });
- q.add(trouble, BooleanClause.Occur.MUST);
-
- // exception will be thrown here without fix
- ScoreDoc[] hits = searcher.search(q, null, 1000).scoreDocs;
-
- assertEquals("Wrong number of hits", 2, hits.length);
-
- // just make sure no exc:
- searcher.explain(q, 0);
-
- searcher.close();
- indexStore.close();
+ writer.close();
+ searcher.close();
+ reader.close();
+ indexStore.close();
+
}
+
+ private void add(String s, RandomIndexWriter writer) throws IOException {
+ Document doc = new Document();
+ doc.add(new Field("body", s, Field.Store.YES, Field.Index.ANALYZED));
+ writer.addDocument(doc);
+ }
+
+ public void testBooleanQueryContainingSingleTermPrefixQuery()
+ throws IOException {
+ // this tests against bug 33161 (now fixed)
+ // In order to cause the bug, the outer query must have more than one term
+ // and all terms required.
+ // The contained PhraseMultiQuery must contain exactly one term array.
+ MockRAMDirectory indexStore = new MockRAMDirectory();
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), indexStore,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ add("blueberry pie", writer);
+ add("blueberry chewing gum", writer);
+ add("blue raspberry pie", writer);
+
+ IndexReader reader = writer.getReader();
+ IndexSearcher searcher = new IndexSearcher(reader);
+ // This query will be equivalent to +body:pie +body:"blue*"
+ BooleanQuery q = new BooleanQuery();
+ q.add(new TermQuery(new Term("body", "pie")), BooleanClause.Occur.MUST);
+
+ MultiPhraseQuery trouble = new MultiPhraseQuery();
+ trouble.add(new Term[] {new Term("body", "blueberry"),
+ new Term("body", "blue")});
+ q.add(trouble, BooleanClause.Occur.MUST);
+
+ // exception will be thrown here without fix
+ ScoreDoc[] hits = searcher.search(q, null, 1000).scoreDocs;
+
+ assertEquals("Wrong number of hits", 2, hits.length);
+
+ // just make sure no exc:
+ searcher.explain(q, 0);
+
+ writer.close();
+ searcher.close();
+ reader.close();
+ indexStore.close();
+ }
+
public void testPhrasePrefixWithBooleanQuery() throws IOException {
MockRAMDirectory indexStore = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), indexStore,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
add("This is a test", "object", writer);
add("a note", "note", writer);
- writer.close();
- IndexSearcher searcher = new IndexSearcher(indexStore, true);
-
+ IndexReader reader = writer.getReader();
+ IndexSearcher searcher = new IndexSearcher(reader);
+
// This query will be equivalent to +type:note +body:"a t*"
BooleanQuery q = new BooleanQuery();
q.add(new TermQuery(new Term("type", "note")), BooleanClause.Occur.MUST);
-
+
MultiPhraseQuery trouble = new MultiPhraseQuery();
trouble.add(new Term("body", "a"));
- trouble.add(new Term[] { new Term("body", "test"), new Term("body", "this") });
+ trouble
+ .add(new Term[] {new Term("body", "test"), new Term("body", "this")});
q.add(trouble, BooleanClause.Occur.MUST);
-
+
// exception will be thrown here without fix for #35626:
ScoreDoc[] hits = searcher.search(q, null, 1000).scoreDocs;
assertEquals("Wrong number of hits", 0, hits.length);
+ writer.close();
searcher.close();
+ reader.close();
indexStore.close();
}
-
+
public void testNoDocs() throws Exception {
MockRAMDirectory indexStore = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(indexStore, new MockAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), indexStore,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
add("a note", "note", writer);
- writer.close();
-
- IndexSearcher searcher = new IndexSearcher(indexStore, true);
-
+
+ IndexReader reader = writer.getReader();
+ IndexSearcher searcher = new IndexSearcher(reader);
+
MultiPhraseQuery q = new MultiPhraseQuery();
q.add(new Term("body", "a"));
- q.add(new Term[] { new Term("body", "nope"), new Term("body", "nope") });
- assertEquals("Wrong number of hits", 0, searcher.search(q, null, 1).totalHits);
-
+ q.add(new Term[] {new Term("body", "nope"), new Term("body", "nope")});
+ assertEquals("Wrong number of hits", 0,
+ searcher.search(q, null, 1).totalHits);
+
// just make sure no exc:
searcher.explain(q, 0);
-
+
+ writer.close();
searcher.close();
+ reader.close();
indexStore.close();
}
- public void testHashCodeAndEquals(){
+ public void testHashCodeAndEquals() {
MultiPhraseQuery query1 = new MultiPhraseQuery();
MultiPhraseQuery query2 = new MultiPhraseQuery();
assertEquals(query1.hashCode(), query2.hashCode());
- assertEquals(query1,query2);
+ assertEquals(query1, query2);
- Term term1= new Term("someField","someText");
+ Term term1 = new Term("someField", "someText");
query1.add(term1);
query2.add(term1);
assertEquals(query1.hashCode(), query2.hashCode());
- assertEquals(query1,query2);
+ assertEquals(query1, query2);
- Term term2= new Term("someField","someMoreText");
+ Term term2 = new Term("someField", "someMoreText");
query1.add(term2);
- assertFalse(query1.hashCode()==query2.hashCode());
+ assertFalse(query1.hashCode() == query2.hashCode());
assertFalse(query1.equals(query2));
query2.add(term2);
assertEquals(query1.hashCode(), query2.hashCode());
- assertEquals(query1,query2);
+ assertEquals(query1, query2);
}
-
- private void add(String s, String type, IndexWriter writer) throws IOException {
+ private void add(String s, String type, RandomIndexWriter writer)
+ throws IOException {
Document doc = new Document();
doc.add(new Field("body", s, Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("type", type, Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
}
-
+
// LUCENE-2526
public void testEmptyToString() {
new MultiPhraseQuery().toString();
}
-
+
}
Index: lucene/src/test/org/apache/lucene/search/TestBooleanPrefixQuery.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestBooleanPrefixQuery.java (revision 963263)
+++ lucene/src/test/org/apache/lucene/search/TestBooleanPrefixQuery.java (working copy)
@@ -22,8 +22,8 @@
import junit.framework.TestSuite;
import junit.textui.TestRunner;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.analysis.MockAnalyzer;
@@ -33,8 +33,6 @@
import org.apache.lucene.search.Query;
import org.apache.lucene.search.BooleanQuery;
-import java.io.IOException;
-
/**
*
**/
@@ -79,29 +77,27 @@
Query rw1 = null;
Query rw2 = null;
IndexReader reader = null;
- try {
- IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), directory, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer()));
- for (int i = 0; i < categories.length; i++) {
- Document doc = new Document();
- doc.add(new Field("category", categories[i], Field.Store.YES, Field.Index.NOT_ANALYZED));
- writer.addDocument(doc);
- }
- writer.close();
+ for (int i = 0; i < categories.length; i++) {
+ Document doc = new Document();
+ doc.add(new Field("category", categories[i], Field.Store.YES, Field.Index.NOT_ANALYZED));
+ writer.addDocument(doc);
+ }
+ reader = writer.getReader();
+ writer.close();
- reader = IndexReader.open(directory, true);
- PrefixQuery query = new PrefixQuery(new Term("category", "foo"));
- rw1 = query.rewrite(reader);
+ PrefixQuery query = new PrefixQuery(new Term("category", "foo"));
+ rw1 = query.rewrite(reader);
- BooleanQuery bq = new BooleanQuery();
- bq.add(query, BooleanClause.Occur.MUST);
+ BooleanQuery bq = new BooleanQuery();
+ bq.add(query, BooleanClause.Occur.MUST);
- rw2 = bq.rewrite(reader);
- } catch (IOException e) {
- fail(e.getMessage());
- }
+ rw2 = bq.rewrite(reader);
assertEquals("Number of Clauses Mismatch", getCount(reader, rw1), getCount(reader, rw2));
+ reader.close();
+ directory.close();
}
}
Index: lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java (revision 963263)
+++ lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java (working copy)
@@ -20,9 +20,9 @@
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.store.RAMDirectory;
@@ -41,6 +41,7 @@
public class TestFilteredQuery extends LuceneTestCase {
private IndexSearcher searcher;
+ private IndexReader reader;
private RAMDirectory directory;
private Query query;
private Filter filter;
@@ -49,7 +50,8 @@
protected void setUp() throws Exception {
super.setUp();
directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter (directory, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter writer = new RandomIndexWriter (newRandom(), directory,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
Document doc = new Document();
doc.add (new Field("field", "one two three four five", Field.Store.YES, Field.Index.ANALYZED));
@@ -71,10 +73,10 @@
doc.add (new Field("sorter", "c", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument (doc);
- writer.optimize ();
+ reader = writer.getReader();
writer.close ();
- searcher = new IndexSearcher (directory, true);
+ searcher = new IndexSearcher (reader);
query = new TermQuery (new Term ("field", "three"));
filter = newStaticFilterB();
}
@@ -95,6 +97,7 @@
@Override
protected void tearDown() throws Exception {
searcher.close();
+ reader.close();
directory.close();
super.tearDown();
}
Index: lucene/src/test/org/apache/lucene/index/RandomIndexWriter.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/RandomIndexWriter.java (revision 0)
+++ lucene/src/test/org/apache/lucene/index/RandomIndexWriter.java (revision 0)
@@ -0,0 +1,129 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Random;
+import java.io.Closeable;
+import java.io.IOException;
+
+import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.codecs.Codec;
+import org.apache.lucene.index.codecs.CodecProvider;
+import org.apache.lucene.index.codecs.intblock.IntBlockCodec;
+import org.apache.lucene.index.codecs.preflex.PreFlexCodec;
+import org.apache.lucene.index.codecs.pulsing.PulsingCodec;
+import org.apache.lucene.index.codecs.sep.SepCodec;
+import org.apache.lucene.index.codecs.standard.StandardCodec;
+
+/** Silly class that randomizes the indexing experience. EG
+ * it may swap in a different merge policy/scheduler; may
+ * commit periodically; may or may not optimize in the end,
+ * may flush by doc count instead of RAM, etc.
+ */
+
+public class RandomIndexWriter implements Closeable {
+
+ public IndexWriter w;
+ private final Random r;
+ int docCount;
+ int flushAt;
+
+ public RandomIndexWriter(Random r, Directory dir, IndexWriterConfig c) throws IOException {
+ this.r = r;
+ if (r.nextBoolean()) {
+ c.setMergePolicy(new LogDocMergePolicy());
+ }
+ if (r.nextBoolean()) {
+ c.setMergeScheduler(new SerialMergeScheduler());
+ }
+ if (r.nextBoolean()) {
+ c.setMaxBufferedDocs(_TestUtil.nextInt(r, 20, 1000));
+ }
+ if (r.nextBoolean()) {
+ c.setTermIndexInterval(_TestUtil.nextInt(r, 1, 1000));
+ }
+
+ if (c.getMergePolicy() instanceof LogMergePolicy) {
+ LogMergePolicy logmp = (LogMergePolicy) c.getMergePolicy();
+ logmp.setUseCompoundDocStore(r.nextBoolean());
+ logmp.setUseCompoundFile(r.nextBoolean());
+ logmp.setCalibrateSizeByDeletes(r.nextBoolean());
+ }
+
+ c.setReaderPooling(r.nextBoolean());
+ c.setCodecProvider(new RandomCodecProvider(r));
+ w = new IndexWriter(dir, c);
+ flushAt = _TestUtil.nextInt(r, 10, 1000);
+ }
+
+ public void addDocument(Document doc) throws IOException {
+ w.addDocument(doc);
+ if (docCount++ == flushAt) {
+ w.commit();
+ flushAt += _TestUtil.nextInt(r, 10, 1000);
+ }
+ }
+
+ public void addIndexes(Directory... dirs) throws CorruptIndexException, IOException {
+ w.addIndexes(dirs);
+ }
+
+ public void deleteDocuments(Term term) throws CorruptIndexException, IOException {
+ w.deleteDocuments(term);
+ }
+
+ public int maxDoc() {
+ return w.maxDoc();
+ }
+
+ public IndexReader getReader() throws IOException {
+ if (r.nextBoolean()) {
+ return w.getReader();
+ } else {
+ w.commit();
+ return IndexReader.open(w.getDirectory(), new KeepOnlyLastCommitDeletionPolicy(), r.nextBoolean(), _TestUtil.nextInt(r, 1, 10));
+ }
+ }
+
+ public void close() throws IOException {
+ if (r.nextInt(4) == 2) {
+ w.optimize();
+ }
+ w.close();
+ }
+
+ class RandomCodecProvider extends CodecProvider {
+ final String codec;
+
+ RandomCodecProvider(Random random) {
+ register(new StandardCodec());
+ register(new IntBlockCodec());
+ register(new PreFlexCodec());
+ register(new PulsingCodec());
+ register(new SepCodec());
+ codec = CodecProvider.CORE_CODECS[random.nextInt(CodecProvider.CORE_CODECS.length)];
+ }
+
+ @Override
+ public Codec getWriter(SegmentWriteState state) {
+ return lookup(codec);
+ }
+ }
+}
Property changes on: lucene\src\test\org\apache\lucene\index\RandomIndexWriter.java
___________________________________________________________________
Added: svn:eol-style
+ native
Index: lucene/src/test/org/apache/lucene/util/LuceneTestCaseJ4.java
===================================================================
--- lucene/src/test/org/apache/lucene/util/LuceneTestCaseJ4.java (revision 963263)
+++ lucene/src/test/org/apache/lucene/util/LuceneTestCaseJ4.java (working copy)
@@ -33,6 +33,7 @@
import java.io.PrintStream;
import java.io.IOException;
import java.util.Arrays;
+import java.util.Hashtable;
import java.util.Iterator;
import java.util.Random;
import java.util.ArrayList;
@@ -330,6 +331,17 @@
return new Random(seed);
}
+ private static Hashtable<Class<?>,Long> staticSeeds = new Hashtable<Class<?>,Long>();
+
+ public static Random newStaticRandom(Class<?> clazz) {
+ return newStaticRandom(clazz, seedRnd.nextLong());
+ }
+
+ public static Random newStaticRandom(Class<?> clazz, long seed) {
+ staticSeeds.put(clazz, Long.valueOf(seed));
+ return new Random(seed);
+ }
+
public String getName() {
return this.name;
}
@@ -348,6 +360,11 @@
// We get here from InterceptTestCaseEvents on the 'failed' event....
public void reportAdditionalFailureInfo() {
+ Long staticSeed = staticSeeds.get(getClass());
+ if (staticSeed != null) {
+ System.out.println("NOTE: random static seed of testclass '" + getName() + "' was: " + staticSeed);
+ }
+
if (seed != null) {
System.out.println("NOTE: random seed of testcase '" + getName() + "' was: " + seed);
}
Index: lucene/src/test/org/apache/lucene/document/TestBinaryDocument.java
===================================================================
--- lucene/src/test/org/apache/lucene/document/TestBinaryDocument.java (revision 963263)
+++ lucene/src/test/org/apache/lucene/document/TestBinaryDocument.java (working copy)
@@ -4,8 +4,8 @@
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.store.MockRAMDirectory;
/**
@@ -58,13 +58,12 @@
/** add the doc to a ram index */
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), dir,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
writer.addDocument(doc);
- writer.close();
/** open a reader and fetch the document */
- IndexReader reader = IndexReader.open(dir, false);
+ IndexReader reader = writer.getReader();
Document docFromReader = reader.document(0);
assertTrue(docFromReader != null);
@@ -76,6 +75,10 @@
String stringFldStoredTest = docFromReader.get("stringStored");
assertTrue(stringFldStoredTest.equals(binaryValStored));
+ writer.close();
+ reader.close();
+
+ reader = IndexReader.open(dir, false);
/** delete the document from index */
reader.deleteDocument(0);
assertEquals(0, reader.numDocs());
@@ -95,13 +98,12 @@
/** add the doc to a ram index */
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), dir,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
writer.addDocument(doc);
- writer.close();
/** open a reader and fetch the document */
- IndexReader reader = IndexReader.open(dir, false);
+ IndexReader reader = writer.getReader();
Document docFromReader = reader.document(0);
assertTrue(docFromReader != null);
@@ -110,6 +112,7 @@
assertTrue(binaryFldCompressedTest.equals(binaryValCompressed));
assertTrue(CompressionTools.decompressString(docFromReader.getBinaryValue("stringCompressed")).equals(binaryValCompressed));
+ writer.close();
reader.close();
dir.close();
}
Index: lucene/src/test/org/apache/lucene/document/TestDocument.java
===================================================================
--- lucene/src/test/org/apache/lucene/document/TestDocument.java (revision 963263)
+++ lucene/src/test/org/apache/lucene/document/TestDocument.java (working copy)
@@ -1,8 +1,9 @@
package org.apache.lucene.document;
import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
@@ -32,17 +33,15 @@
/**
* Tests {@link Document} class.
*/
-public class TestDocument extends LuceneTestCase
-{
-
+public class TestDocument extends LuceneTestCase {
+
String binaryVal = "this text will be stored as a byte array in the index";
String binaryVal2 = "this text will be also stored as a byte array in the index";
- public void testBinaryField()
- throws Exception
- {
+ public void testBinaryField() throws Exception {
Document doc = new Document();
- Fieldable stringFld = new Field("string", binaryVal, Field.Store.YES, Field.Index.NO);
+ Fieldable stringFld = new Field("string", binaryVal, Field.Store.YES,
+ Field.Index.NO);
Fieldable binaryFld = new Field("binary", binaryVal.getBytes());
Fieldable binaryFld2 = new Field("binary", binaryVal2.getBytes());
@@ -67,7 +66,7 @@
assertEquals(3, doc.fields.size());
byte[][] binaryTests = doc.getBinaryValues("binary");
-
+
assertEquals(2, binaryTests.length);
binaryTest = new String(binaryTests[0]);
@@ -88,17 +87,17 @@
/**
* Tests {@link Document#removeField(String)} method for a brand new Document
* that has not been indexed yet.
- *
+ *
* @throws Exception on error
*/
- public void testRemoveForNewDocument() throws Exception
- {
+ public void testRemoveForNewDocument() throws Exception {
Document doc = makeDocumentWithFields();
assertEquals(8, doc.fields.size());
doc.removeFields("keyword");
assertEquals(6, doc.fields.size());
- doc.removeFields("doesnotexists"); // removing non-existing fields is siltenlty ignored
- doc.removeFields("keyword"); // removing a field more than once
+ doc.removeFields("doesnotexists"); // removing non-existing fields is
+ // siltenlty ignored
+ doc.removeFields("keyword"); // removing a field more than once
assertEquals(6, doc.fields.size());
doc.removeField("text");
assertEquals(5, doc.fields.size());
@@ -106,164 +105,171 @@
assertEquals(4, doc.fields.size());
doc.removeField("text");
assertEquals(4, doc.fields.size());
- doc.removeField("doesnotexists"); // removing non-existing fields is siltenlty ignored
+ doc.removeField("doesnotexists"); // removing non-existing fields is
+ // siltenlty ignored
assertEquals(4, doc.fields.size());
doc.removeFields("unindexed");
assertEquals(2, doc.fields.size());
doc.removeFields("unstored");
assertEquals(0, doc.fields.size());
- doc.removeFields("doesnotexists"); // removing non-existing fields is siltenlty ignored
+ doc.removeFields("doesnotexists"); // removing non-existing fields is
+ // siltenlty ignored
assertEquals(0, doc.fields.size());
}
-
- public void testConstructorExceptions()
- {
- new Field("name", "value", Field.Store.YES, Field.Index.NO); // okay
- new Field("name", "value", Field.Store.NO, Field.Index.NOT_ANALYZED); // okay
+
+ public void testConstructorExceptions() {
+ new Field("name", "value", Field.Store.YES, Field.Index.NO); // okay
+ new Field("name", "value", Field.Store.NO, Field.Index.NOT_ANALYZED); // okay
try {
new Field("name", "value", Field.Store.NO, Field.Index.NO);
fail();
- } catch(IllegalArgumentException e) {
+ } catch (IllegalArgumentException e) {
// expected exception
}
- new Field("name", "value", Field.Store.YES, Field.Index.NO, Field.TermVector.NO); // okay
+ new Field("name", "value", Field.Store.YES, Field.Index.NO,
+ Field.TermVector.NO); // okay
try {
- new Field("name", "value", Field.Store.YES, Field.Index.NO, Field.TermVector.YES);
+ new Field("name", "value", Field.Store.YES, Field.Index.NO,
+ Field.TermVector.YES);
fail();
- } catch(IllegalArgumentException e) {
+ } catch (IllegalArgumentException e) {
// expected exception
}
}
- /**
- * Tests {@link Document#getValues(String)} method for a brand new Document
- * that has not been indexed yet.
- *
- * @throws Exception on error
- */
- public void testGetValuesForNewDocument() throws Exception
- {
- doAssert(makeDocumentWithFields(), false);
+ /**
+ * Tests {@link Document#getValues(String)} method for a brand new Document
+ * that has not been indexed yet.
+ *
+ * @throws Exception on error
+ */
+ public void testGetValuesForNewDocument() throws Exception {
+ doAssert(makeDocumentWithFields(), false);
+ }
+
+ /**
+ * Tests {@link Document#getValues(String)} method for a Document retrieved
+ * from an index.
+ *
+ * @throws Exception on error
+ */
+ public void testGetValuesForIndexedDocument() throws Exception {
+ RAMDirectory dir = new RAMDirectory();
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), dir,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ writer.addDocument(makeDocumentWithFields());
+ IndexReader reader = writer.getReader();
+
+ Searcher searcher = new IndexSearcher(reader);
+
+ // search for something that does exist
+ Query query = new TermQuery(new Term("keyword", "test1"));
+
+ // ensure that queries return expected results without DateFilter first
+ ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+ assertEquals(1, hits.length);
+
+ doAssert(searcher.doc(hits[0].doc), true);
+ writer.close();
+ searcher.close();
+ reader.close();
+ dir.close();
+ }
+
+ private Document makeDocumentWithFields() {
+ Document doc = new Document();
+ doc.add(new Field("keyword", "test1", Field.Store.YES,
+ Field.Index.NOT_ANALYZED));
+ doc.add(new Field("keyword", "test2", Field.Store.YES,
+ Field.Index.NOT_ANALYZED));
+ doc.add(new Field("text", "test1", Field.Store.YES, Field.Index.ANALYZED));
+ doc.add(new Field("text", "test2", Field.Store.YES, Field.Index.ANALYZED));
+ doc.add(new Field("unindexed", "test1", Field.Store.YES, Field.Index.NO));
+ doc.add(new Field("unindexed", "test2", Field.Store.YES, Field.Index.NO));
+ doc
+ .add(new Field("unstored", "test1", Field.Store.NO,
+ Field.Index.ANALYZED));
+ doc
+ .add(new Field("unstored", "test2", Field.Store.NO,
+ Field.Index.ANALYZED));
+ return doc;
+ }
+
+ private void doAssert(Document doc, boolean fromIndex) {
+ String[] keywordFieldValues = doc.getValues("keyword");
+ String[] textFieldValues = doc.getValues("text");
+ String[] unindexedFieldValues = doc.getValues("unindexed");
+ String[] unstoredFieldValues = doc.getValues("unstored");
+
+ assertTrue(keywordFieldValues.length == 2);
+ assertTrue(textFieldValues.length == 2);
+ assertTrue(unindexedFieldValues.length == 2);
+ // this test cannot work for documents retrieved from the index
+ // since unstored fields will obviously not be returned
+ if (!fromIndex) {
+ assertTrue(unstoredFieldValues.length == 2);
}
-
- /**
- * Tests {@link Document#getValues(String)} method for a Document retrieved from
- * an index.
- *
- * @throws Exception on error
- */
- public void testGetValuesForIndexedDocument() throws Exception {
- RAMDirectory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
- writer.addDocument(makeDocumentWithFields());
- writer.close();
-
- Searcher searcher = new IndexSearcher(dir, true);
-
- // search for something that does exists
- Query query = new TermQuery(new Term("keyword", "test1"));
-
- // ensure that queries return expected results without DateFilter first
- ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
- assertEquals(1, hits.length);
-
- doAssert(searcher.doc(hits[0].doc), true);
- searcher.close();
+
+ assertTrue(keywordFieldValues[0].equals("test1"));
+ assertTrue(keywordFieldValues[1].equals("test2"));
+ assertTrue(textFieldValues[0].equals("test1"));
+ assertTrue(textFieldValues[1].equals("test2"));
+ assertTrue(unindexedFieldValues[0].equals("test1"));
+ assertTrue(unindexedFieldValues[1].equals("test2"));
+ // this test cannot work for documents retrieved from the index
+ // since unstored fields will obviously not be returned
+ if (!fromIndex) {
+ assertTrue(unstoredFieldValues[0].equals("test1"));
+ assertTrue(unstoredFieldValues[1].equals("test2"));
}
-
- private Document makeDocumentWithFields()
- {
- Document doc = new Document();
- doc.add(new Field( "keyword", "test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
- doc.add(new Field( "keyword", "test2", Field.Store.YES, Field.Index.NOT_ANALYZED));
- doc.add(new Field( "text", "test1", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field( "text", "test2", Field.Store.YES, Field.Index.ANALYZED));
- doc.add(new Field("unindexed", "test1", Field.Store.YES, Field.Index.NO));
- doc.add(new Field("unindexed", "test2", Field.Store.YES, Field.Index.NO));
- doc.add(new Field( "unstored", "test1", Field.Store.NO, Field.Index.ANALYZED));
- doc.add(new Field( "unstored", "test2", Field.Store.NO, Field.Index.ANALYZED));
- return doc;
+ }
+
+ public void testFieldSetValue() throws Exception {
+
+ Field field = new Field("id", "id1", Field.Store.YES,
+ Field.Index.NOT_ANALYZED);
+ Document doc = new Document();
+ doc.add(field);
+ doc.add(new Field("keyword", "test", Field.Store.YES,
+ Field.Index.NOT_ANALYZED));
+
+ RAMDirectory dir = new RAMDirectory();
+ RandomIndexWriter writer = new RandomIndexWriter(newRandom(), dir,
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+ writer.addDocument(doc);
+ field.setValue("id2");
+ writer.addDocument(doc);
+ field.setValue("id3");
+ writer.addDocument(doc);
+
+ IndexReader reader = writer.getReader();
+ Searcher searcher = new IndexSearcher(reader);
+
+ Query query = new TermQuery(new Term("keyword", "test"));
+
+ // ensure that queries return expected results without DateFilter first
+ ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+ assertEquals(3, hits.length);
+ int result = 0;
+ for (int i = 0; i < 3; i++) {
+ Document doc2 = searcher.doc(hits[i].doc);
+ Field f = doc2.getField("id");
+ if (f.stringValue().equals("id1")) result |= 1;
+ else if (f.stringValue().equals("id2")) result |= 2;
+ else if (f.stringValue().equals("id3")) result |= 4;
+ else fail("unexpected id field");
}
-
- private void doAssert(Document doc, boolean fromIndex)
- {
- String[] keywordFieldValues = doc.getValues("keyword");
- String[] textFieldValues = doc.getValues("text");
- String[] unindexedFieldValues = doc.getValues("unindexed");
- String[] unstoredFieldValues = doc.getValues("unstored");
-
- assertTrue(keywordFieldValues.length == 2);
- assertTrue(textFieldValues.length == 2);
- assertTrue(unindexedFieldValues.length == 2);
- // this test cannot work for documents retrieved from the index
- // since unstored fields will obviously not be returned
- if (! fromIndex)
- {
- assertTrue(unstoredFieldValues.length == 2);
- }
-
- assertTrue(keywordFieldValues[0].equals("test1"));
- assertTrue(keywordFieldValues[1].equals("test2"));
- assertTrue(textFieldValues[0].equals("test1"));
- assertTrue(textFieldValues[1].equals("test2"));
- assertTrue(unindexedFieldValues[0].equals("test1"));
- assertTrue(unindexedFieldValues[1].equals("test2"));
- // this test cannot work for documents retrieved from the index
- // since unstored fields will obviously not be returned
- if (! fromIndex)
- {
- assertTrue(unstoredFieldValues[0].equals("test1"));
- assertTrue(unstoredFieldValues[1].equals("test2"));
- }
- }
-
- public void testFieldSetValue() throws Exception {
-
- Field field = new Field("id", "id1", Field.Store.YES, Field.Index.NOT_ANALYZED);
- Document doc = new Document();
- doc.add(field);
- doc.add(new Field("keyword", "test", Field.Store.YES, Field.Index.NOT_ANALYZED));
-
- RAMDirectory dir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
- TEST_VERSION_CURRENT, new MockAnalyzer()));
- writer.addDocument(doc);
- field.setValue("id2");
- writer.addDocument(doc);
- field.setValue("id3");
- writer.addDocument(doc);
- writer.close();
-
- Searcher searcher = new IndexSearcher(dir, true);
-
- Query query = new TermQuery(new Term("keyword", "test"));
-
- // ensure that queries return expected results without DateFilter first
- ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
- assertEquals(3, hits.length);
- int result = 0;
- for(int i=0;i<3;i++) {
- Document doc2 = searcher.doc(hits[i].doc);
- Field f = doc2.getField("id");
- if (f.stringValue().equals("id1"))
- result |= 1;
- else if (f.stringValue().equals("id2"))
- result |= 2;
- else if (f.stringValue().equals("id3"))
- result |= 4;
- else
- fail("unexpected id field");
- }
- searcher.close();
- dir.close();
- assertEquals("did not see all IDs", 7, result);
- }
-
+ writer.close();
+ searcher.close();
+ reader.close();
+ dir.close();
+ assertEquals("did not see all IDs", 7, result);
+ }
+
public void testFieldSetValueChangeBinary() {
Field field1 = new Field("field1", new byte[0]);
- Field field2 = new Field("field2", "",
- Field.Store.YES, Field.Index.ANALYZED);
+ Field field2 = new Field("field2", "", Field.Store.YES,
+ Field.Index.ANALYZED);
try {
field1.setValue("abc");
fail("did not hit expected exception");
Index: lucene/src/java/org/apache/lucene/index/codecs/pulsing/PulsingPostingsWriterImpl.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/codecs/pulsing/PulsingPostingsWriterImpl.java (revision 963263)
+++ lucene/src/java/org/apache/lucene/index/codecs/pulsing/PulsingPostingsWriterImpl.java (working copy)
@@ -229,7 +229,7 @@
@Override
public void finishDoc() {
- assert currentDoc.numPositions == currentDoc.termDocFreq;
+ assert omitTF || currentDoc.numPositions == currentDoc.termDocFreq;
}
boolean pendingIsIndexTerm;