diff --git build.xml build.xml
index 8d9326d..127ae7b 100644
--- build.xml
+++ build.xml
@@ -18,6 +18,12 @@
 -->
diff --git lucene/build.xml lucene/build.xml
index 6e5591a..2bdacbb 100644
--- lucene/build.xml
+++ lucene/build.xml
@@ -64,9 +64,6 @@
@@ -88,10 +85,22 @@
+        Warning: Ignoring your multiplier and nightly settings for backwards tests.
+        These tests are for API compatibility only!
@@ -103,13 +112,6 @@
-        Warning: Ignoring your multiplier and nightly settings for backwards tests.
-        These tests are for API compatibility only!
@@ -118,30 +120,17 @@
     Note: we disable multiplier/nightly because the purpose is to find API breaks
 -->
@@ -87,7 +81,7 @@
@@ -171,12 +165,6 @@
@@ -562,151 +550,277 @@
-      Tests failed!
+#
+# Test case filtering:
+# - 'tests.filter' is a class-filtering shell-like glob pattern.
+# - 'tests.filter.method' is a method-filtering shell-like glob pattern.
+#
+
+# Run a single test case (verbose)
+ant test -Dtests.filter=org.apache.lucene.package.ClassName
+
+# Run a single test case (simpler)
+ant test -Dtests.filter=*.ClassName
+
+# Run all tests in a package and sub-packages
+ant test -Dtests.filter=org.apache.lucene.package.*
+
+# Run all test methods that contain 'esi' substring (...r*esi*ze...).
+ant test -Dtests.filter.method=*esi*
+
+#
+# Load balancing and caches.
+#
+
+# Run sequentially (one slave JVM).
+ant -Dtests.threadspercpu=1 test
+
+# Run with a constant suite order on slave JVM (no job stealing).
+ant -Djunit4.dynamicAssignmentRatio=0 test
+
+# Update global (versioned) cache (from the top-level).
+ant clean test
+ant -f lucene/build.xml test-updatecache
+
+#
+# Miscellaneous
+#
+
+# Run all tests without stopping on errors (can be diagnosed from logs),
+# from the top level:
+ant -Djunit4.haltonfailure=false test
+     See http://issues.apache.org/jira/browse/LUCENE-721
+  -->
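Every test change in the hunks that follow applies one mechanical substitution: reads of the shared static `random` field in LuceneTestCase-derived tests become calls to `getRandom()` (instance contexts) or `getStaticRandom()` (static contexts such as @BeforeClass hooks). The patch itself never shows the accessor side, so what follows is only a minimal sketch of what such accessors could look like, assuming the framework installs reproducibly seeded per-class and per-test Random instances; everything here except the two accessor names is invented for illustration.

import java.util.Random;

// Hypothetical sketch of the accessor pattern the test changes migrate to.
// Instead of reading a shared static 'random' field, tests call getRandom()
// (instance context) or getStaticRandom() (static context), letting the
// framework hand out reproducibly seeded instances.
public abstract class RandomAccessSketch {
  private static volatile Random staticRandom;            // seeded once per test class
  private static final ThreadLocal<Random> perTest =
      new ThreadLocal<Random>();                          // seeded once per test method

  // A runner would call these with seeds derived from a logged master seed.
  static void seedClass(long seed) { staticRandom = new Random(seed); }
  static void seedTest(long seed)  { perTest.set(new Random(seed)); }

  /** Random for instance contexts: test methods, setUp(), tearDown(). */
  protected Random getRandom() {
    Random r = perTest.get();
    if (r == null) {
      throw new IllegalStateException("getRandom() called outside a test context");
    }
    return r;
  }

  /** Random for static contexts: @BeforeClass/@AfterClass hooks. */
  protected static Random getStaticRandom() {
    if (staticRandom == null) {
      throw new IllegalStateException("getStaticRandom() called before class setup");
    }
    return staticRandom;
  }
}

Routing access through a method lets the framework fail fast when a test touches randomness outside a managed context, which a bare static field cannot do.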
diff --git lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java
index e4cdc32..049fefe 100644
--- lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java
+++ lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java
@@ -56,7 +56,7 @@ public class HighlighterPhraseTest extends LuceneTestCase {
     final String TEXT = "the fox jumped";
     final Directory directory = newDirectory();
     final IndexWriter indexWriter = new IndexWriter(directory,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)));
     try {
       final Document document = new Document();
       FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
@@ -98,7 +98,7 @@ public class HighlighterPhraseTest extends LuceneTestCase {
     final String TEXT = "the fox jumped";
     final Directory directory = newDirectory();
     final IndexWriter indexWriter = new IndexWriter(directory,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)));
     try {
       final Document document = new Document();
 
@@ -169,7 +169,7 @@ public class HighlighterPhraseTest extends LuceneTestCase {
     final String TEXT = "the fox did not jump";
     final Directory directory = newDirectory();
     final IndexWriter indexWriter = new IndexWriter(directory,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)));
     try {
       final Document document = new Document();
 
@@ -212,7 +212,7 @@ public class HighlighterPhraseTest extends LuceneTestCase {
     final String TEXT = "the fox did not jump";
     final Directory directory = newDirectory();
     final IndexWriter indexWriter = new IndexWriter(directory,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)));
     try {
       final Document document = new Document();
 
@@ -252,7 +252,7 @@ public class HighlighterPhraseTest extends LuceneTestCase {
     final String TEXT = "the fox did not jump";
     final Directory directory = newDirectory();
     final IndexWriter indexWriter = new IndexWriter(directory,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)));
     try {
       final Document document = new Document();
       FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
diff --git lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
index 00818d5..d32fcf9 100644
--- lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
+++ lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
@@ -69,7 +69,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
   Directory ramDir;
   public IndexSearcher searcher = null;
   int numHighlights = 0;
-  final Analyzer analyzer = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
+  Analyzer analyzer;
   TopDocs hits;
 
   String[] texts = {
@@ -78,9 +78,9 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
       "JFK has been shot", "John Kennedy has been shot",
       "This text has a typo in referring to Keneddy",
       "wordx wordy wordz wordx wordy wordx worda wordb wordy wordc", "y z x y z a b", "lets is a the lets is a the lets is a the lets" };
-  
+
   public void testQueryScorerHits() throws Exception {
-    Analyzer analyzer = new MockAnalyzer(random, MockTokenizer.SIMPLE, true);
+    Analyzer analyzer = new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true);
 
     PhraseQuery phraseQuery = new PhraseQuery();
     phraseQuery.add(new Term(FIELD_NAME, "very"));
@@ -152,9 +152,9 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
    * This method intended for use with testHighlightingWithDefaultField()
    * @throws InvalidTokenOffsetsException
    */
-  private static String highlightField(Query query, String fieldName, String text)
+  private String highlightField(Query query, String fieldName, String text)
       throws IOException, InvalidTokenOffsetsException {
-    TokenStream tokenStream = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)
+    TokenStream tokenStream = new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)
       .tokenStream(fieldName, new StringReader(text));
     // Assuming "<B>", "</B>" used to highlight
     SimpleHTMLFormatter formatter = new SimpleHTMLFormatter();
@@ -233,7 +233,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
 
     Highlighter h = new Highlighter(this, scorer);
 
-    Analyzer analyzer = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
+    Analyzer analyzer = new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false);
 
     h.getBestFragment(analyzer, f1, content);
@@ -1166,7 +1166,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
   }
 
   public void testMaxSizeHighlight() throws Exception {
-    final MockAnalyzer analyzer = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
+    final MockAnalyzer analyzer = new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
     // we disable MockTokenizer checks because we will forcefully limit the
     // tokenstream and call end() before incrementToken() returns false.
     analyzer.setEnableChecks(false);
@@ -1201,7 +1201,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
     CharacterRunAutomaton stopWords = new CharacterRunAutomaton(BasicAutomata.makeString("stoppedtoken"));
     // we disable MockTokenizer checks because we will forcefully limit the
     // tokenstream and call end() before incrementToken() returns false.
-    final MockAnalyzer analyzer = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopWords, true);
+    final MockAnalyzer analyzer = new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true, stopWords, true);
     analyzer.setEnableChecks(false);
 
     TermQuery query = new TermQuery(new Term("data", goodWord));
@@ -1249,12 +1249,12 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
     String text = "this is a text with searchterm in it";
     SimpleHTMLFormatter fm = new SimpleHTMLFormatter();
-    TokenStream tokenStream = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopWords, true)
+    TokenStream tokenStream = new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true, stopWords, true)
       .tokenStream("text", new StringReader(text));
     Highlighter hg = getHighlighter(query, "text", fm);
     hg.setTextFragmenter(new NullFragmenter());
     hg.setMaxDocCharsToAnalyze(36);
-    String match = hg.getBestFragment(new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopWords, true), "text", text);
+    String match = hg.getBestFragment(new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true, stopWords, true), "text", text);
     assertTrue(
         "Matched text should contain remainder of text after highlighted query ",
         match.endsWith("in it"));
@@ -1271,7 +1271,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
     numHighlights = 0;
     // test to show how rewritten query can still be used
     searcher = new IndexSearcher(reader);
-    Analyzer analyzer = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
+    Analyzer analyzer = new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
 
     BooleanQuery query = new BooleanQuery();
     query.add(new WildcardQuery(new Term(FIELD_NAME, "jf?")), Occur.SHOULD);
@@ -1613,7 +1613,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
   }
 
   private Directory dir;
-  private Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
+  private Analyzer a;
 
   public void testWeightedTermsWithDeletes() throws IOException, InvalidTokenOffsetsException {
     makeIndex();
@@ -1628,7 +1628,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
   }
 
   private void makeIndex() throws IOException {
-    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
+    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)));
     writer.addDocument( doc( "t_text1", "random words for highlighting tests del" ) );
     writer.addDocument( doc( "t_text1", "more random words for second field del" ) );
     writer.addDocument( doc( "t_text1", "random words for highlighting tests del" ) );
@@ -1638,7 +1638,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
   }
 
   private void deleteDocument() throws IOException {
-    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setOpenMode(OpenMode.APPEND));
+    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)).setOpenMode(OpenMode.APPEND));
     writer.deleteDocuments( new Term( "t_text1", "del" ) );
     // To see negative idf, keep comment the following line
     //writer.forceMerge(1);
@@ -1730,10 +1730,13 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
   @Override
   public void setUp() throws Exception {
     super.setUp();
+
+    a = new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false);
+    analyzer = new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
     dir = newDirectory();
     ramDir = newDirectory();
     IndexWriter writer = new IndexWriter(ramDir, newIndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)));
+        TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)));
     for (String text : texts) {
       addDoc(writer, text);
     }
diff --git lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/custom/HighlightCustomQueryTest.java lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/custom/HighlightCustomQueryTest.java
index c76a8ff..e3b15a5 100644
--- lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/custom/HighlightCustomQueryTest.java
+++ lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/custom/HighlightCustomQueryTest.java
@@ -88,9 +88,9 @@ public class HighlightCustomQueryTest extends LuceneTestCase {
    *
    * @throws InvalidTokenOffsetsException
    */
-  private static String highlightField(Query query, String fieldName,
+  private String highlightField(Query query, String fieldName,
       String text) throws IOException, InvalidTokenOffsetsException {
-    TokenStream tokenStream = new MockAnalyzer(random, MockTokenizer.SIMPLE,
+    TokenStream tokenStream = new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE,
         true, MockTokenFilter.ENGLISH_STOPSET, true).tokenStream(fieldName,
         new StringReader(text));
     // Assuming "<B>", "</B>" used to highlight
diff --git lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java
index 4d88012..e5d5a9b 100644
--- lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java
+++ lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java
@@ -84,9 +84,9 @@ public abstract class AbstractTestCase extends LuceneTestCase {
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    analyzerW = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
+    analyzerW = new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false);
     analyzerB = new BigramAnalyzer();
-    analyzerK = new MockAnalyzer(random, MockTokenizer.KEYWORD, false);
+    analyzerK = new MockAnalyzer(getRandom(), MockTokenizer.KEYWORD, false);
     dir = newDirectory();
   }
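A second pattern in the HighlighterTest hunks above (and in FuzzyLikeThisQueryTest further down): analyzer fields that used to be built in field initializers become bare declarations, constructed inside setUp(). A plausible reason, assuming the per-test random source is installed only after the test instance is constructed (which is what these moves suggest), is that field initializers simply run too early. A self-contained demonstration of that ordering, with no Lucene types; the ThreadLocal slot here is hypothetical:

import java.util.Random;

public class InitOrderDemo {
  // Stand-in for the framework's per-test random slot (hypothetical).
  static final ThreadLocal<Random> PER_TEST = new ThreadLocal<Random>();

  // Field initializers run during construction, before any per-test
  // state has been installed, so this captures null.
  final Random fromFieldInitializer = PER_TEST.get();
  Random fromSetUp;

  void setUp() {
    // By the time setUp() runs, the runner has installed the random source.
    fromSetUp = PER_TEST.get();
  }

  public static void main(String[] args) {
    InitOrderDemo test = new InitOrderDemo(); // initializer sees nothing yet
    PER_TEST.set(new Random(42));             // the "runner" seeds the test
    test.setUp();
    System.out.println("field initializer saw: " + test.fromFieldInitializer); // null
    System.out.println("setUp saw:             " + test.fromSetUp);            // a Random
  }
}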
diff --git lucene/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java lucene/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
index a42004a..a76b70f 100644
--- lucene/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
+++ lucene/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
@@ -99,14 +99,14 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
     StringBuilder termField = new StringBuilder();
 
     // add up to 250 terms to field "foo"
-    final int numFooTerms = random.nextInt(250 * RANDOM_MULTIPLIER);
+    final int numFooTerms = getRandom().nextInt(250 * RANDOM_MULTIPLIER);
     for (int i = 0; i < numFooTerms; i++) {
       fooField.append(" ");
       fooField.append(randomTerm());
     }
 
     // add up to 250 terms to field "term"
-    final int numTermTerms = random.nextInt(250 * RANDOM_MULTIPLIER);
+    final int numTermTerms = getRandom().nextInt(250 * RANDOM_MULTIPLIER);
     for (int i = 0; i < numTermTerms; i++) {
       termField.append(" ");
       termField.append(randomTerm());
@@ -151,10 +151,10 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
    * Return a random analyzer (Simple, Stop, Standard) to analyze the terms.
    */
   private Analyzer randomAnalyzer() {
-    switch(random.nextInt(3)) {
-      case 0: return new MockAnalyzer(random, MockTokenizer.SIMPLE, true);
-      case 1: return new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
-      default: return new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
+    switch(getRandom().nextInt(3)) {
+      case 0: return new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true);
+      case 1: return new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true);
+      default: return new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false);
     }
   }
@@ -173,21 +173,21 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
    * the other half of the time, returns a random unicode string.
    */
   private String randomTerm() {
-    if (random.nextBoolean()) {
+    if (getRandom().nextBoolean()) {
       // return a random TEST_TERM
-      return TEST_TERMS[random.nextInt(TEST_TERMS.length)];
+      return TEST_TERMS[getRandom().nextInt(TEST_TERMS.length)];
     } else {
       // return a random unicode term
-      return _TestUtil.randomUnicodeString(random);
+      return _TestUtil.randomUnicodeString(getRandom());
     }
   }
 
   public void testDocsEnumStart() throws Exception {
-    Analyzer analyzer = new MockAnalyzer(random);
+    Analyzer analyzer = new MockAnalyzer(getRandom());
     MemoryIndex memory = new MemoryIndex();
     memory.addField("foo", "bar", analyzer);
     AtomicReader reader = (AtomicReader) memory.createSearcher().getIndexReader();
-    DocsEnum disi = _TestUtil.docs(random, reader, "foo", new BytesRef("bar"), null, null, false);
+    DocsEnum disi = _TestUtil.docs(getRandom(), reader, "foo", new BytesRef("bar"), null, null, false);
     int docid = disi.docID();
     assertTrue(docid == -1 || docid == DocIdSetIterator.NO_MORE_DOCS);
     assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
@@ -203,7 +203,7 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
   }
 
   public void testDocsAndPositionsEnumStart() throws Exception {
-    Analyzer analyzer = new MockAnalyzer(random);
+    Analyzer analyzer = new MockAnalyzer(getRandom());
     MemoryIndex memory = new MemoryIndex();
     memory.addField("foo", "bar", analyzer);
     AtomicReader reader = (AtomicReader) memory.createSearcher().getIndexReader();
diff --git lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java
index 588ba26..bbf06da 100644
--- lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java
+++ lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java
@@ -41,7 +41,7 @@ public class TestIndexSplitter extends LuceneTestCase {
     mergePolicy.setNoCFSRatio(1);
     IndexWriter iw = new IndexWriter(
         fsDir,
-        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).
             setOpenMode(OpenMode.CREATE).
             setMergePolicy(mergePolicy)
     );
diff --git lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java
index 54c58ad..c12a684 100644
--- lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java
+++ lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java
@@ -33,7 +33,7 @@ public class TestMultiPassIndexSplitter extends LuceneTestCase {
   public void setUp() throws Exception {
     super.setUp();
    dir = newDirectory();
-    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(NoMergePolicy.COMPOUND_FILES));
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setMergePolicy(NoMergePolicy.COMPOUND_FILES));
     Document doc;
     for (int i = 0; i < NUM_DOCS; i++) {
       doc = new Document();
diff --git lucene/contrib/misc/src/test/org/apache/lucene/index/TestPKIndexSplitter.java lucene/contrib/misc/src/test/org/apache/lucene/index/TestPKIndexSplitter.java
index b5fdfb4..91b8a52 100644
--- lucene/contrib/misc/src/test/org/apache/lucene/index/TestPKIndexSplitter.java
+++ lucene/contrib/misc/src/test/org/apache/lucene/index/TestPKIndexSplitter.java
@@ -36,7 +36,7 @@ public class TestPKIndexSplitter extends LuceneTestCase {
     NumberFormat format = new DecimalFormat("000000000");
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))
+        TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false))
         .setOpenMode(OpenMode.CREATE).setMergePolicy(NoMergePolicy.COMPOUND_FILES));
     for (int x = 0; x < 11; x++) {
       Document doc = createDocument(x, "1", 3, format);
@@ -56,7 +56,7 @@ public class TestPKIndexSplitter extends LuceneTestCase {
 
     // delete some documents
     w = new IndexWriter(dir, newIndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))
+        TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false))
         .setOpenMode(OpenMode.APPEND).setMergePolicy(NoMergePolicy.COMPOUND_FILES));
     w.deleteDocuments(midTerm);
     w.deleteDocuments(new Term("id", format.format(2)));
@@ -71,8 +71,8 @@ public class TestPKIndexSplitter extends LuceneTestCase {
     Directory dir1 = newDirectory();
     Directory dir2 = newDirectory();
     PKIndexSplitter splitter = new PKIndexSplitter(dir, dir1, dir2, splitTerm,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)),
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())),
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())));
     splitter.split();
     IndexReader ir1 = IndexReader.open(dir1);
diff --git lucene/contrib/misc/src/test/org/apache/lucene/misc/TestHighFreqTerms.java lucene/contrib/misc/src/test/org/apache/lucene/misc/TestHighFreqTerms.java
index c67bc3b..980e9ee 100644
--- lucene/contrib/misc/src/test/org/apache/lucene/misc/TestHighFreqTerms.java
+++ lucene/contrib/misc/src/test/org/apache/lucene/misc/TestHighFreqTerms.java
@@ -17,6 +17,8 @@ package org.apache.lucene.misc;
  * limitations under the License.
  */
 
+import java.util.Random;
+
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
@@ -39,8 +41,8 @@ public class TestHighFreqTerms extends LuceneTestCase {
   @BeforeClass
   public static void setUpClass() throws Exception {
     dir = newDirectory();
-    writer = new IndexWriter(dir, newIndexWriterConfig(random,
-        TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))
+    writer = new IndexWriter(dir, newIndexWriterConfig(getStaticRandom(),
+        TEST_VERSION_CURRENT, new MockAnalyzer(getStaticRandom(), MockTokenizer.WHITESPACE, false))
         .setMaxBufferedDocs(2));
     indexDocs(writer);
     reader = IndexReader.open(dir);
@@ -194,7 +196,8 @@ public class TestHighFreqTerms extends LuceneTestCase {
   /********************Testing Utils**********************************/
 
   private static void indexDocs(IndexWriter writer) throws Exception {
-
+    Random rnd = getStaticRandom();
+
     /**
      * Generate 10 documents where term n has a docFreq of n and a totalTermFreq of n*2 (squared).
      */
@@ -202,9 +205,9 @@ public class TestHighFreqTerms extends LuceneTestCase {
       Document doc = new Document();
       String content = getContent(i);
 
-      doc.add(newField(random, "FIELD_1", content, TextField.TYPE_STORED));
+      doc.add(newField(rnd, "FIELD_1", content, TextField.TYPE_STORED));
       //add a different field
-      doc.add(newField(random, "different_field", "diff", TextField.TYPE_STORED));
+      doc.add(newField(rnd, "different_field", "diff", TextField.TYPE_STORED));
       writer.addDocument(doc);
     }
 
@@ -212,7 +215,7 @@ public class TestHighFreqTerms extends LuceneTestCase {
     //highest freq terms for a specific field.
     for (int i = 1; i <= 10; i++) {
       Document doc = new Document();
-      doc.add(newField(random, "different_field", "diff", TextField.TYPE_STORED));
+      doc.add(newField(rnd, "different_field", "diff", TextField.TYPE_STORED));
       writer.addDocument(doc);
     }
     // add some docs where tf < df so we can see if sorting works
@@ -223,7 +226,7 @@ public class TestHighFreqTerms extends LuceneTestCase {
     for (int i = 0; i < highTF; i++) {
       content += "highTF ";
     }
-    doc.add(newField(random, "FIELD_1", content, TextField.TYPE_STORED));
+    doc.add(newField(rnd, "FIELD_1", content, TextField.TYPE_STORED));
     writer.addDocument(doc);
     // highTF medium df =5
     int medium_df = 5;
@@ -234,7 +237,7 @@ public class TestHighFreqTerms extends LuceneTestCase {
       for (int j = 0; j < tf; j++) {
         newcontent += "highTFmedDF ";
       }
-      newdoc.add(newField(random, "FIELD_1", newcontent, TextField.TYPE_STORED));
+      newdoc.add(newField(rnd, "FIELD_1", newcontent, TextField.TYPE_STORED));
       writer.addDocument(newdoc);
     }
     // add a doc with high tf in field different_field
@@ -244,7 +247,7 @@ public class TestHighFreqTerms extends LuceneTestCase {
     for (int i = 0; i < targetTF; i++) {
       content += "TF150 ";
     }
-    doc.add(newField(random, "different_field", content, TextField.TYPE_STORED));
+    doc.add(newField(rnd, "different_field", content, TextField.TYPE_STORED));
     writer.addDocument(doc);
     writer.close();
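TestHighFreqTerms above runs its indexing from static context, so it switches to getStaticRandom() and, notably, caches the result in a local (Random rnd = getStaticRandom()) before the long indexDocs loops rather than calling the accessor at every use. A small sketch of that idiom with a stand-in accessor; the class name and seed below are invented for illustration:

import java.util.Random;

public class StaticRandomIdiom {
  // Stand-in for the framework's class-scoped random source (hypothetical).
  private static final Random STATIC_RANDOM = new Random(0xC0FFEE);

  static Random getStaticRandom() {
    return STATIC_RANDOM;
  }

  // Mirrors the indexDocs(...) change: fetch the Random once, then reuse
  // the local throughout the method instead of re-invoking the accessor
  // inside every loop iteration.
  static void indexDocs() {
    Random rnd = getStaticRandom();
    for (int i = 1; i <= 10; i++) {
      int extraTerms = rnd.nextInt(5);
      System.out.println("doc " + i + ": " + extraTerms + " extra terms");
    }
  }

  public static void main(String[] args) {
    indexDocs();
  }
}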
Exception { super.setUp(); directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setMergePolicy(newLogMergePolicy())); //Add series of docs with filterable fields : url, text and dates flags addDoc(writer, "http://lucene.apache.org", "lucene 1.4.3 available", "20040101"); @@ -134,7 +134,7 @@ public class DuplicateFilterTest extends LuceneTestCase { for (ScoreDoc hit : hits) { Document d = searcher.doc(hit.doc); String url = d.get(KEY_FIELD); - DocsEnum td = _TestUtil.docs(random, reader, + DocsEnum td = _TestUtil.docs(getRandom(), reader, KEY_FIELD, new BytesRef(url), MultiFields.getLiveDocs(reader), @@ -158,7 +158,7 @@ public class DuplicateFilterTest extends LuceneTestCase { for (ScoreDoc hit : hits) { Document d = searcher.doc(hit.doc); String url = d.get(KEY_FIELD); - DocsEnum td = _TestUtil.docs(random, reader, + DocsEnum td = _TestUtil.docs(getRandom(), reader, KEY_FIELD, new BytesRef(url), MultiFields.getLiveDocs(reader), diff --git lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/FuzzyLikeThisQueryTest.java lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/FuzzyLikeThisQueryTest.java index 823a923..b90bdf9 100644 --- lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/FuzzyLikeThisQueryTest.java +++ lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/FuzzyLikeThisQueryTest.java @@ -38,13 +38,15 @@ public class FuzzyLikeThisQueryTest extends LuceneTestCase { private Directory directory; private IndexSearcher searcher; private IndexReader reader; - private Analyzer analyzer = new MockAnalyzer(random); + private Analyzer analyzer; @Override public void setUp() throws Exception { super.setUp(); + + analyzer = new MockAnalyzer(getRandom()); directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setMergePolicy(newLogMergePolicy())); //Add series of docs with misspelt names addDoc(writer, "jonathon smythe", "1"); @@ -122,7 +124,7 @@ public class FuzzyLikeThisQueryTest extends LuceneTestCase { } public void testFuzzyLikeThisQueryEquals() { - Analyzer analyzer = new MockAnalyzer(random); + Analyzer analyzer = new MockAnalyzer(getRandom()); FuzzyLikeThisQuery fltq1 = new FuzzyLikeThisQuery(10, analyzer); fltq1.addTerms("javi", "subject", 0.5f, 2); FuzzyLikeThisQuery fltq2 = new FuzzyLikeThisQuery(10, analyzer); diff --git lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowCollationMethods.java lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowCollationMethods.java index 0771f1c..858b55a 100644 --- lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowCollationMethods.java +++ lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/TestSlowCollationMethods.java @@ -47,22 +47,22 @@ public class TestSlowCollationMethods extends LuceneTestCase { @BeforeClass public static void beforeClass() throws Exception { - final Locale locale = LuceneTestCase.randomLocale(random); + 
final Locale locale = LuceneTestCase.randomLocale(getStaticRandom()); collator = Collator.getInstance(locale); collator.setStrength(Collator.IDENTICAL); collator.setDecomposition(Collator.NO_DECOMPOSITION); numDocs = 1000 * RANDOM_MULTIPLIER; dir = newDirectory(); - RandomIndexWriter iw = new RandomIndexWriter(random, dir); + RandomIndexWriter iw = new RandomIndexWriter(getStaticRandom(), dir); for (int i = 0; i < numDocs; i++) { Document doc = new Document(); - String value = _TestUtil.randomUnicodeString(random); + String value = _TestUtil.randomUnicodeString(getStaticRandom()); Field field = newField("field", value, StringField.TYPE_STORED); doc.add(field); iw.addDocument(doc); } - splitDoc = _TestUtil.randomUnicodeString(random); + splitDoc = _TestUtil.randomUnicodeString(getStaticRandom()); reader = iw.getReader(); iw.close(); @@ -97,13 +97,13 @@ public class TestSlowCollationMethods extends LuceneTestCase { }); final Sort sort = new Sort(sf); - final TopDocs docs1 = searcher.search(TermRangeQuery.newStringRange("field", null, splitDoc, true, true), null, numDocs/(1+random.nextInt(4)), sort); + final TopDocs docs1 = searcher.search(TermRangeQuery.newStringRange("field", null, splitDoc, true, true), null, numDocs/(1+getRandom().nextInt(4)), sort); doCheckSorting(docs1); - final TopDocs docs2 = searcher.search(TermRangeQuery.newStringRange("field", splitDoc, null, true, true), null, numDocs/(1+random.nextInt(4)), sort); + final TopDocs docs2 = searcher.search(TermRangeQuery.newStringRange("field", splitDoc, null, true, true), null, numDocs/(1+getRandom().nextInt(4)), sort); doCheckSorting(docs2); - final TopDocs docs = TopDocs.merge(sort, numDocs/(1+random.nextInt(4)), new TopDocs[]{docs1, docs2}); + final TopDocs docs = TopDocs.merge(sort, numDocs/(1+getRandom().nextInt(4)), new TopDocs[]{docs1, docs2}); doCheckSorting(docs); } @@ -130,8 +130,8 @@ public class TestSlowCollationMethods extends LuceneTestCase { public void testRangeQuery() throws Exception { int numQueries = 50*RANDOM_MULTIPLIER; for (int i = 0; i < numQueries; i++) { - String startPoint = _TestUtil.randomUnicodeString(random); - String endPoint = _TestUtil.randomUnicodeString(random); + String startPoint = _TestUtil.randomUnicodeString(getRandom()); + String endPoint = _TestUtil.randomUnicodeString(getRandom()); Query query = new SlowCollatedTermRangeQuery("field", startPoint, endPoint, true, true, collator); doTestRanges(startPoint, endPoint, query); } @@ -140,8 +140,8 @@ public class TestSlowCollationMethods extends LuceneTestCase { public void testRangeFilter() throws Exception { int numQueries = 50*RANDOM_MULTIPLIER; for (int i = 0; i < numQueries; i++) { - String startPoint = _TestUtil.randomUnicodeString(random); - String endPoint = _TestUtil.randomUnicodeString(random); + String startPoint = _TestUtil.randomUnicodeString(getRandom()); + String endPoint = _TestUtil.randomUnicodeString(getRandom()); Query query = new ConstantScoreQuery(new SlowCollatedTermRangeFilter("field", startPoint, endPoint, true, true, collator)); doTestRanges(startPoint, endPoint, query); } diff --git lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestRegexQuery.java lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestRegexQuery.java index d72d516..ac3207b 100644 --- lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestRegexQuery.java +++ lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestRegexQuery.java @@ -45,7 +45,7 @@ public class 
TestRegexQuery extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), directory); Document doc = new Document(); doc.add(newField(FN, "the quick brown fox jumps over the lazy dog", TextField.TYPE_UNSTORED)); writer.addDocument(doc); diff --git lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestSpanRegexQuery.java lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestSpanRegexQuery.java index 4866cd7..f1b1fb8 100644 --- lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestSpanRegexQuery.java +++ lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestSpanRegexQuery.java @@ -58,7 +58,7 @@ public class TestSpanRegexQuery extends LuceneTestCase { public void testSpanRegex() throws Exception { Directory directory = newDirectory(); IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random))); + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); Document doc = new Document(); // doc.add(newField("field", "the quick brown fox jumps over the lazy dog", // Field.Store.NO, Field.Index.ANALYZED)); @@ -98,14 +98,14 @@ public class TestSpanRegexQuery extends LuceneTestCase { // creating first index writer IndexWriter writerA = new IndexWriter(indexStoreA, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE)); + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setOpenMode(OpenMode.CREATE)); writerA.addDocument(lDoc); writerA.forceMerge(1); writerA.close(); // creating second index writer IndexWriter writerB = new IndexWriter(indexStoreB, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE)); + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setOpenMode(OpenMode.CREATE)); writerB.addDocument(lDoc2); writerB.forceMerge(1); writerB.close(); diff --git lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java index 0250549..9ccb294 100644 --- lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java +++ lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java @@ -71,7 +71,7 @@ public class TestCartesian extends LuceneTestCase { super.setUp(); directory = newDirectory(); - IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); setUpPlotter( 2, 15); diff --git lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestDistance.java lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestDistance.java index cc30bf7..a36a57a 100644 --- lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestDistance.java +++ lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestDistance.java @@ -49,7 +49,7 @@ public class TestDistance extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); directory = newDirectory(); - writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + writer = new IndexWriter(directory, 
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); addData(writer); } diff --git lucene/core/src/test/org/apache/lucene/TestDemo.java lucene/core/src/test/org/apache/lucene/TestDemo.java index 666533e..b4d98b6 100644 --- lucene/core/src/test/org/apache/lucene/TestDemo.java +++ lucene/core/src/test/org/apache/lucene/TestDemo.java @@ -39,13 +39,13 @@ import org.apache.lucene.util.LuceneTestCase; public class TestDemo extends LuceneTestCase { public void testDemo() throws IOException { - Analyzer analyzer = new MockAnalyzer(random); + Analyzer analyzer = new MockAnalyzer(getRandom()); // Store the index in memory: Directory directory = newDirectory(); // To store an index on disk, use this instead: //Directory directory = FSDirectory.open("/tmp/testindex"); - RandomIndexWriter iwriter = new RandomIndexWriter(random, directory, analyzer); + RandomIndexWriter iwriter = new RandomIndexWriter(getRandom(), directory, analyzer); Document doc = new Document(); String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm"; String text = "This is the text to be indexed. " + longTerm; diff --git lucene/core/src/test/org/apache/lucene/TestExternalCodecs.java lucene/core/src/test/org/apache/lucene/TestExternalCodecs.java index f310e82..599a3b2 100644 --- lucene/core/src/test/org/apache/lucene/TestExternalCodecs.java +++ lucene/core/src/test/org/apache/lucene/TestExternalCodecs.java @@ -66,7 +66,7 @@ public class TestExternalCodecs extends LuceneTestCase { dir.setCheckIndexOnClose(false); // we use a custom codec provider IndexWriter w = new IndexWriter( dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())). setCodec(new CustomPerFieldCodec()). 
setMergePolicy(newLogMergePolicy(3)) ); diff --git lucene/core/src/test/org/apache/lucene/TestMergeSchedulerExternal.java lucene/core/src/test/org/apache/lucene/TestMergeSchedulerExternal.java index 8f0418f..7ff0e6e 100644 --- lucene/core/src/test/org/apache/lucene/TestMergeSchedulerExternal.java +++ lucene/core/src/test/org/apache/lucene/TestMergeSchedulerExternal.java @@ -95,7 +95,7 @@ public class TestMergeSchedulerExternal extends LuceneTestCase { doc.add(idField); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergeScheduler(new MyMergeScheduler()) + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setMergeScheduler(new MyMergeScheduler()) .setMaxBufferedDocs(2).setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH) .setMergePolicy(newLogMergePolicy())); LogMergePolicy logMP = (LogMergePolicy) writer.getConfig().getMergePolicy(); diff --git lucene/core/src/test/org/apache/lucene/TestSearch.java lucene/core/src/test/org/apache/lucene/TestSearch.java index 5ec455c..f0dffdf 100644 --- lucene/core/src/test/org/apache/lucene/TestSearch.java +++ lucene/core/src/test/org/apache/lucene/TestSearch.java @@ -46,7 +46,7 @@ public class TestSearch extends LuceneTestCase { public void testSearch() throws Exception { StringWriter sw = new StringWriter(); PrintWriter pw = new PrintWriter(sw, true); - doTestSearch(random, pw, false); + doTestSearch(getRandom(), pw, false); pw.close(); sw.close(); String multiFileOutput = sw.getBuffer().toString(); @@ -54,7 +54,7 @@ public class TestSearch extends LuceneTestCase { sw = new StringWriter(); pw = new PrintWriter(sw, true); - doTestSearch(random, pw, true); + doTestSearch(getRandom(), pw, true); pw.close(); sw.close(); String singleFileOutput = sw.getBuffer().toString(); diff --git lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java index 23d92bd..e473efe 100644 --- lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java +++ lucene/core/src/test/org/apache/lucene/TestSearchForDuplicates.java @@ -50,7 +50,7 @@ public class TestSearchForDuplicates extends LuceneTestCase { StringWriter sw = new StringWriter(); PrintWriter pw = new PrintWriter(sw, true); final int MAX_DOCS = atLeast(225); - doTest(random, pw, false, MAX_DOCS); + doTest(getRandom(), pw, false, MAX_DOCS); pw.close(); sw.close(); String multiFileOutput = sw.getBuffer().toString(); @@ -58,7 +58,7 @@ public class TestSearchForDuplicates extends LuceneTestCase { sw = new StringWriter(); pw = new PrintWriter(sw, true); - doTest(random, pw, true, MAX_DOCS); + doTest(getRandom(), pw, true, MAX_DOCS); pw.close(); sw.close(); String singleFileOutput = sw.getBuffer().toString(); diff --git lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java index 9506034..367ac10 100644 --- lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java +++ lucene/core/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java @@ -36,7 +36,7 @@ public class TestCachingTokenFilter extends BaseTokenStreamTestCase { public void testCaching() throws IOException { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir); Document doc = new Document(); TokenStream stream = new TokenStream() { private int index = 0; diff --git 
lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java index 71d6e3a..3872cfe 100644 --- lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java +++ lucene/core/src/test/org/apache/lucene/analysis/TestMockAnalyzer.java @@ -33,7 +33,7 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase { /** Test a configuration that behaves a lot like WhitespaceAnalyzer */ public void testWhitespace() throws Exception { - Analyzer a = new MockAnalyzer(random); + Analyzer a = new MockAnalyzer(getRandom()); assertAnalyzesTo(a, "A bc defg hiJklmn opqrstuv wxy z ", new String[] { "a", "bc", "defg", "hijklmn", "opqrstuv", "wxy", "z" }); assertAnalyzesToReuse(a, "aba cadaba shazam", @@ -44,7 +44,7 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase { /** Test a configuration that behaves a lot like SimpleAnalyzer */ public void testSimple() throws Exception { - Analyzer a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true); + Analyzer a = new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true); assertAnalyzesTo(a, "a-bc123 defg+hijklmn567opqrstuv78wxy_z ", new String[] { "a", "bc", "defg", "hijklmn", "opqrstuv", "wxy", "z" }); assertAnalyzesToReuse(a, "aba4cadaba-Shazam", @@ -55,7 +55,7 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase { /** Test a configuration that behaves a lot like KeywordAnalyzer */ public void testKeyword() throws Exception { - Analyzer a = new MockAnalyzer(random, MockTokenizer.KEYWORD, false); + Analyzer a = new MockAnalyzer(getRandom(), MockTokenizer.KEYWORD, false); assertAnalyzesTo(a, "a-bc123 defg+hijklmn567opqrstuv78wxy_z ", new String[] { "a-bc123 defg+hijklmn567opqrstuv78wxy_z " }); assertAnalyzesToReuse(a, "aba4cadaba-Shazam", @@ -66,13 +66,13 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase { /** Test a configuration that behaves a lot like StopAnalyzer */ public void testStop() throws Exception { - Analyzer a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true); + Analyzer a = new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true); assertAnalyzesTo(a, "the quick brown a fox", new String[] { "quick", "brown", "fox" }, new int[] { 2, 1, 2 }); // disable positions - a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, false); + a = new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, false); assertAnalyzesTo(a, "the quick brown a fox", new String[] { "quick", "brown", "fox" }, new int[] { 1, 1, 1 }); @@ -85,7 +85,7 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase { BasicOperations.complement( Automaton.union( Arrays.asList(BasicAutomata.makeString("foo"), BasicAutomata.makeString("bar"))))); - Analyzer a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, keepWords, true); + Analyzer a = new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true, keepWords, true); assertAnalyzesTo(a, "quick foo brown bar bar fox foo", new String[] { "foo", "bar", "bar", "foo" }, new int[] { 2, 2, 1, 2 }); @@ -94,7 +94,7 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase { /** Test a configuration that behaves a lot like LengthFilter */ public void testLength() throws Exception { CharacterRunAutomaton length5 = new CharacterRunAutomaton(new RegExp(".{5,}").toAutomaton()); - Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, true, length5, 
true); + Analyzer a = new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, true, length5, true); assertAnalyzesTo(a, "ok toolong fine notfine", new String[] { "ok", "fine" }, new int[] { 1, 2 }); @@ -103,7 +103,7 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase { public void testLUCENE_3042() throws Exception { String testString = "t"; - Analyzer analyzer = new MockAnalyzer(random); + Analyzer analyzer = new MockAnalyzer(getRandom()); TokenStream stream = analyzer.tokenStream("dummy", new StringReader(testString)); stream.reset(); while (stream.incrementToken()) { @@ -117,16 +117,16 @@ public class TestMockAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new MockAnalyzer(random), atLeast(1000)); + checkRandomData(getRandom(), new MockAnalyzer(getRandom()), atLeast(1000)); } public void testForwardOffsets() throws Exception { int num = atLeast(10000); for (int i = 0; i < num; i++) { - String s = _TestUtil.randomHtmlishString(random, 20); + String s = _TestUtil.randomHtmlishString(getRandom(), 20); StringReader reader = new StringReader(s); MockCharFilter charfilter = new MockCharFilter(CharReader.get(reader), 2); - MockAnalyzer analyzer = new MockAnalyzer(random); + MockAnalyzer analyzer = new MockAnalyzer(getRandom()); TokenStream ts = analyzer.tokenStream("bogus", charfilter); ts.reset(); while (ts.incrementToken()) { diff --git lucene/core/src/test/org/apache/lucene/codecs/appending/TestAppendingCodec.java lucene/core/src/test/org/apache/lucene/codecs/appending/TestAppendingCodec.java index df9bf80..c29203b 100644 --- lucene/core/src/test/org/apache/lucene/codecs/appending/TestAppendingCodec.java +++ lucene/core/src/test/org/apache/lucene/codecs/appending/TestAppendingCodec.java @@ -107,8 +107,8 @@ public class TestAppendingCodec extends LuceneTestCase { private static final String text = "the quick brown fox jumped over the lazy dog"; public void testCodec() throws Exception { - Directory dir = new AppendingRAMDirectory(random, new RAMDirectory()); - IndexWriterConfig cfg = new IndexWriterConfig(Version.LUCENE_40, new MockAnalyzer(random)); + Directory dir = new AppendingRAMDirectory(getRandom(), new RAMDirectory()); + IndexWriterConfig cfg = new IndexWriterConfig(Version.LUCENE_40, new MockAnalyzer(getRandom())); cfg.setCodec(new AppendingCodec()); ((TieredMergePolicy)cfg.getMergePolicy()).setUseCompoundFile(false); @@ -149,8 +149,8 @@ public class TestAppendingCodec extends LuceneTestCase { } public void testCompoundFile() throws Exception { - Directory dir = new AppendingRAMDirectory(random, new RAMDirectory()); - IndexWriterConfig cfg = new IndexWriterConfig(Version.LUCENE_40, new MockAnalyzer(random)); + Directory dir = new AppendingRAMDirectory(getRandom(), new RAMDirectory()); + IndexWriterConfig cfg = new IndexWriterConfig(Version.LUCENE_40, new MockAnalyzer(getRandom())); TieredMergePolicy mp = new TieredMergePolicy(); mp.setUseCompoundFile(true); mp.setNoCFSRatio(1.0); diff --git lucene/core/src/test/org/apache/lucene/codecs/intblock/TestIntBlockCodec.java lucene/core/src/test/org/apache/lucene/codecs/intblock/TestIntBlockCodec.java index b85f7fd..8b65067 100644 --- lucene/core/src/test/org/apache/lucene/codecs/intblock/TestIntBlockCodec.java +++ lucene/core/src/test/org/apache/lucene/codecs/intblock/TestIntBlockCodec.java @@ -29,13 +29,13 @@ public class TestIntBlockCodec extends LuceneTestCase { IntStreamFactory f = new 
MockFixedIntBlockPostingsFormat(128).getIntFactory(); - IntIndexOutput out = f.createOutput(dir, "test", newIOContext(random)); + IntIndexOutput out = f.createOutput(dir, "test", newIOContext(getRandom())); for(int i=0;i<11777;i++) { out.write(i); } out.close(); - IntIndexInput in = f.openInput(dir, "test", newIOContext(random)); + IntIndexInput in = f.openInput(dir, "test", newIOContext(getRandom())); IntIndexInput.Reader r = in.reader(); for(int i=0;i<11777;i++) { @@ -50,12 +50,12 @@ public class TestIntBlockCodec extends LuceneTestCase { Directory dir = newDirectory(); IntStreamFactory f = new MockFixedIntBlockPostingsFormat(128).getIntFactory(); - IntIndexOutput out = f.createOutput(dir, "test", newIOContext(random)); + IntIndexOutput out = f.createOutput(dir, "test", newIOContext(getRandom())); // write no ints out.close(); - IntIndexInput in = f.openInput(dir, "test", newIOContext(random)); + IntIndexInput in = f.openInput(dir, "test", newIOContext(getRandom())); in.reader(); // read no ints in.close(); diff --git lucene/core/src/test/org/apache/lucene/codecs/lucene3x/TestSurrogates.java lucene/core/src/test/org/apache/lucene/codecs/lucene3x/TestSurrogates.java index 4350627..f3ff1f6 100644 --- lucene/core/src/test/org/apache/lucene/codecs/lucene3x/TestSurrogates.java +++ lucene/core/src/test/org/apache/lucene/codecs/lucene3x/TestSurrogates.java @@ -287,12 +287,12 @@ public class TestSurrogates extends LuceneTestCase { @Test public void testSurrogatesOrder() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, + RandomIndexWriter w = new RandomIndexWriter(getRandom(), dir, newIndexWriterConfig( TEST_VERSION_CURRENT, - new MockAnalyzer(random)).setCodec(new PreFlexRWCodec())); + new MockAnalyzer(getRandom())).setCodec(new PreFlexRWCodec())); - final int numField = _TestUtil.nextInt(random, 2, 5); + final int numField = _TestUtil.nextInt(getRandom(), 2, 5); int uniqueTermCount = 0; @@ -307,7 +307,7 @@ public class TestSurrogates extends LuceneTestCase { final Set uniqueTerms = new HashSet(); for(int i=0;i=count1; i--) { - BitVector bv2 = new BitVector(d, "TESTBV", newIOContext(random)); + BitVector bv2 = new BitVector(d, "TESTBV", newIOContext(getRandom())); assertTrue(doCompare(bv,bv2)); bv = bv2; bv.set(i); assertEquals(i,size-bv.count()); - bv.write(d, "TESTBV", newIOContext(random)); + bv.write(d, "TESTBV", newIOContext(getRandom())); } } @@ -224,11 +224,11 @@ public class TestBitVector extends LuceneTestCase final int numBits = 10240; BitVector bv = new BitVector(numBits); bv.invertAll(); - int numToClear = random.nextInt(5); + int numToClear = getRandom().nextInt(5); for(int i=0;i enums = new IdentityHashMap(); MatchNoBits bits = new Bits.MatchNoBits(r.maxDoc()); while ((iterator.next()) != null) { - DocsEnum docs = iterator.docs(random.nextBoolean() ? bits : new Bits.MatchNoBits(r.maxDoc()), null, random.nextBoolean()); + DocsEnum docs = iterator.docs(getRandom().nextBoolean() ? 
bits : new Bits.MatchNoBits(r.maxDoc()), null, getRandom().nextBoolean()); enums.put(docs, true); } @@ -73,10 +73,10 @@ public class TestReuseDocsEnum extends LuceneTestCase { public void testReuseDocsEnumSameBitsOrNull() throws IOException { Directory dir = newDirectory(); Codec cp = _TestUtil.alwaysPostingsFormat(new Lucene40PostingsFormat()); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setCodec(cp)); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setCodec(cp)); int numdocs = atLeast(20); - createRandomIndex(numdocs, writer, random); + createRandomIndex(numdocs, writer, getRandom()); writer.commit(); DirectoryReader open = DirectoryReader.open(dir); @@ -88,7 +88,7 @@ public class TestReuseDocsEnum extends LuceneTestCase { MatchNoBits bits = new Bits.MatchNoBits(open.maxDoc()); DocsEnum docs = null; while ((iterator.next()) != null) { - docs = iterator.docs(bits, docs, random.nextBoolean()); + docs = iterator.docs(bits, docs, getRandom().nextBoolean()); enums.put(docs, true); } @@ -97,7 +97,7 @@ public class TestReuseDocsEnum extends LuceneTestCase { iterator = terms.iterator(null); docs = null; while ((iterator.next()) != null) { - docs = iterator.docs(new Bits.MatchNoBits(open.maxDoc()), docs, random.nextBoolean()); + docs = iterator.docs(new Bits.MatchNoBits(open.maxDoc()), docs, getRandom().nextBoolean()); enums.put(docs, true); } assertEquals(terms.getUniqueTermCount(), enums.size()); @@ -106,7 +106,7 @@ public class TestReuseDocsEnum extends LuceneTestCase { iterator = terms.iterator(null); docs = null; while ((iterator.next()) != null) { - docs = iterator.docs(null, docs, random.nextBoolean()); + docs = iterator.docs(null, docs, getRandom().nextBoolean()); enums.put(docs, true); } assertEquals(1, enums.size()); @@ -118,10 +118,10 @@ public class TestReuseDocsEnum extends LuceneTestCase { public void testReuseDocsEnumDifferentReader() throws IOException { Directory dir = newDirectory(); Codec cp = _TestUtil.alwaysPostingsFormat(new Lucene40PostingsFormat()); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setCodec(cp)); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setCodec(cp)); int numdocs = atLeast(20); - createRandomIndex(numdocs, writer, random); + createRandomIndex(numdocs, writer, getRandom()); writer.commit(); DirectoryReader firstReader = DirectoryReader.open(dir); @@ -138,7 +138,7 @@ public class TestReuseDocsEnum extends LuceneTestCase { DocsEnum docs = null; BytesRef term = null; while ((term = iterator.next()) != null) { - docs = iterator.docs(null, randomDocsEnum("body", term, sequentialSubReaders2, bits), random.nextBoolean()); + docs = iterator.docs(null, randomDocsEnum("body", term, sequentialSubReaders2, bits), getRandom().nextBoolean()); enums.put(docs, true); } assertEquals(terms.getUniqueTermCount(), enums.size()); @@ -147,7 +147,7 @@ public class TestReuseDocsEnum extends LuceneTestCase { enums.clear(); docs = null; while ((term = iterator.next()) != null) { - docs = iterator.docs(bits, randomDocsEnum("body", term, sequentialSubReaders2, bits), random.nextBoolean()); + docs = iterator.docs(bits, randomDocsEnum("body", term, sequentialSubReaders2, bits), getRandom().nextBoolean()); 
       enums.put(docs, true);
     }
     assertEquals(terms.getUniqueTermCount(), enums.size());
@@ -156,11 +156,11 @@ public class TestReuseDocsEnum extends LuceneTestCase {
   }
   public DocsEnum randomDocsEnum(String field, BytesRef term, IndexReader[] readers, Bits bits) throws IOException {
-    if (random.nextInt(10) == 0) {
+    if (getRandom().nextInt(10) == 0) {
       return null;
     }
-    AtomicReader indexReader = (AtomicReader) readers[random.nextInt(readers.length)];
-    return indexReader.termDocsEnum(bits, field, term, random.nextBoolean());
+    AtomicReader indexReader = (AtomicReader) readers[getRandom().nextInt(readers.length)];
+    return indexReader.termDocsEnum(bits, field, term, getRandom().nextBoolean());
   }
   /**
diff --git lucene/core/src/test/org/apache/lucene/codecs/lucene40/values/TestDocValues.java lucene/core/src/test/org/apache/lucene/codecs/lucene40/values/TestDocValues.java
index 5457483..bd5af9b 100644
--- lucene/core/src/test/org/apache/lucene/codecs/lucene40/values/TestDocValues.java
+++ lucene/core/src/test/org/apache/lucene/codecs/lucene40/values/TestDocValues.java
@@ -69,18 +69,18 @@ public class TestDocValues extends LuceneTestCase {
     Directory dir = newDirectory();
     final Counter trackBytes = Counter.newCounter();
-    DocValuesConsumer w = Bytes.getWriter(dir, "test", mode, fixedSize, COMP, trackBytes, newIOContext(random),
-        random.nextBoolean());
+    DocValuesConsumer w = Bytes.getWriter(dir, "test", mode, fixedSize, COMP, trackBytes, newIOContext(getRandom()),
+        getRandom().nextBoolean());
     int maxDoc = 220;
     final String[] values = new String[maxDoc];
     final int fixedLength = 1 + atLeast(50);
     for (int i = 0; i < 100; i++) {
       final String s;
-      if (i > 0 && random.nextInt(5) <= 2) {
+      if (i > 0 && getRandom().nextInt(5) <= 2) {
         // use prior value
-        s = values[2 * random.nextInt(i)];
+        s = values[2 * getRandom().nextInt(i)];
       } else {
-        s = _TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize? fixedLength : 1 + random.nextInt(39));
+        s = _TestUtil.randomFixedByteLengthUnicodeString(getRandom(), fixedSize? fixedLength : 1 + getRandom().nextInt(39));
       }
       values[2 * i] = s;
@@ -91,7 +91,7 @@ public class TestDocValues extends LuceneTestCase {
     w.finish(maxDoc);
     assertEquals(0, trackBytes.get());
-    DocValues r = Bytes.getValues(dir, "test", mode, fixedSize, maxDoc, COMP, newIOContext(random));
+    DocValues r = Bytes.getValues(dir, "test", mode, fixedSize, maxDoc, COMP, newIOContext(getRandom()));
     // Verify we can load source twice:
     for (int iter = 0; iter < 2; iter++) {
@@ -124,7 +124,7 @@ public class TestDocValues extends LuceneTestCase {
       if (mode == Bytes.Mode.SORTED) {
         final int valueCount = ss.getValueCount();
         for (int i = 0; i < 1000; i++) {
-          BytesRef bytesValue = new BytesRef(_TestUtil.randomFixedByteLengthUnicodeString(random, fixedSize? fixedLength : 1 + random.nextInt(39)));
+          BytesRef bytesValue = new BytesRef(_TestUtil.randomFixedByteLengthUnicodeString(getRandom(), fixedSize? fixedLength : 1 + getRandom().nextInt(39)));
           int ord = ss.getByValue(bytesValue, new BytesRef());
           if (ord >= 0) {
             assertTrue(bytesValue
@@ -178,14 +178,14 @@ public class TestDocValues extends LuceneTestCase {
     for (int i = 0; i < minMax.length; i++) {
       Directory dir = newDirectory();
       final Counter trackBytes = Counter.newCounter();
-      DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, Type.VAR_INTS, newIOContext(random));
+      DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, Type.VAR_INTS, newIOContext(getRandom()));
       valueHolder.numberValue = minMax[i][0];
       w.add(0, valueHolder);
       valueHolder.numberValue = minMax[i][1];
       w.add(1, valueHolder);
       w.finish(2);
       assertEquals(0, trackBytes.get());
-      DocValues r = Ints.getValues(dir, "test", 2, Type.VAR_INTS, newIOContext(random));
+      DocValues r = Ints.getValues(dir, "test", 2, Type.VAR_INTS, newIOContext(getRandom()));
       Source source = getSource(r);
       assertEquals(i + " with min: " + minMax[i][0] + " max: " + minMax[i][1],
           expectedTypes[i], source.type());
@@ -214,13 +214,13 @@ public class TestDocValues extends LuceneTestCase {
     byte[] sourceArray = new byte[] {1,2,3};
     Directory dir = newDirectory();
     final Counter trackBytes = Counter.newCounter();
-    DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, Type.FIXED_INTS_8, newIOContext(random));
+    DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, Type.FIXED_INTS_8, newIOContext(getRandom()));
     for (int i = 0; i < sourceArray.length; i++) {
       valueHolder.numberValue = (long) sourceArray[i];
       w.add(i, valueHolder);
     }
     w.finish(sourceArray.length);
-    DocValues r = Ints.getValues(dir, "test", sourceArray.length, Type.FIXED_INTS_8, newIOContext(random));
+    DocValues r = Ints.getValues(dir, "test", sourceArray.length, Type.FIXED_INTS_8, newIOContext(getRandom()));
     Source source = r.getSource();
     assertTrue(source.hasArray());
     byte[] loaded = ((byte[])source.getArray());
@@ -237,13 +237,13 @@ public class TestDocValues extends LuceneTestCase {
     short[] sourceArray = new short[] {1,2,3};
     Directory dir = newDirectory();
     final Counter trackBytes = Counter.newCounter();
-    DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, Type.FIXED_INTS_16, newIOContext(random));
+    DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, Type.FIXED_INTS_16, newIOContext(getRandom()));
     for (int i = 0; i < sourceArray.length; i++) {
       valueHolder.numberValue = (long) sourceArray[i];
       w.add(i, valueHolder);
     }
     w.finish(sourceArray.length);
-    DocValues r = Ints.getValues(dir, "test", sourceArray.length, Type.FIXED_INTS_16, newIOContext(random));
+    DocValues r = Ints.getValues(dir, "test", sourceArray.length, Type.FIXED_INTS_16, newIOContext(getRandom()));
     Source source = r.getSource();
     assertTrue(source.hasArray());
     short[] loaded = ((short[])source.getArray());
@@ -260,13 +260,13 @@ public class TestDocValues extends LuceneTestCase {
     long[] sourceArray = new long[] {1,2,3};
     Directory dir = newDirectory();
     final Counter trackBytes = Counter.newCounter();
-    DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, Type.FIXED_INTS_64, newIOContext(random));
+    DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, Type.FIXED_INTS_64, newIOContext(getRandom()));
     for (int i = 0; i < sourceArray.length; i++) {
       valueHolder.numberValue = sourceArray[i];
       w.add(i, valueHolder);
     }
     w.finish(sourceArray.length);
-    DocValues r = Ints.getValues(dir, "test", sourceArray.length, Type.FIXED_INTS_64, newIOContext(random));
+    DocValues r = Ints.getValues(dir, "test", sourceArray.length, Type.FIXED_INTS_64, newIOContext(getRandom()));
     Source source = r.getSource();
     assertTrue(source.hasArray());
     long[] loaded = ((long[])source.getArray());
@@ -283,13 +283,13 @@ public class TestDocValues extends LuceneTestCase {
     int[] sourceArray = new int[] {1,2,3};
     Directory dir = newDirectory();
     final Counter trackBytes = Counter.newCounter();
-    DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, Type.FIXED_INTS_32, newIOContext(random));
+    DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, Type.FIXED_INTS_32, newIOContext(getRandom()));
     for (int i = 0; i < sourceArray.length; i++) {
       valueHolder.numberValue = (long) sourceArray[i];
       w.add(i, valueHolder);
     }
     w.finish(sourceArray.length);
-    DocValues r = Ints.getValues(dir, "test", sourceArray.length, Type.FIXED_INTS_32, newIOContext(random));
+    DocValues r = Ints.getValues(dir, "test", sourceArray.length, Type.FIXED_INTS_32, newIOContext(getRandom()));
     Source source = r.getSource();
     assertTrue(source.hasArray());
     int[] loaded = ((int[])source.getArray());
@@ -306,13 +306,13 @@ public class TestDocValues extends LuceneTestCase {
     float[] sourceArray = new float[] {1,2,3};
     Directory dir = newDirectory();
     final Counter trackBytes = Counter.newCounter();
-    DocValuesConsumer w = Floats.getWriter(dir, "test", trackBytes, newIOContext(random), Type.FLOAT_32);
+    DocValuesConsumer w = Floats.getWriter(dir, "test", trackBytes, newIOContext(getRandom()), Type.FLOAT_32);
     for (int i = 0; i < sourceArray.length; i++) {
       valueHolder.numberValue = sourceArray[i];
       w.add(i, valueHolder);
     }
     w.finish(sourceArray.length);
-    DocValues r = Floats.getValues(dir, "test", 3, newIOContext(random), Type.FLOAT_32);
+    DocValues r = Floats.getValues(dir, "test", 3, newIOContext(getRandom()), Type.FLOAT_32);
     Source source = r.getSource();
     assertTrue(source.hasArray());
     float[] loaded = ((float[])source.getArray());
@@ -329,13 +329,13 @@ public class TestDocValues extends LuceneTestCase {
     double[] sourceArray = new double[] {1,2,3};
     Directory dir = newDirectory();
     final Counter trackBytes = Counter.newCounter();
-    DocValuesConsumer w = Floats.getWriter(dir, "test", trackBytes, newIOContext(random), Type.FLOAT_64);
+    DocValuesConsumer w = Floats.getWriter(dir, "test", trackBytes, newIOContext(getRandom()), Type.FLOAT_64);
     for (int i = 0; i < sourceArray.length; i++) {
       valueHolder.numberValue = sourceArray[i];
       w.add(i, valueHolder);
     }
     w.finish(sourceArray.length);
-    DocValues r = Floats.getValues(dir, "test", 3, newIOContext(random), Type.FLOAT_64);
+    DocValues r = Floats.getValues(dir, "test", 3, newIOContext(getRandom()), Type.FLOAT_64);
     Source source = r.getSource();
     assertTrue(source.hasArray());
     double[] loaded = ((double[])source.getArray());
@@ -350,22 +350,22 @@ public class TestDocValues extends LuceneTestCase {
   private void testInts(Type type, int maxBit) throws IOException {
     DocValueHolder valueHolder = new DocValueHolder();
     long maxV = 1;
-    final int NUM_VALUES = 333 + random.nextInt(333);
+    final int NUM_VALUES = 333 + getRandom().nextInt(333);
     final long[] values = new long[NUM_VALUES];
     for (int rx = 1; rx < maxBit; rx++, maxV *= 2) {
       Directory dir = newDirectory();
       final Counter trackBytes = Counter.newCounter();
-      DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, type, newIOContext(random));
+      DocValuesConsumer w = Ints.getWriter(dir, "test", trackBytes, type, newIOContext(getRandom()));
       for (int i = 0; i < NUM_VALUES; i++) {
-        final long v = random.nextLong() % (1 + maxV);
+        final long v = getRandom().nextLong() % (1 + maxV);
         valueHolder.numberValue = values[i] = v;
         w.add(i, valueHolder);
       }
-      final int additionalDocs = 1 + random.nextInt(9);
+      final int additionalDocs = 1 + getRandom().nextInt(9);
       w.finish(NUM_VALUES + additionalDocs);
       assertEquals(0, trackBytes.get());
-      DocValues r = Ints.getValues(dir, "test", NUM_VALUES + additionalDocs, type, newIOContext(random));
+      DocValues r = Ints.getValues(dir, "test", NUM_VALUES + additionalDocs, type, newIOContext(getRandom()));
       for (int iter = 0; iter < 2; iter++) {
         Source s = getSource(r);
         assertEquals(type, s.type());
@@ -388,20 +388,20 @@ public class TestDocValues extends LuceneTestCase {
     DocValueHolder valueHolder = new DocValueHolder();
     Directory dir = newDirectory();
     final Counter trackBytes = Counter.newCounter();
-    DocValuesConsumer w = Floats.getWriter(dir, "test", trackBytes, newIOContext(random), type);
-    final int NUM_VALUES = 777 + random.nextInt(777);
+    DocValuesConsumer w = Floats.getWriter(dir, "test", trackBytes, newIOContext(getRandom()), type);
+    final int NUM_VALUES = 777 + getRandom().nextInt(777);
     final double[] values = new double[NUM_VALUES];
     for (int i = 0; i < NUM_VALUES; i++) {
-      final double v = type == Type.FLOAT_32 ? random.nextFloat() : random
+      final double v = type == Type.FLOAT_32 ? getRandom().nextFloat() : getRandom()
           .nextDouble();
       valueHolder.numberValue = values[i] = v;
       w.add(i, valueHolder);
     }
-    final int additionalValues = 1 + random.nextInt(10);
+    final int additionalValues = 1 + getRandom().nextInt(10);
     w.finish(NUM_VALUES + additionalValues);
     assertEquals(0, trackBytes.get());
-    DocValues r = Floats.getValues(dir, "test", NUM_VALUES + additionalValues, newIOContext(random), type);
+    DocValues r = Floats.getValues(dir, "test", NUM_VALUES + additionalValues, newIOContext(getRandom()), type);
     for (int iter = 0; iter < 2; iter++) {
       Source s = getSource(r);
       for (int i = 0; i < NUM_VALUES; i++) {
@@ -419,7 +419,7 @@ public class TestDocValues extends LuceneTestCase {
   private Source getSource(DocValues values) throws IOException {
     // getSource uses cache internally
-    switch(random.nextInt(5)) {
+    switch(getRandom().nextInt(5)) {
     case 3:
       return values.load();
     case 2:
diff --git lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat.java lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat.java
index 1b8e1d1..e2d68d4 100644
--- lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat.java
+++ lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat.java
@@ -96,7 +96,7 @@ public class TestPerFieldPostingsFormat extends LuceneTestCase {
   public void testMergeUnusedPerFieldCodec() throws IOException {
     Directory dir = newDirectory();
     IndexWriterConfig iwconf = newIndexWriterConfig(TEST_VERSION_CURRENT,
-        new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE).setCodec(new MockCodec());
+        new MockAnalyzer(getRandom())).setOpenMode(OpenMode.CREATE).setCodec(new MockCodec());
     IndexWriter writer = newWriter(dir, iwconf);
     addDocs(writer, 10);
     writer.commit();
@@ -123,7 +123,7 @@ public class TestPerFieldPostingsFormat extends LuceneTestCase {
       System.out.println("TEST: make new index");
     }
     IndexWriterConfig iwconf = newIndexWriterConfig(TEST_VERSION_CURRENT,
-        new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE).setCodec(new MockCodec());
+        new MockAnalyzer(getRandom())).setOpenMode(OpenMode.CREATE).setCodec(new MockCodec());
     iwconf.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
     //((LogMergePolicy) iwconf.getMergePolicy()).setMergeFactor(10);
     IndexWriter writer = newWriter(dir, iwconf);
@@ -142,7 +142,7 @@ public class TestPerFieldPostingsFormat extends LuceneTestCase {
     assertQuery(new Term("content", "aaa"), dir, 10);
     Lucene40Codec codec = (Lucene40Codec)iwconf.getCodec();
-    iwconf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
+    iwconf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))
         .setOpenMode(OpenMode.APPEND).setCodec(codec);
     //((LogMergePolicy) iwconf.getMergePolicy()).setUseCompoundFile(false);
     //((LogMergePolicy) iwconf.getMergePolicy()).setMergeFactor(10);
@@ -234,28 +234,28 @@ public class TestPerFieldPostingsFormat extends LuceneTestCase {
    */
  @Test
  public void testStressPerFieldCodec() throws IOException {
-    Directory dir = newDirectory(random);
+    Directory dir = newDirectory(getRandom());
     final int docsPerRound = 97;
     int numRounds = atLeast(1);
     for (int i = 0; i < numRounds; i++) {
-      int num = _TestUtil.nextInt(random, 30, 60);
-      IndexWriterConfig config = newIndexWriterConfig(random,
-          TEST_VERSION_CURRENT, new MockAnalyzer(random));
+      int num = _TestUtil.nextInt(getRandom(), 30, 60);
+      IndexWriterConfig config = newIndexWriterConfig(getRandom(),
+          TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()));
       config.setOpenMode(OpenMode.CREATE_OR_APPEND);
       IndexWriter writer = newWriter(dir, config);
       for (int j = 0; j < docsPerRound; j++) {
         final Document doc = new Document();
         for (int k = 0; k < num; k++) {
           FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
-          customType.setTokenized(random.nextBoolean());
-          customType.setOmitNorms(random.nextBoolean());
+          customType.setTokenized(getRandom().nextBoolean());
+          customType.setOmitNorms(getRandom().nextBoolean());
           Field field = newField("" + k, _TestUtil
-              .randomRealisticUnicodeString(random, 128), customType);
+              .randomRealisticUnicodeString(getRandom(), 128), customType);
           doc.add(field);
         }
         writer.addDocument(doc);
       }
-      if (random.nextBoolean()) {
+      if (getRandom().nextBoolean()) {
         writer.forceMerge(1);
       }
       writer.commit();
diff --git lucene/core/src/test/org/apache/lucene/codecs/pulsing/Test10KPulsings.java lucene/core/src/test/org/apache/lucene/codecs/pulsing/Test10KPulsings.java
index 9f94aed..ece7042 100644
--- lucene/core/src/test/org/apache/lucene/codecs/pulsing/Test10KPulsings.java
+++ lucene/core/src/test/org/apache/lucene/codecs/pulsing/Test10KPulsings.java
@@ -56,13 +56,13 @@ public class Test10KPulsings extends LuceneTestCase {
     File f = _TestUtil.getTempDir("10kpulsed");
     MockDirectoryWrapper dir = newFSDirectory(f);
     dir.setCheckIndexOnClose(false); // we do this ourselves explicitly
-    RandomIndexWriter iw = new RandomIndexWriter(random, dir,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setCodec(cp));
+    RandomIndexWriter iw = new RandomIndexWriter(getRandom(), dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setCodec(cp));
     Document document = new Document();
     FieldType ft = new FieldType(TextField.TYPE_STORED);
-    switch(_TestUtil.nextInt(random, 0, 2)) {
+    switch(_TestUtil.nextInt(getRandom(), 0, 2)) {
       case 0: ft.setIndexOptions(IndexOptions.DOCS_ONLY); break;
       case 1: ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS); break;
       default: ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS); break;
@@ -87,7 +87,7 @@ public class Test10KPulsings extends LuceneTestCase {
     for (int i = 0; i < 10050; i++) {
       String expected = df.format(i);
       assertEquals(expected, te.next().utf8ToString());
-      de = _TestUtil.docs(random, te, null, de, false);
+      de = _TestUtil.docs(getRandom(), te, null, de, false);
       assertTrue(de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
       assertEquals(DocIdSetIterator.NO_MORE_DOCS, de.nextDoc());
     }
@@ -101,19 +101,19 @@ public class Test10KPulsings extends LuceneTestCase {
    */
   public void test10kNotPulsed() throws Exception {
     // we always run this test with pulsing codec.
-    int freqCutoff = _TestUtil.nextInt(random, 1, 10);
+    int freqCutoff = _TestUtil.nextInt(getRandom(), 1, 10);
     Codec cp = _TestUtil.alwaysPostingsFormat(new Pulsing40PostingsFormat(freqCutoff));
     File f = _TestUtil.getTempDir("10knotpulsed");
     MockDirectoryWrapper dir = newFSDirectory(f);
     dir.setCheckIndexOnClose(false); // we do this ourselves explicitly
-    RandomIndexWriter iw = new RandomIndexWriter(random, dir,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setCodec(cp));
+    RandomIndexWriter iw = new RandomIndexWriter(getRandom(), dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setCodec(cp));
     Document document = new Document();
     FieldType ft = new FieldType(TextField.TYPE_STORED);
-    switch(_TestUtil.nextInt(random, 0, 2)) {
+    switch(_TestUtil.nextInt(getRandom(), 0, 2)) {
       case 0: ft.setIndexOptions(IndexOptions.DOCS_ONLY); break;
       case 1: ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS); break;
       default: ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS); break;
@@ -145,7 +145,7 @@ public class Test10KPulsings extends LuceneTestCase {
     for (int i = 0; i < 10050; i++) {
       String expected = df.format(i);
       assertEquals(expected, te.next().utf8ToString());
-      de = _TestUtil.docs(random, te, null, de, false);
+      de = _TestUtil.docs(getRandom(), te, null, de, false);
       assertTrue(de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
       assertEquals(DocIdSetIterator.NO_MORE_DOCS, de.nextDoc());
     }
diff --git lucene/core/src/test/org/apache/lucene/codecs/pulsing/TestPulsingReuse.java lucene/core/src/test/org/apache/lucene/codecs/pulsing/TestPulsingReuse.java
index f47667f..f30821a 100644
--- lucene/core/src/test/org/apache/lucene/codecs/pulsing/TestPulsingReuse.java
+++ lucene/core/src/test/org/apache/lucene/codecs/pulsing/TestPulsingReuse.java
@@ -48,8 +48,8 @@ public class TestPulsingReuse extends LuceneTestCase {
     // we always run this test with pulsing codec.
     Codec cp = _TestUtil.alwaysPostingsFormat(new Pulsing40PostingsFormat(1));
     Directory dir = newDirectory();
-    RandomIndexWriter iw = new RandomIndexWriter(random, dir,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setCodec(cp));
+    RandomIndexWriter iw = new RandomIndexWriter(getRandom(), dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setCodec(cp));
     Document doc = new Document();
     doc.add(new Field("foo", "a b b c c c d e f g g h i i j j k", TextField.TYPE_UNSTORED));
     iw.addDocument(doc);
@@ -87,8 +87,8 @@ public class TestPulsingReuse extends LuceneTestCase {
     Codec cp = _TestUtil.alwaysPostingsFormat(new NestedPulsingPostingsFormat());
     MockDirectoryWrapper dir = newDirectory();
     dir.setCheckIndexOnClose(false); // will do this ourselves, custom codec
-    RandomIndexWriter iw = new RandomIndexWriter(random, dir,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setCodec(cp));
+    RandomIndexWriter iw = new RandomIndexWriter(getRandom(), dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setCodec(cp));
     Document doc = new Document();
     doc.add(new Field("foo", "a b b c c c d e f g g g h i i j j k l l m m m", TextField.TYPE_UNSTORED));
     // note: the reuse is imperfect, here we would have 4 enums (lost reuse when we get an enum for 'm')
diff --git lucene/core/src/test/org/apache/lucene/document/TestBinaryDocument.java lucene/core/src/test/org/apache/lucene/document/TestBinaryDocument.java
index 7e8b29c..baa109f 100644
--- lucene/core/src/test/org/apache/lucene/document/TestBinaryDocument.java
+++ lucene/core/src/test/org/apache/lucene/document/TestBinaryDocument.java
@@ -51,7 +51,7 @@ public class TestBinaryDocument extends LuceneTestCase {
     /** add the doc to a ram index */
     Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random, dir);
+    RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir);
     writer.addDocument(doc);
     /** open a reader and fetch the document */
@@ -85,7 +85,7 @@ public class TestBinaryDocument extends LuceneTestCase {
     /** add the doc to a ram index */
     Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random, dir);
+    RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir);
     writer.addDocument(doc);
     /** open a reader and fetch the document */
diff --git lucene/core/src/test/org/apache/lucene/document/TestDocument.java lucene/core/src/test/org/apache/lucene/document/TestDocument.java
index 90a11df..fbe9374 100644
--- lucene/core/src/test/org/apache/lucene/document/TestDocument.java
+++ lucene/core/src/test/org/apache/lucene/document/TestDocument.java
@@ -172,7 +172,7 @@ public class TestDocument extends LuceneTestCase {
   */
  public void testGetValuesForIndexedDocument() throws Exception {
    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random, dir);
+    RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir);
    writer.addDocument(makeDocumentWithFields());
    IndexReader reader = writer.getReader();
@@ -257,7 +257,7 @@ public class TestDocument extends LuceneTestCase {
    doc.add(new Field("keyword", "test", StringField.TYPE_STORED));
    Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random, dir);
+    RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir);
    writer.addDocument(doc);
    field.setStringValue("id2");
    writer.addDocument(doc);
@@ -300,7 +300,7 @@ public class TestDocument extends LuceneTestCase {
  // LUCENE-3682
  public void testTransitionAPI() throws Exception {
    Directory dir = newDirectory();
-    RandomIndexWriter w = new RandomIndexWriter(random, dir);
+    RandomIndexWriter w = new RandomIndexWriter(getRandom(), dir);
    Document doc = new Document();
    doc.add(new Field("stored", "abc", Field.Store.YES, Field.Index.NO));
@@ -364,7 +364,7 @@ public class TestDocument extends LuceneTestCase {
  public void testBoost() throws Exception {
    Directory dir = newDirectory();
-    IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+    IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()));
    iwc.setMergePolicy(newLogMergePolicy());
    IndexWriter iw = new IndexWriter(dir, iwc);
    Document doc = new Document();
diff --git lucene/core/src/test/org/apache/lucene/index/Test2BPostings.java lucene/core/src/test/org/apache/lucene/index/Test2BPostings.java
index 7878785..445ecc9 100644
--- lucene/core/src/test/org/apache/lucene/index/Test2BPostings.java
+++ lucene/core/src/test/org/apache/lucene/index/Test2BPostings.java
@@ -46,7 +46,7 @@ public class Test2BPostings extends LuceneTestCase {
    dir.setCheckIndexOnClose(false); // don't double-checkindex
    IndexWriter w = new IndexWriter(dir,
-        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))
        .setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
        .setRAMBufferSizeMB(256.0)
        .setMergeScheduler(new ConcurrentMergeScheduler())
diff --git lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java
index 1af4a01..c388cc3 100644
--- lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java
+++ lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java
@@ -149,7 +149,7 @@ public class Test2BTerms extends LuceneTestCase {
    System.out.println("Starting Test2B");
    final long TERM_COUNT = ((long) Integer.MAX_VALUE) + 100000000;
-    final int TERMS_PER_DOC = _TestUtil.nextInt(random, 100000, 1000000);
+    final int TERMS_PER_DOC = _TestUtil.nextInt(getRandom(), 100000, 1000000);
    List savedTerms = null;
@@ -161,7 +161,7 @@ public class Test2BTerms extends LuceneTestCase {
    if (true) {
      IndexWriter w = new IndexWriter(dir,
-          new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
+          new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))
         .setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
         .setRAMBufferSizeMB(256.0)
         .setMergeScheduler(new ConcurrentMergeScheduler())
@@ -175,7 +175,7 @@ public class Test2BTerms extends LuceneTestCase {
      }
      Document doc = new Document();
-      final MyTokenStream ts = new MyTokenStream(random, TERMS_PER_DOC);
+      final MyTokenStream ts = new MyTokenStream(getRandom(), TERMS_PER_DOC);
      FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
      customType.setIndexOptions(IndexOptions.DOCS_ONLY);
@@ -227,13 +227,13 @@ public class Test2BTerms extends LuceneTestCase {
    System.out.println("TEST: findTerms");
    final TermsEnum termsEnum = MultiFields.getTerms(r, "field").iterator(null);
    final List savedTerms = new ArrayList();
-    int nextSave = _TestUtil.nextInt(random, 500000, 1000000);
+    int nextSave = _TestUtil.nextInt(getRandom(), 500000, 1000000);
    BytesRef term;
    while((term = termsEnum.next()) != null) {
      if (--nextSave == 0) {
        savedTerms.add(BytesRef.deepCopyOf(term));
        System.out.println("TEST: add " + term);
-        nextSave = _TestUtil.nextInt(random, 500000, 1000000);
+        nextSave = _TestUtil.nextInt(getRandom(), 500000, 1000000);
      }
    }
    return savedTerms;
@@ -246,7 +246,7 @@ public class Test2BTerms extends LuceneTestCase {
    TermsEnum termsEnum = MultiFields.getTerms(r, "field").iterator(null);
    boolean failed = false;
    for(int iter=0;iter<10*terms.size();iter++) {
-      final BytesRef term = terms.get(random.nextInt(terms.size()));
+      final BytesRef term = terms.get(getRandom().nextInt(terms.size()));
      System.out.println("TEST: search " + term);
      final long t0 = System.currentTimeMillis();
      final int count = s.search(new TermQuery(new Term("field", term)), 1).totalHits;
diff --git lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
index c6bb4a0..1f8d1fb 100755
--- lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
+++ lucene/core/src/test/org/apache/lucene/index/TestAddIndexes.java
@@ -70,7 +70,7 @@ public class TestAddIndexes extends LuceneTestCase {
    IndexWriter writer = null;
    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT,
-        new MockAnalyzer(random))
+        new MockAnalyzer(getRandom()))
        .setOpenMode(OpenMode.CREATE));
    // add 100 documents
    addDocs(writer, 100);
@@ -80,7 +80,7 @@ public class TestAddIndexes extends LuceneTestCase {
    writer = newWriter(
        aux,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).
            setOpenMode(OpenMode.CREATE).
            setMergePolicy(newLogMergePolicy(false))
    );
@@ -89,14 +89,14 @@ public class TestAddIndexes extends LuceneTestCase {
    assertEquals(40, writer.maxDoc());
    writer.close();
-    writer = newWriter(aux2, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
+    writer = newWriter(aux2, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setOpenMode(OpenMode.CREATE));
    // add 50 documents in compound files
    addDocs2(writer, 50);
    assertEquals(50, writer.maxDoc());
    writer.close();
    // test doc count before segments are merged
-    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setOpenMode(OpenMode.APPEND));
    assertEquals(100, writer.maxDoc());
    writer.addIndexes(aux, aux2);
    assertEquals(190, writer.maxDoc());
@@ -111,14 +111,14 @@ public class TestAddIndexes extends LuceneTestCase {
    // now add another set in.
    Directory aux3 = newDirectory();
-    writer = newWriter(aux3, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    writer = newWriter(aux3, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())));
    // add 40 documents
    addDocs(writer, 40);
    assertEquals(40, writer.maxDoc());
    writer.close();
    // test doc count before segments are merged
-    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setOpenMode(OpenMode.APPEND));
    assertEquals(190, writer.maxDoc());
    writer.addIndexes(aux3);
    assertEquals(230, writer.maxDoc());
@@ -132,7 +132,7 @@ public class TestAddIndexes extends LuceneTestCase {
    verifyTermDocs(dir, new Term("content", "bbb"), 50);
    // now fully merge it.
-    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setOpenMode(OpenMode.APPEND));
    writer.forceMerge(1);
    writer.close();
@@ -145,11 +145,11 @@ public class TestAddIndexes extends LuceneTestCase {
    // now add a single document
    Directory aux4 = newDirectory();
-    writer = newWriter(aux4, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    writer = newWriter(aux4, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())));
    addDocs2(writer, 1);
    writer.close();
-    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setOpenMode(OpenMode.APPEND));
    assertEquals(230, writer.maxDoc());
    writer.addIndexes(aux4);
    assertEquals(231, writer.maxDoc());
@@ -172,7 +172,7 @@ public class TestAddIndexes extends LuceneTestCase {
    Directory aux = newDirectory();
    setUpDirs(dir, aux);
-    IndexWriter writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+    IndexWriter writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setOpenMode(OpenMode.APPEND));
    writer.addIndexes(aux);
    // Adds 10 docs, then replaces them with another 10
@@ -208,7 +208,7 @@ public class TestAddIndexes extends LuceneTestCase {
    Directory aux = newDirectory();
    setUpDirs(dir, aux);
-    IndexWriter writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+    IndexWriter writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setOpenMode(OpenMode.APPEND));
    // Adds 10 docs, then replaces them with another 10
    // docs, so 10 pending deletes:
@@ -246,7 +246,7 @@ public class TestAddIndexes extends LuceneTestCase {
    Directory aux = newDirectory();
    setUpDirs(dir, aux);
-    IndexWriter writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+    IndexWriter writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setOpenMode(OpenMode.APPEND));
    // Adds 10 docs, then replaces them with another 10
    // docs, so 10 pending deletes:
@@ -286,7 +286,7 @@ public class TestAddIndexes extends LuceneTestCase {
    IndexWriter writer = null;
-    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())));
    // add 100 documents
    addDocs(writer, 100);
    assertEquals(100, writer.maxDoc());
@@ -294,7 +294,7 @@ public class TestAddIndexes extends LuceneTestCase {
    writer = newWriter(
        aux,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).
            setOpenMode(OpenMode.CREATE).
            setMaxBufferedDocs(1000).
            setMergePolicy(newLogMergePolicy(false))
@@ -304,7 +304,7 @@ public class TestAddIndexes extends LuceneTestCase {
    writer.close();
    writer = newWriter(
        aux,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).
            setOpenMode(OpenMode.CREATE).
            setMaxBufferedDocs(1000).
            setMergePolicy(newLogMergePolicy(false))
@@ -312,7 +312,7 @@ public class TestAddIndexes extends LuceneTestCase {
    addDocs(writer, 100);
    writer.close();
-    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setOpenMode(OpenMode.APPEND));
    try {
      // cannot add self
      writer.addIndexes(aux, dir);
@@ -342,7 +342,7 @@ public class TestAddIndexes extends LuceneTestCase {
    IndexWriter writer = newWriter(
        dir,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).
            setOpenMode(OpenMode.APPEND).
            setMaxBufferedDocs(10).
            setMergePolicy(newLogMergePolicy(4))
@@ -371,7 +371,7 @@ public class TestAddIndexes extends LuceneTestCase {
    IndexWriter writer = newWriter(
        dir,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).
            setOpenMode(OpenMode.APPEND).
            setMaxBufferedDocs(9).
            setMergePolicy(newLogMergePolicy(4))
@@ -400,13 +400,13 @@ public class TestAddIndexes extends LuceneTestCase {
    IndexWriter writer = newWriter(
        dir,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).
            setOpenMode(OpenMode.APPEND).
            setMaxBufferedDocs(10).
            setMergePolicy(newLogMergePolicy(4))
    );
-    writer.addIndexes(aux, new MockDirectoryWrapper(random, new RAMDirectory(aux, newIOContext(random))));
+    writer.addIndexes(aux, new MockDirectoryWrapper(getRandom(), new RAMDirectory(aux, newIOContext(getRandom()))));
    assertEquals(1060, writer.maxDoc());
    assertEquals(1000, writer.getDocCount(0));
    writer.close();
@@ -426,7 +426,7 @@ public class TestAddIndexes extends LuceneTestCase {
    setUpDirs(dir, aux, true);
-    IndexWriterConfig dontMergeConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
+    IndexWriterConfig dontMergeConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))
        .setMergePolicy(NoMergePolicy.COMPOUND_FILES);
    IndexWriter writer = new IndexWriter(aux, dontMergeConfig);
    for (int i = 0; i < 20; i++) {
@@ -439,7 +439,7 @@ public class TestAddIndexes extends LuceneTestCase {
    writer = newWriter(
        dir,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).
            setOpenMode(OpenMode.APPEND).
            setMaxBufferedDocs(4).
            setMergePolicy(newLogMergePolicy(4))
@@ -448,7 +448,7 @@ public class TestAddIndexes extends LuceneTestCase {
    if (VERBOSE) {
      System.out.println("\nTEST: now addIndexes");
    }
-    writer.addIndexes(aux, new MockDirectoryWrapper(random, new RAMDirectory(aux, newIOContext(random))));
+    writer.addIndexes(aux, new MockDirectoryWrapper(getRandom(), new RAMDirectory(aux, newIOContext(getRandom()))));
    assertEquals(1020, writer.maxDoc());
    assertEquals(1000, writer.getDocCount(0));
    writer.close();
@@ -468,7 +468,7 @@ public class TestAddIndexes extends LuceneTestCase {
    IndexWriter writer = newWriter(
        aux2,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).
            setOpenMode(OpenMode.CREATE).
            setMaxBufferedDocs(100).
            setMergePolicy(newLogMergePolicy(10))
@@ -478,7 +478,7 @@ public class TestAddIndexes extends LuceneTestCase {
    assertEquals(3, writer.getSegmentCount());
    writer.close();
-    IndexWriterConfig dontMergeConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
+    IndexWriterConfig dontMergeConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))
        .setMergePolicy(NoMergePolicy.COMPOUND_FILES);
    writer = new IndexWriter(aux, dontMergeConfig);
    for (int i = 0; i < 27; i++) {
@@ -489,7 +489,7 @@ public class TestAddIndexes extends LuceneTestCase {
    assertEquals(3, reader.numDocs());
    reader.close();
-    dontMergeConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
+    dontMergeConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))
        .setMergePolicy(NoMergePolicy.COMPOUND_FILES);
    writer = new IndexWriter(aux2, dontMergeConfig);
    for (int i = 0; i < 8; i++) {
@@ -502,7 +502,7 @@ public class TestAddIndexes extends LuceneTestCase {
    writer = newWriter(
        dir,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).
            setOpenMode(OpenMode.APPEND).
            setMaxBufferedDocs(6).
            setMergePolicy(newLogMergePolicy(4))
@@ -550,7 +550,7 @@ public class TestAddIndexes extends LuceneTestCase {
  private void verifyTermDocs(Directory dir, Term term, int numDocs)
      throws IOException {
    IndexReader reader = IndexReader.open(dir);
-    DocsEnum docsEnum = _TestUtil.docs(random, reader, term.field, term.bytes, null, null, false);
+    DocsEnum docsEnum = _TestUtil.docs(getRandom(), reader, term.field, term.bytes, null, null, false);
    int count = 0;
    while (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS)
      count++;
@@ -565,7 +565,7 @@ public class TestAddIndexes extends LuceneTestCase {
  private void setUpDirs(Directory dir, Directory aux, boolean withID) throws IOException {
    IndexWriter writer = null;
-    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(1000));
+    writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(1000));
    // add 1000 documents in 1 segment
    if (withID) {
      addDocsWithID(writer, 1000, 0);
@@ -578,7 +578,7 @@ public class TestAddIndexes extends LuceneTestCase {
    writer = newWriter(
        aux,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).
            setOpenMode(OpenMode.CREATE).
            setMaxBufferedDocs(1000).
            setMergePolicy(newLogMergePolicy(false, 10))
@@ -593,7 +593,7 @@ public class TestAddIndexes extends LuceneTestCase {
    writer.close();
    writer = newWriter(
        aux,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).
            setOpenMode(OpenMode.APPEND).
            setMaxBufferedDocs(1000).
            setMergePolicy(newLogMergePolicy(false, 10))
@@ -612,7 +612,7 @@ public class TestAddIndexes extends LuceneTestCase {
    lmp.setUseCompoundFile(false);
    lmp.setMergeFactor(100);
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))
        .setMaxBufferedDocs(5).setMergePolicy(lmp));
    Document doc = new Document();
@@ -641,7 +641,7 @@ public class TestAddIndexes extends LuceneTestCase {
    lmp.setUseCompoundFile(false);
    lmp.setMergeFactor(4);
    writer = new IndexWriter(dir2, newIndexWriterConfig(TEST_VERSION_CURRENT,
-        new MockAnalyzer(random))
+        new MockAnalyzer(getRandom()))
        .setMergeScheduler(new SerialMergeScheduler()).setMergePolicy(lmp));
    writer.addIndexes(dir);
    writer.close();
@@ -672,16 +672,16 @@ public class TestAddIndexes extends LuceneTestCase {
    public RunAddIndexesThreads(int numCopy) throws Throwable {
      NUM_COPY = numCopy;
-      dir = new MockDirectoryWrapper(random, new RAMDirectory());
+      dir = new MockDirectoryWrapper(getRandom(), new RAMDirectory());
      IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
-          TEST_VERSION_CURRENT, new MockAnalyzer(random))
+          TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))
          .setMaxBufferedDocs(2));
      for (int i = 0; i < NUM_INIT_DOCS; i++)
        addDoc(writer);
      writer.close();
      dir2 = newDirectory();
-      writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+      writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())));
      writer2.commit();
@@ -700,7 +700,7 @@ public class TestAddIndexes extends LuceneTestCase {
      final Directory[] dirs = new Directory[NUM_COPY];
      for(int k=0;k= 1) {
-        final int inc = 1+TestCodecs.random.nextInt(left-1);
+      if (getRandom().nextInt(3) == 1 && left >= 1) {
+        final int inc = 1 + getRandom().nextInt(left-1);
        upto2 += inc;
-        if (TestCodecs.random.nextInt(2) == 1) {
+        if (getRandom().nextInt(2) == 1) {
          doc = docs.advance(term.docs[upto2]);
          assertEquals(term.docs[upto2], doc);
        } else {
@@ -597,7 +597,7 @@ public class TestCodecs extends LuceneTestCase {
          assertEquals(term.docs[upto2], doc);
          if (!field.omitTF) {
            assertEquals(term.positions[upto2].length, postings.freq());
-            if (TestCodecs.random.nextInt(2) == 1) {
+            if (getRandom().nextInt(2) == 1) {
              this.verifyPositions(term.positions[upto2], postings);
            }
          }
@@ -616,9 +616,9 @@ public class TestCodecs extends LuceneTestCase {
  private void write(final FieldInfos fieldInfos, final Directory dir, final FieldData[] fields,
      boolean allowPreFlex) throws Throwable {
-    final int termIndexInterval = _TestUtil.nextInt(random, 13, 27);
+    final int termIndexInterval = _TestUtil.nextInt(getRandom(), 13, 27);
    final Codec codec = Codec.getDefault();
-    final SegmentWriteState state = new SegmentWriteState(InfoStream.getDefault(), dir, SEGMENT, fieldInfos, 10000, termIndexInterval, codec, null, newIOContext(random));
+    final SegmentWriteState state = new SegmentWriteState(InfoStream.getDefault(), dir, SEGMENT, fieldInfos, 10000, termIndexInterval, codec, null, newIOContext(getRandom()));
    final FieldsConsumer consumer = codec.postingsFormat().fieldsConsumer(state);
    Arrays.sort(fields);
diff --git lucene/core/src/test/org/apache/lucene/index/TestCompoundFile.java lucene/core/src/test/org/apache/lucene/index/TestCompoundFile.java
index 7dca18e..57c5391 100644
--- lucene/core/src/test/org/apache/lucene/index/TestCompoundFile.java
+++ lucene/core/src/test/org/apache/lucene/index/TestCompoundFile.java
@@ -54,7 +54,7 @@ public class TestCompoundFile extends LuceneTestCase
    private void createRandomFile(Directory dir, String name, int size) throws IOException {
-        IndexOutput os = dir.createOutput(name, newIOContext(random));
+        IndexOutput os = dir.createOutput(name, newIOContext(getRandom()));
        for (int i=0; i readers = Collections.synchronizedList(new ArrayList());
    DirectoryReader firstReader = DirectoryReader.open(dir);
    DirectoryReader reader = firstReader;
-    final Random rnd = random;
+    final Random rnd = getRandom();
    ReaderThread[] threads = new ReaderThread[n];
    final Set readersToClose = Collections.synchronizedSet(new HashSet());
@@ -301,7 +297,7 @@ public class TestDirectoryReaderReopen extends LuceneTestCase {
            }
          }
          synchronized(this) {
-            wait(_TestUtil.nextInt(random, 1, 100));
+            wait(_TestUtil.nextInt(getRandom(), 1, 100));
          }
        }
      }
@@ -319,7 +315,7 @@ public class TestDirectoryReaderReopen extends LuceneTestCase {
          }
          synchronized(this) {
-            wait(_TestUtil.nextInt(random, 1, 100));
+            wait(_TestUtil.nextInt(getRandom(), 1, 100));
          }
        }
      }
@@ -507,20 +503,20 @@ public class TestDirectoryReaderReopen extends LuceneTestCase {
        if (VERBOSE) {
          System.out.println("TEST: modify index");
        }
-        IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+        IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getStaticRandom())));
        w.deleteDocuments(new Term("field2", "a11"));
        w.deleteDocuments(new Term("field2", "b30"));
        w.close();
        break;
      }
      case 1: {
-        IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+        IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getStaticRandom())));
        w.forceMerge(1);
        w.close();
        break;
      }
      case 2: {
-        IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+        IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getStaticRandom())));
        w.addDocument(createDocument(101, 4));
        w.forceMerge(1);
        w.addDocument(createDocument(102, 4));
@@ -529,7 +525,7 @@ public class TestDirectoryReaderReopen extends LuceneTestCase {
        break;
      }
      case 3: {
-        IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+        IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getStaticRandom())));
        w.addDocument(createDocument(101, 4));
        w.close();
        break;
@@ -587,7 +583,7 @@ public class TestDirectoryReaderReopen extends LuceneTestCase {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(
        dir,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).
            setIndexDeletionPolicy(new KeepAllCommits()).
            setMaxBufferedDocs(-1).
            setMergePolicy(newLogMergePolicy(10))
diff --git lucene/core/src/test/org/apache/lucene/index/TestDoc.java lucene/core/src/test/org/apache/lucene/index/TestDoc.java
index b84bcad..f1e518a 100644
--- lucene/core/src/test/org/apache/lucene/index/TestDoc.java
+++ lucene/core/src/test/org/apache/lucene/index/TestDoc.java
@@ -110,7 +110,7 @@ public class TestDoc extends LuceneTestCase {
      Directory directory = newFSDirectory(indexDir, null);
      IndexWriter writer = new IndexWriter(
          directory,
-          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).
              setOpenMode(OpenMode.CREATE).
              setMaxBufferedDocs(-1).
              setMergePolicy(newLogMergePolicy(10))
@@ -145,7 +145,7 @@ public class TestDoc extends LuceneTestCase {
      directory = newFSDirectory(indexDir, null);
      writer = new IndexWriter(
          directory,
-          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).
              setOpenMode(OpenMode.CREATE).
              setMaxBufferedDocs(-1).
              setMergePolicy(newLogMergePolicy(10))
@@ -189,7 +189,7 @@ public class TestDoc extends LuceneTestCase {
   private SegmentInfo merge(Directory dir, SegmentInfo si1, SegmentInfo si2, String merged, boolean useCompoundFile)
   throws Exception {
-      IOContext context = newIOContext(random);
+      IOContext context = newIOContext(getRandom());
      SegmentReader r1 = new SegmentReader(si1, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, context);
      SegmentReader r2 = new SegmentReader(si2, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, context);
@@ -206,7 +206,7 @@ public class TestDoc extends LuceneTestCase {
                                               false, codec, fieldInfos);
      if (useCompoundFile) {
-        Collection filesToDelete = IndexWriter.createCompoundFile(dir, merged + ".cfs", MergeState.CheckAbort.NONE, info, newIOContext(random));
+        Collection filesToDelete = IndexWriter.createCompoundFile(dir, merged + ".cfs", MergeState.CheckAbort.NONE, info, newIOContext(getRandom()));
        info.setUseCompoundFile(true);
        for (final String fileToDelete : filesToDelete)
          si1.dir.deleteFile(fileToDelete);
@@ -218,7 +218,7 @@ public class TestDoc extends LuceneTestCase {
   private void printSegment(PrintWriter out, SegmentInfo si)
   throws Exception {
-      SegmentReader reader = new SegmentReader(si, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random));
+      SegmentReader reader = new SegmentReader(si, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(getRandom()));
      for (int i = 0; i < reader.numDocs(); i++)
        out.println(reader.document(i));
diff --git lucene/core/src/test/org/apache/lucene/index/TestDocCount.java lucene/core/src/test/org/apache/lucene/index/TestDocCount.java
index 9609b42..3d4091f 100644
--- lucene/core/src/test/org/apache/lucene/index/TestDocCount.java
+++ lucene/core/src/test/org/apache/lucene/index/TestDocCount.java
@@ -34,7 +34,7 @@ public class TestDocCount extends LuceneTestCase {
    assumeFalse("PreFlex codec does not support docCount statistic!",
        "Lucene3x".equals(Codec.getDefault().getName()));
    Directory dir = newDirectory();
-    RandomIndexWriter iw = new RandomIndexWriter(random, dir);
+    RandomIndexWriter iw = new RandomIndexWriter(getRandom(), dir);
    int numDocs = atLeast(100);
    for (int i = 0; i < numDocs; i++) {
      iw.addDocument(doc());
@@ -52,9 +52,9 @@ public class TestDocCount extends LuceneTestCase {
  private Document doc() {
    Document doc = new Document();
-    int numFields = _TestUtil.nextInt(random, 1, 10);
+    int numFields = _TestUtil.nextInt(getRandom(), 1, 10);
    for (int i = 0; i < numFields; i++) {
-      doc.add(newField("" + _TestUtil.nextInt(random, 'a', 'z'), "" + _TestUtil.nextInt(random, 'a', 'z'), StringField.TYPE_UNSTORED));
+      doc.add(newField("" + _TestUtil.nextInt(getRandom(), 'a', 'z'), "" + _TestUtil.nextInt(getRandom(), 'a', 'z'), StringField.TYPE_UNSTORED));
    }
    return doc;
  }
@@ -75,7 +75,7 @@ public class TestDocCount extends LuceneTestCase {
    FixedBitSet visited = new FixedBitSet(ir.maxDoc());
    TermsEnum te = terms.iterator(null);
    while (te.next() != null) {
-      DocsEnum de = _TestUtil.docs(random, te, null, null, false);
+      DocsEnum de = _TestUtil.docs(getRandom(), te, null, null, false);
      while (de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
        visited.set(de.docID());
      }
diff --git lucene/core/src/test/org/apache/lucene/index/TestDocTermOrds.java lucene/core/src/test/org/apache/lucene/index/TestDocTermOrds.java
index b2e14c8..36c4c67 100644
--- lucene/core/src/test/org/apache/lucene/index/TestDocTermOrds.java
+++ lucene/core/src/test/org/apache/lucene/index/TestDocTermOrds.java
@@ -50,7 +50,7 @@ public class TestDocTermOrds extends LuceneTestCase {
  public void testSimple() throws Exception {
    Directory dir = newDirectory();
-    final RandomIndexWriter w = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+    final RandomIndexWriter w = new RandomIndexWriter(getRandom(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setMergePolicy(newLogMergePolicy()));
    Document doc = new Document();
    Field field = newField("field", "", TextField.TYPE_UNSTORED);
    doc.add(field);
@@ -96,7 +96,7 @@ public class TestDocTermOrds extends LuceneTestCase {
    final int NUM_TERMS = atLeast(20);
    final Set terms = new HashSet();
    while(terms.size() < NUM_TERMS) {
-      final String s = _TestUtil.randomRealisticUnicodeString(random);
+      final String s = _TestUtil.randomRealisticUnicodeString(getRandom());
      //final String s = _TestUtil.randomSimpleString(random);
      if (s.length() > 0) {
        terms.add(new BytesRef(s));
@@ -107,16 +107,16 @@ public class TestDocTermOrds extends LuceneTestCase {
    final int NUM_DOCS = atLeast(100);
-    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()));
    // Sometimes swap in codec that impls ord():
-    if (random.nextInt(10) == 7) {
+    if (getRandom().nextInt(10) == 7) {
      // Make sure terms index has ords:
      Codec codec = _TestUtil.alwaysPostingsFormat(PostingsFormat.forName("Lucene40WithOrds"));
      conf.setCodec(codec);
    }
-    final RandomIndexWriter w = new RandomIndexWriter(random, dir, conf);
+    final RandomIndexWriter w = new RandomIndexWriter(getRandom(), dir, conf);
    final int[][] idToOrds = new int[NUM_DOCS][];
    final Set ordsForDocSet = new HashSet();
@@ -126,9 +126,9 @@ public class TestDocTermOrds extends LuceneTestCase {
      doc.add(new IntField("id", id));
-      final int termCount = _TestUtil.nextInt(random, 0, 20*RANDOM_MULTIPLIER);
+      final int termCount = _TestUtil.nextInt(getRandom(), 0, 20*RANDOM_MULTIPLIER);
      while(ordsForDocSet.size() < termCount) {
-        ordsForDocSet.add(random.nextInt(termsArray.length));
+        ordsForDocSet.add(getRandom().nextInt(termsArray.length));
      }
      final int[] ordsForDoc = new int[termCount];
      int upto = 0;
@@ -181,12 +181,12 @@ public class TestDocTermOrds extends LuceneTestCase {
    MockDirectoryWrapper dir = newDirectory();
    final Set prefixes = new HashSet();
-    final int numPrefix = _TestUtil.nextInt(random, 2, 7);
+    final int numPrefix = _TestUtil.nextInt(getRandom(), 2, 7);
    if (VERBOSE) {
      System.out.println("TEST: use " + numPrefix + " prefixes");
    }
    while(prefixes.size() < numPrefix) {
-      prefixes.add(_TestUtil.randomRealisticUnicodeString(random));
+      prefixes.add(_TestUtil.randomRealisticUnicodeString(getRandom()));
      //prefixes.add(_TestUtil.randomSimpleString(random));
    }
    final String[] prefixesArray = prefixes.toArray(new String[prefixes.size()]);
@@ -194,7 +194,7 @@ public class TestDocTermOrds extends LuceneTestCase {
    final int NUM_TERMS = atLeast(20);
    final Set terms = new HashSet();
    while(terms.size() < NUM_TERMS) {
final Set terms = new HashSet(); while(terms.size() < NUM_TERMS) { - final String s = prefixesArray[random.nextInt(prefixesArray.length)] + _TestUtil.randomRealisticUnicodeString(random); + final String s = prefixesArray[getRandom().nextInt(prefixesArray.length)] + _TestUtil.randomRealisticUnicodeString(getRandom()); //final String s = prefixesArray[random.nextInt(prefixesArray.length)] + _TestUtil.randomSimpleString(random); if (s.length() > 0) { terms.add(new BytesRef(s)); @@ -205,15 +205,15 @@ public class TestDocTermOrds extends LuceneTestCase { final int NUM_DOCS = atLeast(100); - IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); + IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())); // Sometimes swap in codec that impls ord(): - if (random.nextInt(10) == 7) { + if (getRandom().nextInt(10) == 7) { Codec codec = _TestUtil.alwaysPostingsFormat(PostingsFormat.forName("Lucene40WithOrds")); conf.setCodec(codec); } - final RandomIndexWriter w = new RandomIndexWriter(random, dir, conf); + final RandomIndexWriter w = new RandomIndexWriter(getRandom(), dir, conf); final int[][] idToOrds = new int[NUM_DOCS][]; final Set ordsForDocSet = new HashSet(); @@ -223,9 +223,9 @@ public class TestDocTermOrds extends LuceneTestCase { doc.add(new IntField("id", id)); - final int termCount = _TestUtil.nextInt(random, 0, 20*RANDOM_MULTIPLIER); + final int termCount = _TestUtil.nextInt(getRandom(), 0, 20*RANDOM_MULTIPLIER); while(ordsForDocSet.size() < termCount) { - ordsForDocSet.add(random.nextInt(termsArray.length)); + ordsForDocSet.add(getRandom().nextInt(termsArray.length)); } final int[] ordsForDoc = new int[termCount]; int upto = 0; @@ -302,7 +302,7 @@ public class TestDocTermOrds extends LuceneTestCase { "field", prefixRef, Integer.MAX_VALUE, - _TestUtil.nextInt(random, 2, 10)); + _TestUtil.nextInt(getRandom(), 2, 10)); final int[] docIDToID = FieldCache.DEFAULT.getInts(r, "id", false); diff --git lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java index 6c3b2fc..d9e721e 100644 --- lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java +++ lucene/core/src/test/org/apache/lucene/index/TestDocValuesIndexing.java @@ -108,30 +108,30 @@ public class TestDocValuesIndexing extends LuceneTestCase { } public void testIndexBytesNoDeletes() throws IOException { - runTestIndexBytes(writerConfig(random.nextBoolean()), false); + runTestIndexBytes(writerConfig(getRandom().nextBoolean()), false); } public void testIndexBytesDeletes() throws IOException { - runTestIndexBytes(writerConfig(random.nextBoolean()), true); + runTestIndexBytes(writerConfig(getRandom().nextBoolean()), true); } public void testIndexNumericsNoDeletes() throws IOException { - runTestNumerics(writerConfig(random.nextBoolean()), false); + runTestNumerics(writerConfig(getRandom().nextBoolean()), false); } public void testIndexNumericsDeletes() throws IOException { - runTestNumerics(writerConfig(random.nextBoolean()), true); + runTestNumerics(writerConfig(getRandom().nextBoolean()), true); } public void testAddIndexes() throws IOException { int valuesPerIndex = 10; List values = Arrays.asList(Type.values()); - Collections.shuffle(values, random); + Collections.shuffle(values, getRandom()); Type first = values.get(0); Type second = values.get(1); // index first index Directory d_1 = newDirectory(); - IndexWriter w_1 = new IndexWriter(d_1, 
writerConfig(random.nextBoolean())); + IndexWriter w_1 = new IndexWriter(d_1, writerConfig(getRandom().nextBoolean())); indexValues(w_1, valuesPerIndex, first, values, false, 7); w_1.commit(); assertEquals(valuesPerIndex, w_1.maxDoc()); @@ -139,17 +139,17 @@ public class TestDocValuesIndexing extends LuceneTestCase { // index second index Directory d_2 = newDirectory(); - IndexWriter w_2 = new IndexWriter(d_2, writerConfig(random.nextBoolean())); + IndexWriter w_2 = new IndexWriter(d_2, writerConfig(getRandom().nextBoolean())); indexValues(w_2, valuesPerIndex, second, values, false, 7); w_2.commit(); assertEquals(valuesPerIndex, w_2.maxDoc()); _TestUtil.checkIndex(d_2); Directory target = newDirectory(); - IndexWriter w = new IndexWriter(target, writerConfig(random.nextBoolean())); + IndexWriter w = new IndexWriter(target, writerConfig(getRandom().nextBoolean())); IndexReader r_1 = IndexReader.open(w_1, true); IndexReader r_2 = IndexReader.open(w_2, true); - if (random.nextBoolean()) { + if (getRandom().nextBoolean()) { w.addIndexes(d_1, d_2); } else { w.addIndexes(r_1, r_2); @@ -237,8 +237,8 @@ public class TestDocValuesIndexing extends LuceneTestCase { private IndexWriterConfig writerConfig(boolean useCompoundFile) { final IndexWriterConfig cfg = newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)); - cfg.setMergePolicy(newLogMergePolicy(random)); + new MockAnalyzer(getRandom())); + cfg.setMergePolicy(newLogMergePolicy(getRandom())); LogMergePolicy policy = new LogDocMergePolicy(); cfg.setMergePolicy(policy); policy.setUseCompoundFile(useCompoundFile); @@ -254,7 +254,7 @@ public class TestDocValuesIndexing extends LuceneTestCase { final List numVariantList = new ArrayList(NUMERICS); // run in random order to test if fill works correctly during merges - Collections.shuffle(numVariantList, random); + Collections.shuffle(numVariantList, getRandom()); for (Type val : numVariantList) { FixedBitSet deleted = indexValues(w, numValues, val, numVariantList, withDeletions, 7); @@ -330,7 +330,7 @@ public class TestDocValuesIndexing extends LuceneTestCase { IndexWriter w = new IndexWriter(d, cfg); final List byteVariantList = new ArrayList(BYTES); // run in random order to test if fill works correctly during merges - Collections.shuffle(byteVariantList, random); + Collections.shuffle(byteVariantList, getRandom()); final int numValues = 50 + atLeast(10); for (Type byteIndexValue : byteVariantList) { List closeables = new ArrayList(); @@ -413,11 +413,11 @@ public class TestDocValuesIndexing extends LuceneTestCase { public void testGetArrayNumerics() throws CorruptIndexException, IOException { Directory d = newDirectory(); - IndexWriterConfig cfg = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); + IndexWriterConfig cfg = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())); IndexWriter w = new IndexWriter(d, cfg); final int numValues = 50 + atLeast(10); final List numVariantList = new ArrayList(NUMERICS); - Collections.shuffle(numVariantList, random); + Collections.shuffle(numVariantList, getRandom()); for (Type val : numVariantList) { indexValues(w, numValues, val, numVariantList, false, 7); @@ -497,7 +497,7 @@ public class TestDocValuesIndexing extends LuceneTestCase { public void testGetArrayBytes() throws CorruptIndexException, IOException { Directory d = newDirectory(); IndexWriterConfig cfg = newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)); + new MockAnalyzer(getRandom())); IndexWriter w = new IndexWriter(d, cfg); 
final int numValues = 50 + atLeast(10); // only single byte fixed straight supports getArray() @@ -536,7 +536,7 @@ public class TestDocValuesIndexing extends LuceneTestCase { @SuppressWarnings("fallthrough") private Source getSource(DocValues values) throws IOException { // getSource uses cache internally - switch(random.nextInt(5)) { + switch(getRandom().nextInt(5)) { case 3: return values.load(); case 2: @@ -650,17 +650,17 @@ public class TestDocValuesIndexing extends LuceneTestCase { w.addDocument(doc); if (i % 7 == 0) { - if (withDeletions && random.nextBoolean()) { - Type val = valueVarList.get(random.nextInt(1 + valueVarList + if (withDeletions && getRandom().nextBoolean()) { + Type val = valueVarList.get(getRandom().nextInt(1 + valueVarList .indexOf(valueType))); - final int randInt = val == valueType ? random.nextInt(1 + i) : random + final int randInt = val == valueType ? getRandom().nextInt(1 + i) : getRandom() .nextInt(numValues); w.deleteDocuments(new Term("id", val.name() + "_" + randInt)); if (val == valueType) { deleted.set(randInt); } } - if (random.nextInt(10) == 0) { + if (getRandom().nextInt(10) == 0) { w.commit(); } } @@ -668,7 +668,7 @@ public class TestDocValuesIndexing extends LuceneTestCase { w.commit(); // TODO test multi seg with deletions - if (withDeletions || random.nextBoolean()) { + if (withDeletions || getRandom().nextBoolean()) { w.forceMerge(1, true); } return deleted; @@ -676,7 +676,7 @@ public class TestDocValuesIndexing extends LuceneTestCase { public void testMultiValuedDocValuesField() throws Exception { Directory d = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, d); + RandomIndexWriter w = new RandomIndexWriter(getRandom(), d); Document doc = new Document(); DocValuesField f = new DocValuesField("field", 17, Type.VAR_INTS); // Index doc values are single-valued so we should not @@ -703,7 +703,7 @@ public class TestDocValuesIndexing extends LuceneTestCase { public void testDifferentTypedDocValuesField() throws Exception { Directory d = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, d); + RandomIndexWriter w = new RandomIndexWriter(getRandom(), d); Document doc = new Document(); // Index doc values are single-valued so we should not // be able to add same field more than once: @@ -734,17 +734,17 @@ public class TestDocValuesIndexing extends LuceneTestCase { boolean fixed = type == Type.BYTES_FIXED_SORTED; final Directory d = newDirectory(); IndexWriterConfig cfg = newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)); + new MockAnalyzer(getRandom())); IndexWriter w = new IndexWriter(d, cfg); int numDocs = atLeast(100); BytesRefHash hash = new BytesRefHash(); Map docToString = new HashMap(); - int len = 1 + random.nextInt(50); + int len = 1 + getRandom().nextInt(50); for (int i = 0; i < numDocs; i++) { Document doc = new Document(); doc.add(newField("id", "" + i, TextField.TYPE_STORED)); - String string =fixed ? _TestUtil.randomFixedByteLengthUnicodeString(random, - len) : _TestUtil.randomRealisticUnicodeString(random, 1, len); + String string =fixed ? _TestUtil.randomFixedByteLengthUnicodeString(getRandom(), + len) : _TestUtil.randomRealisticUnicodeString(getRandom(), 1, len); BytesRef br = new BytesRef(string); doc.add(new DocValuesField("field", br, type)); hash.add(br); @@ -771,8 +771,8 @@ public class TestDocValuesIndexing extends LuceneTestCase { Document doc = new Document(); String id = "" + i + numDocs; doc.add(newField("id", id, TextField.TYPE_STORED)); - String string = fixed ? 
_TestUtil.randomFixedByteLengthUnicodeString(random, - len) : _TestUtil.randomRealisticUnicodeString(random, 1, len); + String string = fixed ? _TestUtil.randomFixedByteLengthUnicodeString(getRandom(), + len) : _TestUtil.randomRealisticUnicodeString(getRandom(), 1, len); BytesRef br = new BytesRef(string); hash.add(br); docToString.put(id, string); diff --git lucene/core/src/test/org/apache/lucene/index/TestDocsAndPositions.java lucene/core/src/test/org/apache/lucene/index/TestDocsAndPositions.java index 4d31a7e..a9fe06e 100644 --- lucene/core/src/test/org/apache/lucene/index/TestDocsAndPositions.java +++ lucene/core/src/test/org/apache/lucene/index/TestDocsAndPositions.java @@ -38,7 +38,7 @@ public class TestDocsAndPositions extends LuceneTestCase { @Override public void setUp() throws Exception { super.setUp(); - fieldName = "field" + random.nextInt(); + fieldName = "field" + getRandom().nextInt(); } /** @@ -46,8 +46,8 @@ public class TestDocsAndPositions extends LuceneTestCase { */ public void testPositionsSimple() throws IOException { Directory directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), directory, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); for (int i = 0; i < 39; i++) { Document doc = new Document(); FieldType customType = new FieldType(TextField.TYPE_UNSTORED); @@ -72,7 +72,7 @@ public class TestDocsAndPositions extends LuceneTestCase { if (atomicReaderContext.reader().maxDoc() == 0) { continue; } - final int advance = docsAndPosEnum.advance(random.nextInt(atomicReaderContext.reader().maxDoc())); + final int advance = docsAndPosEnum.advance(getRandom().nextInt(atomicReaderContext.reader().maxDoc())); do { String msg = "Advanced to: " + advance + " current doc: " + docsAndPosEnum.docID(); // TODO: + " usePayloads: " + usePayload; @@ -104,11 +104,11 @@ public class TestDocsAndPositions extends LuceneTestCase { */ public void testRandomPositions() throws IOException { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setMergePolicy(newLogMergePolicy())); int numDocs = atLeast(47); int max = 1051; - int term = random.nextInt(max); + int term = getRandom().nextInt(max); Integer[][] positionsInDoc = new Integer[numDocs][]; FieldType customType = new FieldType(TextField.TYPE_UNSTORED); customType.setOmitNorms(true); @@ -118,7 +118,7 @@ public class TestDocsAndPositions extends LuceneTestCase { StringBuilder builder = new StringBuilder(); int num = atLeast(131); for (int j = 0; j < num; j++) { - int nextInt = random.nextInt(max); + int nextInt = getRandom().nextInt(max); builder.append(nextInt).append(" "); if (nextInt == term) { positions.add(Integer.valueOf(j)); @@ -148,10 +148,10 @@ public class TestDocsAndPositions extends LuceneTestCase { int initDoc = 0; int maxDoc = atomicReaderContext.reader().maxDoc(); // initially advance or do next doc - if (random.nextBoolean()) { + if (getRandom().nextBoolean()) { initDoc = docsAndPosEnum.nextDoc(); } else { - initDoc = docsAndPosEnum.advance(random.nextInt(maxDoc)); + initDoc = 
docsAndPosEnum.advance(getRandom().nextInt(maxDoc)); } // now run through the scorer and check if all positions are there... do { @@ -163,8 +163,8 @@ public class TestDocsAndPositions extends LuceneTestCase { assertEquals(pos.length, docsAndPosEnum.freq()); // number of positions read should be random - don't read all of them // allways - final int howMany = random.nextInt(20) == 0 ? pos.length - - random.nextInt(pos.length) : pos.length; + final int howMany = getRandom().nextInt(20) == 0 ? pos.length + - getRandom().nextInt(pos.length) : pos.length; for (int j = 0; j < howMany; j++) { assertEquals("iteration: " + i + " initDoc: " + initDoc + " doc: " + docID + " base: " + atomicReaderContext.docBase @@ -172,9 +172,9 @@ public class TestDocsAndPositions extends LuceneTestCase { + usePayload*/, pos[j].intValue(), docsAndPosEnum.nextPosition()); } - if (random.nextInt(10) == 0) { // once is a while advance + if (getRandom().nextInt(10) == 0) { // once is a while advance docsAndPosEnum - .advance(docID + 1 + random.nextInt((maxDoc - docID))); + .advance(docID + 1 + getRandom().nextInt((maxDoc - docID))); } } while (docsAndPosEnum.nextDoc() != DocsAndPositionsEnum.NO_MORE_DOCS); @@ -187,11 +187,11 @@ public class TestDocsAndPositions extends LuceneTestCase { public void testRandomDocs() throws IOException { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setMergePolicy(newLogMergePolicy())); int numDocs = atLeast(49); int max = 15678; - int term = random.nextInt(max); + int term = getRandom().nextInt(max); int[] freqInDoc = new int[numDocs]; FieldType customType = new FieldType(TextField.TYPE_UNSTORED); customType.setOmitNorms(true); @@ -199,7 +199,7 @@ public class TestDocsAndPositions extends LuceneTestCase { Document doc = new Document(); StringBuilder builder = new StringBuilder(); for (int j = 0; j < 199; j++) { - int nextInt = random.nextInt(max); + int nextInt = getRandom().nextInt(max); builder.append(nextInt).append(' '); if (nextInt == term) { freqInDoc[i]++; @@ -219,7 +219,7 @@ public class TestDocsAndPositions extends LuceneTestCase { AtomicReaderContext[] leaves = topReaderContext.leaves(); for (AtomicReaderContext context : leaves) { int maxDoc = context.reader().maxDoc(); - DocsEnum docsEnum = _TestUtil.docs(random, context.reader(), fieldName, bytes, null, null, true); + DocsEnum docsEnum = _TestUtil.docs(getRandom(), context.reader(), fieldName, bytes, null, null, true); if (findNext(freqInDoc, context.docBase, context.docBase + maxDoc) == Integer.MAX_VALUE) { assertNull(docsEnum); continue; @@ -230,7 +230,7 @@ public class TestDocsAndPositions extends LuceneTestCase { if (freqInDoc[context.docBase + j] != 0) { assertEquals(j, docsEnum.docID()); assertEquals(docsEnum.freq(), freqInDoc[context.docBase +j]); - if (i % 2 == 0 && random.nextInt(10) == 0) { + if (i % 2 == 0 && getRandom().nextInt(10) == 0) { int next = findNext(freqInDoc, context.docBase+j+1, context.docBase + maxDoc) - context.docBase; int advancedTo = docsEnum.advance(next); if (next >= maxDoc) { @@ -267,8 +267,8 @@ public class TestDocsAndPositions extends LuceneTestCase { */ public void testLargeNumberOfPositions() throws IOException { Directory dir = newDirectory(); - RandomIndexWriter writer = new 
RandomIndexWriter(random, dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); int howMany = 1000; FieldType customType = new FieldType(TextField.TYPE_UNSTORED); customType.setOmitNorms(true); @@ -304,10 +304,10 @@ public class TestDocsAndPositions extends LuceneTestCase { int initDoc = 0; int maxDoc = atomicReaderContext.reader().maxDoc(); // initially advance or do next doc - if (random.nextBoolean()) { + if (getRandom().nextBoolean()) { initDoc = docsAndPosEnum.nextDoc(); } else { - initDoc = docsAndPosEnum.advance(random.nextInt(maxDoc)); + initDoc = docsAndPosEnum.advance(getRandom().nextInt(maxDoc)); } String msg = "Iteration: " + i + " initDoc: " + initDoc; // TODO: + " payloads: " + usePayload; assertEquals(howMany / 2, docsAndPosEnum.freq()); @@ -324,13 +324,13 @@ public class TestDocsAndPositions extends LuceneTestCase { public void testDocsEnumStart() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir); Document doc = new Document(); doc.add(newField("foo", "bar", StringField.TYPE_UNSTORED)); writer.addDocument(doc); DirectoryReader reader = writer.getReader(); AtomicReader r = getOnlySegmentReader(reader); - DocsEnum disi = _TestUtil.docs(random, r, "foo", new BytesRef("bar"), null, null, false); + DocsEnum disi = _TestUtil.docs(getRandom(), r, "foo", new BytesRef("bar"), null, null, false); int docid = disi.docID(); assertTrue(docid == -1 || docid == DocIdSetIterator.NO_MORE_DOCS); assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); @@ -338,7 +338,7 @@ public class TestDocsAndPositions extends LuceneTestCase { // now reuse and check again TermsEnum te = r.terms("foo").iterator(null); assertTrue(te.seekExact(new BytesRef("bar"), true)); - disi = _TestUtil.docs(random, te, null, disi, false); + disi = _TestUtil.docs(getRandom(), te, null, disi, false); docid = disi.docID(); assertTrue(docid == -1 || docid == DocIdSetIterator.NO_MORE_DOCS); assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); @@ -349,7 +349,7 @@ public class TestDocsAndPositions extends LuceneTestCase { public void testDocsAndPositionsEnumStart() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir); Document doc = new Document(); doc.add(newField("foo", "bar", TextField.TYPE_UNSTORED)); writer.addDocument(doc); diff --git lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java index fb684e7..b152765 100644 --- lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java +++ lucene/core/src/test/org/apache/lucene/index/TestDocumentWriter.java @@ -58,13 +58,13 @@ public class TestDocumentWriter extends LuceneTestCase { public void testAddDocument() throws Exception { Document testDoc = new Document(); DocHelper.setupDoc(testDoc); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); writer.addDocument(testDoc); writer.commit(); SegmentInfo info = writer.newestSegment(); writer.close(); //After adding the 
document, we should be able to read it back in - SegmentReader reader = new SegmentReader(info, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random)); + SegmentReader reader = new SegmentReader(info, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(getRandom())); assertTrue(reader != null); Document doc = reader.document(0); assertTrue(doc != null); @@ -125,7 +125,7 @@ public class TestDocumentWriter extends LuceneTestCase { writer.commit(); SegmentInfo info = writer.newestSegment(); writer.close(); - SegmentReader reader = new SegmentReader(info, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random)); + SegmentReader reader = new SegmentReader(info, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(getRandom())); DocsAndPositionsEnum termPositions = MultiFields.getTermPositionsEnum(reader, MultiFields.getLiveDocs(reader), "repeated", new BytesRef("repeated"), false); @@ -197,7 +197,7 @@ public class TestDocumentWriter extends LuceneTestCase { writer.commit(); SegmentInfo info = writer.newestSegment(); writer.close(); - SegmentReader reader = new SegmentReader(info, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random)); + SegmentReader reader = new SegmentReader(info, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(getRandom())); DocsAndPositionsEnum termPositions = MultiFields.getTermPositionsEnum(reader, reader.getLiveDocs(), "f1", new BytesRef("a"), false); assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS); @@ -215,7 +215,7 @@ public class TestDocumentWriter extends LuceneTestCase { public void testPreAnalyzedField() throws IOException { IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random))); + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); Document doc = new Document(); doc.add(new TextField("preanalyzed", new TokenStream() { @@ -241,7 +241,7 @@ public class TestDocumentWriter extends LuceneTestCase { writer.commit(); SegmentInfo info = writer.newestSegment(); writer.close(); - SegmentReader reader = new SegmentReader(info, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random)); + SegmentReader reader = new SegmentReader(info, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(getRandom())); DocsAndPositionsEnum termPositions = reader.termPositionsEnum(reader.getLiveDocs(), "preanalyzed", new BytesRef("term1"), false); assertTrue(termPositions.nextDoc() != termPositions.NO_MORE_DOCS); @@ -279,7 +279,7 @@ public class TestDocumentWriter extends LuceneTestCase { doc.add(newField("f2", "v2", StringField.TYPE_STORED)); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random))); + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); writer.addDocument(doc); writer.close(); @@ -319,7 +319,7 @@ public class TestDocumentWriter extends LuceneTestCase { doc.add(newField("f2", "v2", customType2)); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random))); + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); writer.addDocument(doc); writer.forceMerge(1); // be sure to have a single segment writer.close(); diff --git lucene/core/src/test/org/apache/lucene/index/TestDocumentsWriterDeleteQueue.java lucene/core/src/test/org/apache/lucene/index/TestDocumentsWriterDeleteQueue.java index ec21cc8..192029a 100644 --- lucene/core/src/test/org/apache/lucene/index/TestDocumentsWriterDeleteQueue.java +++ 
lucene/core/src/test/org/apache/lucene/index/TestDocumentsWriterDeleteQueue.java @@ -36,10 +36,10 @@ public class TestDocumentsWriterDeleteQueue extends LuceneTestCase { public void testUpdateDelteSlices() { DocumentsWriterDeleteQueue queue = new DocumentsWriterDeleteQueue(); - final int size = 200 + random.nextInt(500) * RANDOM_MULTIPLIER; + final int size = 200 + getRandom().nextInt(500) * RANDOM_MULTIPLIER; Integer[] ids = new Integer[size]; for (int i = 0; i < ids.length; i++) { - ids[i] = random.nextInt(); + ids[i] = getRandom().nextInt(); } DeleteSlice slice1 = queue.newSlice(); DeleteSlice slice2 = queue.newSlice(); @@ -54,14 +54,14 @@ public class TestDocumentsWriterDeleteQueue extends LuceneTestCase { Term[] term = new Term[] {new Term("id", i.toString())}; uniqueValues.add(term[0]); queue.addDelete(term); - if (random.nextInt(20) == 0 || j == ids.length - 1) { + if (getRandom().nextInt(20) == 0 || j == ids.length - 1) { queue.updateSlice(slice1); assertTrue(slice1.isTailItem(term)); slice1.apply(bd1, j); assertAllBetween(last1, j, bd1, ids); last1 = j + 1; } - if (random.nextInt(10) == 5 || j == ids.length - 1) { + if (getRandom().nextInt(10) == 5 || j == ids.length - 1) { queue.updateSlice(slice2); assertTrue(slice2.isTailItem(term)); slice2.apply(bd2, j); @@ -96,12 +96,12 @@ public class TestDocumentsWriterDeleteQueue extends LuceneTestCase { assertFalse(queue.anyChanges()); queue.clear(); assertFalse(queue.anyChanges()); - final int size = 200 + random.nextInt(500) * RANDOM_MULTIPLIER; + final int size = 200 + getRandom().nextInt(500) * RANDOM_MULTIPLIER; int termsSinceFreeze = 0; int queriesSinceFreeze = 0; for (int i = 0; i < size; i++) { Term term = new Term("id", "" + i); - if (random.nextInt(10) == 0) { + if (getRandom().nextInt(10) == 0) { queue.addDelete(new TermQuery(term)); queriesSinceFreeze++; } else { @@ -109,7 +109,7 @@ public class TestDocumentsWriterDeleteQueue extends LuceneTestCase { termsSinceFreeze++; } assertTrue(queue.anyChanges()); - if (random.nextInt(10) == 0) { + if (getRandom().nextInt(10) == 0) { queue.clear(); queue.tryApplyGlobalSlice(); assertFalse(queue.anyChanges()); @@ -120,12 +120,12 @@ public class TestDocumentsWriterDeleteQueue extends LuceneTestCase { public void testAnyChanges() { DocumentsWriterDeleteQueue queue = new DocumentsWriterDeleteQueue(); - final int size = 200 + random.nextInt(500) * RANDOM_MULTIPLIER; + final int size = 200 + getRandom().nextInt(500) * RANDOM_MULTIPLIER; int termsSinceFreeze = 0; int queriesSinceFreeze = 0; for (int i = 0; i < size; i++) { Term term = new Term("id", "" + i); - if (random.nextInt(10) == 0) { + if (getRandom().nextInt(10) == 0) { queue.addDelete(new TermQuery(term)); queriesSinceFreeze++; } else { @@ -133,7 +133,7 @@ public class TestDocumentsWriterDeleteQueue extends LuceneTestCase { termsSinceFreeze++; } assertTrue(queue.anyChanges()); - if (random.nextInt(5) == 0) { + if (getRandom().nextInt(5) == 0) { FrozenBufferedDeletes freezeGlobalBuffer = queue .freezeGlobalBuffer(null); assertEquals(termsSinceFreeze, freezeGlobalBuffer.termCount); @@ -174,15 +174,15 @@ public class TestDocumentsWriterDeleteQueue extends LuceneTestCase { public void testStressDeleteQueue() throws InterruptedException { DocumentsWriterDeleteQueue queue = new DocumentsWriterDeleteQueue(); Set uniqueValues = new HashSet(); - final int size = 10000 + random.nextInt(500) * RANDOM_MULTIPLIER; + final int size = 10000 + getRandom().nextInt(500) * RANDOM_MULTIPLIER; Integer[] ids = new Integer[size]; for (int i = 0; i < 
ids.length; i++) { - ids[i] = random.nextInt(); + ids[i] = getRandom().nextInt(); uniqueValues.add(new Term("id", ids[i].toString())); } CountDownLatch latch = new CountDownLatch(1); AtomicInteger index = new AtomicInteger(0); - final int numThreads = 2 + random.nextInt(5); + final int numThreads = 2 + getRandom().nextInt(5); UpdateThread[] threads = new UpdateThread[numThreads]; for (int i = 0; i < threads.length; i++) { threads[i] = new UpdateThread(queue, index, ids, latch); diff --git lucene/core/src/test/org/apache/lucene/index/TestDuelingCodecs.java lucene/core/src/test/org/apache/lucene/index/TestDuelingCodecs.java index 989eda6..a89431e 100644 --- lucene/core/src/test/org/apache/lucene/index/TestDuelingCodecs.java +++ lucene/core/src/test/org/apache/lucene/index/TestDuelingCodecs.java @@ -66,11 +66,11 @@ public class TestDuelingCodecs extends LuceneTestCase { // so this would make assertEquals complicated. leftCodec = Codec.forName("SimpleText"); - rightCodec = new RandomCodec(random, false); + rightCodec = new RandomCodec(getRandom(), false); leftDir = newDirectory(); rightDir = newDirectory(); - long seed = random.nextLong(); + long seed = getRandom().nextLong(); // must use same seed because of random payloads, etc Analyzer leftAnalyzer = new MockAnalyzer(new Random(seed)); @@ -209,7 +209,7 @@ public class TestDuelingCodecs extends LuceneTestCase { if (deep) { int numIntersections = atLeast(3); for (int i = 0; i < numIntersections; i++) { - String re = AutomatonTestUtil.randomRegexp(random); + String re = AutomatonTestUtil.randomRegexp(getRandom()); CompiledAutomaton automaton = new CompiledAutomaton(new RegExp(re, RegExp.NONE).toAutomaton()); if (automaton.type == CompiledAutomaton.AUTOMATON_TYPE.NORMAL) { // TODO: test start term too @@ -246,7 +246,7 @@ public class TestDuelingCodecs extends LuceneTestCase { */ public void assertTermsEnum(TermsEnum leftTermsEnum, TermsEnum rightTermsEnum, boolean deep) throws Exception { BytesRef term; - Bits randomBits = new RandomBits(leftReader.maxDoc(), random.nextDouble(), random); + Bits randomBits = new RandomBits(leftReader.maxDoc(), getRandom().nextDouble(), getRandom()); DocsAndPositionsEnum leftPositions = null; DocsAndPositionsEnum rightPositions = null; DocsEnum leftDocs = null; @@ -380,13 +380,13 @@ public class TestDuelingCodecs extends LuceneTestCase { int skipInterval = 16; while (true) { - if (random.nextBoolean()) { + if (getRandom().nextBoolean()) { // nextDoc() docid = leftDocs.nextDoc(); assertEquals(info, docid, rightDocs.nextDoc()); } else { // advance() - int skip = docid + (int) Math.ceil(Math.abs(skipInterval + random.nextGaussian() * averageGap)); + int skip = docid + (int) Math.ceil(Math.abs(skipInterval + getRandom().nextGaussian() * averageGap)); docid = leftDocs.advance(skip); assertEquals(info, docid, rightDocs.advance(skip)); } @@ -415,13 +415,13 @@ public class TestDuelingCodecs extends LuceneTestCase { int skipInterval = 16; while (true) { - if (random.nextBoolean()) { + if (getRandom().nextBoolean()) { // nextDoc() docid = leftDocs.nextDoc(); assertEquals(info, docid, rightDocs.nextDoc()); } else { // advance() - int skip = docid + (int) Math.ceil(Math.abs(skipInterval + random.nextGaussian() * averageGap)); + int skip = docid + (int) Math.ceil(Math.abs(skipInterval + getRandom().nextGaussian() * averageGap)); docid = leftDocs.advance(skip); assertEquals(info, docid, rightDocs.advance(skip)); } diff --git lucene/core/src/test/org/apache/lucene/index/TestFieldInfos.java 
lucene/core/src/test/org/apache/lucene/index/TestFieldInfos.java index cf32981..3b239c2 100644 --- lucene/core/src/test/org/apache/lucene/index/TestFieldInfos.java +++ lucene/core/src/test/org/apache/lucene/index/TestFieldInfos.java @@ -52,7 +52,7 @@ public class TestFieldInfos extends LuceneTestCase { assertTrue(fieldInfos.size() == DocHelper.all.size()); //this is all b/c we are using the no-arg constructor - IndexOutput output = dir.createOutput(filename, newIOContext(random)); + IndexOutput output = dir.createOutput(filename, newIOContext(getRandom())); assertTrue(output != null); //Use a RAMOutputStream @@ -120,34 +120,34 @@ public class TestFieldInfos extends LuceneTestCase { } try { - readOnly.addOrUpdate("bogus", random.nextBoolean()); + readOnly.addOrUpdate("bogus", getRandom().nextBoolean()); fail("instance should be read only"); } catch (IllegalStateException e) { // expected } try { - readOnly.addOrUpdate("bogus", random.nextBoolean(), random.nextBoolean()); + readOnly.addOrUpdate("bogus", getRandom().nextBoolean(), getRandom().nextBoolean()); fail("instance should be read only"); } catch (IllegalStateException e) { // expected } try { - readOnly.addOrUpdate("bogus", random.nextBoolean(), random.nextBoolean(), - random.nextBoolean()); + readOnly.addOrUpdate("bogus", getRandom().nextBoolean(), getRandom().nextBoolean(), + getRandom().nextBoolean()); fail("instance should be read only"); } catch (IllegalStateException e) { // expected } try { - readOnly.addOrUpdate("bogus", random.nextBoolean(), random.nextBoolean(), - random.nextBoolean(), - random.nextBoolean(), random.nextBoolean() ? IndexOptions.DOCS_ONLY : IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, null, null); + readOnly.addOrUpdate("bogus", getRandom().nextBoolean(), getRandom().nextBoolean(), + getRandom().nextBoolean(), + getRandom().nextBoolean(), getRandom().nextBoolean() ? 
IndexOptions.DOCS_ONLY : IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, null, null); fail("instance should be read only"); } catch (IllegalStateException e) { // expected } try { - readOnly.addOrUpdate(Arrays.asList("a", "b", "c"), random.nextBoolean()); + readOnly.addOrUpdate(Arrays.asList("a", "b", "c"), getRandom().nextBoolean()); fail("instance should be read only"); } catch (IllegalStateException e) { // expected diff --git lucene/core/src/test/org/apache/lucene/index/TestFieldsReader.java lucene/core/src/test/org/apache/lucene/index/TestFieldsReader.java index af66f9d..9460885 100644 --- lucene/core/src/test/org/apache/lucene/index/TestFieldsReader.java +++ lucene/core/src/test/org/apache/lucene/index/TestFieldsReader.java @@ -57,7 +57,7 @@ public class TestFieldsReader extends LuceneTestCase { DocHelper.setupDoc(testDoc); _TestUtil.add(testDoc, fieldInfos); dir = newDirectory(); - IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()); + IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getStaticRandom())).setMergePolicy(newLogMergePolicy()); ((LogMergePolicy) conf.getMergePolicy()).setUseCompoundFile(false); IndexWriter writer = new IndexWriter(dir, conf); writer.addDocument(testDoc); @@ -195,7 +195,7 @@ public class TestFieldsReader extends LuceneTestCase { try { Directory dir = new FaultyFSDirectory(indexDir); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE)); + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setOpenMode(OpenMode.CREATE)); for(int i=0;i<2;i++) writer.addDocument(testDoc); writer.forceMerge(1); @@ -232,7 +232,7 @@ public class TestFieldsReader extends LuceneTestCase { public void testNumericField() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, dir); + RandomIndexWriter w = new RandomIndexWriter(getRandom(), dir); final int numDocs = atLeast(500); final Number[] answers = new Number[numDocs]; final NumericType[] typeAnswers = new NumericType[numDocs]; @@ -242,16 +242,16 @@ public class TestFieldsReader extends LuceneTestCase { final Field sf; final Number answer; final NumericType typeAnswer; - if (random.nextBoolean()) { + if (getRandom().nextBoolean()) { // float/double - if (random.nextBoolean()) { - final float f = random.nextFloat(); + if (getRandom().nextBoolean()) { + final float f = getRandom().nextFloat(); answer = Float.valueOf(f); nf = new FloatField("nf", f); sf = new StoredField("nf", f); typeAnswer = NumericType.FLOAT; } else { - final double d = random.nextDouble(); + final double d = getRandom().nextDouble(); answer = Double.valueOf(d); nf = new DoubleField("nf", d); sf = new StoredField("nf", d); @@ -259,14 +259,14 @@ public class TestFieldsReader extends LuceneTestCase { } } else { // int/long - if (random.nextBoolean()) { - final int i = random.nextInt(); + if (getRandom().nextBoolean()) { + final int i = getRandom().nextInt(); answer = Integer.valueOf(i); nf = new IntField("nf", i); sf = new StoredField("nf", i); typeAnswer = NumericType.INT; } else { - final long l = random.nextLong(); + final long l = getRandom().nextLong(); answer = Long.valueOf(l); nf = new LongField("nf", l); sf = new StoredField("nf", l); @@ -302,7 +302,7 @@ public class TestFieldsReader extends LuceneTestCase { public void testIndexedBit() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter w = 
new RandomIndexWriter(random, dir); + RandomIndexWriter w = new RandomIndexWriter(getRandom(), dir); Document doc = new Document(); FieldType onlyStored = new FieldType(); onlyStored.setStored(true); diff --git lucene/core/src/test/org/apache/lucene/index/TestFilterAtomicReader.java lucene/core/src/test/org/apache/lucene/index/TestFilterAtomicReader.java index f6caef6..247d975 100644 --- lucene/core/src/test/org/apache/lucene/index/TestFilterAtomicReader.java +++ lucene/core/src/test/org/apache/lucene/index/TestFilterAtomicReader.java @@ -131,7 +131,7 @@ public class TestFilterAtomicReader extends LuceneTestCase { public void testFilterIndexReader() throws Exception { Directory directory = newDirectory(); - IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); Document d1 = new Document(); d1.add(newField("default","one two", TextField.TYPE_STORED)); @@ -152,7 +152,7 @@ public class TestFilterAtomicReader extends LuceneTestCase { // We mess with the postings so this can fail: ((MockDirectoryWrapper) target).setCrossCheckTermVectorsOnClose(false); - writer = new IndexWriter(target, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + writer = new IndexWriter(target, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); IndexReader reader = new TestReader(IndexReader.open(directory)); writer.addIndexes(reader); writer.close(); diff --git lucene/core/src/test/org/apache/lucene/index/TestFlex.java lucene/core/src/test/org/apache/lucene/index/TestFlex.java index e60d8f3..b556d7b 100644 --- lucene/core/src/test/org/apache/lucene/index/TestFlex.java +++ lucene/core/src/test/org/apache/lucene/index/TestFlex.java @@ -33,7 +33,7 @@ public class TestFlex extends LuceneTestCase { IndexWriter w = new IndexWriter( d, - new IndexWriterConfig(Version.LUCENE_31, new MockAnalyzer(random)). + new IndexWriterConfig(Version.LUCENE_31, new MockAnalyzer(getRandom())). setMaxBufferedDocs(7) ); @@ -65,7 +65,7 @@ public class TestFlex extends LuceneTestCase { public void testTermOrd() throws Exception { Directory d = newDirectory(); IndexWriter w = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)).setCodec(_TestUtil.alwaysPostingsFormat(new Lucene40PostingsFormat()))); + new MockAnalyzer(getRandom())).setCodec(_TestUtil.alwaysPostingsFormat(new Lucene40PostingsFormat()))); Document doc = new Document(); doc.add(newField("f", "a b c", TextField.TYPE_UNSTORED)); w.addDocument(doc); diff --git lucene/core/src/test/org/apache/lucene/index/TestFlushByRamOrCountsPolicy.java lucene/core/src/test/org/apache/lucene/index/TestFlushByRamOrCountsPolicy.java index a85589d..36decb6 100644 --- lucene/core/src/test/org/apache/lucene/index/TestFlushByRamOrCountsPolicy.java +++ lucene/core/src/test/org/apache/lucene/index/TestFlushByRamOrCountsPolicy.java @@ -39,7 +39,7 @@ public class TestFlushByRamOrCountsPolicy extends LuceneTestCase { @BeforeClass public static void beforeClass() throws Exception { - lineDocFile = new LineFileDocs(random); + lineDocFile = new LineFileDocs(getStaticRandom()); } @AfterClass @@ -51,14 +51,14 @@ public class TestFlushByRamOrCountsPolicy extends LuceneTestCase { public void testFlushByRam() throws CorruptIndexException, LockObtainFailedException, IOException, InterruptedException { final double ramBuffer = (TEST_NIGHTLY ? 
1 : 10) + atLeast(2) - + random.nextDouble(); - runFlushByRam(1 + random.nextInt(TEST_NIGHTLY ? 5 : 1), ramBuffer, false); + + getRandom().nextDouble(); + runFlushByRam(1 + getRandom().nextInt(TEST_NIGHTLY ? 5 : 1), ramBuffer, false); } public void testFlushByRamLargeBuffer() throws CorruptIndexException, LockObtainFailedException, IOException, InterruptedException { // with a 256 mb ram buffer we should never stall - runFlushByRam(1 + random.nextInt(TEST_NIGHTLY ? 5 : 1), 256.d, true); + runFlushByRam(1 + getRandom().nextInt(TEST_NIGHTLY ? 5 : 1), 256.d, true); } protected void runFlushByRam(int numThreads, double maxRamMB, @@ -69,7 +69,7 @@ public class TestFlushByRamOrCountsPolicy extends LuceneTestCase { Directory dir = newDirectory(); MockDefaultFlushPolicy flushPolicy = new MockDefaultFlushPolicy(); IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)).setFlushPolicy(flushPolicy); + new MockAnalyzer(getRandom())).setFlushPolicy(flushPolicy); final int numDWPT = 1 + atLeast(2); DocumentsWriterPerThreadPool threadPool = new ThreadAffinityDocumentsWriterThreadPool( numDWPT); @@ -125,7 +125,7 @@ public class TestFlushByRamOrCountsPolicy extends LuceneTestCase { Directory dir = newDirectory(); MockDefaultFlushPolicy flushPolicy = new MockDefaultFlushPolicy(); IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)).setFlushPolicy(flushPolicy); + new MockAnalyzer(getRandom())).setFlushPolicy(flushPolicy); final int numDWPT = 1 + atLeast(2); DocumentsWriterPerThreadPool threadPool = new ThreadAffinityDocumentsWriterThreadPool( @@ -168,16 +168,16 @@ public class TestFlushByRamOrCountsPolicy extends LuceneTestCase { } public void testRandom() throws IOException, InterruptedException { - final int numThreads = 1 + random.nextInt(8); + final int numThreads = 1 + getRandom().nextInt(8); final int numDocumentsToIndex = 50 + atLeast(70); AtomicInteger numDocs = new AtomicInteger(numDocumentsToIndex); Directory dir = newDirectory(); IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)); + new MockAnalyzer(getRandom())); MockDefaultFlushPolicy flushPolicy = new MockDefaultFlushPolicy(); iwc.setFlushPolicy(flushPolicy); - final int numDWPT = 1 + random.nextInt(8); + final int numDWPT = 1 + getRandom().nextInt(8); DocumentsWriterPerThreadPool threadPool = new ThreadAffinityDocumentsWriterThreadPool( numDWPT); iwc.setIndexerThreadPool(threadPool); @@ -230,15 +230,15 @@ public class TestFlushByRamOrCountsPolicy extends LuceneTestCase { public void testStallControl() throws InterruptedException, CorruptIndexException, LockObtainFailedException, IOException { - int[] numThreads = new int[] { 4 + random.nextInt(8), 1 }; - final int numDocumentsToIndex = 50 + random.nextInt(50); + int[] numThreads = new int[] { 4 + getRandom().nextInt(8), 1 }; + final int numDocumentsToIndex = 50 + getRandom().nextInt(50); for (int i = 0; i < numThreads.length; i++) { AtomicInteger numDocs = new AtomicInteger(numDocumentsToIndex); MockDirectoryWrapper dir = newDirectory(); // mock a very slow harddisk sometimes here so that flushing is very slow dir.setThrottling(MockDirectoryWrapper.Throttling.SOMETIMES); IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)); + new MockAnalyzer(getRandom())); iwc.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH); iwc.setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH); FlushPolicy flushPolicy = new 
FlushByRamOrCountsPolicy(); diff --git lucene/core/src/test/org/apache/lucene/index/TestForTooMuchCloning.java lucene/core/src/test/org/apache/lucene/index/TestForTooMuchCloning.java index 16f0907..dd06033 100644 --- lucene/core/src/test/org/apache/lucene/index/TestForTooMuchCloning.java +++ lucene/core/src/test/org/apache/lucene/index/TestForTooMuchCloning.java @@ -41,13 +41,13 @@ public class TestForTooMuchCloning extends LuceneTestCase { final MockDirectoryWrapper dir = newDirectory(); final TieredMergePolicy tmp = new TieredMergePolicy(); tmp.setMaxMergeAtOnce(2); - final RandomIndexWriter w = new RandomIndexWriter(random, dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2).setMergePolicy(tmp)); + final RandomIndexWriter w = new RandomIndexWriter(getRandom(), dir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setMaxBufferedDocs(2).setMergePolicy(tmp)); final int numDocs = 20; for(int docs=0;docs 0) { diff --git lucene/core/src/test/org/apache/lucene/index/TestIndexInput.java lucene/core/src/test/org/apache/lucene/index/TestIndexInput.java index 4c6b881..c7ec600 100644 --- lucene/core/src/test/org/apache/lucene/index/TestIndexInput.java +++ lucene/core/src/test/org/apache/lucene/index/TestIndexInput.java @@ -99,10 +99,10 @@ public class TestIndexInput extends LuceneTestCase { // this test checks the raw IndexInput methods as it uses RAMIndexInput which extends IndexInput directly public void testRawIndexInputRead() throws IOException { final RAMDirectory dir = new RAMDirectory(); - final IndexOutput os = dir.createOutput("foo", newIOContext(random)); + final IndexOutput os = dir.createOutput("foo", newIOContext(getRandom())); os.writeBytes(READ_TEST_BYTES, READ_TEST_BYTES.length); os.close(); - final IndexInput is = dir.openInput("foo", newIOContext(random)); + final IndexInput is = dir.openInput("foo", newIOContext(getRandom())); checkReads(is); is.close(); dir.close(); diff --git lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java index 45e7c99..fb54010 100644 --- lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java +++ lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java @@ -78,7 +78,7 @@ public class TestIndexWriter extends LuceneTestCase { try { IndexWriterConfig.setDefaultWriteLockTimeout(2000); assertEquals(2000, IndexWriterConfig.getDefaultWriteLockTimeout()); - writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); } finally { IndexWriterConfig.setDefaultWriteLockTimeout(savedWriteLockTimeout); } @@ -91,7 +91,7 @@ public class TestIndexWriter extends LuceneTestCase { writer.close(); // delete 40 documents - writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES)); + writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES)); for (i = 0; i < 40; i++) { writer.deleteDocuments(new Term("id", ""+i)); } @@ -102,7 +102,7 @@ public class TestIndexWriter extends LuceneTestCase { reader.close(); // merge the index down and check that the new doc count is correct - writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + writer 
= new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); assertEquals(60, writer.numDocs()); writer.forceMerge(1); assertEquals(60, writer.maxDoc()); @@ -117,7 +117,7 @@ public class TestIndexWriter extends LuceneTestCase { // make sure opening a new index for create over // this existing one works correctly: - writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE)); + writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setOpenMode(OpenMode.CREATE)); assertEquals(0, writer.maxDoc()); assertEquals(0, writer.numDocs()); writer.close(); @@ -143,7 +143,7 @@ public class TestIndexWriter extends LuceneTestCase { public static void assertNoUnreferencedFiles(Directory dir, String message) throws IOException { String[] startFiles = dir.listAll(); - new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))).rollback(); + new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getStaticRandom()))).rollback(); String[] endFiles = dir.listAll(); Arrays.sort(startFiles); @@ -172,7 +172,7 @@ public class TestIndexWriter extends LuceneTestCase { Directory dir = newDirectory(); // add one document & close writer - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); addDoc(writer); writer.close(); @@ -181,7 +181,7 @@ public class TestIndexWriter extends LuceneTestCase { assertEquals("should be one document", reader.numDocs(), 1); // now open index for create: - writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE)); + writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setOpenMode(OpenMode.CREATE)); assertEquals("should be zero documents", writer.maxDoc(), 0); addDoc(writer); writer.close(); @@ -200,7 +200,7 @@ public class TestIndexWriter extends LuceneTestCase { IndexWriter writer = null; - writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); addDoc(writer); // close @@ -218,7 +218,7 @@ public class TestIndexWriter extends LuceneTestCase { public void testIndexNoDocuments() throws IOException { MockDirectoryWrapper dir = newDirectory(); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); writer.commit(); writer.close(); @@ -227,7 +227,7 @@ public class TestIndexWriter extends LuceneTestCase { assertEquals(0, reader.numDocs()); reader.close(); - writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); + writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setOpenMode(OpenMode.APPEND)); writer.commit(); writer.close(); @@ -240,7 +240,7 @@ public class TestIndexWriter extends LuceneTestCase { public void testManyFields() throws IOException { MockDirectoryWrapper dir = newDirectory(); - IndexWriter writer = new 
IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(10)); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setMaxBufferedDocs(10)); for(int j=0;j<100;j++) { Document doc = new Document(); doc.add(newField("a"+j, "aaa" + j, storedTextType)); @@ -272,7 +272,7 @@ public class TestIndexWriter extends LuceneTestCase { MockDirectoryWrapper dir = newDirectory(); IndexWriter writer = new IndexWriter( dir, - newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())). setRAMBufferSizeMB(0.000001). setMergePolicy(newLogMergePolicy(10)) ); @@ -295,7 +295,7 @@ public class TestIndexWriter extends LuceneTestCase { // maxBufferedDocs in a write session public void testChangingRAMBuffer() throws IOException { Directory dir = newDirectory(); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); writer.getConfig().setMaxBufferedDocs(10); writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH); @@ -349,7 +349,7 @@ public class TestIndexWriter extends LuceneTestCase { public void testChangingRAMBuffer2() throws IOException { Directory dir = newDirectory(); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); writer.getConfig().setMaxBufferedDocs(10); writer.getConfig().setMaxBufferedDeleteTerms(10); writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH); @@ -410,7 +410,7 @@ public class TestIndexWriter extends LuceneTestCase { public void testDiverseDocs() throws IOException { MockDirectoryWrapper dir = newDirectory(); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setRAMBufferSizeMB(0.5)); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setRAMBufferSizeMB(0.5)); int n = atLeast(1); for(int i=0;i data = new HashMap(); @@ -637,7 +637,7 @@ public class TestIndexWriterCommit extends LuceneTestCase { assertEquals("test1", r.getIndexCommit().getUserData().get("label")); r.close(); - w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); w.forceMerge(1); w.close(); diff --git lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java index efcec8b..ea02551 100644 --- lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java +++ lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java @@ -51,7 +51,7 @@ public class TestIndexWriterConfig extends LuceneTestCase { @Test public void testDefaults() throws Exception { - IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); + IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())); assertEquals(MockAnalyzer.class, conf.getAnalyzer().getClass()); assertNull(conf.getIndexCommit()); 
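// The call-site rewrite is uniform across these files and preserves behavior
// at each site, for example:
//   before: new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
//   after:  new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))
// A compact hypothetical test method (not part of the patch) in the rewritten
// style; it assumes it lives in a LuceneTestCase subclass with the same
// helpers and imports as the files in this patch (newDirectory,
// newIndexWriterConfig, TEST_VERSION_CURRENT, MockAnalyzer):

public void testRewrittenCallSites() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf =
      newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()));
  // Any randomized value now comes from the per-test source as well.
  conf.setMaxBufferedDocs(2 + getRandom().nextInt(10));
  IndexWriter writer = new IndexWriter(dir, conf);
  writer.addDocument(new Document());
  writer.close();
  dir.close();
}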
assertEquals(KeepOnlyLastCommitDeletionPolicy.class, conf.getIndexDeletionPolicy().getClass()); @@ -138,7 +138,7 @@ public class TestIndexWriterConfig extends LuceneTestCase { @Test public void testToString() throws Exception { - String str = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).toString(); + String str = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).toString(); for (Field f : IndexWriterConfig.class.getDeclaredFields()) { int modifiers = f.getModifiers(); if (Modifier.isStatic(modifiers) && Modifier.isFinal(modifiers)) { @@ -155,7 +155,7 @@ public class TestIndexWriterConfig extends LuceneTestCase { @Test public void testClone() throws Exception { - IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); + IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())); IndexWriterConfig clone = (IndexWriterConfig) conf.clone(); // Clone is shallow since not all parameters are cloneable. @@ -167,7 +167,7 @@ public class TestIndexWriterConfig extends LuceneTestCase { @Test public void testInvalidValues() throws Exception { - IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); + IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())); // Test IndexDeletionPolicy assertEquals(KeepOnlyLastCommitDeletionPolicy.class, conf.getIndexDeletionPolicy().getClass()); diff --git lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java index 526094e..d62aef9 100644 --- lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java +++ lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java @@ -52,7 +52,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { Directory dir = newDirectory(); IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDeleteTerms(1)); + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)).setMaxBufferedDeleteTerms(1)); FieldType custom1 = new FieldType(); custom1.setStored(true); @@ -91,7 +91,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { Directory dir = newDirectory(); IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2) + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2) .setMaxBufferedDeleteTerms(2)); int id = 0; int value = 100; @@ -124,7 +124,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { public void testMaxBufferedDeletes() throws IOException { Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDeleteTerms(1)); + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)).setMaxBufferedDeleteTerms(1)); writer.addDocument(new Document()); writer.deleteDocuments(new Term("foobar", "1")); @@ -143,7 +143,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { } Directory dir = newDirectory(); IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, 
MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(4) + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(4) .setMaxBufferedDeleteTerms(4)); int id = 0; int value = 100; @@ -181,7 +181,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { public void testBothDeletes() throws IOException { Directory dir = newDirectory(); IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(100) + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(100) .setMaxBufferedDeleteTerms(100)); int id = 0; @@ -215,7 +215,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { public void testBatchDeletes() throws IOException { Directory dir = newDirectory(); IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2) + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2) .setMaxBufferedDeleteTerms(2)); int id = 0; @@ -258,7 +258,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { public void testDeleteAll() throws IOException { Directory dir = newDirectory(); IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2) + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2) .setMaxBufferedDeleteTerms(2)); int id = 0; @@ -304,7 +304,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { public void testDeleteAllRollback() throws IOException { Directory dir = newDirectory(); IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2) + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2) .setMaxBufferedDeleteTerms(2)); int id = 0; @@ -341,7 +341,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { public void testDeleteAllNRT() throws IOException { Directory dir = newDirectory(); IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2) + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2) .setMaxBufferedDeleteTerms(2)); int id = 0; @@ -429,7 +429,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { MockDirectoryWrapper startDir = newDirectory(); // TODO: find the resource leak that only occurs sometimes here. 
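// One subtlety: helpers that consume randomness on other threads should not
// share the per-test Random directly. The MockIndexWriter hunk just below
// seeds a private instance from it instead:
//   Random r = new Random(getRandom().nextLong());
// which stays reproducible from the same master seed without concurrent use
// of the test's own instance. The same idea as a small sketch (hypothetical
// helper, not from the patch):

import java.util.Random;

final class DetachedRandoms {
  private DetachedRandoms() {}

  /** Derives an independent, seed-linked Random safe to hand to a thread. */
  static Random detachedFrom(Random testRandom) {
    return new Random(testRandom.nextLong());
  }
}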
startDir.setNoDeleteOpenFile(false); - IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))); + IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false))); for (int i = 0; i < 157; i++) { Document d = new Document(); d.add(newField("id", Integer.toString(i), StringField.TYPE_STORED)); @@ -450,11 +450,11 @@ public class TestIndexWriterDelete extends LuceneTestCase { if (VERBOSE) { System.out.println("TEST: cycle"); } - MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory(startDir, newIOContext(random))); + MockDirectoryWrapper dir = new MockDirectoryWrapper(getRandom(), new RAMDirectory(startDir, newIOContext(getRandom()))); dir.setPreventDoubleWrite(false); IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)) + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)) .setMaxBufferedDocs(1000) .setMaxBufferedDeleteTerms(1000) .setMergeScheduler(new ConcurrentMergeScheduler())); @@ -691,7 +691,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { MockDirectoryWrapper dir = newDirectory(); IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMaxBufferedDeleteTerms(2).setReaderPooling(false).setMergePolicy(newLogMergePolicy())); + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)).setMaxBufferedDeleteTerms(2).setReaderPooling(false).setMergePolicy(newLogMergePolicy())); LogMergePolicy lmp = (LogMergePolicy) modifier.getConfig().getMergePolicy(); lmp.setUseCompoundFile(true); @@ -815,7 +815,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { String[] text = { "Amsterdam", "Venice" }; MockDirectoryWrapper dir = newDirectory(); - IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))); + IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false))); modifier.commit(); dir.failOn(failure.reset()); @@ -845,7 +845,7 @@ public class TestIndexWriterDelete extends LuceneTestCase { public void testDeleteNullQuery() throws IOException { Directory dir = newDirectory(); - IndexWriter modifier = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))); + IndexWriter modifier = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false))); for (int i = 0; i < 5; i++) { addDoc(modifier, i, 2*i); @@ -860,23 +860,23 @@ public class TestIndexWriterDelete extends LuceneTestCase { public void testDeleteAllSlowly() throws Exception { final Directory dir = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, dir); + RandomIndexWriter w = new RandomIndexWriter(getRandom(), dir); final int NUM_DOCS = atLeast(1000); final List ids = new ArrayList(NUM_DOCS); for(int id=0;id doFail = new ThreadLocal(); private class MockIndexWriter extends IndexWriter { - Random r = new Random(random.nextLong()); + Random r = new Random(getRandom().nextLong()); public MockIndexWriter(Directory dir, IndexWriterConfig 
conf) throws IOException { super(dir, conf); @@ -223,7 +223,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { } MockDirectoryWrapper dir = newDirectory(); - MockAnalyzer analyzer = new MockAnalyzer(random); + MockAnalyzer analyzer = new MockAnalyzer(getRandom()); analyzer.setEnableChecks(false); // disable workflow checking as we forcefully close() in exceptional cases. MockIndexWriter writer = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer) .setRAMBufferSizeMB(0.1).setMergeScheduler(new ConcurrentMergeScheduler())); @@ -266,7 +266,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { public void testRandomExceptionsThreads() throws Throwable { MockDirectoryWrapper dir = newDirectory(); - MockAnalyzer analyzer = new MockAnalyzer(random); + MockAnalyzer analyzer = new MockAnalyzer(getRandom()); analyzer.setEnableChecks(false); // disable workflow checking as we forcefully close() in exceptional cases. MockIndexWriter writer = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer) .setRAMBufferSizeMB(0.2).setMergeScheduler(new ConcurrentMergeScheduler())); @@ -354,7 +354,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { public void testExceptionDocumentsWriterInit() throws IOException { Directory dir = newDirectory(); - MockIndexWriter2 w = new MockIndexWriter2(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + MockIndexWriter2 w = new MockIndexWriter2(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); Document doc = new Document(); doc.add(newField("field", "a field", TextField.TYPE_STORED)); w.addDocument(doc); @@ -372,7 +372,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { // LUCENE-1208 public void testExceptionJustBeforeFlush() throws IOException { Directory dir = newDirectory(); - MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2)); + MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setMaxBufferedDocs(2)); Document doc = new Document(); doc.add(newField("field", "a field", TextField.TYPE_STORED)); w.addDocument(doc); @@ -422,7 +422,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { // LUCENE-1210 public void testExceptionOnMergeInit() throws IOException { Directory dir = newDirectory(); - IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)) + IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())) .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler()).setMergePolicy(newLogMergePolicy()); ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2); MockIndexWriter3 w = new MockIndexWriter3(dir, conf); @@ -501,7 +501,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { // Make sure the doc that hit the exception was marked // as deleted: - DocsEnum tdocs = _TestUtil.docs(random, reader, + DocsEnum tdocs = _TestUtil.docs(getRandom(), reader, t.field(), new BytesRef(t.text()), MultiFields.getLiveDocs(reader), @@ -561,7 +561,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { failure.setDoFail(); dir.failOn(failure); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2)); + IndexWriter writer = new IndexWriter(dir, 
newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setMaxBufferedDocs(2)); Document doc = new Document(); String contents = "aa bb cc dd ee ff gg hh ii jj kk"; doc.add(newField("content", contents, TextField.TYPE_UNSTORED)); @@ -698,7 +698,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { final IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(-1) .setMergePolicy( - random.nextBoolean() ? NoMergePolicy.COMPOUND_FILES + getRandom().nextBoolean() ? NoMergePolicy.COMPOUND_FILES : NoMergePolicy.NO_COMPOUND_FILES)); // don't use a merge policy here they depend on the DWPThreadPool and its max thread states etc. final int finalI = i; @@ -824,7 +824,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { IndexWriter writer = new IndexWriter( dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())). setMaxBufferedDocs(2). setMergeScheduler(new ConcurrentMergeScheduler()). setMergePolicy(newLogMergePolicy(5)) @@ -907,7 +907,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { MockDirectoryWrapper dir = newDirectory(); dir.setFailOnCreateOutput(false); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random))); + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); Document doc = new Document(); doc.add(newField("field", "a field", TextField.TYPE_STORED)); w.addDocument(doc); @@ -929,7 +929,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { public void testForceMergeExceptions() throws IOException { Directory startDir = newDirectory(); - IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2).setMergePolicy(newLogMergePolicy()); + IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setMaxBufferedDocs(2).setMergePolicy(newLogMergePolicy()); ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(100); IndexWriter w = new IndexWriter(startDir, conf); for(int i=0;i<27;i++) @@ -941,8 +941,8 @@ public class TestIndexWriterExceptions extends LuceneTestCase { if (VERBOSE) { System.out.println("TEST: iter " + i); } - MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory(startDir, newIOContext(random))); - conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergeScheduler(new ConcurrentMergeScheduler()); + MockDirectoryWrapper dir = new MockDirectoryWrapper(getRandom(), new RAMDirectory(startDir, newIOContext(getRandom()))); + conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setMergeScheduler(new ConcurrentMergeScheduler()); ((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions(); w = new IndexWriter(dir, conf); dir.setRandomIOExceptionRate(0.5); @@ -965,7 +965,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { final AtomicBoolean thrown = new AtomicBoolean(false); final Directory dir = newDirectory(); final IndexWriter writer = new IndexWriter(dir, - newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setInfoStream(new InfoStream() { + newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setInfoStream(new InfoStream() { @Override public void message(String component, final String message) { if (message.startsWith("now flush at close") && 
thrown.compareAndSet(false, true)) { @@ -1013,7 +1013,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { // LUCENE-1347 public void testRollbackExceptionHang() throws Throwable { Directory dir = newDirectory(); - MockIndexWriter4 w = new MockIndexWriter4(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + MockIndexWriter4 w = new MockIndexWriter4(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); addDoc(w); w.doFail = true; @@ -1035,7 +1035,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { IndexWriter writer = null; - writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); // add 100 documents for (int i = 0; i < 100; i++) { @@ -1049,8 +1049,8 @@ public class TestIndexWriterExceptions extends LuceneTestCase { assertTrue("segment generation should be > 0 but got " + gen, gen > 0); final String segmentsFileName = SegmentInfos.getLastCommitSegmentsFileName(dir); - IndexInput in = dir.openInput(segmentsFileName, newIOContext(random)); - IndexOutput out = dir.createOutput(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", 1+gen), newIOContext(random)); + IndexInput in = dir.openInput(segmentsFileName, newIOContext(getRandom())); + IndexOutput out = dir.createOutput(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", 1+gen), newIOContext(getRandom())); out.copyBytes(in, in.length()-1); byte b = in.readByte(); out.writeByte((byte) (1+b)); @@ -1077,7 +1077,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase { IndexWriter writer = null; - writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); // add 100 documents for (int i = 0; i < 100; i++) { @@ -1094,8 +1094,8 @@ public class TestIndexWriterExceptions extends LuceneTestCase { String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", 1+gen); - IndexInput in = dir.openInput(fileNameIn, newIOContext(random)); - IndexOutput out = dir.createOutput(fileNameOut, newIOContext(random)); + IndexInput in = dir.openInput(fileNameIn, newIOContext(getRandom())); + IndexOutput out = dir.createOutput(fileNameOut, newIOContext(getRandom())); long length = in.length(); for(int i=0;i<length-1;i++) { final List<Document> docs = new ArrayList<Document>(); - final int numDocs2 = random.nextInt(25); for(int docCount=0;docCount<numDocs2;docCount++) { assertTrue("got invalid freq=" + freq, freq >=1 && freq <= 4); for(int pos=0;pos<freq;pos++) { assertTrue("got invalid freq=" + freq, freq >=1 && freq <= 4); for(int pos=0;pos<freq;pos++) { assertTrue("got invalid freq=" + freq, freq >=1 && freq <= 4); } @@ -415,9 +415,9 @@ public class TestLongPostings extends LuceneTestCase { // advance final int targetDocID; if (docID == -1) { - targetDocID = random.nextInt(NUM_DOCS+1); + targetDocID = getRandom().nextInt(NUM_DOCS+1); } else { - targetDocID = docID + _TestUtil.nextInt(random, 1, NUM_DOCS - docID); + targetDocID = docID + _TestUtil.nextInt(getRandom(), 1, NUM_DOCS - docID); } if (VERBOSE) { System.out.println("TEST: docID=" + docID + "; do advance(" + targetDocID + ")"); @@ -443,7 +443,7 @@ ... break; } - if (random.nextInt(6) == 3 && postings != null) { + if (getRandom().nextInt(6) == 3 && postings != null) { final int freq = postings.freq(); assertTrue("got invalid freq=" + freq, freq >=1 && freq <= 4); } diff --git 
lucene/core/src/test/org/apache/lucene/index/TestMaxTermFrequency.java lucene/core/src/test/org/apache/lucene/index/TestMaxTermFrequency.java index 66637c5..a603ade 100644 --- lucene/core/src/test/org/apache/lucene/index/TestMaxTermFrequency.java +++ lucene/core/src/test/org/apache/lucene/index/TestMaxTermFrequency.java @@ -46,9 +46,9 @@ public class TestMaxTermFrequency extends LuceneTestCase { super.setUp(); dir = newDirectory(); IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random, MockTokenizer.SIMPLE, true)).setMergePolicy(newLogMergePolicy()); + new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true)).setMergePolicy(newLogMergePolicy()); config.setSimilarity(new TestSimilarity()); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, config); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir, config); Document doc = new Document(); Field foo = newField("foo", "", TextField.TYPE_UNSTORED); doc.add(foo); @@ -81,16 +81,16 @@ public class TestMaxTermFrequency extends LuceneTestCase { */ private String addValue() { List<String> terms = new ArrayList<String>(); - int maxCeiling = _TestUtil.nextInt(random, 0, 255); + int maxCeiling = _TestUtil.nextInt(getRandom(), 0, 255); int max = 0; for (char ch = 'a'; ch <= 'z'; ch++) { - int num = _TestUtil.nextInt(random, 0, maxCeiling); + int num = _TestUtil.nextInt(getRandom(), 0, maxCeiling); for (int i = 0; i < num; i++) terms.add(Character.toString(ch)); max = Math.max(max, num); } expected.add(max); - Collections.shuffle(terms, random); + Collections.shuffle(terms, getRandom()); return Arrays.toString(terms.toArray(new String[terms.size()])); } diff --git lucene/core/src/test/org/apache/lucene/index/TestMixedCodecs.java lucene/core/src/test/org/apache/lucene/index/TestMixedCodecs.java index 7d6cbf9..f7be455 100644 --- lucene/core/src/test/org/apache/lucene/index/TestMixedCodecs.java +++ lucene/core/src/test/org/apache/lucene/index/TestMixedCodecs.java @@ -46,8 +46,8 @@ public class TestMixedCodecs extends LuceneTestCase { int docUpto = 0; while (docUpto < NUM_DOCS) { if (docsLeftInThisSegment == 0) { - final IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); - if (random.nextBoolean()) { + final IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())); + if (getRandom().nextBoolean()) { // Make sure we aggressively mix in SimpleText // since it has different impls for all codec // formats... 
@@ -56,8 +56,8 @@ public class TestMixedCodecs extends LuceneTestCase { if (w != null) { w.close(); } - w = new RandomIndexWriter(random, dir, iwc); - docsLeftInThisSegment = _TestUtil.nextInt(random, 10, 100); + w = new RandomIndexWriter(getRandom(), dir, iwc); + docsLeftInThisSegment = _TestUtil.nextInt(getRandom(), 10, 100); } final Document doc = new Document(); doc.add(newField("id", String.valueOf(docUpto), StringField.TYPE_STORED)); @@ -69,11 +69,11 @@ public class TestMixedCodecs extends LuceneTestCase { // Random delete half the docs: final Set<Integer> deleted = new HashSet<Integer>(); while(deleted.size() < NUM_DOCS/2) { - final Integer toDelete = random.nextInt(NUM_DOCS); + final Integer toDelete = getRandom().nextInt(NUM_DOCS); if (!deleted.contains(toDelete)) { deleted.add(toDelete); w.deleteDocuments(new Term("id", String.valueOf(toDelete))); - if (random.nextInt(17) == 6) { + if (getRandom().nextInt(17) == 6) { final IndexReader r = w.getReader(); assertEquals(NUM_DOCS - deleted.size(), r.numDocs()); r.close(); diff --git lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java index 645b172..8a266e3 100644 --- lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java +++ lucene/core/src/test/org/apache/lucene/index/TestMultiFields.java @@ -35,34 +35,34 @@ public class TestMultiFields extends LuceneTestCase { Directory dir = newDirectory(); - IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(NoMergePolicy.COMPOUND_FILES)); + IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setMergePolicy(NoMergePolicy.COMPOUND_FILES)); _TestUtil.keepFullyDeletedSegments(w); Map<BytesRef,List<Integer>> docs = new HashMap<BytesRef,List<Integer>>(); Set<Integer> deleted = new HashSet<Integer>(); List<BytesRef> terms = new ArrayList<BytesRef>(); - int numDocs = _TestUtil.nextInt(random, 1, 100 * RANDOM_MULTIPLIER); + int numDocs = _TestUtil.nextInt(getRandom(), 1, 100 * RANDOM_MULTIPLIER); Document doc = new Document(); Field f = newField("field", "", StringField.TYPE_UNSTORED); doc.add(f); Field id = newField("id", "", StringField.TYPE_UNSTORED); doc.add(id); - boolean onlyUniqueTerms = random.nextBoolean(); + boolean onlyUniqueTerms = getRandom().nextBoolean(); if (VERBOSE) { System.out.println("TEST: onlyUniqueTerms=" + onlyUniqueTerms + " numDocs=" + numDocs); } Set<String> uniqueTerms = new HashSet<String>(); for(int i=0;i<numDocs;i++) { - if (!onlyUniqueTerms && random.nextBoolean() && terms.size() > 0) { + if (!onlyUniqueTerms && getRandom().nextBoolean() && terms.size() > 0) { // re-use existing term - BytesRef term = terms.get(random.nextInt(terms.size())); docs.get(term).add(i); f.setStringValue(term.utf8ToString()); } else { - String s = _TestUtil.randomUnicodeString(random, 10); BytesRef term = new BytesRef(s); if (!docs.containsKey(term)) { docs.put(term, new ArrayList<Integer>()); @@ -74,11 +74,11 @@ public class TestMultiFields extends LuceneTestCase { } id.setStringValue(""+i); w.addDocument(doc); - if (random.nextInt(4) == 1) { + if (getRandom().nextInt(4) == 1) { w.commit(); } - if (i > 0 && random.nextInt(20) == 1) { - int delID = random.nextInt(i); + if (i > 0 && getRandom().nextInt(20) == 1) { + int delID = getRandom().nextInt(i); deleted.add(delID); w.deleteDocuments(new Term("id", ""+delID)); if (VERBOSE) { @@ -115,12 +115,12 @@ public class TestMultiFields extends LuceneTestCase { } for(int i=0;i<100;i++) { - BytesRef term = 
terms.get(random.nextInt(terms.size())); + BytesRef term = terms.get(getRandom().nextInt(terms.size())); if (VERBOSE) { System.out.println("TEST: seek term="+ UnicodeUtil.toHexString(term.utf8ToString()) + " " + term); } - DocsEnum docsEnum = _TestUtil.docs(random, reader, "field", term, liveDocs, null, false); + DocsEnum docsEnum = _TestUtil.docs(getRandom(), reader, "field", term, liveDocs, null, false); assertNotNull(docsEnum); for(int docID : docs.get(term)) { @@ -153,7 +153,7 @@ public class TestMultiFields extends LuceneTestCase { public void testSeparateEnums() throws Exception { Directory dir = newDirectory(); - IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); Document d = new Document(); d.add(newField("f", "j", StringField.TYPE_UNSTORED)); w.addDocument(d); @@ -161,8 +161,8 @@ public class TestMultiFields extends LuceneTestCase { w.addDocument(d); IndexReader r = w.getReader(); w.close(); - DocsEnum d1 = _TestUtil.docs(random, r, "f", new BytesRef("j"), null, null, false); - DocsEnum d2 = _TestUtil.docs(random, r, "f", new BytesRef("j"), null, null, false); + DocsEnum d1 = _TestUtil.docs(getRandom(), r, "f", new BytesRef("j"), null, null, false); + DocsEnum d2 = _TestUtil.docs(getRandom(), r, "f", new BytesRef("j"), null, null, false); assertEquals(0, d1.nextDoc()); assertEquals(0, d2.nextDoc()); r.close(); diff --git lucene/core/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java lucene/core/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java index 82e19f3..f40432d 100644 --- lucene/core/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java +++ lucene/core/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java @@ -48,7 +48,7 @@ public class TestMultiLevelSkipList extends LuceneTestCase { class CountingRAMDirectory extends MockDirectoryWrapper { public CountingRAMDirectory(Directory delegate) { - super(random, delegate); + super(getRandom(), delegate); } @Override diff --git lucene/core/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java lucene/core/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java index 9a991ad..462f513 100644 --- lucene/core/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java +++ lucene/core/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java @@ -32,7 +32,7 @@ public class TestNRTReaderWithThreads extends LuceneTestCase { Directory mainDir = newDirectory(); IndexWriter writer = new IndexWriter( mainDir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())). setMaxBufferedDocs(10). 
setMergePolicy(newLogMergePolicy(false,2)) ); @@ -76,7 +76,7 @@ public class TestNRTReaderWithThreads extends LuceneTestCase { int delCount = 0; int addCount = 0; int type; - final Random r = new Random(random.nextLong()); + final Random r = new Random(getRandom().nextLong()); public RunThread(int type, IndexWriter writer) { this.type = type; diff --git lucene/core/src/test/org/apache/lucene/index/TestNRTThreads.java lucene/core/src/test/org/apache/lucene/index/TestNRTThreads.java index 7d7e2fc..d55e971 100644 --- lucene/core/src/test/org/apache/lucene/index/TestNRTThreads.java +++ lucene/core/src/test/org/apache/lucene/index/TestNRTThreads.java @@ -39,7 +39,7 @@ public class TestNRTThreads extends ThreadedIndexingAndSearchingTestCase { DirectoryReader r = IndexReader.open(writer, true); while (System.currentTimeMillis() < stopTime && !failed.get()) { - if (random.nextBoolean()) { + if (getRandom().nextBoolean()) { if (VERBOSE) { System.out.println("TEST: now reopen r=" + r); } @@ -106,7 +106,7 @@ public class TestNRTThreads extends ThreadedIndexingAndSearchingTestCase { @Override protected IndexSearcher getFinalSearcher() throws Exception { final IndexReader r2; - if (random.nextBoolean()) { + if (getRandom().nextBoolean()) { r2 = writer.getReader(); } else { writer.commit(); diff --git lucene/core/src/test/org/apache/lucene/index/TestNeverDelete.java lucene/core/src/test/org/apache/lucene/index/TestNeverDelete.java index d76831a..3bd60ed 100644 --- lucene/core/src/test/org/apache/lucene/index/TestNeverDelete.java +++ lucene/core/src/test/org/apache/lucene/index/TestNeverDelete.java @@ -42,15 +42,15 @@ public class TestNeverDelete extends LuceneTestCase { // them. This is still worth running on Windows since // some files the IR opens and closes. d.setNoDeleteOpenFile(false); - final RandomIndexWriter w = new RandomIndexWriter(random, + final RandomIndexWriter w = new RandomIndexWriter(getRandom(), d, newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)) + new MockAnalyzer(getRandom())) .setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE)); - w.w.getConfig().setMaxBufferedDocs(_TestUtil.nextInt(random, 5, 30)); + w.w.getConfig().setMaxBufferedDocs(_TestUtil.nextInt(getRandom(), 5, 30)); w.commit(); - Thread[] indexThreads = new Thread[random.nextInt(4)]; + Thread[] indexThreads = new Thread[getRandom().nextInt(4)]; final long stopTime = System.currentTimeMillis() + atLeast(1000); for (int x=0; x < indexThreads.length; x++) { indexThreads[x] = new Thread() { diff --git lucene/core/src/test/org/apache/lucene/index/TestNewestSegment.java lucene/core/src/test/org/apache/lucene/index/TestNewestSegment.java index 56ec3f3..eb792b6 100644 --- lucene/core/src/test/org/apache/lucene/index/TestNewestSegment.java +++ lucene/core/src/test/org/apache/lucene/index/TestNewestSegment.java @@ -24,7 +24,7 @@ import org.apache.lucene.util.LuceneTestCase; public class TestNewestSegment extends LuceneTestCase { public void testNewestSegment() throws Exception { Directory directory = newDirectory(); - IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); assertNull(writer.newestSegment()); writer.close(); directory.close(); diff --git lucene/core/src/test/org/apache/lucene/index/TestNoDeletionPolicy.java lucene/core/src/test/org/apache/lucene/index/TestNoDeletionPolicy.java index 2517eac..31063db 100644 --- 
lucene/core/src/test/org/apache/lucene/index/TestNoDeletionPolicy.java +++ lucene/core/src/test/org/apache/lucene/index/TestNoDeletionPolicy.java @@ -69,7 +69,7 @@ public class TestNoDeletionPolicy extends LuceneTestCase { public void testAllCommitsRemain() throws Exception { Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random)) + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())) .setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE)); for (int i = 0; i < 10; i++) { Document doc = new Document(); diff --git lucene/core/src/test/org/apache/lucene/index/TestNorms.java lucene/core/src/test/org/apache/lucene/index/TestNorms.java index 4b04285..0bd2eb9 100755 --- lucene/core/src/test/org/apache/lucene/index/TestNorms.java +++ lucene/core/src/test/org/apache/lucene/index/TestNorms.java @@ -60,9 +60,9 @@ public class TestNorms extends LuceneTestCase { // LUCENE-1260 public void testCustomEncoder() throws Exception { Directory dir = newDirectory(); - IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); + IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())); config.setSimilarity(new CustomNormEncodingSimilarity()); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, config); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir, config); Document doc = new Document(); Field foo = newField("foo", "", TextField.TYPE_UNSTORED); Field bar = newField("bar", "", TextField.TYPE_UNSTORED); @@ -116,11 +116,11 @@ public class TestNorms extends LuceneTestCase { */ public void testNormsNotPresent() throws IOException { Directory dir = newDirectory(); - boolean firstWriteNorm = random.nextBoolean(); + boolean firstWriteNorm = getRandom().nextBoolean(); buildIndex(dir, firstWriteNorm); Directory otherDir = newDirectory(); - boolean secondWriteNorm = random.nextBoolean(); + boolean secondWriteNorm = getRandom().nextBoolean(); buildIndex(otherDir, secondWriteNorm); AtomicReader reader = SlowCompositeReaderWrapper.wrap(IndexReader.open(otherDir)); @@ -135,8 +135,8 @@ public class TestNorms extends LuceneTestCase { } IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, config); + new MockAnalyzer(getRandom())); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir, config); writer.addIndexes(reader); AtomicReader mergedReader = SlowCompositeReaderWrapper.wrap(writer.getReader()); if (!firstWriteNorm && !secondWriteNorm) { @@ -175,15 +175,15 @@ public class TestNorms extends LuceneTestCase { public void buildIndex(Directory dir, boolean writeNorms) throws IOException, CorruptIndexException { IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)); + new MockAnalyzer(getRandom())); Similarity provider = new MySimProvider(writeNorms); config.setSimilarity(provider); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, config); - final LineFileDocs docs = new LineFileDocs(random); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir, config); + final LineFileDocs docs = new LineFileDocs(getRandom()); int num = atLeast(100); for (int i = 0; i < num; i++) { Document doc = docs.nextDoc(); - int boost = writeNorms ? 1 + random.nextInt(255) : 0; + int boost = writeNorms ? 
1 + getRandom().nextInt(255) : 0; Field f = new Field(byteTestField, "" + boost, TextField.TYPE_STORED); f.setBoost(boost); diff --git lucene/core/src/test/org/apache/lucene/index/TestOmitNorms.java lucene/core/src/test/org/apache/lucene/index/TestOmitNorms.java index cedae87..910565c 100644 --- lucene/core/src/test/org/apache/lucene/index/TestOmitNorms.java +++ lucene/core/src/test/org/apache/lucene/index/TestOmitNorms.java @@ -34,7 +34,7 @@ public class TestOmitNorms extends LuceneTestCase { // omitNorms bit in the FieldInfo public void testOmitNorms() throws Exception { Directory ram = newDirectory(); - Analyzer analyzer = new MockAnalyzer(random); + Analyzer analyzer = new MockAnalyzer(getRandom()); IndexWriter writer = new IndexWriter(ram, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)); Document d = new Document(); @@ -79,7 +79,7 @@ public class TestOmitNorms extends LuceneTestCase { // omitNorms for the same field works public void testMixedMerge() throws Exception { Directory ram = newDirectory(); - Analyzer analyzer = new MockAnalyzer(random); + Analyzer analyzer = new MockAnalyzer(getRandom()); IndexWriter writer = new IndexWriter( ram, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer). @@ -134,7 +134,7 @@ public class TestOmitNorms extends LuceneTestCase { // field, public void testMixedRAM() throws Exception { Directory ram = newDirectory(); - Analyzer analyzer = new MockAnalyzer(random); + Analyzer analyzer = new MockAnalyzer(getRandom()); IndexWriter writer = new IndexWriter( ram, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer). @@ -188,7 +188,7 @@ public class TestOmitNorms extends LuceneTestCase { // Verifies no *.nrm exists when all fields omit norms: public void testNoNrmFile() throws Throwable { Directory ram = newDirectory(); - Analyzer analyzer = new MockAnalyzer(random); + Analyzer analyzer = new MockAnalyzer(getRandom()); IndexWriter writer = new IndexWriter(ram, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(3).setMergePolicy(newLogMergePolicy())); LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy(); @@ -265,10 +265,10 @@ public class TestOmitNorms extends LuceneTestCase { * Indexes at least 1 document with f1, and at least 1 document with f2. * returns the norms for "field". */ - static byte[] getNorms(String field, Field f1, Field f2) throws IOException { + byte[] getNorms(String field, Field f1, Field f2) throws IOException { Directory dir = newDirectory(); - IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()); - RandomIndexWriter riw = new RandomIndexWriter(random, dir, iwc); + IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setMergePolicy(newLogMergePolicy()); + RandomIndexWriter riw = new RandomIndexWriter(getRandom(), dir, iwc); // add f1 Document d = new Document(); @@ -281,10 +281,10 @@ public class TestOmitNorms extends LuceneTestCase { riw.addDocument(d); // add a mix of f1's and f2's - int numExtraDocs = _TestUtil.nextInt(random, 1, 1000); + int numExtraDocs = _TestUtil.nextInt(getRandom(), 1, 1000); for (int i = 0; i < numExtraDocs; i++) { d = new Document(); - d.add(random.nextBoolean() ? f1 : f2); + d.add(getRandom().nextBoolean() ? 
f1 : f2); riw.addDocument(d); } diff --git lucene/core/src/test/org/apache/lucene/index/TestOmitPositions.java lucene/core/src/test/org/apache/lucene/index/TestOmitPositions.java index 9f2a45c..92da255 100644 --- lucene/core/src/test/org/apache/lucene/index/TestOmitPositions.java +++ lucene/core/src/test/org/apache/lucene/index/TestOmitPositions.java @@ -38,7 +38,7 @@ public class TestOmitPositions extends LuceneTestCase { public void testBasic() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, dir); + RandomIndexWriter w = new RandomIndexWriter(getRandom(), dir); Document doc = new Document(); FieldType ft = new FieldType(TextField.TYPE_UNSTORED); ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS); @@ -53,7 +53,7 @@ public class TestOmitPositions extends LuceneTestCase { assertNull(MultiFields.getTermPositionsEnum(reader, null, "foo", new BytesRef("test"), false)); - DocsEnum de = _TestUtil.docs(random, reader, "foo", new BytesRef("test"), null, null, true); + DocsEnum de = _TestUtil.docs(getRandom(), reader, "foo", new BytesRef("test"), null, null, true); while (de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { assertEquals(2, de.freq()); } @@ -66,7 +66,7 @@ public class TestOmitPositions extends LuceneTestCase { // omitTermFreqAndPositions bit in the FieldInfo public void testPositions() throws Exception { Directory ram = newDirectory(); - Analyzer analyzer = new MockAnalyzer(random); + Analyzer analyzer = new MockAnalyzer(getRandom()); IndexWriter writer = new IndexWriter(ram, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer)); Document d = new Document(); @@ -189,7 +189,7 @@ public class TestOmitPositions extends LuceneTestCase { // Verifies no *.prx exists when all fields omit term positions: public void testNoPrxFile() throws Throwable { Directory ram = newDirectory(); - Analyzer analyzer = new MockAnalyzer(random); + Analyzer analyzer = new MockAnalyzer(getRandom()); IndexWriter writer = new IndexWriter(ram, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(3).setMergePolicy(newLogMergePolicy())); LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy(); diff --git lucene/core/src/test/org/apache/lucene/index/TestOmitTf.java lucene/core/src/test/org/apache/lucene/index/TestOmitTf.java index 121ab32..379516d 100644 --- lucene/core/src/test/org/apache/lucene/index/TestOmitTf.java +++ lucene/core/src/test/org/apache/lucene/index/TestOmitTf.java @@ -61,7 +61,7 @@ public class TestOmitTf extends LuceneTestCase { // omitTermFreqAndPositions bit in the FieldInfo public void testOmitTermFreqAndPositions() throws Exception { Directory ram = newDirectory(); - Analyzer analyzer = new MockAnalyzer(random); + Analyzer analyzer = new MockAnalyzer(getRandom()); IndexWriter writer = new IndexWriter(ram, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer)); Document d = new Document(); @@ -106,7 +106,7 @@ public class TestOmitTf extends LuceneTestCase { // omitTermFreqAndPositions for the same field works public void testMixedMerge() throws Exception { Directory ram = newDirectory(); - Analyzer analyzer = new MockAnalyzer(random); + Analyzer analyzer = new MockAnalyzer(getRandom()); IndexWriter writer = new IndexWriter( ram, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer). 
@@ -159,7 +159,7 @@ public class TestOmitTf extends LuceneTestCase { // field, public void testMixedRAM() throws Exception { Directory ram = newDirectory(); - Analyzer analyzer = new MockAnalyzer(random); + Analyzer analyzer = new MockAnalyzer(getRandom()); IndexWriter writer = new IndexWriter( ram, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer). @@ -208,7 +208,7 @@ public class TestOmitTf extends LuceneTestCase { // Verifies no *.prx exists when all fields omit term freq: public void testNoPrxFile() throws Throwable { Directory ram = newDirectory(); - Analyzer analyzer = new MockAnalyzer(random); + Analyzer analyzer = new MockAnalyzer(getRandom()); IndexWriter writer = new IndexWriter(ram, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(3).setMergePolicy(newLogMergePolicy())); LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy(); @@ -247,7 +247,7 @@ public class TestOmitTf extends LuceneTestCase { // Test scores with one field with Term Freqs and one without, otherwise with equal content public void testBasic() throws Exception { Directory dir = newDirectory(); - Analyzer analyzer = new MockAnalyzer(random); + Analyzer analyzer = new MockAnalyzer(getRandom()); IndexWriter writer = new IndexWriter( dir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer). @@ -427,8 +427,8 @@ public class TestOmitTf extends LuceneTestCase { /** test that when freqs are omitted, that totalTermFreq and sumTotalTermFreq are -1 */ public void testStats() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter iw = new RandomIndexWriter(random, dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + RandomIndexWriter iw = new RandomIndexWriter(getRandom(), dir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); Document doc = new Document(); FieldType ft = new FieldType(TextField.TYPE_UNSTORED); ft.setIndexOptions(IndexOptions.DOCS_ONLY); diff --git lucene/core/src/test/org/apache/lucene/index/TestParallelAtomicReader.java lucene/core/src/test/org/apache/lucene/index/TestParallelAtomicReader.java index 885a199..c3719e8 100644 --- lucene/core/src/test/org/apache/lucene/index/TestParallelAtomicReader.java +++ lucene/core/src/test/org/apache/lucene/index/TestParallelAtomicReader.java @@ -34,8 +34,8 @@ public class TestParallelAtomicReader extends LuceneTestCase { private Directory dir, dir1, dir2; public void testQueries() throws Exception { - single = single(random); - parallel = parallel(random); + single = single(getRandom()); + parallel = parallel(getRandom()); queryTest(new TermQuery(new Term("f1", "v1"))); queryTest(new TermQuery(new Term("f1", "v2"))); @@ -59,8 +59,8 @@ public class TestParallelAtomicReader extends LuceneTestCase { } public void testFieldNames() throws Exception { - Directory dir1 = getDir1(random); - Directory dir2 = getDir2(random); + Directory dir1 = getDir1(getRandom()); + Directory dir2 = getDir2(getRandom()); ParallelAtomicReader pr = new ParallelAtomicReader(SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir1)), SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir2))); FieldInfos fieldInfos = pr.getFieldInfos(); @@ -75,8 +75,8 @@ public class TestParallelAtomicReader extends LuceneTestCase { } public void testRefCounts1() throws IOException { - Directory dir1 = getDir1(random); - Directory dir2 = getDir2(random); + Directory dir1 = getDir1(getRandom()); + Directory dir2 = getDir2(getRandom()); AtomicReader ir1, ir2; // close subreaders, ParallelReader will 
not change refCounts, but close on its own close ParallelAtomicReader pr = new ParallelAtomicReader(ir1 = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir1)), @@ -93,8 +93,8 @@ public class TestParallelAtomicReader extends LuceneTestCase { } public void testRefCounts2() throws IOException { - Directory dir1 = getDir1(random); - Directory dir2 = getDir2(random); + Directory dir1 = getDir1(getRandom()); + Directory dir2 = getDir2(getRandom()); AtomicReader ir1 = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir1)); AtomicReader ir2 = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir2)); // don't close subreaders, so ParallelReader will increment refcounts @@ -115,11 +115,11 @@ public class TestParallelAtomicReader extends LuceneTestCase { public void testIncompatibleIndexes() throws IOException { // two documents: - Directory dir1 = getDir1(random); + Directory dir1 = getDir1(getRandom()); // one document only: Directory dir2 = newDirectory(); - IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); Document d3 = new Document(); d3.add(newField("f3", "v1", TextField.TYPE_STORED)); @@ -137,7 +137,7 @@ public class TestParallelAtomicReader extends LuceneTestCase { } try { - new ParallelAtomicReader(random.nextBoolean(), + new ParallelAtomicReader(getRandom().nextBoolean(), new AtomicReader[] {ir1, ir2}, new AtomicReader[] {ir1, ir2}); fail("didn't get expected exception: indexes don't have same number of documents"); @@ -154,8 +154,8 @@ public class TestParallelAtomicReader extends LuceneTestCase { } public void testIgnoreStoredFields() throws IOException { - Directory dir1 = getDir1(random); - Directory dir2 = getDir2(random); + Directory dir1 = getDir1(getRandom()); + Directory dir2 = getDir2(getRandom()); AtomicReader ir1 = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir1)); AtomicReader ir2 = SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir2)); diff --git lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java index 3594742..0db7231 100644 --- lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java +++ lucene/core/src/test/org/apache/lucene/index/TestParallelCompositeReader.java @@ -34,8 +34,8 @@ public class TestParallelCompositeReader extends LuceneTestCase { private Directory dir, dir1, dir2; public void testQueries() throws Exception { - single = single(random, false); - parallel = parallel(random, false); + single = single(getRandom(), false); + parallel = parallel(getRandom(), false); queries(); @@ -47,8 +47,8 @@ public class TestParallelCompositeReader extends LuceneTestCase { } public void testQueriesCompositeComposite() throws Exception { - single = single(random, true); - parallel = parallel(random, true); + single = single(getRandom(), true); + parallel = parallel(getRandom(), true); queries(); @@ -76,8 +76,8 @@ public class TestParallelCompositeReader extends LuceneTestCase { } public void testRefCounts1() throws IOException { - Directory dir1 = getDir1(random); - Directory dir2 = getDir2(random); + Directory dir1 = getDir1(getRandom()); + Directory dir2 = getDir2(getRandom()); DirectoryReader ir1, ir2; // close subreaders, ParallelReader will not change refCounts, but close on its own close ParallelCompositeReader pr = new 
ParallelCompositeReader(ir1 = DirectoryReader.open(dir1), @@ -93,8 +93,8 @@ public class TestParallelCompositeReader extends LuceneTestCase { } public void testRefCounts2() throws IOException { - Directory dir1 = getDir1(random); - Directory dir2 = getDir2(random); + Directory dir1 = getDir1(getRandom()); + Directory dir2 = getDir2(getRandom()); DirectoryReader ir1 = DirectoryReader.open(dir1); DirectoryReader ir2 = DirectoryReader.open(dir2); @@ -116,11 +116,11 @@ public class TestParallelCompositeReader extends LuceneTestCase { public void testIncompatibleIndexes1() throws IOException { // two documents: - Directory dir1 = getDir1(random); + Directory dir1 = getDir1(getRandom()); // one document only: Directory dir2 = newDirectory(); - IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter w2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); Document d3 = new Document(); d3.add(newField("f3", "v1", TextField.TYPE_STORED)); @@ -136,7 +136,7 @@ public class TestParallelCompositeReader extends LuceneTestCase { // expected exception } try { - new ParallelCompositeReader(random.nextBoolean(), ir1, ir2); + new ParallelCompositeReader(getRandom().nextBoolean(), ir1, ir2); fail("didn't get expected exception: indexes don't have same number of documents"); } catch (IllegalArgumentException e) { // expected exception @@ -152,8 +152,8 @@ public class TestParallelCompositeReader extends LuceneTestCase { } public void testIncompatibleIndexes2() throws IOException { - Directory dir1 = getDir1(random); - Directory dir2 = getInvalidStructuredDir2(random); + Directory dir1 = getDir1(getRandom()); + Directory dir2 = getInvalidStructuredDir2(getRandom()); DirectoryReader ir1 = DirectoryReader.open(dir1), ir2 = DirectoryReader.open(dir2); @@ -165,7 +165,7 @@ public class TestParallelCompositeReader extends LuceneTestCase { // expected exception } try { - new ParallelCompositeReader(random.nextBoolean(), readers, readers); + new ParallelCompositeReader(getRandom().nextBoolean(), readers, readers); fail("didn't get expected exception: indexes don't have same subreader structure"); } catch (IllegalArgumentException e) { // expected exception @@ -181,8 +181,8 @@ public class TestParallelCompositeReader extends LuceneTestCase { } public void testIncompatibleIndexes3() throws IOException { - Directory dir1 = getDir1(random); - Directory dir2 = getDir2(random); + Directory dir1 = getDir1(getRandom()); + Directory dir2 = getDir2(getRandom()); CompositeReader ir1 = new MultiReader(DirectoryReader.open(dir1), SlowCompositeReaderWrapper.wrap(DirectoryReader.open(dir1))), ir2 = new MultiReader(DirectoryReader.open(dir2), DirectoryReader.open(dir2)); @@ -194,7 +194,7 @@ public class TestParallelCompositeReader extends LuceneTestCase { // expected exception } try { - new ParallelCompositeReader(random.nextBoolean(), readers, readers); + new ParallelCompositeReader(getRandom().nextBoolean(), readers, readers); fail("didn't get expected exception: indexes don't have same subreader structure"); } catch (IllegalArgumentException e) { // expected exception @@ -210,8 +210,8 @@ public class TestParallelCompositeReader extends LuceneTestCase { } public void testIgnoreStoredFields() throws IOException { - Directory dir1 = getDir1(random); - Directory dir2 = getDir2(random); + Directory dir1 = getDir1(getRandom()); + Directory dir2 = getDir2(getRandom()); CompositeReader ir1 = DirectoryReader.open(dir1); 
CompositeReader ir2 = DirectoryReader.open(dir2); diff --git lucene/core/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java lucene/core/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java index 958ff97..481d2d4 100644 --- lucene/core/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java +++ lucene/core/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java @@ -44,14 +44,14 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase { */ public void testEmptyIndex() throws IOException { Directory rd1 = newDirectory(); - IndexWriter iw = new IndexWriter(rd1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter iw = new IndexWriter(rd1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); iw.close(); // create a copy: Directory rd2 = newDirectory(rd1); Directory rdOut = newDirectory(); - IndexWriter iwOut = new IndexWriter(rdOut, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter iwOut = new IndexWriter(rdOut, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); ParallelAtomicReader apr = new ParallelAtomicReader( SlowCompositeReaderWrapper.wrap(DirectoryReader.open(rd1)), @@ -91,7 +91,7 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase { public void testEmptyIndexWithVectors() throws IOException { Directory rd1 = newDirectory(); { - IndexWriter iw = new IndexWriter(rd1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter iw = new IndexWriter(rd1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); Document doc = new Document(); Field idField = newField("id", "", TextField.TYPE_UNSTORED); doc.add(idField); @@ -105,7 +105,7 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase { iw.addDocument(doc); iw.close(); - IndexWriterConfig dontMergeConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)) + IndexWriterConfig dontMergeConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())) .setMergePolicy(NoMergePolicy.COMPOUND_FILES); IndexWriter writer = new IndexWriter(rd1, dontMergeConfig); @@ -116,14 +116,14 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase { assertEquals(1, ir.numDocs()); ir.close(); - iw = new IndexWriter(rd1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); + iw = new IndexWriter(rd1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setOpenMode(OpenMode.APPEND)); iw.forceMerge(1); iw.close(); } Directory rd2 = newDirectory(); { - IndexWriter iw = new IndexWriter(rd2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter iw = new IndexWriter(rd2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); Document doc = new Document(); iw.addDocument(doc); iw.close(); @@ -131,7 +131,7 @@ public class TestParallelReaderEmptyIndex extends LuceneTestCase { Directory rdOut = newDirectory(); - IndexWriter iwOut = new IndexWriter(rdOut, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter iwOut = new IndexWriter(rdOut, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); final DirectoryReader reader1, reader2; ParallelAtomicReader pr = new ParallelAtomicReader( SlowCompositeReaderWrapper.wrap(reader1 = DirectoryReader.open(rd1)), diff --git 
lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java index 9db6cf3..07b0a43 100755 --- lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java +++ lucene/core/src/test/org/apache/lucene/index/TestParallelTermEnum.java @@ -38,7 +38,7 @@ public class TestParallelTermEnum extends LuceneTestCase { super.setUp(); Document doc; rd1 = newDirectory(); - IndexWriter iw1 = new IndexWriter(rd1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter iw1 = new IndexWriter(rd1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); doc = new Document(); doc.add(newField("field1", "the quick brown fox jumps", TextField.TYPE_STORED)); @@ -48,7 +48,7 @@ public class TestParallelTermEnum extends LuceneTestCase { iw1.close(); rd2 = newDirectory(); - IndexWriter iw2 = new IndexWriter(rd2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter iw2 = new IndexWriter(rd2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); doc = new Document(); doc.add(newField("field0", "", TextField.TYPE_UNSTORED)); @@ -87,31 +87,31 @@ public class TestParallelTermEnum extends LuceneTestCase { TermsEnum te = terms.iterator(null); assertEquals("brown", te.next().utf8ToString()); - DocsEnum td = _TestUtil.docs(random, te, liveDocs, null, false); + DocsEnum td = _TestUtil.docs(getRandom(), te, liveDocs, null, false); assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS); assertEquals(0, td.docID()); assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS); assertEquals("fox", te.next().utf8ToString()); - td = _TestUtil.docs(random, te, liveDocs, td, false); + td = _TestUtil.docs(getRandom(), te, liveDocs, td, false); assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS); assertEquals(0, td.docID()); assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS); assertEquals("jumps", te.next().utf8ToString()); - td = _TestUtil.docs(random, te, liveDocs, td, false); + td = _TestUtil.docs(getRandom(), te, liveDocs, td, false); assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS); assertEquals(0, td.docID()); assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS); assertEquals("quick", te.next().utf8ToString()); - td = _TestUtil.docs(random, te, liveDocs, td, false); + td = _TestUtil.docs(getRandom(), te, liveDocs, td, false); assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS); assertEquals(0, td.docID()); assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS); assertEquals("the", te.next().utf8ToString()); - td = _TestUtil.docs(random, te, liveDocs, td, false); + td = _TestUtil.docs(getRandom(), te, liveDocs, td, false); assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS); assertEquals(0, td.docID()); assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS); @@ -124,31 +124,31 @@ public class TestParallelTermEnum extends LuceneTestCase { te = terms.iterator(null); assertEquals("brown", te.next().utf8ToString()); - td = _TestUtil.docs(random, te, liveDocs, td, false); + td = _TestUtil.docs(getRandom(), te, liveDocs, td, false); assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS); assertEquals(0, td.docID()); assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS); assertEquals("fox", te.next().utf8ToString()); - td = _TestUtil.docs(random, te, liveDocs, td, false); + td = _TestUtil.docs(getRandom(), te, liveDocs, td, false); assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS); assertEquals(0, td.docID()); assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS); assertEquals("jumps", 
te.next().utf8ToString());
-    td = _TestUtil.docs(random, te, liveDocs, td, false);
+    td = _TestUtil.docs(getRandom(), te, liveDocs, td, false);
     assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS);
     assertEquals(0, td.docID());
     assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS);
     assertEquals("quick", te.next().utf8ToString());
-    td = _TestUtil.docs(random, te, liveDocs, td, false);
+    td = _TestUtil.docs(getRandom(), te, liveDocs, td, false);
     assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS);
     assertEquals(0, td.docID());
     assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS);
     assertEquals("the", te.next().utf8ToString());
-    td = _TestUtil.docs(random, te, liveDocs, td, false);
+    td = _TestUtil.docs(getRandom(), te, liveDocs, td, false);
     assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS);
     assertEquals(0, td.docID());
     assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS);
@@ -161,37 +161,37 @@ public class TestParallelTermEnum extends LuceneTestCase {
     te = terms.iterator(null);
     assertEquals("dog", te.next().utf8ToString());
-    td = _TestUtil.docs(random, te, liveDocs, td, false);
+    td = _TestUtil.docs(getRandom(), te, liveDocs, td, false);
     assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS);
     assertEquals(0, td.docID());
     assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS);
     assertEquals("fox", te.next().utf8ToString());
-    td = _TestUtil.docs(random, te, liveDocs, td, false);
+    td = _TestUtil.docs(getRandom(), te, liveDocs, td, false);
     assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS);
     assertEquals(0, td.docID());
     assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS);
     assertEquals("jumps", te.next().utf8ToString());
-    td = _TestUtil.docs(random, te, liveDocs, td, false);
+    td = _TestUtil.docs(getRandom(), te, liveDocs, td, false);
     assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS);
     assertEquals(0, td.docID());
     assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS);
     assertEquals("lazy", te.next().utf8ToString());
-    td = _TestUtil.docs(random, te, liveDocs, td, false);
+    td = _TestUtil.docs(getRandom(), te, liveDocs, td, false);
     assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS);
     assertEquals(0, td.docID());
     assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS);
     assertEquals("over", te.next().utf8ToString());
-    td = _TestUtil.docs(random, te, liveDocs, td, false);
+    td = _TestUtil.docs(getRandom(), te, liveDocs, td, false);
     assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS);
     assertEquals(0, td.docID());
     assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS);
     assertEquals("the", te.next().utf8ToString());
-    td = _TestUtil.docs(random, te, liveDocs, td, false);
+    td = _TestUtil.docs(getRandom(), te, liveDocs, td, false);
     assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS);
     assertEquals(0, td.docID());
     assertEquals(td.nextDoc(), DocsEnum.NO_MORE_DOCS);
diff --git lucene/core/src/test/org/apache/lucene/index/TestPayloadProcessorProvider.java lucene/core/src/test/org/apache/lucene/index/TestPayloadProcessorProvider.java
index 3a7fd13..d59e203 100644
--- lucene/core/src/test/org/apache/lucene/index/TestPayloadProcessorProvider.java
+++ lucene/core/src/test/org/apache/lucene/index/TestPayloadProcessorProvider.java
@@ -221,25 +221,25 @@ public class TestPayloadProcessorProvider extends LuceneTestCase {
   @Test
   public void testAddIndexes() throws Exception {
     // addIndexes - single commit in each
-    doTest(random, true, 0, false);
+    doTest(getRandom(), true, 0, false);
     // addIndexes - multiple commits in each
-    doTest(random, true, 0, true);
+    doTest(getRandom(), true, 0, true);
   }
   @Test
   public void testAddIndexesIntoExisting() throws Exception {
     // addIndexes - single commit in each
-    doTest(random, false, NUM_DOCS, false);
+    doTest(getRandom(), false, NUM_DOCS, false);
     // addIndexes - multiple commits in each
-    doTest(random, false, NUM_DOCS, true);
+    doTest(getRandom(), false, NUM_DOCS, true);
   }
   @Test
   public void testRegularMerges() throws Exception {
     Directory dir = newDirectory();
-    populateDocs(random, dir, true);
+    populateDocs(getRandom(), dir, true);
     verifyPayloadExists(dir, "p", new BytesRef("p1"), NUM_DOCS);
     verifyPayloadExists(dir, "p", new BytesRef("p2"), NUM_DOCS);
@@ -247,7 +247,7 @@ public class TestPayloadProcessorProvider extends LuceneTestCase {
     // won't get processed.
     Map processors = new HashMap();
     processors.put(dir, new PerTermPayloadProcessor());
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)));
     writer.setPayloadProcessorProvider(new PerDirPayloadProcessor(processors));
     writer.forceMerge(1);
     writer.close();
diff --git lucene/core/src/test/org/apache/lucene/index/TestPayloads.java lucene/core/src/test/org/apache/lucene/index/TestPayloads.java
index 920043b..e3a9ccf 100644
--- lucene/core/src/test/org/apache/lucene/index/TestPayloads.java
+++ lucene/core/src/test/org/apache/lucene/index/TestPayloads.java
@@ -243,7 +243,7 @@ public class TestPayloads extends LuceneTestCase {
       // handle a caller that mucks with the
       // returned payload:
       if (rarely()) {
-        br.bytes = new byte[random.nextInt(5)];
+        br.bytes = new byte[getRandom().nextInt(5)];
       }
       br.length = 0;
       br.offset = 0;
@@ -351,7 +351,7 @@ public class TestPayloads extends LuceneTestCase {
     static final Charset utf8 = Charset.forName("UTF-8");
     private void generateRandomData(byte[] data) {
       // this test needs the random data to be valid unicode
-      String s = _TestUtil.randomFixedByteLengthUnicodeString(random, data.length);
+      String s = _TestUtil.randomFixedByteLengthUnicodeString(getRandom(), data.length);
       byte b[] = s.getBytes(utf8);
       assert b.length == data.length;
       System.arraycopy(b, 0, data, 0, b.length);
@@ -503,7 +503,7 @@ public class TestPayloads extends LuceneTestCase {
     Directory dir = newDirectory();
     final IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+        TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())));
     final String field = "test";
     Thread[] ingesters = new Thread[numThreads];
@@ -612,15 +612,15 @@ public class TestPayloads extends LuceneTestCase {
   public void testAcrossFields() throws Exception {
     Directory dir = newDirectory();
-    RandomIndexWriter writer = new RandomIndexWriter(random, dir,
-        new MockAnalyzer(random, MockTokenizer.WHITESPACE, true));
+    RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir,
+        new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, true));
     Document doc = new Document();
     doc.add(new Field("hasMaybepayload", "here we go", TextField.TYPE_STORED));
     writer.addDocument(doc);
     writer.close();
-    writer = new RandomIndexWriter(random, dir,
-        new MockAnalyzer(random, MockTokenizer.WHITESPACE, true));
+    writer = new RandomIndexWriter(getRandom(), dir,
+        new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, true));
     doc = new Document();
     doc.add(new Field("hasMaybepayload2", "here we go", TextField.TYPE_STORED));
     writer.addDocument(doc);
diff --git lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
index 91ba6f9..8113ded 100644
--- lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
+++ lucene/core/src/test/org/apache/lucene/index/TestPerSegmentDeletes.java
@@ -36,9 +36,9 @@ import org.apache.lucene.util._TestUtil;
 public class TestPerSegmentDeletes extends LuceneTestCase {
   public void testDeletes1() throws Exception {
     //IndexWriter.debug2 = System.out;
-    Directory dir = new MockDirectoryWrapper(new Random(random.nextLong()), new RAMDirectory());
+    Directory dir = new MockDirectoryWrapper(new Random(getRandom().nextLong()), new RAMDirectory());
     IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT,
-        new MockAnalyzer(random));
+        new MockAnalyzer(getRandom()));
     iwc.setMergeScheduler(new SerialMergeScheduler());
     iwc.setMaxBufferedDocs(5000);
     iwc.setRAMBufferSizeMB(100);
@@ -219,13 +219,13 @@ public class TestPerSegmentDeletes extends LuceneTestCase {
     }
   }
-  public static int[] toDocsArray(Term term, Bits bits, IndexReader reader)
+  public int[] toDocsArray(Term term, Bits bits, IndexReader reader)
       throws IOException {
     Fields fields = MultiFields.getFields(reader);
     Terms cterms = fields.terms(term.field);
     TermsEnum ctermsEnum = cterms.iterator(null);
     if (ctermsEnum.seekExact(new BytesRef(term.text()), false)) {
-      DocsEnum docsEnum = _TestUtil.docs(random, ctermsEnum, bits, null, false);
+      DocsEnum docsEnum = _TestUtil.docs(getRandom(), ctermsEnum, bits, null, false);
       return toArray(docsEnum);
     }
     return null;
diff --git lucene/core/src/test/org/apache/lucene/index/TestPersistentSnapshotDeletionPolicy.java lucene/core/src/test/org/apache/lucene/index/TestPersistentSnapshotDeletionPolicy.java
index b18acf2..761639a 100644
--- lucene/core/src/test/org/apache/lucene/index/TestPersistentSnapshotDeletionPolicy.java
+++ lucene/core/src/test/org/apache/lucene/index/TestPersistentSnapshotDeletionPolicy.java
@@ -79,7 +79,7 @@ public class TestPersistentSnapshotDeletionPolicy extends TestSnapshotDeletionPo
     int numSnapshots = 3;
     Directory dir = newDirectory();
     PersistentSnapshotDeletionPolicy psdp = (PersistentSnapshotDeletionPolicy) getDeletionPolicy();
-    IndexWriter writer = new IndexWriter(dir, getConfig(random, psdp));
+    IndexWriter writer = new IndexWriter(dir, getConfig(getRandom(), psdp));
     prepareIndexAndSnapshots(psdp, writer, numSnapshots, "snapshot");
     writer.close();
     psdp.close();
@@ -88,7 +88,7 @@ public class TestPersistentSnapshotDeletionPolicy extends TestSnapshotDeletionPo
     psdp = new PersistentSnapshotDeletionPolicy(
         new KeepOnlyLastCommitDeletionPolicy(), snapshotDir, OpenMode.APPEND,
         TEST_VERSION_CURRENT);
-    new IndexWriter(dir, getConfig(random, psdp)).close();
+    new IndexWriter(dir, getConfig(getRandom(), psdp)).close();
     assertSnapshotExists(dir, psdp, numSnapshots);
     assertEquals(numSnapshots, psdp.getSnapshots().size());
@@ -104,7 +104,7 @@ public class TestPersistentSnapshotDeletionPolicy extends TestSnapshotDeletionPo
   @Test
   public void testInvalidSnapshotInfos() throws Exception {
     // Add the correct number of documents (1), but without snapshot information
-    IndexWriter writer = new IndexWriter(snapshotDir, getConfig(random, null));
+    IndexWriter writer = new IndexWriter(snapshotDir, getConfig(getRandom(), null));
     writer.addDocument(new Document());
     writer.close();
     try {
@@ -119,7 +119,7 @@ public class TestPersistentSnapshotDeletionPolicy extends TestSnapshotDeletionPo
   @Test
   public void testNoSnapshotInfos() throws Exception {
     // Initialize an empty index in snapshotDir - PSDP should initialize successfully.
-    new IndexWriter(snapshotDir, getConfig(random, null)).close();
+    new IndexWriter(snapshotDir, getConfig(getRandom(), null)).close();
     new PersistentSnapshotDeletionPolicy(
         new KeepOnlyLastCommitDeletionPolicy(), snapshotDir, OpenMode.APPEND,
         TEST_VERSION_CURRENT).close();
@@ -128,7 +128,7 @@ public class TestPersistentSnapshotDeletionPolicy extends TestSnapshotDeletionPo
   @Test(expected=IllegalStateException.class)
   public void testTooManySnapshotInfos() throws Exception {
     // Write two documents to the snapshots directory - illegal.
-    IndexWriter writer = new IndexWriter(snapshotDir, getConfig(random, null));
+    IndexWriter writer = new IndexWriter(snapshotDir, getConfig(getRandom(), null));
     writer.addDocument(new Document());
     writer.addDocument(new Document());
     writer.close();
@@ -143,7 +143,7 @@ public class TestPersistentSnapshotDeletionPolicy extends TestSnapshotDeletionPo
   public void testSnapshotRelease() throws Exception {
     Directory dir = newDirectory();
     PersistentSnapshotDeletionPolicy psdp = (PersistentSnapshotDeletionPolicy) getDeletionPolicy();
-    IndexWriter writer = new IndexWriter(dir, getConfig(random, psdp));
+    IndexWriter writer = new IndexWriter(dir, getConfig(getRandom(), psdp));
     prepareIndexAndSnapshots(psdp, writer, 1, "snapshot");
     writer.close();
@@ -166,7 +166,7 @@ public class TestPersistentSnapshotDeletionPolicy extends TestSnapshotDeletionPo
     int numSnapshots = 1;
     Directory dir = newDirectory();
     PersistentSnapshotDeletionPolicy psdp = (PersistentSnapshotDeletionPolicy) getDeletionPolicy();
-    IndexWriter writer = new IndexWriter(dir, getConfig(random, psdp));
+    IndexWriter writer = new IndexWriter(dir, getConfig(getRandom(), psdp));
     prepareIndexAndSnapshots(psdp, writer, numSnapshots, "snapshot");
     writer.close();
     dir.close();
diff --git lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java
index 16ba32e..73e20b3 100644
--- lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java
+++ lucene/core/src/test/org/apache/lucene/index/TestPostingsOffsets.java
@@ -50,7 +50,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
     super.setUp();
     // Currently only SimpleText and Lucene40 can index offsets into postings:
     assumeTrue("codec does not support offsets", Codec.getDefault().getName().equals("SimpleText") || Codec.getDefault().getName().equals("Lucene40"));
-    iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+    iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()));
     if (Codec.getDefault().getName().equals("Lucene40")) {
       // pulsing etc are not implemented
@@ -61,7 +61,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
   public void testBasic() throws Exception {
     Directory dir = newDirectory();
-    RandomIndexWriter w = new RandomIndexWriter(random, dir, iwc);
+    RandomIndexWriter w = new RandomIndexWriter(getRandom(), dir, iwc);
     Document doc = new Document();
     FieldType ft = new FieldType(TextField.TYPE_UNSTORED);
@@ -122,21 +122,21 @@ public class TestPostingsOffsets extends LuceneTestCase {
   public void doTestNumbers(boolean withPayloads) throws Exception {
     Directory dir = newDirectory();
-    Analyzer analyzer = withPayloads ? new MockPayloadAnalyzer() : new MockAnalyzer(random);
+    Analyzer analyzer = withPayloads ? new MockPayloadAnalyzer() : new MockAnalyzer(getRandom());
     iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer);
     if (Codec.getDefault().getName().equals("Lucene40")) {
       // pulsing etc are not implemented
       iwc.setCodec(_TestUtil.alwaysPostingsFormat(new Lucene40PostingsFormat()));
     }
     iwc.setMergePolicy(newLogMergePolicy()); // will rely on docids a bit for skipping
-    RandomIndexWriter w = new RandomIndexWriter(random, dir, iwc);
+    RandomIndexWriter w = new RandomIndexWriter(getRandom(), dir, iwc);
     FieldType ft = new FieldType(TextField.TYPE_STORED);
     ft.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    if (random.nextBoolean()) {
+    if (getRandom().nextBoolean()) {
       ft.setStoreTermVectors(true);
-      ft.setStoreTermVectorOffsets(random.nextBoolean());
-      ft.setStoreTermVectorPositions(random.nextBoolean());
+      ft.setStoreTermVectorOffsets(getRandom().nextBoolean());
+      ft.setStoreTermVectorPositions(getRandom().nextBoolean());
     }
     int numDocs = atLeast(500);
@@ -181,7 +181,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
     int numSkippingTests = atLeast(50);
     for (int j = 0; j < numSkippingTests; j++) {
-      int num = _TestUtil.nextInt(random, 100, Math.min(numDocs-1, 999));
+      int num = _TestUtil.nextInt(getRandom(), 100, Math.min(numDocs-1, 999));
       DocsAndPositionsEnum dp = MultiFields.getTermPositionsEnum(reader, null, "numbers", new BytesRef("hundred"), true);
       int doc = dp.advance(num);
       assertEquals(num, doc);
@@ -221,7 +221,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
     final Map<String,Map<Integer,List<Token>>> actualTokens = new HashMap<String,Map<Integer,List<Token>>>();
     Directory dir = newDirectory();
-    RandomIndexWriter w = new RandomIndexWriter(random, dir, iwc);
+    RandomIndexWriter w = new RandomIndexWriter(getRandom(), dir, iwc);
     final int numDocs = atLeast(20);
     //final int numDocs = atLeast(5);
@@ -231,10 +231,10 @@ public class TestPostingsOffsets extends LuceneTestCase {
     // TODO: randomize what IndexOptions we use; also test
     // changing this up in one IW buffered segment...:
     ft.setIndexOptions(FieldInfo.IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
-    if (random.nextBoolean()) {
+    if (getRandom().nextBoolean()) {
       ft.setStoreTermVectors(true);
-      ft.setStoreTermVectorOffsets(random.nextBoolean());
-      ft.setStoreTermVectorPositions(random.nextBoolean());
+      ft.setStoreTermVectorOffsets(getRandom().nextBoolean());
+      ft.setStoreTermVectorPositions(getRandom().nextBoolean());
     }
     for(int docCount=0;docCount<numDocs;docCount++) {
[...]
-      if (docIter >= SIZE && random.nextInt(50) == 17) {
+      if (docIter >= SIZE && getRandom().nextInt(50) == 17) {
         if (r != null) {
           r.close();
         }
-        final boolean applyDeletions = random.nextBoolean();
+        final boolean applyDeletions = getRandom().nextBoolean();
         r = w.getReader(applyDeletions);
         assertTrue("applyDeletions=" + applyDeletions + " r.numDocs()=" + r.numDocs() + " vs SIZE=" + SIZE, !applyDeletions || r.numDocs() == SIZE);
       }
@@ -88,12 +88,12 @@ public class TestRollingUpdates extends LuceneTestCase {
   public void testUpdateSameDoc() throws Exception {
     final Directory dir = newDirectory();
-    final LineFileDocs docs = new LineFileDocs(random);
+    final LineFileDocs docs = new LineFileDocs(getRandom());
     for (int r = 0; r < 3; r++) {
       final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
-          TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
+          TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setMaxBufferedDocs(2));
       final int numUpdates = atLeast(20);
-      int numThreads = _TestUtil.nextInt(random, 2, 6);
+      int numThreads = _TestUtil.nextInt(getRandom(), 2, 6);
      IndexingThread[] threads = new IndexingThread[numThreads];
      for (int i = 0; i < numThreads; i++) {
        threads[i] = new IndexingThread(docs, w, numUpdates);
@@ -133,7 +133,7 @@ public class TestRollingUpdates extends LuceneTestCase {
        Document doc = new Document();// docs.nextDoc();
        doc.add(newField("id", "test", StringField.TYPE_UNSTORED));
        writer.updateDocument(new Term("id", "test"), doc);
-        if (random.nextInt(3) == 0) {
+        if (getStaticRandom().nextInt(3) == 0) {
          if (open == null) {
            open = IndexReader.open(writer, true);
          }
diff --git lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java
index 40a52b7..13e3805 100644
--- lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java
+++ lucene/core/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java
@@ -36,7 +36,7 @@ public class TestSameTokenSamePosition extends LuceneTestCase {
   */
  public void test() throws Exception {
    Directory dir = newDirectory();
-    RandomIndexWriter riw = new RandomIndexWriter(random, dir);
+    RandomIndexWriter riw = new RandomIndexWriter(getRandom(), dir);
    Document doc = new Document();
    doc.add(new TextField("eng", new BugReproTokenStream()));
    riw.addDocument(doc);
@@ -49,7 +49,7 @@ public class TestSameTokenSamePosition extends LuceneTestCase {
   */
  public void testMoreDocs() throws Exception {
    Directory dir = newDirectory();
-    RandomIndexWriter riw = new RandomIndexWriter(random, dir);
+    RandomIndexWriter riw = new RandomIndexWriter(getRandom(), dir);
    for (int i = 0; i < 100; i++) {
      Document doc = new Document();
      doc.add(new TextField("eng", new BugReproTokenStream()));
diff --git lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java
index a1d83f5..4e342b7 100644
--- lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java
+++ lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java
@@ -50,11 +50,11 @@ public class TestSegmentMerger extends LuceneTestCase {
    merge1Dir = newDirectory();
    merge2Dir = newDirectory();
    DocHelper.setupDoc(doc1);
-    SegmentInfo info1 = DocHelper.writeDoc(random, merge1Dir, doc1);
+    SegmentInfo info1 = DocHelper.writeDoc(getRandom(), merge1Dir, doc1);
    DocHelper.setupDoc(doc2);
-    SegmentInfo info2 = DocHelper.writeDoc(random, merge2Dir, doc2);
-    reader1 = new SegmentReader(info1, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random));
-    reader2 = new SegmentReader(info2, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random));
+    SegmentInfo info2 = DocHelper.writeDoc(getRandom(), merge2Dir, doc2);
+    reader1 = new SegmentReader(info1, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(getRandom()));
+    reader2 = new SegmentReader(info2, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(getRandom()));
  }
  @Override
@@ -77,7 +77,7 @@ public class TestSegmentMerger extends LuceneTestCase {
  public void testMerge() throws IOException {
    final Codec codec = Codec.getDefault();
-    SegmentMerger merger = new SegmentMerger(InfoStream.getDefault(), mergedDir, IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL, mergedSegment, MergeState.CheckAbort.NONE, null, new FieldInfos(new FieldInfos.FieldNumberBiMap()), codec, newIOContext(random));
+    SegmentMerger merger = new SegmentMerger(InfoStream.getDefault(), mergedDir, IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL, mergedSegment, MergeState.CheckAbort.NONE, null, new FieldInfos(new FieldInfos.FieldNumberBiMap()), codec, newIOContext(getRandom()));
    merger.add(reader1);
    merger.add(reader2);
    MergeState mergeState = merger.merge();
@@ -87,7 +87,7 @@ public class TestSegmentMerger extends LuceneTestCase {
    //Should be able to open a new SegmentReader against the new directory
    SegmentReader mergedReader = new SegmentReader(new SegmentInfo(mergedSegment, docsMerged, mergedDir, false, codec, fieldInfos),
-        DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random));
+        DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(getRandom()));
    assertTrue(mergedReader != null);
    assertTrue(mergedReader.numDocs() == 2);
    Document newDoc1 = mergedReader.document(0);
@@ -98,7 +98,7 @@ public class TestSegmentMerger extends LuceneTestCase {
    assertTrue(newDoc2 != null);
    assertTrue(DocHelper.numFields(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size());
-    DocsEnum termDocs = _TestUtil.docs(random, mergedReader,
+    DocsEnum termDocs = _TestUtil.docs(getRandom(), mergedReader,
                                       DocHelper.TEXT_FIELD_2_KEY,
                                       new BytesRef("field"),
                                       MultiFields.getLiveDocs(mergedReader),
diff --git lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java
index 0902c57..437e860 100644
--- lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java
+++ lucene/core/src/test/org/apache/lucene/index/TestSegmentReader.java
@@ -40,7 +40,7 @@ public class TestSegmentReader extends LuceneTestCase {
    super.setUp();
    dir = newDirectory();
    DocHelper.setupDoc(testDoc);
-    SegmentInfo info = DocHelper.writeDoc(random, dir, testDoc);
+    SegmentInfo info = DocHelper.writeDoc(getRandom(), dir, testDoc);
    reader = new SegmentReader(info, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, IOContext.READ);
  }
@@ -127,7 +127,7 @@ public class TestSegmentReader extends LuceneTestCase {
      }
    }
-    DocsEnum termDocs = _TestUtil.docs(random, reader,
+    DocsEnum termDocs = _TestUtil.docs(getRandom(), reader,
                                       DocHelper.TEXT_FIELD_1_KEY,
                                       new BytesRef("field"),
                                       MultiFields.getLiveDocs(reader),
@@ -135,7 +135,7 @@ public class TestSegmentReader extends LuceneTestCase {
                                       false);
    assertTrue(termDocs.nextDoc() != DocsEnum.NO_MORE_DOCS);
-    termDocs = _TestUtil.docs(random, reader,
+    termDocs = _TestUtil.docs(getRandom(), reader,
                              DocHelper.NO_NORMS_KEY,
                              new BytesRef(DocHelper.NO_NORMS_TEXT),
                              MultiFields.getLiveDocs(reader),
diff --git lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java
index 2732e94..88ca697 100644
--- lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java
+++ lucene/core/src/test/org/apache/lucene/index/TestSegmentTermDocs.java
@@ -37,7 +37,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
    super.setUp();
    dir = newDirectory();
    DocHelper.setupDoc(testDoc);
-    info = DocHelper.writeDoc(random, dir, testDoc);
+    info = DocHelper.writeDoc(getRandom(), dir, testDoc);
  }
  @Override
@@ -56,13 +56,13 @@ public class TestSegmentTermDocs extends LuceneTestCase {
  public void testTermDocs(int indexDivisor) throws IOException {
    //After adding the document, we should be able to read it back in
-    SegmentReader reader = new SegmentReader(info, indexDivisor, newIOContext(random));
+    SegmentReader reader = new SegmentReader(info, indexDivisor, newIOContext(getRandom()));
    assertTrue(reader != null);
    assertEquals(indexDivisor, reader.getTermInfosIndexDivisor());
    TermsEnum terms = reader.fields().terms(DocHelper.TEXT_FIELD_2_KEY).iterator(null);
    terms.seekCeil(new BytesRef("field"));
-    DocsEnum termDocs = _TestUtil.docs(random, terms, reader.getLiveDocs(), null, true);
+    DocsEnum termDocs = _TestUtil.docs(getRandom(), terms, reader.getLiveDocs(), null, true);
    if (termDocs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
      int docId = termDocs.docID();
      assertTrue(docId == 0);
@@ -79,9 +79,9 @@ public class TestSegmentTermDocs extends LuceneTestCase {
  public void testBadSeek(int indexDivisor) throws IOException {
    {
      //After adding the document, we should be able to read it back in
-      SegmentReader reader = new SegmentReader(info, indexDivisor, newIOContext(random));
+      SegmentReader reader = new SegmentReader(info, indexDivisor, newIOContext(getRandom()));
      assertTrue(reader != null);
-      DocsEnum termDocs = _TestUtil.docs(random, reader,
+      DocsEnum termDocs = _TestUtil.docs(getRandom(), reader,
                                         "textField2",
                                         new BytesRef("bad"),
                                         reader.getLiveDocs(),
@@ -93,9 +93,9 @@ public class TestSegmentTermDocs extends LuceneTestCase {
    }
    {
      //After adding the document, we should be able to read it back in
-      SegmentReader reader = new SegmentReader(info, indexDivisor, newIOContext(random));
+      SegmentReader reader = new SegmentReader(info, indexDivisor, newIOContext(getRandom()));
      assertTrue(reader != null);
-      DocsEnum termDocs = _TestUtil.docs(random, reader,
+      DocsEnum termDocs = _TestUtil.docs(getRandom(), reader,
                                         "junk",
                                         new BytesRef("bad"),
                                         reader.getLiveDocs(),
@@ -112,7 +112,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
  public void testSkipTo(int indexDivisor) throws IOException {
    Directory dir = newDirectory();
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setMergePolicy(newLogMergePolicy()));
    Term ta = new Term("content","aaa");
    for(int i = 0; i < 10; i++)
@@ -132,7 +132,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
    IndexReader reader = IndexReader.open(dir, indexDivisor);
-    DocsEnum tdocs = _TestUtil.docs(random, reader,
+    DocsEnum tdocs = _TestUtil.docs(getRandom(), reader,
                                    ta.field(),
                                    new BytesRef(ta.text()),
                                    MultiFields.getLiveDocs(reader),
@@ -157,7 +157,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
    assertFalse(tdocs.advance(10) != DocsEnum.NO_MORE_DOCS);
    // without next
-    tdocs = _TestUtil.docs(random, reader,
+    tdocs = _TestUtil.docs(getRandom(), reader,
                           ta.field(),
                           new BytesRef(ta.text()),
                           MultiFields.getLiveDocs(reader),
@@ -175,7 +175,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
    // exactly skipInterval documents and therefore with optimization
    // with next
-    tdocs = _TestUtil.docs(random, reader,
+    tdocs = _TestUtil.docs(getRandom(), reader,
                           tb.field(),
                           new BytesRef(tb.text()),
                           MultiFields.getLiveDocs(reader),
@@ -199,7 +199,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
    assertFalse(tdocs.advance(26) != DocsEnum.NO_MORE_DOCS);
    // without next
-    tdocs = _TestUtil.docs(random, reader,
+    tdocs = _TestUtil.docs(getRandom(), reader,
                           tb.field(),
                           new BytesRef(tb.text()),
                           MultiFields.getLiveDocs(reader),
@@ -219,7 +219,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
    // much more than skipInterval documents and therefore with optimization
    // with next
-    tdocs = _TestUtil.docs(random, reader,
+    tdocs = _TestUtil.docs(getRandom(), reader,
                           tc.field(),
                           new BytesRef(tc.text()),
                           MultiFields.getLiveDocs(reader),
@@ -245,7 +245,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
    assertFalse(tdocs.advance(76) != DocsEnum.NO_MORE_DOCS);
    //without next
-    tdocs = _TestUtil.docs(random, reader,
+    tdocs = _TestUtil.docs(getRandom(), reader,
                           tc.field(),
                           new BytesRef(tc.text()),
                           MultiFields.getLiveDocs(reader),
@@ -270,7 +270,7 @@ public class TestSegmentTermDocs extends LuceneTestCase {
  public void testIndexDivisor() throws IOException {
    testDoc = new Document();
    DocHelper.setupDoc(testDoc);
-    DocHelper.writeDoc(random, dir, testDoc);
+    DocHelper.writeDoc(getRandom(), dir, testDoc);
    testTermDocs(2);
    testBadSeek(2);
    testSkipTo(2);
diff --git lucene/core/src/test/org/apache/lucene/index/TestSegmentTermEnum.java lucene/core/src/test/org/apache/lucene/index/TestSegmentTermEnum.java
index 259e5d6..89de14c 100644
--- lucene/core/src/test/org/apache/lucene/index/TestSegmentTermEnum.java
+++ lucene/core/src/test/org/apache/lucene/index/TestSegmentTermEnum.java
@@ -49,7 +49,7 @@ public class TestSegmentTermEnum extends LuceneTestCase {
  public void testTermEnum() throws IOException {
    IndexWriter writer = null;
-    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())));
    // ADD 100 documents with term : aaa
    // add 100 documents with terms: aaa bbb
@@ -65,7 +65,7 @@ public class TestSegmentTermEnum extends LuceneTestCase {
    verifyDocFreq();
    // merge segments
-    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setOpenMode(OpenMode.APPEND));
    writer.forceMerge(1);
    writer.close();
@@ -75,7 +75,7 @@ public class TestSegmentTermEnum extends LuceneTestCase {
  public void testPrevTermAtEnd() throws IOException
  {
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setCodec(_TestUtil.alwaysPostingsFormat(new Lucene40PostingsFormat())));
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setCodec(_TestUtil.alwaysPostingsFormat(new Lucene40PostingsFormat())));
    addDoc(writer, "aaa bbb");
    writer.close();
    SegmentReader reader = getOnlySegmentReader(IndexReader.open(dir));
diff --git lucene/core/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java lucene/core/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java
index 1e9f6d5..bd2e5db 100644
--- lucene/core/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java
+++ lucene/core/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java
@@ -92,7 +92,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase {
  @Test
  public void testSnapshotDeletionPolicy() throws Exception {
    Directory fsDir = newDirectory();
-    runTest(random, fsDir);
+    runTest(getRandom(), fsDir);
    fsDir.close();
  }
@@ -206,7 +206,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase {
    byte[] buffer = new byte[4096];
    private void readFile(Directory dir, String name) throws Exception {
-      IndexInput input = dir.openInput(name, newIOContext(random));
+      IndexInput input = dir.openInput(name, newIOContext(getRandom()));
      try {
        long size = dir.fileLength(name);
        long bytesLeft = size;
@@ -238,7 +238,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase {
    // Create 3 snapshots: snapshot0, snapshot1, snapshot2
    Directory dir = newDirectory();
-    IndexWriter writer = new IndexWriter(dir, getConfig(random, sdp));
+    IndexWriter writer = new IndexWriter(dir, getConfig(getRandom(), sdp));
    prepareIndexAndSnapshots(sdp, writer, numSnapshots, "snapshot");
    writer.close();
@@ -249,7 +249,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase {
    // open a new IndexWriter w/ no snapshots to keep and assert that all snapshots are gone.
    sdp = getDeletionPolicy();
-    writer = new IndexWriter(dir, getConfig(random, sdp));
+    writer = new IndexWriter(dir, getConfig(getRandom(), sdp));
    writer.deleteUnusedFiles();
    writer.close();
    assertEquals("no snapshots should exist", 1, DirectoryReader.listCommits(dir).size());
@@ -269,7 +269,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase {
  public void testMultiThreadedSnapshotting() throws Exception {
    Directory dir = newDirectory();
    final SnapshotDeletionPolicy sdp = getDeletionPolicy();
-    final IndexWriter writer = new IndexWriter(dir, getConfig(random, sdp));
+    final IndexWriter writer = new IndexWriter(dir, getConfig(getRandom(), sdp));
    Thread[] threads = new Thread[10];
    for (int i = 0; i < threads.length; i++) {
@@ -314,12 +314,12 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase {
    int numSnapshots = 2;
    Directory dir = newDirectory();
    SnapshotDeletionPolicy sdp = getDeletionPolicy();
-    IndexWriter writer = new IndexWriter(dir, getConfig(random, sdp));
+    IndexWriter writer = new IndexWriter(dir, getConfig(getRandom(), sdp));
    prepareIndexAndSnapshots(sdp, writer, numSnapshots, "snapshot");
    writer.close();
    // now open the writer on "snapshot0" - make sure it succeeds
-    writer = new IndexWriter(dir, getConfig(random, sdp).setIndexCommit(sdp.getSnapshot("snapshot0")));
+    writer = new IndexWriter(dir, getConfig(getRandom(), sdp).setIndexCommit(sdp.getSnapshot("snapshot0")));
    // this does the actual rollback
    writer.commit();
    writer.deleteUnusedFiles();
@@ -336,7 +336,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase {
  public void testReleaseSnapshot() throws Exception {
    Directory dir = newDirectory();
    SnapshotDeletionPolicy sdp = getDeletionPolicy();
-    IndexWriter writer = new IndexWriter(dir, getConfig(random, sdp));
+    IndexWriter writer = new IndexWriter(dir, getConfig(getRandom(), sdp));
    prepareIndexAndSnapshots(sdp, writer, 1, "snapshot");
    // Create another commit - we must do that, because otherwise the "snapshot"
@@ -368,13 +368,13 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase {
    int numSnapshots = 3;
    Directory dir = newDirectory();
    SnapshotDeletionPolicy sdp = getDeletionPolicy();
-    IndexWriter writer = new IndexWriter(dir, getConfig(random, sdp));
+    IndexWriter writer = new IndexWriter(dir, getConfig(getRandom(), sdp));
    prepareIndexAndSnapshots(sdp, writer, numSnapshots, "snapshot");
    writer.close();
    // Make a new policy and initialize with snapshots.
    sdp = getDeletionPolicy(sdp.getSnapshots());
-    writer = new IndexWriter(dir, getConfig(random, sdp));
+    writer = new IndexWriter(dir, getConfig(getRandom(), sdp));
    // attempt to delete unused files - the snapshotted files should not be deleted
    writer.deleteUnusedFiles();
    writer.close();
@@ -386,7 +386,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase {
  public void testSnapshotLastCommitTwice() throws Exception {
    Directory dir = newDirectory();
    SnapshotDeletionPolicy sdp = getDeletionPolicy();
-    IndexWriter writer = new IndexWriter(dir, getConfig(random, sdp));
+    IndexWriter writer = new IndexWriter(dir, getConfig(getRandom(), sdp));
    writer.addDocument(new Document());
    writer.commit();
@@ -415,7 +415,7 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase {
    // on onInit().
    Directory dir = newDirectory();
    SnapshotDeletionPolicy sdp = getDeletionPolicy();
-    IndexWriter writer = new IndexWriter(dir, getConfig(random, sdp));
+    IndexWriter writer = new IndexWriter(dir, getConfig(getRandom(), sdp));
    writer.addDocument(new Document());
    writer.commit();
    IndexCommit ic = sdp.snapshot("s1");
@@ -426,14 +426,14 @@ public class TestSnapshotDeletionPolicy extends LuceneTestCase {
    // open a new writer w/ KeepOnlyLastCommit policy, so it will delete "s1"
    // commit.
-    new IndexWriter(dir, getConfig(random, null)).close();
+    new IndexWriter(dir, getConfig(getRandom(), null)).close();
    assertFalse("snapshotted commit should not exist", dir.fileExists(ic.getSegmentsFileName()));
    // Now reinit SDP from the commits in the index - the snapshot id should not
    // exist anymore.
    sdp = getDeletionPolicy(sdp.getSnapshots());
-    new IndexWriter(dir, getConfig(random, sdp)).close();
+    new IndexWriter(dir, getConfig(getRandom(), sdp)).close();
    try {
      sdp.getSnapshot("s1");
diff --git lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java
index d9addb7..6baac57 100644
--- lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java
+++ lucene/core/src/test/org/apache/lucene/index/TestStressAdvance.java
@@ -33,7 +33,7 @@ public class TestStressAdvance extends LuceneTestCase {
        System.out.println("\nTEST: iter=" + iter);
      }
      Directory dir = newDirectory();
-      RandomIndexWriter w = new RandomIndexWriter(random, dir);
+      RandomIndexWriter w = new RandomIndexWriter(getRandom(), dir);
      final Set aDocs = new HashSet();
      final Document doc = new Document();
      final Field f = newField("field", "", StringField.TYPE_UNSTORED);
@@ -42,7 +42,7 @@ public class TestStressAdvance extends LuceneTestCase {
      doc.add(idField);
      int num = atLeast(4097);
      for(int id=0;id<num;id++) {
[...]
 docs = indexRandom(5, 3, 100, dir1, maxThreadStates, doReaderPooling);
-    indexSerial(random, docs, dir2);
+    indexSerial(getRandom(), docs, dir2);
    // verifying verify
    // verifyEquals(dir1, dir1, "id");
@@ -100,16 +100,16 @@ public class TestStressIndexing2 extends LuceneTestCase {
      if (VERBOSE) {
        System.out.println("\n\nTEST: top iter=" + i);
      }
-      sameFieldOrder=random.nextBoolean();
-      mergeFactor=random.nextInt(3)+2;
-      maxBufferedDocs=random.nextInt(3)+2;
-      int maxThreadStates = 1+random.nextInt(10);
-      boolean doReaderPooling = random.nextBoolean();
+      sameFieldOrder=getRandom().nextBoolean();
+      mergeFactor=getRandom().nextInt(3)+2;
+      maxBufferedDocs=getRandom().nextInt(3)+2;
+      int maxThreadStates = 1+getRandom().nextInt(10);
+      boolean doReaderPooling = getRandom().nextBoolean();
      seed++;
-      int nThreads=random.nextInt(5)+1;
-      int iter=random.nextInt(5)+1;
-      int range=random.nextInt(20)+1;
+      int nThreads=getRandom().nextInt(5)+1;
+      int iter=getRandom().nextInt(5)+1;
+      int range=getRandom().nextInt(20)+1;
      Directory dir1 = newDirectory();
      Directory dir2 = newDirectory();
      if (VERBOSE) {
@@ -119,7 +119,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
      if (VERBOSE) {
        System.out.println("TEST: index serial");
      }
-      indexSerial(random, docs, dir2);
+      indexSerial(getRandom(), docs, dir2);
      if (VERBOSE) {
        System.out.println("TEST: verify");
      }
@@ -150,7 +150,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
  public DocsAndWriter indexRandomIWReader(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException {
    Map docs = new HashMap();
    IndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE).setRAMBufferSizeMB(
+        TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setOpenMode(OpenMode.CREATE).setRAMBufferSizeMB(
        0.1).setMaxBufferedDocs(maxBufferedDocs).setMergePolicy(newLogMergePolicy()));
    w.commit();
    LogMergePolicy lmp = (LogMergePolicy) w.getConfig().getMergePolicy();
@@ -201,7 +201,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
                                boolean doReaderPooling) throws IOException, InterruptedException {
    Map docs = new HashMap();
    IndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE)
+        TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setOpenMode(OpenMode.CREATE)
             .setRAMBufferSizeMB(0.1).setMaxBufferedDocs(maxBufferedDocs).setIndexerThreadPool(new ThreadAffinityDocumentsWriterThreadPool(maxThreadStates))
             .setReaderPooling(doReaderPooling).setMergePolicy(newLogMergePolicy()));
    LogMergePolicy lmp = (LogMergePolicy) w.getConfig().getMergePolicy();
@@ -265,13 +265,13 @@ public class TestStressIndexing2 extends LuceneTestCase {
    w.close();
  }
-  public static void verifyEquals(Random r, DirectoryReader r1, Directory dir2, String idField) throws Throwable {
+  public void verifyEquals(Random r, DirectoryReader r1, Directory dir2, String idField) throws Throwable {
    DirectoryReader r2 = IndexReader.open(dir2);
    verifyEquals(r1, r2, idField);
    r2.close();
  }
-  public static void verifyEquals(Directory dir1, Directory dir2, String idField) throws Throwable {
+  public void verifyEquals(Directory dir1, Directory dir2, String idField) throws Throwable {
    DirectoryReader r1 = IndexReader.open(dir1);
    DirectoryReader r2 = IndexReader.open(dir2);
    verifyEquals(r1, r2, idField);
@@ -297,7 +297,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
  }
-  public static void verifyEquals(DirectoryReader r1, DirectoryReader r2, String idField) throws Throwable {
+  public void verifyEquals(DirectoryReader r1, DirectoryReader r2, String idField) throws Throwable {
    if (VERBOSE) {
      System.out.println("\nr1 docs:");
      printDocs(r1);
@@ -335,7 +335,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
      // deleted docs):
      DocsEnum docs = null;
      while(termsEnum.next() != null) {
-        docs = _TestUtil.docs(random, termsEnum, null, docs, false);
+        docs = _TestUtil.docs(getRandom(), termsEnum, null, docs, false);
        while(docs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
          fail("r1 is not empty but r2 is");
        }
@@ -355,9 +355,9 @@ public class TestStressIndexing2 extends LuceneTestCase {
        break;
      }
-      termDocs1 = _TestUtil.docs(random, termsEnum, liveDocs1, termDocs1, false);
+      termDocs1 = _TestUtil.docs(getRandom(), termsEnum, liveDocs1, termDocs1, false);
      if (termsEnum2.seekExact(term, false)) {
-        termDocs2 = _TestUtil.docs(random, termsEnum2, liveDocs2, termDocs2, false);
+        termDocs2 = _TestUtil.docs(getRandom(), termsEnum2, liveDocs2, termDocs2, false);
      } else {
        termDocs2 = null;
      }
@@ -416,7 +416,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
            System.out.println("    pos=" + dpEnum.nextPosition());
          }
        } else {
-          dEnum = _TestUtil.docs(random, termsEnum3, null, dEnum, true);
+          dEnum = _TestUtil.docs(getRandom(), termsEnum3, null, dEnum, true);
          assertNotNull(dEnum);
          assertTrue(dEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
          final int freq = dEnum.freq();
@@ -450,7 +450,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
            System.out.println("    pos=" + dpEnum.nextPosition());
          }
        } else {
-          dEnum = _TestUtil.docs(random, termsEnum3, null, dEnum, true);
+          dEnum = _TestUtil.docs(getRandom(), termsEnum3, null, dEnum, true);
          assertNotNull(dEnum);
          assertTrue(dEnum.nextDoc() != DocsEnum.NO_MORE_DOCS);
          final int freq = dEnum.freq();
@@ -507,7 +507,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
      }
      //System.out.println("TEST: term1=" + term1);
-      docs1 = _TestUtil.docs(random, termsEnum1, liveDocs1, docs1, true);
+      docs1 = _TestUtil.docs(getRandom(), termsEnum1, liveDocs1, docs1, true);
      while (docs1.nextDoc() != DocsEnum.NO_MORE_DOCS) {
        int d = docs1.docID();
        int f = docs1.freq();
@@ -541,7 +541,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
      }
      //System.out.println("TEST: term1=" + term1);
-      docs2 = _TestUtil.docs(random, termsEnum2, liveDocs2, docs2, true);
+      docs2 = _TestUtil.docs(getRandom(), termsEnum2, liveDocs2, docs2, true);
      while (docs2.nextDoc() != DocsEnum.NO_MORE_DOCS) {
        int d = r2r1[docs2.docID()];
        int f = docs2.freq();
@@ -668,8 +668,8 @@ public class TestStressIndexing2 extends LuceneTestCase {
        assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum1.nextDoc());
        assertEquals(DocsEnum.NO_MORE_DOCS, dpEnum2.nextDoc());
      } else {
-        dEnum1 = _TestUtil.docs(random, termsEnum1, null, dEnum1, true);
-        dEnum2 = _TestUtil.docs(random, termsEnum2, null, dEnum2, true);
+        dEnum1 = _TestUtil.docs(getStaticRandom(), termsEnum1, null, dEnum1, true);
+        dEnum2 = _TestUtil.docs(getStaticRandom(), termsEnum2, null, dEnum2, true);
        assertNotNull(dEnum1);
        assertNotNull(dEnum2);
        int docID1 = dEnum1.nextDoc();
diff --git lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java
index a607e60..91c643c 100644
--- lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java
+++ lucene/core/src/test/org/apache/lucene/index/TestStressNRT.java
@@ -66,20 +66,20 @@ public class TestStressNRT extends LuceneTestCase {
  public void test() throws Exception {
    // update variables
-    final int commitPercent = random.nextInt(20);
-    final int softCommitPercent = random.nextInt(100); // what percent of the commits are soft
-    final int deletePercent = random.nextInt(50);
-    final int deleteByQueryPercent = random.nextInt(25);
+    final int commitPercent = getRandom().nextInt(20);
+    final int softCommitPercent = getRandom().nextInt(100); // what percent of the commits are soft
+    final int deletePercent = getRandom().nextInt(50);
+    final int deleteByQueryPercent = getRandom().nextInt(25);
    final int ndocs = atLeast(50);
-    final int nWriteThreads = _TestUtil.nextInt(random, 1, TEST_NIGHTLY ? 10 : 5);
-    final int maxConcurrentCommits = _TestUtil.nextInt(random, 1, TEST_NIGHTLY ? 10 : 5);   // number of committers at a time... needed if we want to avoid commit errors due to exceeding the max
+    final int nWriteThreads = _TestUtil.nextInt(getRandom(), 1, TEST_NIGHTLY ? 10 : 5);
+    final int maxConcurrentCommits = _TestUtil.nextInt(getRandom(), 1, TEST_NIGHTLY ? 10 : 5);   // number of committers at a time... needed if we want to avoid commit errors due to exceeding the max
-    final boolean tombstones = random.nextBoolean();
+    final boolean tombstones = getRandom().nextBoolean();
    // query variables
    final AtomicLong operations = new AtomicLong(atLeast(10000));  // number of query operations to perform in total
-    final int nReadThreads = _TestUtil.nextInt(random, 1, TEST_NIGHTLY ? 10 : 5);
+    final int nReadThreads = _TestUtil.nextInt(getRandom(), 1, TEST_NIGHTLY ? 10 : 5);
    initModel(ndocs);
    final FieldType storedOnlyType = new FieldType();
@@ -106,14 +106,14 @@ public class TestStressNRT extends LuceneTestCase {
    Directory dir = newDirectory();
-    final RandomIndexWriter writer = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    final RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())));
    writer.setDoRandomForceMergeAssert(false);
    writer.commit();
    reader = IndexReader.open(dir);
    for (int i=0; i
[...]
 acceptTerms = new HashSet();
      final TreeSet sortedAcceptTerms = new TreeSet();
-      final double keepPct = random.nextDouble();
+      final double keepPct = getRandom().nextDouble();
      Automaton a;
      if (iter == 0) {
        if (VERBOSE) {
@@ -255,7 +255,7 @@ public class TestTermsEnum extends LuceneTestCase {
        }
        for (String s : terms) {
          final String s2;
-          if (random.nextDouble() <= keepPct) {
+          if (getRandom().nextDouble() <= keepPct) {
            s2 = s;
          } else {
            s2 = getRandomString();
@@ -287,7 +287,7 @@ public class TestTermsEnum extends LuceneTestCase {
      }
      for(int iter2=0;iter2<100;iter2++) {
-        final BytesRef startTerm = acceptTermsArray.length == 0 || random.nextBoolean() ? null : acceptTermsArray[random.nextInt(acceptTermsArray.length)];
+        final BytesRef startTerm = acceptTermsArray.length == 0 || getRandom().nextBoolean() ? null : acceptTermsArray[getRandom().nextInt(acceptTermsArray.length)];
        if (VERBOSE) {
          System.out.println("\nTEST: iter2=" + iter2 + " startTerm=" + (startTerm == null ? "" : startTerm.utf8ToString()));
@@ -331,7 +331,7 @@ public class TestTermsEnum extends LuceneTestCase {
          }
          assertEquals(expected, actual);
          assertEquals(1, te.docFreq());
-          docsEnum = _TestUtil.docs(random, te, null, docsEnum, false);
+          docsEnum = _TestUtil.docs(getRandom(), te, null, docsEnum, false);
          final int docID = docsEnum.nextDoc();
          assertTrue(docID != DocsEnum.NO_MORE_DOCS);
          assertEquals(docIDToID[docID], termToID.get(expected).intValue());
@@ -355,13 +355,13 @@ public class TestTermsEnum extends LuceneTestCase {
  private IndexReader makeIndex(String... terms) throws Exception {
    d = newDirectory();
-    IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+    IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()));
    /*
    iwc.setCodec(new StandardCodec(minTermsInBlock, maxTermsInBlock));
    */
-    final RandomIndexWriter w = new RandomIndexWriter(random, d, iwc);
+    final RandomIndexWriter w = new RandomIndexWriter(getRandom(), d, iwc);
    for(String term : terms) {
      Document doc = new Document();
      Field f = newField(FIELD, term, StringField.TYPE_UNSTORED);
@@ -501,7 +501,7 @@ public class TestTermsEnum extends LuceneTestCase {
  public void testZeroTerms() throws Exception {
    d = newDirectory();
-    final RandomIndexWriter w = new RandomIndexWriter(random, d);
+    final RandomIndexWriter w = new RandomIndexWriter(getRandom(), d);
    Document doc = new Document();
    doc.add(newField("field", "one two three", TextField.TYPE_UNSTORED));
    doc = new Document();
@@ -524,18 +524,18 @@ public class TestTermsEnum extends LuceneTestCase {
  private String getRandomString() {
    //return _TestUtil.randomSimpleString(random);
-    return _TestUtil.randomRealisticUnicodeString(random);
+    return _TestUtil.randomRealisticUnicodeString(getRandom());
  }
  public void testRandomTerms() throws Exception {
-    final String[] terms = new String[_TestUtil.nextInt(random, 1, atLeast(1000))];
+    final String[] terms = new String[_TestUtil.nextInt(getRandom(), 1, atLeast(1000))];
    final Set seen = new HashSet();
-    final boolean allowEmptyString = random.nextBoolean();
+    final boolean allowEmptyString = getRandom().nextBoolean();
-    if (random.nextInt(10) == 7 && terms.length > 2) {
+    if (getRandom().nextInt(10) == 7 && terms.length > 2) {
      // Sometimes add a bunch of terms sharing a longish common prefix:
-      final int numTermsSamePrefix = random.nextInt(terms.length/2);
+      final int numTermsSamePrefix = getRandom().nextInt(terms.length/2);
      if (numTermsSamePrefix > 0) {
        String prefix;
        while(true) {
@@ -570,7 +570,7 @@ public class TestTermsEnum extends LuceneTestCase {
  // sugar
  private boolean seekExact(TermsEnum te, String term) throws IOException {
-    return te.seekExact(new BytesRef(term), random.nextBoolean());
+    return te.seekExact(new BytesRef(term), getRandom().nextBoolean());
  }
  // sugar
@@ -627,7 +627,7 @@ public class TestTermsEnum extends LuceneTestCase {
      final BytesRef t;
      int loc;
      final TermState termState;
-      if (random.nextInt(6) == 4) {
+      if (getRandom().nextInt(6) == 4) {
        // pick term that doens't exist:
        t = getNonExistTerm(validTerms);
        termState = null;
        if (VERBOSE) {
          System.out.println("\nTEST: invalid term=" + t.utf8ToString());
        }
        loc = Arrays.binarySearch(validTerms, t);
-      } else if (termStates.size() != 0 && random.nextInt(4) == 1) {
-        final TermAndState ts = termStates.get(random.nextInt(termStates.size()));
+      } else if (termStates.size() != 0 && getRandom().nextInt(4) == 1) {
+        final TermAndState ts = termStates.get(getRandom().nextInt(termStates.size()));
        t = ts.term;
        loc = Arrays.binarySearch(validTerms, t);
        assertTrue(loc >= 0);
@@ -646,7 +646,7 @@ public class TestTermsEnum extends LuceneTestCase {
        }
      } else {
        // pick valid term
-        loc = random.nextInt(validTerms.length);
+        loc = getRandom().nextInt(validTerms.length);
        t = BytesRef.deepCopyOf(validTerms[loc]);
        termState = null;
        if (VERBOSE) {
@@ -655,7 +655,7 @@ public class TestTermsEnum extends LuceneTestCase {
      }
      // seekCeil or seekExact:
-      final boolean doSeekExact = random.nextBoolean();
+      final boolean doSeekExact = getRandom().nextBoolean();
      if (termState != null) {
        if (VERBOSE) {
          System.out.println("  seekExact termState");
        }
@@ -665,13 +665,13 @@ public class TestTermsEnum extends LuceneTestCase {
        if (VERBOSE) {
          System.out.println("  seekExact");
        }
-        assertEquals(loc >= 0, te.seekExact(t, random.nextBoolean()));
+        assertEquals(loc >= 0, te.seekExact(t, getRandom().nextBoolean()));
      } else {
        if (VERBOSE) {
          System.out.println("  seekCeil");
        }
-        final TermsEnum.SeekStatus result = te.seekCeil(t, random.nextBoolean());
+        final TermsEnum.SeekStatus result = te.seekCeil(t, getRandom().nextBoolean());
        if (VERBOSE) {
          System.out.println("  got " + result);
        }
@@ -699,7 +699,7 @@ public class TestTermsEnum extends LuceneTestCase {
      }
      // Do a bunch of next's after the seek
-      final int numNext = random.nextInt(validTerms.length);
+      final int numNext = getRandom().nextInt(validTerms.length);
      for(int nextCount=0;nextCount<numNext;nextCount++) {
[...]
 matchedTerms = new ArrayList();
    for(BytesRef t : terms) {
@@ -114,21 +114,21 @@ public class TestTermsEnum2 extends LuceneTestCase {
  /** seeks to every term accepted by some automata */
  public void testSeeking() throws Exception {
    for (int i = 0; i < numIterations; i++) {
-      String reg = AutomatonTestUtil.randomRegexp(random);
+      String reg = AutomatonTestUtil.randomRegexp(getRandom());
      Automaton automaton = new RegExp(reg, RegExp.NONE).toAutomaton();
      TermsEnum te = MultiFields.getTerms(reader, "field").iterator(null);
      ArrayList unsortedTerms = new ArrayList(terms);
-      Collections.shuffle(unsortedTerms, random);
+      Collections.shuffle(unsortedTerms, getRandom());
      for (BytesRef term : unsortedTerms) {
        if (BasicOperations.run(automaton, term.utf8ToString())) {
          // term is accepted
-          if (random.nextBoolean()) {
+          if (getRandom().nextBoolean()) {
            // seek exact
-            assertTrue(te.seekExact(term, random.nextBoolean()));
+            assertTrue(te.seekExact(term, getRandom().nextBoolean()));
          } else {
            // seek ceil
-            assertEquals(SeekStatus.FOUND, te.seekCeil(term, random.nextBoolean()));
+            assertEquals(SeekStatus.FOUND, te.seekCeil(term, getRandom().nextBoolean()));
            assertEquals(term, te.term());
          }
        }
@@ -142,14 +142,14 @@ public class TestTermsEnum2 extends LuceneTestCase {
      TermsEnum te = MultiFields.getTerms(reader, "field").iterator(null);
      for (BytesRef term : terms) {
-        int c = random.nextInt(3);
+        int c = getRandom().nextInt(3);
        if (c == 0) {
          assertEquals(term, te.next());
        } else if (c == 1) {
-          assertEquals(SeekStatus.FOUND, te.seekCeil(term, random.nextBoolean()));
+          assertEquals(SeekStatus.FOUND, te.seekCeil(term, getRandom().nextBoolean()));
          assertEquals(term, te.term());
        } else {
-          assertTrue(te.seekExact(term, random.nextBoolean()));
+          assertTrue(te.seekExact(term, getRandom().nextBoolean()));
        }
      }
    }
@@ -158,7 +158,7 @@ public class TestTermsEnum2 extends LuceneTestCase {
  /** tests intersect: TODO start at a random term! */
  public void testIntersect() throws Exception {
    for (int i = 0; i < numIterations; i++) {
-      String reg = AutomatonTestUtil.randomRegexp(random);
+      String reg = AutomatonTestUtil.randomRegexp(getRandom());
      Automaton automaton = new RegExp(reg, RegExp.NONE).toAutomaton();
      CompiledAutomaton ca = new CompiledAutomaton(automaton, SpecialOperations.isFinite(automaton), false);
      TermsEnum te = MultiFields.getTerms(reader, "field").intersect(ca, null);
diff --git lucene/core/src/test/org/apache/lucene/index/TestThreadedForceMerge.java lucene/core/src/test/org/apache/lucene/index/TestThreadedForceMerge.java
index 5207919..a8f173e 100644
--- lucene/core/src/test/org/apache/lucene/index/TestThreadedForceMerge.java
+++ lucene/core/src/test/org/apache/lucene/index/TestThreadedForceMerge.java
@@ -28,12 +28,13 @@ import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.util.English;
 import org.apache.lucene.util.LuceneTestCase;
+import org.junit.BeforeClass;
 import java.util.Random;
 public class TestThreadedForceMerge extends LuceneTestCase {
-
-  private static final Analyzer ANALYZER = new MockAnalyzer(random, MockTokenizer.SIMPLE, true);
+
+  private static Analyzer ANALYZER;
   private final static int NUM_THREADS = 3;
   //private final static int NUM_THREADS = 5;
@@ -44,6 +45,11 @@ public class TestThreadedForceMerge extends LuceneTestCase {
   private volatile boolean failed;
+  @BeforeClass
+  public static void setup() {
+    ANALYZER = new MockAnalyzer(getStaticRandom(), MockTokenizer.SIMPLE, true);
+  }
+
   private void setFailed() {
     failed = true;
   }
@@ -137,7 +143,7 @@ public class TestThreadedForceMerge extends LuceneTestCase {
   */
  public void testThreadedForceMerge() throws Exception {
    Directory directory = newDirectory();
-    runTest(random, directory);
+    runTest(getRandom(), directory);
    directory.close();
  }
}
diff --git lucene/core/src/test/org/apache/lucene/index/TestTieredMergePolicy.java lucene/core/src/test/org/apache/lucene/index/TestTieredMergePolicy.java
index 6b61e1a..0b92a7d 100644
--- lucene/core/src/test/org/apache/lucene/index/TestTieredMergePolicy.java
+++ lucene/core/src/test/org/apache/lucene/index/TestTieredMergePolicy.java
@@ -29,7 +29,7 @@ public class TestTieredMergePolicy extends LuceneTestCase {
  public void testForceMergeDeletes() throws Exception {
    Directory dir = newDirectory();
-    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+    IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()));
    TieredMergePolicy tmp = newTieredMergePolicy();
    conf.setMergePolicy(tmp);
    conf.setMaxBufferedDocs(4);
@@ -72,7 +72,7 @@ public class TestTieredMergePolicy extends LuceneTestCase {
        System.out.println("TEST: iter=" + iter);
      }
      Directory dir = newDirectory();
-      IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+      IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()));
      conf.setMergeScheduler(new SerialMergeScheduler());
      TieredMergePolicy tmp = newTieredMergePolicy();
      conf.setMergePolicy(tmp);
@@ -82,7 +82,7 @@ public class TestTieredMergePolicy extends LuceneTestCase {
      IndexWriter w = new IndexWriter(dir, conf);
      int maxCount = 0;
-      final int numDocs = _TestUtil.nextInt(random, 20, 100);
+      final int numDocs = _TestUtil.nextInt(getRandom(), 20, 100);
      for(int i=0;i<numDocs;i++) {
[...]
 data = new HashMap();
    data.put("index", "Rolled back to 1-"+id);
@@ -127,7 +127,7 @@ public class TestTransactionRollback extends LuceneTestCase {
    dir = newDirectory();
    //Build index, of records 1 to 100, committing after each batch of 10
    IndexDeletionPolicy sdp=new KeepAllDeletionPolicy();
-    IndexWriter w=new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy(sdp));
+    IndexWriter w=new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setIndexDeletionPolicy(sdp));
    for(int currentRecordId=1;currentRecordId<=100;currentRecordId++) {
      Document doc=new Document();
@@ -202,7 +202,7 @@ public class TestTransactionRollback extends LuceneTestCase {
    for(int i=0;i<2;i++) {
      // Unless you specify a prior commit point, rollback
      // should not work:
-      new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))
+      new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))
                      .setIndexDeletionPolicy(new DeleteLastCommitPolicy())).close();
      IndexReader r = IndexReader.open(dir);
      assertEquals(100, r.numDocs());
diff --git lucene/core/src/test/org/apache/lucene/index/TestTransactions.java lucene/core/src/test/org/apache/lucene/index/TestTransactions.java
index f48c423..a4959c6 100644
--- lucene/core/src/test/org/apache/lucene/index/TestTransactions.java
+++ lucene/core/src/test/org/apache/lucene/index/TestTransactions.java
@@ -37,7 +37,7 @@ public class TestTransactions extends LuceneTestCase {
  private class RandomFailure extends MockDirectoryWrapper.Failure {
    @Override
    public void eval(MockDirectoryWrapper dir) throws IOException {
-      if (TestTransactions.doFail && random.nextInt() % 10 <= 3) {
+      if (TestTransactions.doFail && getRandom().nextInt() % 10 <= 3) {
        throw new IOException("now failing randomly but on purpose");
      }
    }
@@ -96,7 +96,7 @@ public class TestTransactions extends LuceneTestCase {
      IndexWriter writer1 = new IndexWriter(
          dir1,
-          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).
              setMaxBufferedDocs(3).
              setMergeScheduler(new ConcurrentMergeScheduler()).
              setMergePolicy(newLogMergePolicy(2))
@@ -107,7 +107,7 @@ public class TestTransactions extends LuceneTestCase {
      // happen @ different times
      IndexWriter writer2 = new IndexWriter(
          dir2,
-          newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+          newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).
              setMaxBufferedDocs(2).
              setMergeScheduler(new ConcurrentMergeScheduler()).
setMergePolicy(newLogMergePolicy(3)) @@ -152,7 +152,7 @@ public class TestTransactions extends LuceneTestCase { customType.setStoreTermVectors(true); for(int j=0; j<10; j++) { Document d = new Document(); - int n = random.nextInt(); + int n = getRandom().nextInt(); d.add(newField("id", Integer.toString(nextID++), customType)); d.add(newField("contents", English.intToEnglish(n), TextField.TYPE_UNSTORED)); writer.addDocument(d); @@ -194,10 +194,10 @@ public class TestTransactions extends LuceneTestCase { } public void initIndex(Directory dir) throws Throwable { - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); for(int j=0; j<7; j++) { Document d = new Document(); - int n = random.nextInt(); + int n = getRandom().nextInt(); d.add(newField("contents", English.intToEnglish(n), TextField.TYPE_UNSTORED)); writer.addDocument(d); } @@ -206,8 +206,8 @@ public class TestTransactions extends LuceneTestCase { public void testTransactions() throws Throwable { // we cant use non-ramdir on windows, because this test needs to double-write. - MockDirectoryWrapper dir1 = new MockDirectoryWrapper(random, new RAMDirectory()); - MockDirectoryWrapper dir2 = new MockDirectoryWrapper(random, new RAMDirectory()); + MockDirectoryWrapper dir1 = new MockDirectoryWrapper(getRandom(), new RAMDirectory()); + MockDirectoryWrapper dir2 = new MockDirectoryWrapper(getRandom(), new RAMDirectory()); dir1.setPreventDoubleWrite(false); dir2.setPreventDoubleWrite(false); dir1.failOn(new RandomFailure()); diff --git lucene/core/src/test/org/apache/lucene/index/TestTypePromotion.java lucene/core/src/test/org/apache/lucene/index/TestTypePromotion.java index 5f40fdb..4d61637 100644 --- lucene/core/src/test/org/apache/lucene/index/TestTypePromotion.java +++ lucene/core/src/test/org/apache/lucene/index/TestTypePromotion.java @@ -69,28 +69,28 @@ public class TestTypePromotion extends LuceneTestCase { throws CorruptIndexException, IOException { Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); int num_1 = atLeast(200); int num_2 = atLeast(200); int num_3 = atLeast(200); long[] values = new long[num_1 + num_2 + num_3]; index(writer, - randomValueType(types, random), values, 0, num_1); + randomValueType(types, getRandom()), values, 0, num_1); writer.commit(); index(writer, - randomValueType(types, random), values, num_1, num_2); + randomValueType(types, getRandom()), values, num_1, num_2); writer.commit(); - if (random.nextInt(4) == 0) { + if (getRandom().nextInt(4) == 0) { // once in a while use addIndexes writer.forceMerge(1); Directory dir_2 = newDirectory() ; IndexWriter writer_2 = new IndexWriter(dir_2, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); index(writer_2, - randomValueType(types, random), values, num_1 + num_2, num_3); + randomValueType(types, getRandom()), values, num_1 + num_2, num_3); writer_2.commit(); writer_2.close(); if (rarely()) { @@ -104,7 +104,7 @@ public class TestTypePromotion extends LuceneTestCase { dir_2.close(); } else { index(writer, - randomValueType(types, random), values, num_1 + num_2, num_3); + randomValueType(types, getRandom()), values, num_1 
+ num_2, num_3); } writer.forceMerge(1); @@ -214,28 +214,28 @@ public class TestTypePromotion extends LuceneTestCase { doc.add(new Field("id", i + "", TextField.TYPE_STORED)); switch (valueType) { case VAR_INTS: - values[i] = random.nextInt(); + values[i] = getRandom().nextInt(); valField.setLongValue(values[i]); break; case FIXED_INTS_16: - values[i] = random.nextInt(Short.MAX_VALUE); + values[i] = getRandom().nextInt(Short.MAX_VALUE); valField.setIntValue((short) values[i]); break; case FIXED_INTS_32: - values[i] = random.nextInt(); + values[i] = getRandom().nextInt(); valField.setIntValue((int) values[i]); break; case FIXED_INTS_64: - values[i] = random.nextLong(); + values[i] = getRandom().nextLong(); valField.setLongValue(values[i]); break; case FLOAT_64: - double nextDouble = random.nextDouble(); + double nextDouble = getRandom().nextDouble(); values[i] = Double.doubleToRawLongBits(nextDouble); valField.setDoubleValue(nextDouble); break; case FLOAT_32: - final float nextFloat = random.nextFloat(); + final float nextFloat = getRandom().nextFloat(); values[i] = Double.doubleToRawLongBits(nextFloat); valField.setFloatValue(nextFloat); break; @@ -246,7 +246,7 @@ public class TestTypePromotion extends LuceneTestCase { case BYTES_FIXED_DEREF: case BYTES_FIXED_SORTED: case BYTES_FIXED_STRAIGHT: - values[i] = random.nextLong(); + values[i] = getRandom().nextLong(); byte bytes[] = new byte[8]; ByteArrayDataOutput out = new ByteArrayDataOutput(bytes, 0, 8); out.writeLong(values[i]); @@ -258,12 +258,12 @@ public class TestTypePromotion extends LuceneTestCase { byte lbytes[] = new byte[8]; ByteArrayDataOutput lout = new ByteArrayDataOutput(lbytes, 0, 8); final int len; - if (random.nextBoolean()) { - values[i] = random.nextInt(); + if (getRandom().nextBoolean()) { + values[i] = getRandom().nextInt(); lout.writeInt((int)values[i]); len = 4; } else { - values[i] = random.nextLong(); + values[i] = getRandom().nextLong(); lout.writeLong(values[i]); len = 8; } @@ -275,7 +275,7 @@ public class TestTypePromotion extends LuceneTestCase { } doc.add(valField); writer.addDocument(doc); - if (random.nextInt(10) == 0) { + if (getRandom().nextInt(10) == 0) { writer.commit(); } } @@ -300,26 +300,26 @@ public class TestTypePromotion extends LuceneTestCase { public void testMergeIncompatibleTypes() throws IOException { Directory dir = newDirectory(); - IndexWriterConfig writerConfig = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); + IndexWriterConfig writerConfig = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())); writerConfig.setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES); // no merges until we are done with adding values IndexWriter writer = new IndexWriter(dir, writerConfig); int num_1 = atLeast(200); int num_2 = atLeast(200); long[] values = new long[num_1 + num_2]; index(writer, - randomValueType(INTEGERS, random), values, 0, num_1); + randomValueType(INTEGERS, getRandom()), values, 0, num_1); writer.commit(); - if (random.nextInt(4) == 0) { + if (getRandom().nextInt(4) == 0) { // once in a while use addIndexes Directory dir_2 = newDirectory() ; IndexWriter writer_2 = new IndexWriter(dir_2, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); index(writer_2, - randomValueType(random.nextBoolean() ? UNSORTED_BYTES : SORTED_BYTES, random), values, num_1, num_2); + randomValueType(getRandom().nextBoolean() ? 
UNSORTED_BYTES : SORTED_BYTES, getRandom()), values, num_1, num_2); writer_2.commit(); writer_2.close(); - if (random.nextBoolean()) { + if (getRandom().nextBoolean()) { writer.addIndexes(dir_2); } else { // do a real merge here @@ -330,11 +330,11 @@ public class TestTypePromotion extends LuceneTestCase { dir_2.close(); } else { index(writer, - randomValueType(random.nextBoolean() ? UNSORTED_BYTES : SORTED_BYTES, random), values, num_1, num_2); + randomValueType(getRandom().nextBoolean() ? UNSORTED_BYTES : SORTED_BYTES, getRandom()), values, num_1, num_2); writer.commit(); } writer.close(); - writerConfig = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); + writerConfig = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())); if (writerConfig.getMergePolicy() instanceof NoMergePolicy) { writerConfig.setMergePolicy(newLogMergePolicy()); // make sure we merge to one segment (merge everything together) } diff --git lucene/core/src/test/org/apache/lucene/index/TestUniqueTermCount.java lucene/core/src/test/org/apache/lucene/index/TestUniqueTermCount.java index 850b507..a711a12 100644 --- lucene/core/src/test/org/apache/lucene/index/TestUniqueTermCount.java +++ lucene/core/src/test/org/apache/lucene/index/TestUniqueTermCount.java @@ -45,9 +45,9 @@ public class TestUniqueTermCount extends LuceneTestCase { super.setUp(); dir = newDirectory(); IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random, MockTokenizer.SIMPLE, true)).setMergePolicy(newLogMergePolicy()); + new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true)).setMergePolicy(newLogMergePolicy()); config.setSimilarity(new TestSimilarity()); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, config); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir, config); Document doc = new Document(); Field foo = newField("foo", "", TextField.TYPE_UNSTORED); doc.add(foo); @@ -79,10 +79,10 @@ public class TestUniqueTermCount extends LuceneTestCase { private String addValue() { StringBuilder sb = new StringBuilder(); HashSet terms = new HashSet(); - int num = _TestUtil.nextInt(random, 0, 255); + int num = _TestUtil.nextInt(getRandom(), 0, 255); for (int i = 0; i < num; i++) { sb.append(' '); - char term = (char) _TestUtil.nextInt(random, 'a', 'z'); + char term = (char) _TestUtil.nextInt(getRandom(), 'a', 'z'); sb.append(term); terms.add("" + term); } diff --git lucene/core/src/test/org/apache/lucene/search/BaseTestRangeFilter.java lucene/core/src/test/org/apache/lucene/search/BaseTestRangeFilter.java index f12f4c6..9fcc5e1 100644 --- lucene/core/src/test/org/apache/lucene/search/BaseTestRangeFilter.java +++ lucene/core/src/test/org/apache/lucene/search/BaseTestRangeFilter.java @@ -95,10 +95,10 @@ public class BaseTestRangeFilter extends LuceneTestCase { @BeforeClass public static void beforeClassBaseTestRangeFilter() throws Exception { maxId = atLeast(500); - signedIndexDir = new TestIndex(random, Integer.MAX_VALUE, Integer.MIN_VALUE, true); - unsignedIndexDir = new TestIndex(random, Integer.MAX_VALUE, 0, false); - signedIndexReader = build(random, signedIndexDir); - unsignedIndexReader = build(random, unsignedIndexDir); + signedIndexDir = new TestIndex(getStaticRandom(), Integer.MAX_VALUE, Integer.MIN_VALUE, true); + unsignedIndexDir = new TestIndex(getStaticRandom(), Integer.MAX_VALUE, 0, false); + signedIndexReader = build(getStaticRandom(), signedIndexDir); + unsignedIndexReader = build(getStaticRandom(), unsignedIndexDir); } 
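// NOTE: @BeforeClass and @AfterClass hooks like the one above run in a static
// context, before any test instance (and therefore any per-test random) exists,
// so they must draw randomness from getStaticRandom(); setUp() and the test
// methods themselves use getRandom().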
@AfterClass diff --git lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java index da5b24f..12ade88 100644 --- lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java +++ lucene/core/src/test/org/apache/lucene/search/TestAutomatonQuery.java @@ -46,7 +46,7 @@ public class TestAutomatonQuery extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), directory); Document doc = new Document(); Field titleField = newField("title", "some title", TextField.TYPE_UNSTORED); Field field = newField(FN, "this is document one 2345", TextField.TYPE_UNSTORED); diff --git lucene/core/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java lucene/core/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java index ad043b1..24e1e7c 100644 --- lucene/core/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java +++ lucene/core/src/test/org/apache/lucene/search/TestAutomatonQueryUnicode.java @@ -46,7 +46,7 @@ public class TestAutomatonQueryUnicode extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), directory); Document doc = new Document(); Field titleField = newField("title", "some title", TextField.TYPE_UNSTORED); Field field = newField(FN, "", TextField.TYPE_UNSTORED); diff --git lucene/core/src/test/org/apache/lucene/search/TestBoolean2.java lucene/core/src/test/org/apache/lucene/search/TestBoolean2.java index ff9c6bb..19af5b3 100644 --- lucene/core/src/test/org/apache/lucene/search/TestBoolean2.java +++ lucene/core/src/test/org/apache/lucene/search/TestBoolean2.java @@ -56,7 +56,7 @@ public class TestBoolean2 extends LuceneTestCase { @BeforeClass public static void beforeClass() throws Exception { directory = newDirectory(); - RandomIndexWriter writer= new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + RandomIndexWriter writer= new RandomIndexWriter(getStaticRandom(), directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getStaticRandom())).setMergePolicy(newLogMergePolicy())); for (int i = 0; i < docFields.length; i++) { Document doc = new Document(); doc.add(newField(field, docFields[i], TextField.TYPE_UNSTORED)); @@ -67,23 +67,23 @@ public class TestBoolean2 extends LuceneTestCase { searcher = new IndexSearcher(littleReader); // Make big index - dir2 = new MockDirectoryWrapper(random, new RAMDirectory(directory, IOContext.DEFAULT)); + dir2 = new MockDirectoryWrapper(getStaticRandom(), new RAMDirectory(directory, IOContext.DEFAULT)); // First multiply small test index: mulFactor = 1; int docCount = 0; do { - final Directory copy = new MockDirectoryWrapper(random, new RAMDirectory(dir2, IOContext.DEFAULT)); - RandomIndexWriter w = new RandomIndexWriter(random, dir2); + final Directory copy = new MockDirectoryWrapper(getStaticRandom(), new RAMDirectory(dir2, IOContext.DEFAULT)); + RandomIndexWriter w = new RandomIndexWriter(getStaticRandom(), dir2); w.addIndexes(copy); docCount = w.maxDoc(); w.close(); mulFactor *= 2; } while(docCount < 3000); - RandomIndexWriter w = new RandomIndexWriter(random, dir2, 
- newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)) - .setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000))); + RandomIndexWriter w = new RandomIndexWriter(getStaticRandom(), dir2, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getStaticRandom())) + .setMaxBufferedDocs(_TestUtil.nextInt(getStaticRandom(), 50, 1000))); Document doc = new Document(); doc.add(newField("field2", "xxx", TextField.TYPE_UNSTORED)); for(int i=0;i numTerms) { - terms.remove(random.nextInt(terms.size())); + terms.remove(getRandom().nextInt(terms.size())); } if (VERBOSE) { @@ -256,13 +256,13 @@ public class TestBooleanQuery extends LuceneTestCase { final int nextUpto; final int nextDoc; final int left = hits.size() - upto; - if (left == 1 || random.nextBoolean()) { + if (left == 1 || getRandom().nextBoolean()) { // next nextUpto = 1+upto; nextDoc = scorer.nextDoc(); } else { // advance - int inc = _TestUtil.nextInt(random, 1, left-1); + int inc = _TestUtil.nextInt(getRandom(), 1, left-1); nextUpto = inc + upto; nextDoc = scorer.advance(hits.get(nextUpto).doc); } diff --git lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java index 16420e3..6ceca74 100644 --- lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java +++ lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java @@ -42,7 +42,7 @@ public class TestBooleanScorer extends LuceneTestCase String[] values = new String[] { "1", "2", "3", "4" }; - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), directory); for (int i = 0; i < values.length; i++) { Document doc = new Document(); doc.add(newField(FIELD, values[i], StringField.TYPE_STORED)); @@ -73,7 +73,7 @@ public class TestBooleanScorer extends LuceneTestCase // changes, we have a test to back it up. Directory directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), directory); writer.commit(); IndexReader ir = writer.getReader(); writer.close(); @@ -127,7 +127,7 @@ public class TestBooleanScorer extends LuceneTestCase public void testMoreThan32ProhibitedClauses() throws Exception { final Directory d = newDirectory(); - final RandomIndexWriter w = new RandomIndexWriter(random, d); + final RandomIndexWriter w = new RandomIndexWriter(getRandom(), d); Document doc = new Document(); doc.add(new TextField("field", "0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33")); w.addDocument(doc); diff --git lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java index 873a3da..ce16978 100755 --- lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java +++ lucene/core/src/test/org/apache/lucene/search/TestCachingCollector.java @@ -153,7 +153,7 @@ public class TestCachingCollector extends LuceneTestCase { // caching would terminate even if a smaller length would suffice. // set RAM limit enough for 150 docs + random(10000) - int numDocs = random.nextInt(10000) + 150; + int numDocs = getRandom().nextInt(10000) + 150; for (boolean cacheScores : new boolean[] { false, true }) { int bytesPerDoc = cacheScores ? 
8 : 4; CachingCollector cc = CachingCollector.create(new NoOpCollector(false), diff --git lucene/core/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java lucene/core/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java index 8cdb545..2de5cef 100644 --- lucene/core/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java +++ lucene/core/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java @@ -39,7 +39,7 @@ public class TestCachingWrapperFilter extends LuceneTestCase { public void testCachingWorks() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir); writer.close(); IndexReader reader = SlowCompositeReaderWrapper.wrap(IndexReader.open(dir)); @@ -65,7 +65,7 @@ public class TestCachingWrapperFilter extends LuceneTestCase { public void testNullDocIdSet() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir); writer.close(); IndexReader reader = SlowCompositeReaderWrapper.wrap(IndexReader.open(dir)); @@ -88,7 +88,7 @@ public class TestCachingWrapperFilter extends LuceneTestCase { public void testNullDocIdSetIterator() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir); writer.close(); IndexReader reader = SlowCompositeReaderWrapper.wrap(IndexReader.open(dir)); @@ -132,7 +132,7 @@ public class TestCachingWrapperFilter extends LuceneTestCase { public void testIsCacheAble() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir); writer.addDocument(new Document()); writer.close(); @@ -159,9 +159,9 @@ public class TestCachingWrapperFilter extends LuceneTestCase { public void testEnforceDeletions() throws Exception { Directory dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter( - random, + getRandom(), dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())). setMergeScheduler(new SerialMergeScheduler()). 
// asserts below require no unexpected merges: setMergePolicy(newLogMergePolicy(10)) diff --git lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java index bf7bbbc..f911096 100644 --- lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java +++ lucene/core/src/test/org/apache/lucene/search/TestConstantScoreQuery.java @@ -87,7 +87,7 @@ public class TestConstantScoreQuery extends LuceneTestCase { IndexSearcher searcher = null; try { directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter (random, directory); + RandomIndexWriter writer = new RandomIndexWriter (getRandom(), directory); Document doc = new Document(); doc.add(newField("field", "term", StringField.TYPE_UNSTORED)); @@ -132,7 +132,7 @@ public class TestConstantScoreQuery extends LuceneTestCase { public void testConstantScoreQueryAndFilter() throws Exception { Directory d = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, d); + RandomIndexWriter w = new RandomIndexWriter(getRandom(), d); Document doc = new Document(); doc.add(newField("field", "a", StringField.TYPE_UNSTORED)); w.addDocument(doc); diff --git lucene/core/src/test/org/apache/lucene/search/TestCustomSearcherSort.java lucene/core/src/test/org/apache/lucene/search/TestCustomSearcherSort.java index 1011700..24a679d 100644 --- lucene/core/src/test/org/apache/lucene/search/TestCustomSearcherSort.java +++ lucene/core/src/test/org/apache/lucene/search/TestCustomSearcherSort.java @@ -50,8 +50,8 @@ public class TestCustomSearcherSort extends LuceneTestCase { super.setUp(); INDEX_SIZE = atLeast(2000); index = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, index); - RandomGen random = new RandomGen(this.random); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), index); + RandomGen random = new RandomGen(this.getRandom()); for (int i = 0; i < INDEX_SIZE; ++i) { // don't decrease; if too low the // problem doesn't show up Document doc = new Document(); diff --git lucene/core/src/test/org/apache/lucene/search/TestDateFilter.java lucene/core/src/test/org/apache/lucene/search/TestDateFilter.java index 40a4bb4..66d6e40 100644 --- lucene/core/src/test/org/apache/lucene/search/TestDateFilter.java +++ lucene/core/src/test/org/apache/lucene/search/TestDateFilter.java @@ -42,7 +42,7 @@ public class TestDateFilter extends LuceneTestCase { public void testBefore() throws IOException { // create an index Directory indexStore = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, indexStore); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), indexStore); long now = System.currentTimeMillis(); @@ -107,7 +107,7 @@ public class TestDateFilter extends LuceneTestCase { public void testAfter() throws IOException { // create an index Directory indexStore = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, indexStore); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), indexStore); long now = System.currentTimeMillis(); diff --git lucene/core/src/test/org/apache/lucene/search/TestDateSort.java lucene/core/src/test/org/apache/lucene/search/TestDateSort.java index 3031ec1..0a2a8f0 100644 --- lucene/core/src/test/org/apache/lucene/search/TestDateSort.java +++ lucene/core/src/test/org/apache/lucene/search/TestDateSort.java @@ -48,7 +48,7 @@ public class TestDateSort extends LuceneTestCase { super.setUp(); // Create an index writer. 
directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), directory); // oldest doc: // Add the first document. text = "Document 1" dateTime = Oct 10 03:25:22 EDT 2007 diff --git lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java index b680bc6..e0f98cd 100644 --- lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java +++ lucene/core/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java @@ -91,8 +91,8 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase { super.setUp(); index = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, index, - newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)) + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), index, + newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())) .setSimilarity(sim).setMergePolicy(newLogMergePolicy())); // hed is the most important field, dek is secondary @@ -167,7 +167,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase { dq.add(tq("id", "d1")); dq.add(tq("dek", "DOES_NOT_EXIST")); - QueryUtils.check(random, dq, s); + QueryUtils.check(getRandom(), dq, s); assertTrue(s.getTopReaderContext() instanceof AtomicReaderContext); final Weight dw = s.createNormalizedWeight(dq); AtomicReaderContext context = (AtomicReaderContext)s.getTopReaderContext(); @@ -184,7 +184,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase { dq.add(tq("dek", "albino")); dq.add(tq("dek", "DOES_NOT_EXIST")); assertTrue(s.getTopReaderContext() instanceof AtomicReaderContext); - QueryUtils.check(random, dq, s); + QueryUtils.check(getRandom(), dq, s); final Weight dw = s.createNormalizedWeight(dq); AtomicReaderContext context = (AtomicReaderContext)s.getTopReaderContext(); final Scorer ds = dw.scorer(context, true, false, context.reader().getLiveDocs()); @@ -198,7 +198,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase { DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.0f); q.add(tq("hed", "albino")); q.add(tq("hed", "elephant")); - QueryUtils.check(random, q, s); + QueryUtils.check(getRandom(), q, s); ScoreDoc[] h = s.search(q, null, 1000).scoreDocs; @@ -222,7 +222,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase { DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.0f); q.add(tq("dek", "albino")); q.add(tq("dek", "elephant")); - QueryUtils.check(random, q, s); + QueryUtils.check(getRandom(), q, s); ScoreDoc[] h = s.search(q, null, 1000).scoreDocs; @@ -247,7 +247,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase { q.add(tq("hed", "elephant")); q.add(tq("dek", "albino")); q.add(tq("dek", "elephant")); - QueryUtils.check(random, q, s); + QueryUtils.check(getRandom(), q, s); ScoreDoc[] h = s.search(q, null, 1000).scoreDocs; @@ -270,7 +270,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase { DisjunctionMaxQuery q = new DisjunctionMaxQuery(0.01f); q.add(tq("dek", "albino")); q.add(tq("dek", "elephant")); - QueryUtils.check(random, q, s); + QueryUtils.check(getRandom(), q, s); ScoreDoc[] h = s.search(q, null, 1000).scoreDocs; @@ -298,7 +298,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase { q1.add(tq("hed", "albino")); q1.add(tq("dek", "albino")); q.add(q1, BooleanClause.Occur.MUST);// true,false); - QueryUtils.check(random, q1, s); + QueryUtils.check(getRandom(), 
q1, s); } { @@ -306,10 +306,10 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase { q2.add(tq("hed", "elephant")); q2.add(tq("dek", "elephant")); q.add(q2, BooleanClause.Occur.MUST);// true,false); - QueryUtils.check(random, q2, s); + QueryUtils.check(getRandom(), q2, s); } - QueryUtils.check(random, q, s); + QueryUtils.check(getRandom(), q, s); ScoreDoc[] h = s.search(q, null, 1000).scoreDocs; @@ -341,7 +341,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase { q2.add(tq("dek", "elephant")); q.add(q2, BooleanClause.Occur.SHOULD);// false,false); } - QueryUtils.check(random, q, s); + QueryUtils.check(getRandom(), q, s); ScoreDoc[] h = s.search(q, null, 1000).scoreDocs; @@ -377,7 +377,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase { q2.add(tq("dek", "elephant")); q.add(q2, BooleanClause.Occur.SHOULD);// false,false); } - QueryUtils.check(random, q, s); + QueryUtils.check(getRandom(), q, s); ScoreDoc[] h = s.search(q, null, 1000).scoreDocs; @@ -431,7 +431,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase { q2.add(tq("dek", "elephant")); q.add(q2, BooleanClause.Occur.SHOULD);// false,false); } - QueryUtils.check(random, q, s); + QueryUtils.check(getRandom(), q, s); ScoreDoc[] h = s.search(q, null, 1000).scoreDocs; diff --git lucene/core/src/test/org/apache/lucene/search/TestDocBoost.java lucene/core/src/test/org/apache/lucene/search/TestDocBoost.java index b154040..25436cb 100644 --- lucene/core/src/test/org/apache/lucene/search/TestDocBoost.java +++ lucene/core/src/test/org/apache/lucene/search/TestDocBoost.java @@ -36,7 +36,7 @@ public class TestDocBoost extends LuceneTestCase { public void testDocBoost() throws Exception { Directory store = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, store, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), store, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setMergePolicy(newLogMergePolicy())); Field f1 = newField("field", "word", TextField.TYPE_STORED); Field f2 = newField("field", "word", TextField.TYPE_STORED); diff --git lucene/core/src/test/org/apache/lucene/search/TestDocIdSet.java lucene/core/src/test/org/apache/lucene/search/TestDocIdSet.java index f2a5553..3d2d923 100644 --- lucene/core/src/test/org/apache/lucene/search/TestDocIdSet.java +++ lucene/core/src/test/org/apache/lucene/search/TestDocIdSet.java @@ -101,7 +101,7 @@ public class TestDocIdSet extends LuceneTestCase { // Tests that if a Filter produces a null DocIdSet, which is given to // IndexSearcher, everything works fine. This came up in LUCENE-1754. 
Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir); Document doc = new Document(); doc.add(newField("c", "val", StringField.TYPE_UNSTORED)); writer.addDocument(doc); diff --git lucene/core/src/test/org/apache/lucene/search/TestDocValuesScoring.java lucene/core/src/test/org/apache/lucene/search/TestDocValuesScoring.java index c8fee43..7df7e92 100644 --- lucene/core/src/test/org/apache/lucene/search/TestDocValuesScoring.java +++ lucene/core/src/test/org/apache/lucene/search/TestDocValuesScoring.java @@ -52,7 +52,7 @@ public class TestDocValuesScoring extends LuceneTestCase { "Lucene3x".equals(Codec.getDefault().getName())); Directory dir = newDirectory(); - RandomIndexWriter iw = new RandomIndexWriter(random, dir); + RandomIndexWriter iw = new RandomIndexWriter(getRandom(), dir); Document doc = new Document(); Field field = newField("foo", "", TextField.TYPE_UNSTORED); doc.add(field); @@ -98,8 +98,8 @@ public class TestDocValuesScoring extends LuceneTestCase { // in this case, we searched on field "foo". first document should have 2x the score. TermQuery tq = new TermQuery(new Term("foo", "quick")); - QueryUtils.check(random, tq, searcher1); - QueryUtils.check(random, tq, searcher2); + QueryUtils.check(getRandom(), tq, searcher1); + QueryUtils.check(getRandom(), tq, searcher2); TopDocs noboost = searcher1.search(tq, 10); TopDocs boost = searcher2.search(tq, 10); @@ -111,8 +111,8 @@ public class TestDocValuesScoring extends LuceneTestCase { // this query matches only the second document, which should have 4x the score. tq = new TermQuery(new Term("foo", "jumps")); - QueryUtils.check(random, tq, searcher1); - QueryUtils.check(random, tq, searcher2); + QueryUtils.check(getRandom(), tq, searcher1); + QueryUtils.check(getRandom(), tq, searcher2); noboost = searcher1.search(tq, 10); boost = searcher2.search(tq, 10); @@ -124,8 +124,8 @@ public class TestDocValuesScoring extends LuceneTestCase { // search on field bar just for kicks, nothing should happen, since we set up // our sim provider to only use foo_boost for field foo. tq = new TermQuery(new Term("bar", "quick")); - QueryUtils.check(random, tq, searcher1); - QueryUtils.check(random, tq, searcher2); + QueryUtils.check(getRandom(), tq, searcher1); + QueryUtils.check(getRandom(), tq, searcher2); noboost = searcher1.search(tq, 10); boost = searcher2.search(tq, 10); diff --git lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java index 47e0446..b487304 100644 --- lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java +++ lucene/core/src/test/org/apache/lucene/search/TestElevationComparator.java @@ -39,7 +39,7 @@ public class TestElevationComparator extends LuceneTestCase { Directory directory = newDirectory(); IndexWriter writer = new IndexWriter( directory, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())). setMaxBufferedDocs(2). setMergePolicy(newLogMergePolicy(1000)). 
setSimilarity(new DefaultSimilarity()) diff --git lucene/core/src/test/org/apache/lucene/search/TestExplanations.java lucene/core/src/test/org/apache/lucene/search/TestExplanations.java index 568636c..a054bd0 100644 --- lucene/core/src/test/org/apache/lucene/search/TestExplanations.java +++ lucene/core/src/test/org/apache/lucene/search/TestExplanations.java @@ -71,7 +71,7 @@ public class TestExplanations extends LuceneTestCase { @BeforeClass public static void beforeClassTestExplanations() throws Exception { directory = newDirectory(); - RandomIndexWriter writer= new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + RandomIndexWriter writer= new RandomIndexWriter(getStaticRandom(), directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getStaticRandom())).setMergePolicy(newLogMergePolicy())); for (int i = 0; i < docFields.length; i++) { Document doc = new Document(); doc.add(newField(KEY, ""+i, StringField.TYPE_UNSTORED)); @@ -95,7 +95,7 @@ public class TestExplanations extends LuceneTestCase { /** check the expDocNrs first, then check the query (and the explanations) */ public void qtest(Query q, int[] expDocNrs) throws Exception { - CheckHits.checkHitCollector(random, q, FIELD, searcher, expDocNrs); + CheckHits.checkHitCollector(getRandom(), q, FIELD, searcher, expDocNrs); } /** diff --git lucene/core/src/test/org/apache/lucene/search/TestFieldCache.java lucene/core/src/test/org/apache/lucene/search/TestFieldCache.java index 3d31c26..f05b9cc 100644 --- lucene/core/src/test/org/apache/lucene/search/TestFieldCache.java +++ lucene/core/src/test/org/apache/lucene/search/TestFieldCache.java @@ -53,7 +53,7 @@ public class TestFieldCache extends LuceneTestCase { NUM_DOCS = atLeast(500); NUM_ORDS = atLeast(2); directory = newDirectory(); - RandomIndexWriter writer= new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + RandomIndexWriter writer= new RandomIndexWriter(getStaticRandom(), directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getStaticRandom())).setMergePolicy(newLogMergePolicy())); long theLong = Long.MAX_VALUE; double theDouble = Double.MAX_VALUE; byte theByte = Byte.MAX_VALUE; @@ -82,13 +82,13 @@ public class TestFieldCache extends LuceneTestCase { } // sometimes skip the field: - if (random.nextInt(40) != 17) { + if (getStaticRandom().nextInt(40) != 17) { unicodeStrings[i] = generateString(i); doc.add(newField("theRandomUnicodeString", unicodeStrings[i], StringField.TYPE_STORED)); } // sometimes skip the field: - if (random.nextInt(10) != 8) { + if (getStaticRandom().nextInt(10) != 8) { for (int j = 0; j < NUM_ORDS; j++) { String newValue = generateString(i); multiValued[i][j] = new BytesRef(newValue); @@ -128,54 +128,54 @@ public class TestFieldCache extends LuceneTestCase { public void test() throws IOException { FieldCache cache = FieldCache.DEFAULT; - double [] doubles = cache.getDoubles(reader, "theDouble", random.nextBoolean()); - assertSame("Second request to cache return same array", doubles, cache.getDoubles(reader, "theDouble", random.nextBoolean())); - assertSame("Second request with explicit parser return same array", doubles, cache.getDoubles(reader, "theDouble", FieldCache.DEFAULT_DOUBLE_PARSER, random.nextBoolean())); + double [] doubles = cache.getDoubles(reader, "theDouble", getRandom().nextBoolean()); + assertSame("Second request to cache 
return same array", doubles, cache.getDoubles(reader, "theDouble", getRandom().nextBoolean())); + assertSame("Second request with explicit parser return same array", doubles, cache.getDoubles(reader, "theDouble", FieldCache.DEFAULT_DOUBLE_PARSER, getRandom().nextBoolean())); assertTrue("doubles Size: " + doubles.length + " is not: " + NUM_DOCS, doubles.length == NUM_DOCS); for (int i = 0; i < doubles.length; i++) { assertTrue(doubles[i] + " does not equal: " + (Double.MAX_VALUE - i), doubles[i] == (Double.MAX_VALUE - i)); } - long [] longs = cache.getLongs(reader, "theLong", random.nextBoolean()); - assertSame("Second request to cache return same array", longs, cache.getLongs(reader, "theLong", random.nextBoolean())); - assertSame("Second request with explicit parser return same array", longs, cache.getLongs(reader, "theLong", FieldCache.DEFAULT_LONG_PARSER, random.nextBoolean())); + long [] longs = cache.getLongs(reader, "theLong", getRandom().nextBoolean()); + assertSame("Second request to cache return same array", longs, cache.getLongs(reader, "theLong", getRandom().nextBoolean())); + assertSame("Second request with explicit parser return same array", longs, cache.getLongs(reader, "theLong", FieldCache.DEFAULT_LONG_PARSER, getRandom().nextBoolean())); assertTrue("longs Size: " + longs.length + " is not: " + NUM_DOCS, longs.length == NUM_DOCS); for (int i = 0; i < longs.length; i++) { assertTrue(longs[i] + " does not equal: " + (Long.MAX_VALUE - i) + " i=" + i, longs[i] == (Long.MAX_VALUE - i)); } - byte [] bytes = cache.getBytes(reader, "theByte", random.nextBoolean()); - assertSame("Second request to cache return same array", bytes, cache.getBytes(reader, "theByte", random.nextBoolean())); - assertSame("Second request with explicit parser return same array", bytes, cache.getBytes(reader, "theByte", FieldCache.DEFAULT_BYTE_PARSER, random.nextBoolean())); + byte [] bytes = cache.getBytes(reader, "theByte", getRandom().nextBoolean()); + assertSame("Second request to cache return same array", bytes, cache.getBytes(reader, "theByte", getRandom().nextBoolean())); + assertSame("Second request with explicit parser return same array", bytes, cache.getBytes(reader, "theByte", FieldCache.DEFAULT_BYTE_PARSER, getRandom().nextBoolean())); assertTrue("bytes Size: " + bytes.length + " is not: " + NUM_DOCS, bytes.length == NUM_DOCS); for (int i = 0; i < bytes.length; i++) { assertTrue(bytes[i] + " does not equal: " + (Byte.MAX_VALUE - i), bytes[i] == (byte) (Byte.MAX_VALUE - i)); } - short [] shorts = cache.getShorts(reader, "theShort", random.nextBoolean()); - assertSame("Second request to cache return same array", shorts, cache.getShorts(reader, "theShort", random.nextBoolean())); - assertSame("Second request with explicit parser return same array", shorts, cache.getShorts(reader, "theShort", FieldCache.DEFAULT_SHORT_PARSER, random.nextBoolean())); + short [] shorts = cache.getShorts(reader, "theShort", getRandom().nextBoolean()); + assertSame("Second request to cache return same array", shorts, cache.getShorts(reader, "theShort", getRandom().nextBoolean())); + assertSame("Second request with explicit parser return same array", shorts, cache.getShorts(reader, "theShort", FieldCache.DEFAULT_SHORT_PARSER, getRandom().nextBoolean())); assertTrue("shorts Size: " + shorts.length + " is not: " + NUM_DOCS, shorts.length == NUM_DOCS); for (int i = 0; i < shorts.length; i++) { assertTrue(shorts[i] + " does not equal: " + (Short.MAX_VALUE - i), shorts[i] == (short) (Short.MAX_VALUE - i)); } - int [] ints = 
cache.getInts(reader, "theInt", random.nextBoolean()); - assertSame("Second request to cache return same array", ints, cache.getInts(reader, "theInt", random.nextBoolean())); - assertSame("Second request with explicit parser return same array", ints, cache.getInts(reader, "theInt", FieldCache.DEFAULT_INT_PARSER, random.nextBoolean())); + int [] ints = cache.getInts(reader, "theInt", getRandom().nextBoolean()); + assertSame("Second request to cache return same array", ints, cache.getInts(reader, "theInt", getRandom().nextBoolean())); + assertSame("Second request with explicit parser return same array", ints, cache.getInts(reader, "theInt", FieldCache.DEFAULT_INT_PARSER, getRandom().nextBoolean())); assertTrue("ints Size: " + ints.length + " is not: " + NUM_DOCS, ints.length == NUM_DOCS); for (int i = 0; i < ints.length; i++) { assertTrue(ints[i] + " does not equal: " + (Integer.MAX_VALUE - i), ints[i] == (Integer.MAX_VALUE - i)); } - float [] floats = cache.getFloats(reader, "theFloat", random.nextBoolean()); - assertSame("Second request to cache return same array", floats, cache.getFloats(reader, "theFloat", random.nextBoolean())); - assertSame("Second request with explicit parser return same array", floats, cache.getFloats(reader, "theFloat", FieldCache.DEFAULT_FLOAT_PARSER, random.nextBoolean())); + float [] floats = cache.getFloats(reader, "theFloat", getRandom().nextBoolean()); + assertSame("Second request to cache return same array", floats, cache.getFloats(reader, "theFloat", getRandom().nextBoolean())); + assertSame("Second request with explicit parser return same array", floats, cache.getFloats(reader, "theFloat", FieldCache.DEFAULT_FLOAT_PARSER, getRandom().nextBoolean())); assertTrue("floats Size: " + floats.length + " is not: " + NUM_DOCS, floats.length == NUM_DOCS); for (int i = 0; i < floats.length; i++) { assertTrue(floats[i] + " does not equal: " + (Float.MAX_VALUE - i), floats[i] == (Float.MAX_VALUE - i)); @@ -224,7 +224,7 @@ public class TestFieldCache extends LuceneTestCase { // seek the enum around (note this isn't a great test here) int num = atLeast(100); for (int i = 0; i < num; i++) { - int k = _TestUtil.nextInt(random, 1, nTerms-1); + int k = _TestUtil.nextInt(getRandom(), 1, nTerms-1); BytesRef val1 = termsIndex.lookup(k, val); assertEquals(TermsEnum.SeekStatus.FOUND, tenum.seekCeil(val1)); assertEquals(val1, tenum.term()); @@ -292,7 +292,7 @@ public class TestFieldCache extends LuceneTestCase { public void testEmptyIndex() throws Exception { Directory dir = newDirectory(); - IndexWriter writer= new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(500)); + IndexWriter writer= new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setMaxBufferedDocs(500)); writer.close(); IndexReader r = DirectoryReader.open(dir); AtomicReader reader = SlowCompositeReaderWrapper.wrap(r); @@ -305,16 +305,16 @@ public class TestFieldCache extends LuceneTestCase { private static String generateString(int i) { String s = null; - if (i > 0 && random.nextInt(3) == 1) { + if (i > 0 && getStaticRandom().nextInt(3) == 1) { // reuse past string -- try to find one that's not null for(int iter = 0; iter < 10 && s == null;iter++) { - s = unicodeStrings[random.nextInt(i)]; + s = unicodeStrings[getStaticRandom().nextInt(i)]; } if (s == null) { - s = _TestUtil.randomUnicodeString(random); + s = _TestUtil.randomUnicodeString(getStaticRandom()); } } else { - s = _TestUtil.randomUnicodeString(random); + s 
= _TestUtil.randomUnicodeString(getStaticRandom()); } return s; } @@ -348,7 +348,7 @@ public class TestFieldCache extends LuceneTestCase { } } - int[] numInts = cache.getInts(reader, "numInt", random.nextBoolean()); + int[] numInts = cache.getInts(reader, "numInt", getRandom().nextBoolean()); docsWithField = cache.getDocsWithField(reader, "numInt"); for (int i = 0; i < docsWithField.length(); i++) { if (i%2 == 0) { @@ -384,7 +384,7 @@ public class TestFieldCache extends LuceneTestCase { try { while(!failed.get()) { - final int op = random.nextInt(3); + final int op = getRandom().nextInt(3); if (op == 0) { // Purge all caches & resume, once all // threads get here: diff --git lucene/core/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java lucene/core/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java index 1770d2d..1dda807 100644 --- lucene/core/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java +++ lucene/core/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java @@ -524,7 +524,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter { @Test public void testSparseIndex() throws IOException { Directory dir = newDirectory(); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); for (int d = -20; d <= 20; d++) { Document doc = new Document(); diff --git lucene/core/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java lucene/core/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java index 672b16a..99741e5 100644 --- lucene/core/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java +++ lucene/core/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java @@ -37,7 +37,7 @@ public class TestFieldCacheTermsFilter extends LuceneTestCase { public void testMissingTerms() throws Exception { String fieldName = "field1"; Directory rd = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, rd); + RandomIndexWriter w = new RandomIndexWriter(getRandom(), rd); for (int i = 0; i < 100; i++) { Document doc = new Document(); int term = i * 10; //terms are units of 10; diff --git lucene/core/src/test/org/apache/lucene/search/TestFieldValueFilter.java lucene/core/src/test/org/apache/lucene/search/TestFieldValueFilter.java index 6dc8425..c49ed11 100644 --- lucene/core/src/test/org/apache/lucene/search/TestFieldValueFilter.java +++ lucene/core/src/test/org/apache/lucene/search/TestFieldValueFilter.java @@ -35,8 +35,8 @@ public class TestFieldValueFilter extends LuceneTestCase { public void testFieldValueFilterNoValue() throws IOException { Directory directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), directory, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); int docs = atLeast(10); int[] docStates = buildIndex(writer, docs); int numDocsNoValue = 0; @@ -63,8 +63,8 @@ public class TestFieldValueFilter extends LuceneTestCase { public void testFieldValueFilter() throws IOException { Directory directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + RandomIndexWriter writer = new 
RandomIndexWriter(getRandom(), directory, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); int docs = atLeast(10); int[] docStates = buildIndex(writer, docs); int numDocsWithValue = 0; @@ -93,7 +93,7 @@ public class TestFieldValueFilter extends LuceneTestCase { int[] docStates = new int[docs]; for (int i = 0; i < docs; i++) { Document doc = new Document(); - if (random.nextBoolean()) { + if (getRandom().nextBoolean()) { docStates[i] = 1; doc.add(newField("some", "value", TextField.TYPE_STORED)); } @@ -102,9 +102,9 @@ public class TestFieldValueFilter extends LuceneTestCase { writer.addDocument(doc); } writer.commit(); - int numDeletes = random.nextInt(docs); + int numDeletes = getRandom().nextInt(docs); for (int i = 0; i < numDeletes; i++) { - int docID = random.nextInt(docs); + int docID = getRandom().nextInt(docs); writer.deleteDocuments(new Term("id", "" + docID)); docStates[docID] = 2; } diff --git lucene/core/src/test/org/apache/lucene/search/TestFilteredQuery.java lucene/core/src/test/org/apache/lucene/search/TestFilteredQuery.java index 2d6be6d..d1991a9 100644 --- lucene/core/src/test/org/apache/lucene/search/TestFilteredQuery.java +++ lucene/core/src/test/org/apache/lucene/search/TestFilteredQuery.java @@ -52,7 +52,7 @@ public class TestFilteredQuery extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter (random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + RandomIndexWriter writer = new RandomIndexWriter (getRandom(), directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setMergePolicy(newLogMergePolicy())); Document doc = new Document(); doc.add (newField("field", "one two three four five", TextField.TYPE_STORED)); @@ -121,7 +121,7 @@ public class TestFilteredQuery extends LuceneTestCase { ScoreDoc[] hits = searcher.search (filteredquery, null, 1000).scoreDocs; assertEquals (1, hits.length); assertEquals (1, hits[0].doc); - QueryUtils.check(random, filteredquery,searcher); + QueryUtils.check(getRandom(), filteredquery,searcher); hits = searcher.search (filteredquery, null, 1000, new Sort(new SortField("sorter", SortField.Type.STRING))).scoreDocs; assertEquals (1, hits.length); @@ -130,23 +130,23 @@ public class TestFilteredQuery extends LuceneTestCase { filteredquery = new FilteredQueryRA(new TermQuery (new Term ("field", "one")), filter, useRandomAccess); hits = searcher.search (filteredquery, null, 1000).scoreDocs; assertEquals (2, hits.length); - QueryUtils.check(random, filteredquery,searcher); + QueryUtils.check(getRandom(), filteredquery,searcher); filteredquery = new FilteredQueryRA(new MatchAllDocsQuery(), filter, useRandomAccess); hits = searcher.search (filteredquery, null, 1000).scoreDocs; assertEquals (2, hits.length); - QueryUtils.check(random, filteredquery,searcher); + QueryUtils.check(getRandom(), filteredquery,searcher); filteredquery = new FilteredQueryRA(new TermQuery (new Term ("field", "x")), filter, useRandomAccess); hits = searcher.search (filteredquery, null, 1000).scoreDocs; assertEquals (1, hits.length); assertEquals (3, hits[0].doc); - QueryUtils.check(random, filteredquery,searcher); + QueryUtils.check(getRandom(), filteredquery,searcher); filteredquery = new FilteredQueryRA(new TermQuery (new Term ("field", "y")), filter, useRandomAccess); hits = searcher.search (filteredquery, null, 1000).scoreDocs; assertEquals (0, 
hits.length); - QueryUtils.check(random, filteredquery,searcher); + QueryUtils.check(getRandom(), filteredquery,searcher); // test boost Filter f = newStaticFilterA(); @@ -213,7 +213,7 @@ public class TestFilteredQuery extends LuceneTestCase { Query filteredquery = new FilteredQueryRA(rq, filter, useRandomAccess); ScoreDoc[] hits = searcher.search(filteredquery, null, 1000).scoreDocs; assertEquals(2, hits.length); - QueryUtils.check(random, filteredquery,searcher); + QueryUtils.check(getRandom(), filteredquery,searcher); } public void testBooleanMUST() throws Exception { @@ -231,7 +231,7 @@ public class TestFilteredQuery extends LuceneTestCase { bq.add(query, BooleanClause.Occur.MUST); ScoreDoc[] hits = searcher.search(bq, null, 1000).scoreDocs; assertEquals(0, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(getRandom(), query,searcher); } public void testBooleanSHOULD() throws Exception { @@ -249,7 +249,7 @@ public class TestFilteredQuery extends LuceneTestCase { bq.add(query, BooleanClause.Occur.SHOULD); ScoreDoc[] hits = searcher.search(bq, null, 1000).scoreDocs; assertEquals(2, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(getRandom(), query,searcher); } // Make sure BooleanQuery, which does out-of-order @@ -268,7 +268,7 @@ public class TestFilteredQuery extends LuceneTestCase { bq.add(new TermQuery(new Term("field", "two")), BooleanClause.Occur.SHOULD); ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs; assertEquals(1, hits.length); - QueryUtils.check(random, query, searcher); + QueryUtils.check(getRandom(), query, searcher); } public void testChainedFilters() throws Exception { @@ -284,14 +284,14 @@ public class TestFilteredQuery extends LuceneTestCase { new CachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("field", "four")))), useRandomAccess); ScoreDoc[] hits = searcher.search(query, 10).scoreDocs; assertEquals(2, hits.length); - QueryUtils.check(random, query, searcher); + QueryUtils.check(getRandom(), query, searcher); // one more: query = new TestFilteredQuery.FilteredQueryRA(query, new CachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("field", "five")))), useRandomAccess); hits = searcher.search(query, 10).scoreDocs; assertEquals(1, hits.length); - QueryUtils.check(random, query, searcher); + QueryUtils.check(getRandom(), query, searcher); } public void testEqualsHashcode() throws Exception { @@ -337,11 +337,11 @@ public class TestFilteredQuery extends LuceneTestCase { private void assertRewrite(FilteredQuery fq, Class clazz) throws Exception { // assign crazy boost to FQ - final float boost = random.nextFloat() * 100.f; + final float boost = getRandom().nextFloat() * 100.f; fq.setBoost(boost); // assign crazy boost to inner - final float innerBoost = random.nextFloat() * 100.f; + final float innerBoost = getRandom().nextFloat() * 100.f; fq.getQuery().setBoost(innerBoost); // check the class and boosts of rewritten query diff --git lucene/core/src/test/org/apache/lucene/search/TestFilteredSearch.java lucene/core/src/test/org/apache/lucene/search/TestFilteredSearch.java index 7679453..32180f2 100644 --- lucene/core/src/test/org/apache/lucene/search/TestFilteredSearch.java +++ lucene/core/src/test/org/apache/lucene/search/TestFilteredSearch.java @@ -48,14 +48,14 @@ public class TestFilteredSearch extends LuceneTestCase { Directory directory = newDirectory(); int[] filterBits = {1, 36}; SimpleDocIdSetFilter filter = new SimpleDocIdSetFilter(filterBits); - IndexWriter writer = new 
IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setMergePolicy(newLogMergePolicy())); searchFiltered(writer, directory, filter, enforceSingleSegment); // run the test on more than one segment enforceSingleSegment = false; // reset - it is stateful filter.reset(); writer.close(); - writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(10).setMergePolicy(newLogMergePolicy())); + writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(10).setMergePolicy(newLogMergePolicy())); // we index 60 docs - this will create 6 segments searchFiltered(writer, directory, filter, enforceSingleSegment); writer.close(); diff --git lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery.java lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery.java index 62dbf88..5df9b7b 100644 --- lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery.java +++ lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery.java @@ -39,7 +39,7 @@ public class TestFuzzyQuery extends LuceneTestCase { public void testFuzziness() throws Exception { Directory directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), directory); addDoc("aaaaa", writer); addDoc("aaaab", writer); addDoc("aaabb", writer); @@ -191,7 +191,7 @@ public class TestFuzzyQuery extends LuceneTestCase { public void testFuzzinessLong() throws Exception { Directory directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), directory); addDoc("aaaaaaa", writer); addDoc("segment", writer); @@ -287,14 +287,14 @@ public class TestFuzzyQuery extends LuceneTestCase { */ public void testTieBreaker() throws Exception { Directory directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), directory); addDoc("a123456", writer); addDoc("c123456", writer); addDoc("d123456", writer); addDoc("e123456", writer); Directory directory2 = newDirectory(); - RandomIndexWriter writer2 = new RandomIndexWriter(random, directory2); + RandomIndexWriter writer2 = new RandomIndexWriter(getRandom(), directory2); addDoc("a123456", writer2); addDoc("b123456", writer2); addDoc("b123456", writer2); @@ -321,7 +321,7 @@ public class TestFuzzyQuery extends LuceneTestCase { public void testTokenLengthOpt() throws IOException { Directory directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), directory); addDoc("12345678911", writer); addDoc("segment", writer); @@ -357,7 +357,7 @@ public class TestFuzzyQuery extends LuceneTestCase { /** Test the TopTermsBoostOnlyBooleanQueryRewrite rewrite method. 
*/ public void testBoostOnlyRewrite() throws Exception { Directory directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), directory); addDoc("Lucene", writer); addDoc("Lucene", writer); addDoc("Lucenne", writer); @@ -380,9 +380,9 @@ public class TestFuzzyQuery extends LuceneTestCase { public void testGiga() throws Exception { - MockAnalyzer analyzer = new MockAnalyzer(random); + MockAnalyzer analyzer = new MockAnalyzer(getRandom()); Directory index = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, index); + RandomIndexWriter w = new RandomIndexWriter(getRandom(), index); addDoc("Lucene in Action", w); addDoc("Lucene for Dummies", w); @@ -417,7 +417,7 @@ public class TestFuzzyQuery extends LuceneTestCase { public void testDistanceAsEditsSearching() throws Exception { Directory index = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, index); + RandomIndexWriter w = new RandomIndexWriter(getRandom(), index); addDoc("foobar", w); addDoc("test", w); addDoc("working", w); diff --git lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery2.java lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery2.java index dc55dc8..d698094 100644 --- lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery2.java +++ lucene/core/src/test/org/apache/lucene/search/TestFuzzyQuery2.java @@ -73,7 +73,7 @@ public class TestFuzzyQuery2 extends LuceneTestCase { }; public void testFromTestData() throws Exception { // TODO: randomize! - assertFromTestData(mappings[random.nextInt(mappings.length)]); + assertFromTestData(mappings[getRandom().nextInt(mappings.length)]); } public void assertFromTestData(int codePointTable[]) throws Exception { @@ -87,7 +87,7 @@ public class TestFuzzyQuery2 extends LuceneTestCase { int terms = (int) Math.pow(2, bits); Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false)).setMergePolicy(newLogMergePolicy())); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.KEYWORD, false)).setMergePolicy(newLogMergePolicy())); Document doc = new Document(); Field field = newField("field", "", TextField.TYPE_UNSTORED); diff --git lucene/core/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java lucene/core/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java index 11050ad..b5367ec 100644 --- lucene/core/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java +++ lucene/core/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java @@ -35,8 +35,14 @@ import org.apache.lucene.util.LuceneTestCase; * */ public class TestMatchAllDocsQuery extends LuceneTestCase { - private Analyzer analyzer = new MockAnalyzer(random); + private Analyzer analyzer; + @Override + public void setUp() throws Exception { + super.setUp(); + analyzer = new MockAnalyzer(getRandom()); + } + public void testQuery() throws Exception { Directory dir = newDirectory(); IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig( diff --git lucene/core/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java lucene/core/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java index 652fd14..8c3e5ea 100644 --- lucene/core/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java +++ 
lucene/core/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java @@ -48,7 +48,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase { public void testPhrasePrefix() throws IOException { Directory indexStore = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, indexStore); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), indexStore); add("blueberry pie", writer); add("blueberry strudel", writer); add("blueberry pizza", writer); @@ -140,7 +140,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase { // LUCENE-2580 public void testTall() throws IOException { Directory indexStore = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, indexStore); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), indexStore); add("blueberry chocolate pie", writer); add("blueberry chocolate tart", writer); IndexReader r = writer.getReader(); @@ -169,7 +169,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase { // and all terms required. // The contained PhraseMultiQuery must contain exactly one term array. Directory indexStore = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, indexStore); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), indexStore); add("blueberry pie", writer); add("blueberry chewing gum", writer); add("blue raspberry pie", writer); @@ -200,7 +200,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase { public void testPhrasePrefixWithBooleanQuery() throws IOException { Directory indexStore = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, indexStore); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), indexStore); add("This is a test", "object", writer); add("a note", "note", writer); @@ -227,7 +227,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase { public void testNoDocs() throws Exception { Directory indexStore = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, indexStore); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), indexStore); add("a note", "note", writer); IndexReader reader = writer.getReader(); @@ -290,7 +290,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase { public void testCustomIDF() throws Exception { Directory indexStore = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, indexStore); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), indexStore); add("This is a test", "object", writer); add("a note", "note", writer); @@ -327,7 +327,7 @@ public class TestMultiPhraseQuery extends LuceneTestCase { tokens[2].append("c"); tokens[2].setPositionIncrement(0); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir); Document doc = new Document(); doc.add(new TextField("field", new CannedTokenStream(tokens))); writer.addDocument(doc); diff --git lucene/core/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java lucene/core/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java index 7dd81d5..bf15e09 100644 --- lucene/core/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java +++ lucene/core/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java @@ -56,9 +56,9 @@ public class TestMultiTermConstantScore extends BaseTestRangeFilter { "X 4 5 6" }; small = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, small, + RandomIndexWriter writer = new 
RandomIndexWriter(getStaticRandom(), small, newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMergePolicy(newLogMergePolicy())); + new MockAnalyzer(getStaticRandom(), MockTokenizer.WHITESPACE, false)).setMergePolicy(newLogMergePolicy())); FieldType customType = new FieldType(TextField.TYPE_STORED); customType.setTokenized(false); diff --git lucene/core/src/test/org/apache/lucene/search/TestMultiTermQueryRewrites.java lucene/core/src/test/org/apache/lucene/search/TestMultiTermQueryRewrites.java index 28e7ec8..3892394 100644 --- lucene/core/src/test/org/apache/lucene/search/TestMultiTermQueryRewrites.java +++ lucene/core/src/test/org/apache/lucene/search/TestMultiTermQueryRewrites.java @@ -46,9 +46,9 @@ public class TestMultiTermQueryRewrites extends LuceneTestCase { dir = newDirectory(); sdir1 = newDirectory(); sdir2 = newDirectory(); - final RandomIndexWriter writer = new RandomIndexWriter(random, dir, new MockAnalyzer(random)); - final RandomIndexWriter swriter1 = new RandomIndexWriter(random, sdir1, new MockAnalyzer(random)); - final RandomIndexWriter swriter2 = new RandomIndexWriter(random, sdir2, new MockAnalyzer(random)); + final RandomIndexWriter writer = new RandomIndexWriter(getStaticRandom(), dir, new MockAnalyzer(getStaticRandom())); + final RandomIndexWriter swriter1 = new RandomIndexWriter(getStaticRandom(), sdir1, new MockAnalyzer(getStaticRandom())); + final RandomIndexWriter swriter2 = new RandomIndexWriter(getStaticRandom(), sdir2, new MockAnalyzer(getStaticRandom())); for (int i = 0; i < 10; i++) { Document doc = new Document(); diff --git lucene/core/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java lucene/core/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java index 61f308c..f43efd5 100644 --- lucene/core/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java +++ lucene/core/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java @@ -40,7 +40,7 @@ public class TestMultiThreadTermVectors extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); directory = newDirectory(); - IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setMergePolicy(newLogMergePolicy())); //writer.setUseCompoundFile(false); //writer.infoStream = System.out; FieldType customType = new FieldType(TextField.TYPE_STORED); diff --git lucene/core/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java lucene/core/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java index ae886c1..b740508 100644 --- lucene/core/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java +++ lucene/core/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java @@ -40,17 +40,17 @@ public class TestMultiValuedNumericRangeQuery extends LuceneTestCase { */ public void testMultiValuedNRQ() throws Exception { Directory directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)) - .setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000))); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), directory, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())) + 
.setMaxBufferedDocs(_TestUtil.nextInt(getRandom(), 50, 1000))); DecimalFormat format = new DecimalFormat("00000000000", new DecimalFormatSymbols(Locale.US)); int num = atLeast(500); for (int l = 0; l < num; l++) { Document doc = new Document(); - for (int m=0, c=random.nextInt(10); m<=c; m++) { - int value = random.nextInt(Integer.MAX_VALUE); + for (int m=0, c=getRandom().nextInt(10); m<=c; m++) { + int value = getRandom().nextInt(Integer.MAX_VALUE); doc.add(newField("asc", format.format(value), StringField.TYPE_UNSTORED)); doc.add(new IntField("trie", value)); } @@ -62,8 +62,8 @@ public class TestMultiValuedNumericRangeQuery extends LuceneTestCase { IndexSearcher searcher=newSearcher(reader); num = atLeast(50); for (int i = 0; i < num; i++) { - int lower=random.nextInt(Integer.MAX_VALUE); - int upper=random.nextInt(Integer.MAX_VALUE); + int lower=getRandom().nextInt(Integer.MAX_VALUE); + int upper=getRandom().nextInt(Integer.MAX_VALUE); if (lower>upper) { int a=lower; lower=upper; upper=a; } diff --git lucene/core/src/test/org/apache/lucene/search/TestNGramPhraseQuery.java lucene/core/src/test/org/apache/lucene/search/TestNGramPhraseQuery.java index 7a28963..9a9056f 100644 --- lucene/core/src/test/org/apache/lucene/search/TestNGramPhraseQuery.java +++ lucene/core/src/test/org/apache/lucene/search/TestNGramPhraseQuery.java @@ -33,7 +33,7 @@ public class TestNGramPhraseQuery extends LuceneTestCase { @BeforeClass public static void beforeClass() throws Exception { directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(getStaticRandom(), directory); writer.close(); reader = IndexReader.open(directory); } diff --git lucene/core/src/test/org/apache/lucene/search/TestNRTManager.java lucene/core/src/test/org/apache/lucene/search/TestNRTManager.java index ed509df..902616c 100644 --- lucene/core/src/test/org/apache/lucene/search/TestNRTManager.java +++ lucene/core/src/test/org/apache/lucene/search/TestNRTManager.java @@ -65,7 +65,7 @@ public class TestNRTManager extends ThreadedIndexingAndSearchingTestCase { @Override protected Directory getDirectory(Directory in) { // Randomly swap in NRTCachingDir - if (random.nextBoolean()) { + if (getRandom().nextBoolean()) { if (VERBOSE) { System.out.println("TEST: wrap NRTCachingDir"); } @@ -81,7 +81,7 @@ public class TestNRTManager extends ThreadedIndexingAndSearchingTestCase { final long gen = genWriter.updateDocuments(id, docs); // Randomly verify the update "took": - if (random.nextInt(20) == 2) { + if (getRandom().nextInt(20) == 2) { if (VERBOSE) { System.out.println(Thread.currentThread().getName() + ": nrt: verify " + id); } @@ -104,7 +104,7 @@ public class TestNRTManager extends ThreadedIndexingAndSearchingTestCase { protected void addDocuments(Term id, List> docs) throws Exception { final long gen = genWriter.addDocuments(docs); // Randomly verify the add "took": - if (random.nextInt(20) == 2) { + if (getRandom().nextInt(20) == 2) { if (VERBOSE) { System.out.println(Thread.currentThread().getName() + ": nrt: verify " + id); } @@ -127,7 +127,7 @@ public class TestNRTManager extends ThreadedIndexingAndSearchingTestCase { final long gen = genWriter.addDocument(doc); // Randomly verify the add "took": - if (random.nextInt(20) == 2) { + if (getRandom().nextInt(20) == 2) { if (VERBOSE) { System.out.println(Thread.currentThread().getName() + ": nrt: verify " + id); } @@ -149,7 +149,7 @@ public class TestNRTManager extends ThreadedIndexingAndSearchingTestCase { 
protected void updateDocument(Term id, Iterable doc) throws Exception { final long gen = genWriter.updateDocument(id, doc); // Randomly verify the update "took": - if (random.nextInt(20) == 2) { + if (getRandom().nextInt(20) == 2) { if (VERBOSE) { System.out.println(Thread.currentThread().getName() + ": nrt: verify " + id); } @@ -171,7 +171,7 @@ protected void deleteDocuments(Term id) throws Exception { final long gen = genWriter.deleteDocuments(id); // randomly verify the delete "took": - if (random.nextInt(20) == 7) { + if (getRandom().nextInt(20) == 7) { if (VERBOSE) { System.out.println(Thread.currentThread().getName() + ": nrt: verify del " + id); } @@ -202,8 +202,8 @@ @Override protected void doAfterWriter(final ExecutorService es) throws Exception { - final double minReopenSec = 0.01 + 0.05 * random.nextDouble(); - final double maxReopenSec = minReopenSec * (1.0 + 10 * random.nextDouble()); + final double minReopenSec = 0.01 + 0.05 * getRandom().nextDouble(); + final double maxReopenSec = minReopenSec * (1.0 + 10 * getRandom().nextDouble()); if (VERBOSE) { System.out.println("TEST: make NRTManager maxReopenSec=" + maxReopenSec + " minReopenSec=" + minReopenSec); @@ -261,7 +261,7 @@ // Test doesn't assert deletions until the end, so we // can randomize whether dels must be applied final NRTManager nrt; - if (random.nextBoolean()) { + if (getRandom().nextBoolean()) { nrt = nrtDeletes; } else { nrt = nrtNoDeletes; @@ -295,7 +295,7 @@ * LUCENE-3528 - NRTManager hangs in certain situations */ public void testThreadStarvationNoDeleteNRTReader() throws IOException, InterruptedException { - IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); + IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())); Directory d = newDirectory(); final CountDownLatch latch = new CountDownLatch(1); final CountDownLatch signal = new CountDownLatch(1); @@ -391,7 +391,7 @@ public void testEvilSearcherFactory() throws Exception { final Directory dir = newDirectory(); - final RandomIndexWriter w = new RandomIndexWriter(random, dir); + final RandomIndexWriter w = new RandomIndexWriter(getRandom(), dir); w.commit(); final IndexReader other = DirectoryReader.open(dir); diff --git lucene/core/src/test/org/apache/lucene/search/TestNot.java lucene/core/src/test/org/apache/lucene/search/TestNot.java index c424aad..cfdc5a0 100644 --- lucene/core/src/test/org/apache/lucene/search/TestNot.java +++ lucene/core/src/test/org/apache/lucene/search/TestNot.java @@ -34,7 +34,7 @@ public class TestNot extends LuceneTestCase { public void testNot() throws Exception { Directory store = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, store); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), store); Document d1 = new Document(); d1.add(newField("field", "a b", TextField.TYPE_STORED)); diff --git lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java index aa61e83..d47b55c 100644 ---
lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java +++ lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java @@ -56,9 +56,9 @@ public class TestNumericRangeQuery32 extends LuceneTestCase { noDocs = atLeast(4096); distance = (1 << 30) / noDocs; directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)) - .setMaxBufferedDocs(_TestUtil.nextInt(random, 100, 1000)) + RandomIndexWriter writer = new RandomIndexWriter(getStaticRandom(), directory, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getStaticRandom())) + .setMaxBufferedDocs(_TestUtil.nextInt(getStaticRandom(), 100, 1000)) .setMergePolicy(newLogMergePolicy())); final FieldType storedInt = new FieldType(IntField.TYPE); @@ -299,8 +299,8 @@ @Test public void testInfiniteValues() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, - newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir, + newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); Document doc = new Document(); doc.add(new FloatField("float", Float.NEGATIVE_INFINITY)); doc.add(new IntField("int", Integer.MIN_VALUE)); @@ -370,10 +370,10 @@ private void testRandomTrieAndClassicRangeQuery(int precisionStep) throws Exception { String field="field"+precisionStep; int totalTermCountT=0,totalTermCountC=0,termCountT,termCountC; - int num = _TestUtil.nextInt(random, 10, 20); + int num = _TestUtil.nextInt(getRandom(), 10, 20); for (int i = 0; i < num; i++) { - int lower=(int)(random.nextDouble()*noDocs*distance)+startOffset; - int upper=(int)(random.nextDouble()*noDocs*distance)+startOffset; + int lower=(int)(getRandom().nextDouble()*noDocs*distance)+startOffset; + int upper=(int)(getRandom().nextDouble()*noDocs*distance)+startOffset; if (lower>upper) { int a=lower; lower=upper; upper=a; } @@ -493,10 +493,10 @@ private void testRangeSplit(int precisionStep) throws Exception { String field="ascfield"+precisionStep; // 10 random tests - int num = _TestUtil.nextInt(random, 10, 20); + int num = _TestUtil.nextInt(getRandom(), 10, 20); for (int i =0; i< num; i++) { - int lower=(int)(random.nextDouble()*noDocs - noDocs/2); - int upper=(int)(random.nextDouble()*noDocs - noDocs/2); + int lower=(int)(getRandom().nextDouble()*noDocs - noDocs/2); + int upper=(int)(getRandom().nextDouble()*noDocs - noDocs/2); if (lower>upper) { int a=lower; lower=upper; upper=a; } @@ -569,10 +569,10 @@ String field="field"+precisionStep; // 10 random tests, the index order is ascending, // so using a reverse sort field should return descending documents - int num = _TestUtil.nextInt(random, 10, 20); + int num = _TestUtil.nextInt(getRandom(), 10, 20); for (int i = 0; i < num; i++) { - int lower=(int)(random.nextDouble()*noDocs*distance)+startOffset; - int upper=(int)(random.nextDouble()*noDocs*distance)+startOffset; + int lower=(int)(getRandom().nextDouble()*noDocs*distance)+startOffset; + int upper=(int)(getRandom().nextDouble()*noDocs*distance)+startOffset; if (lower>upper) { int a=lower; lower=upper; upper=a; } diff --git
lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java index d4019a6..a69b53b 100644 --- lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java +++ lucene/core/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java @@ -56,9 +56,9 @@ public class TestNumericRangeQuery64 extends LuceneTestCase { noDocs = atLeast(4096); distance = (1L << 60) / noDocs; directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)) - .setMaxBufferedDocs(_TestUtil.nextInt(random, 100, 1000)) + RandomIndexWriter writer = new RandomIndexWriter(getStaticRandom(), directory, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getStaticRandom())) + .setMaxBufferedDocs(_TestUtil.nextInt(getStaticRandom(), 100, 1000)) .setMergePolicy(newLogMergePolicy())); final FieldType storedLong = new FieldType(LongField.TYPE); @@ -324,8 +324,8 @@ @Test public void testInfiniteValues() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); Document doc = new Document(); doc.add(new DoubleField("double", Double.NEGATIVE_INFINITY)); doc.add(new LongField("long", Long.MIN_VALUE)); @@ -395,10 +395,10 @@ private void testRandomTrieAndClassicRangeQuery(int precisionStep) throws Exception { String field="field"+precisionStep; int totalTermCountT=0,totalTermCountC=0,termCountT,termCountC; - int num = _TestUtil.nextInt(random, 10, 20); + int num = _TestUtil.nextInt(getRandom(), 10, 20); for (int i = 0; i < num; i++) { - long lower=(long)(random.nextDouble()*noDocs*distance)+startOffset; - long upper=(long)(random.nextDouble()*noDocs*distance)+startOffset; + long lower=(long)(getRandom().nextDouble()*noDocs*distance)+startOffset; + long upper=(long)(getRandom().nextDouble()*noDocs*distance)+startOffset; if (lower>upper) { long a=lower; lower=upper; upper=a; } @@ -523,10 +523,10 @@ private void testRangeSplit(int precisionStep) throws Exception { String field="ascfield"+precisionStep; // 10 random tests - int num = _TestUtil.nextInt(random, 10, 20); + int num = _TestUtil.nextInt(getRandom(), 10, 20); for (int i = 0; i < num; i++) { - long lower=(long)(random.nextDouble()*noDocs - noDocs/2); - long upper=(long)(random.nextDouble()*noDocs - noDocs/2); + long lower=(long)(getRandom().nextDouble()*noDocs - noDocs/2); + long upper=(long)(getRandom().nextDouble()*noDocs - noDocs/2); if (lower>upper) { long a=lower; lower=upper; upper=a; } @@ -609,10 +609,10 @@ String field="field"+precisionStep; // 10 random tests, the index order is ascending, // so using a reverse sort field should return descending documents - int num = _TestUtil.nextInt(random, 10, 20); + int num = _TestUtil.nextInt(getRandom(), 10, 20); for (int i = 0; i < num; i++) { - long lower=(long)(random.nextDouble()*noDocs*distance)+startOffset; - long upper=(long)(random.nextDouble()*noDocs*distance)+startOffset; + long
lower=(long)(getRandom().nextDouble()*noDocs*distance)+startOffset; + long upper=(long)(getRandom().nextDouble()*noDocs*distance)+startOffset; if (lower>upper) { long a=lower; lower=upper; upper=a; } diff --git lucene/core/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java lucene/core/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java index d88c96b..e8b8c33 100644 --- lucene/core/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java +++ lucene/core/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java @@ -41,7 +41,7 @@ public class TestPhrasePrefixQuery extends LuceneTestCase { */ public void testPhrasePrefix() throws IOException { Directory indexStore = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, indexStore); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), indexStore); Document doc1 = new Document(); Document doc2 = new Document(); Document doc3 = new Document(); diff --git lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java index 75d1e79..526b05b 100644 --- lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java +++ lucene/core/src/test/org/apache/lucene/search/TestPhraseQuery.java @@ -66,7 +66,7 @@ public class TestPhraseQuery extends LuceneTestCase { return 100; } }; - RandomIndexWriter writer = new RandomIndexWriter(random, directory, analyzer); + RandomIndexWriter writer = new RandomIndexWriter(getStaticRandom(), directory, analyzer); Document doc = new Document(); doc.add(newField("field", "one two three four five", TextField.TYPE_STORED)); @@ -111,7 +111,7 @@ public class TestPhraseQuery extends LuceneTestCase { query.add(new Term("field", "five")); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(0, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(getRandom(), query,searcher); } public void testBarelyCloseEnough() throws Exception { @@ -120,7 +120,7 @@ public class TestPhraseQuery extends LuceneTestCase { query.add(new Term("field", "five")); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(getRandom(), query,searcher); } /** @@ -132,7 +132,7 @@ public class TestPhraseQuery extends LuceneTestCase { query.add(new Term("field", "five")); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("exact match", 1, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(getRandom(), query,searcher); query = new PhraseQuery(); @@ -140,7 +140,7 @@ public class TestPhraseQuery extends LuceneTestCase { query.add(new Term("field", "one")); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("reverse not exact", 0, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(getRandom(), query,searcher); } public void testSlop1() throws Exception { @@ -150,7 +150,7 @@ public class TestPhraseQuery extends LuceneTestCase { query.add(new Term("field", "two")); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("in order", 1, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(getRandom(), query,searcher); // Ensures slop of 1 does not work for phrases out of order; @@ -161,7 +161,7 @@ public class TestPhraseQuery extends LuceneTestCase { query.add(new Term("field", "one")); hits = searcher.search(query, null, 1000).scoreDocs; 
assertEquals("reversed, slop not 2 or more", 0, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(getRandom(), query,searcher); } /** @@ -173,7 +173,7 @@ public class TestPhraseQuery extends LuceneTestCase { query.add(new Term("field", "one")); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("just sloppy enough", 1, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(getRandom(), query,searcher); query = new PhraseQuery(); @@ -182,7 +182,7 @@ public class TestPhraseQuery extends LuceneTestCase { query.add(new Term("field", "one")); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("not sloppy enough", 0, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(getRandom(), query,searcher); } @@ -197,7 +197,7 @@ public class TestPhraseQuery extends LuceneTestCase { query.add(new Term("field", "five")); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("two total moves", 1, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(getRandom(), query,searcher); query = new PhraseQuery(); @@ -207,20 +207,20 @@ public class TestPhraseQuery extends LuceneTestCase { query.add(new Term("field", "one")); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("slop of 5 not close enough", 0, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(getRandom(), query,searcher); query.setSlop(6); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("slop of 6 just right", 1, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(getRandom(), query,searcher); } public void testPhraseQueryWithStopAnalyzer() throws Exception { Directory directory = newDirectory(); - Analyzer stopAnalyzer = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, false); - RandomIndexWriter writer = new RandomIndexWriter(random, directory, + Analyzer stopAnalyzer = new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, false); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), directory, newIndexWriterConfig( Version.LUCENE_40, stopAnalyzer)); Document doc = new Document(); doc.add(newField("field", "the stop words are here", TextField.TYPE_STORED)); @@ -236,7 +236,7 @@ public class TestPhraseQuery extends LuceneTestCase { query.add(new Term("field","words")); ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(getRandom(), query,searcher); // StopAnalyzer as of 2.4 does not leave "holes", so this matches. 
@@ -245,7 +245,7 @@ public class TestPhraseQuery extends LuceneTestCase { query.add(new Term("field", "here")); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(getRandom(), query,searcher); reader.close(); @@ -254,7 +254,7 @@ public class TestPhraseQuery extends LuceneTestCase { public void testPhraseQueryInConjunctionScorer() throws Exception { Directory directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), directory); Document doc = new Document(); doc.add(newField("source", "marketing info", TextField.TYPE_STORED)); @@ -275,7 +275,7 @@ public class TestPhraseQuery extends LuceneTestCase { phraseQuery.add(new Term("source", "info")); ScoreDoc[] hits = searcher.search(phraseQuery, null, 1000).scoreDocs; assertEquals(2, hits.length); - QueryUtils.check(random, phraseQuery,searcher); + QueryUtils.check(getRandom(), phraseQuery,searcher); TermQuery termQuery = new TermQuery(new Term("contents","foobar")); @@ -284,13 +284,13 @@ public class TestPhraseQuery extends LuceneTestCase { booleanQuery.add(phraseQuery, BooleanClause.Occur.MUST); hits = searcher.search(booleanQuery, null, 1000).scoreDocs; assertEquals(1, hits.length); - QueryUtils.check(random, termQuery,searcher); + QueryUtils.check(getRandom(), termQuery,searcher); reader.close(); - writer = new RandomIndexWriter(random, directory, - newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE)); + writer = new RandomIndexWriter(getRandom(), directory, + newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setOpenMode(OpenMode.CREATE)); doc = new Document(); doc.add(newField("contents", "map entry woo", TextField.TYPE_STORED)); writer.addDocument(doc); @@ -330,7 +330,7 @@ public class TestPhraseQuery extends LuceneTestCase { booleanQuery.add(termQuery, BooleanClause.Occur.MUST); hits = searcher.search(booleanQuery, null, 1000).scoreDocs; assertEquals(2, hits.length); - QueryUtils.check(random, booleanQuery,searcher); + QueryUtils.check(getRandom(), booleanQuery,searcher); reader.close(); @@ -339,8 +339,8 @@ public class TestPhraseQuery extends LuceneTestCase { public void testSlopScoring() throws IOException { Directory directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)) + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), directory, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())) .setMergePolicy(newLogMergePolicy()) .setSimilarity(new DefaultSimilarity())); @@ -375,7 +375,7 @@ public class TestPhraseQuery extends LuceneTestCase { assertEquals(1, hits[1].doc); assertEquals(0.31, hits[2].score, 0.01); assertEquals(2, hits[2].doc); - QueryUtils.check(random, query,searcher); + QueryUtils.check(getRandom(), query,searcher); reader.close(); directory.close(); } @@ -399,13 +399,13 @@ public class TestPhraseQuery extends LuceneTestCase { ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("slop of 100 just right", 1, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(getRandom(), query,searcher); query.setSlop(99); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("slop of 99 not enough", 0, hits.length); - QueryUtils.check(random, query,searcher); + 
QueryUtils.check(getRandom(), query,searcher); } // work on two docs like this: "phrase exist notexist exist found" @@ -418,7 +418,7 @@ ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("phrase without repetitions exists in 2 docs", 2, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(getRandom(), query,searcher); // phrase with repetitions that exists in 2 docs query = new PhraseQuery(); @@ -429,7 +429,7 @@ hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("phrase with repetitions exists in two docs", 2, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(getRandom(), query,searcher); // phrase I with repetitions that does not exist in any doc query = new PhraseQuery(); @@ -440,7 +440,7 @@ hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("nonexisting phrase with repetitions does not exist in any doc", 0, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(getRandom(), query,searcher); // phrase II with repetitions that does not exist in any doc query = new PhraseQuery(); @@ -452,7 +452,7 @@ hits = searcher.search(query, null, 1000).scoreDocs; assertEquals("nonexisting phrase with repetitions does not exist in any doc", 0, hits.length); - QueryUtils.check(random, query,searcher); + QueryUtils.check(getRandom(), query,searcher); } @@ -475,7 +475,7 @@ assertEquals("phrase found with exact phrase scorer", 1, hits.length); float score0 = hits[0].score; //System.out.println("(exact) field: two three: "+score0); - QueryUtils.check(random, query,searcher); + QueryUtils.check(getRandom(), query,searcher); // search on non palindrome, find phrase with slop 2, though no slop required here. query.setSlop(2); // to use sloppy scorer @@ -484,7 +484,7 @@ hits = searcher.search(query, null, 1000).scoreDocs; float score1 = hits[0].score; //System.out.println("(sloppy) field: two three: "+score1); assertEquals("exact scorer and sloppy scorer score the same when slop does not matter",score0, score1, SCORE_COMP_THRESH); - QueryUtils.check(random, query,searcher); + QueryUtils.check(getRandom(), query,searcher); // search ordered in palindrome, find it twice query = new PhraseQuery(); @@ -495,7 +495,7 @@ assertEquals("just sloppy enough", 1, hits.length); //float score2 = hits[0].score; //System.out.println("palindrome: two three: "+score2); - QueryUtils.check(random, query,searcher); + QueryUtils.check(getRandom(), query,searcher); //commented out for sloppy-phrase efficiency (issue 736) - see SloppyPhraseScorer.phraseFreq().
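A note on the slop arithmetic behind "just sloppy enough": phrase slop is the edit distance between the query's term positions and the matched positions, so matching two adjacent terms in reverse order costs exactly 2. A hedged sketch against a hypothetical palindrome document, "one two three two one" (field name and text assumed for illustration):

    // The ordered pair "two three" occurs forward (slop 0) and reversed as
    // "three two", which needs a position edit distance of 2 to match.
    PhraseQuery query = new PhraseQuery();
    query.add(new Term("palindrome", "two"));
    query.add(new Term("palindrome", "three"));
    query.setSlop(2); // also forces the sloppy scorer on the exact occurrence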
//assertTrue("ordered scores higher in palindrome",score1+SCORE_COMP_THRESH> docs = new ArrayList>(); Document d = new Document(); Field f = newField("f", "", TextField.TYPE_UNSTORED); d.add(f); - Random r = random; + Random r = getRandom(); int NUM_DOCS = atLeast(10); for (int i = 0; i < NUM_DOCS; i++) { // must be > 4096 so it spans multiple chunks - int termCount = _TestUtil.nextInt(random, 4097, 8200); + int termCount = _TestUtil.nextInt(getRandom(), 4097, 8200); List doc = new ArrayList(); diff --git lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java index 538b358..ae1e5c4 100644 --- lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java +++ lucene/core/src/test/org/apache/lucene/search/TestPositionIncrement.java @@ -88,7 +88,7 @@ public class TestPositionIncrement extends LuceneTestCase { } }; Directory store = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, store, analyzer); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), store, analyzer); Document d = new Document(); d.add(newField("field", "bogus", TextField.TYPE_STORED)); writer.addDocument(d); @@ -203,7 +203,7 @@ public class TestPositionIncrement extends LuceneTestCase { public void testPayloadsPos0() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, new MockPayloadAnalyzer()); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir, new MockPayloadAnalyzer()); Document doc = new Document(); doc.add(new TextField("content", new StringReader( "a a b c d e a f g h i j a b k k"))); diff --git lucene/core/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java lucene/core/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java index 06e1c8b..642f251 100644 --- lucene/core/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java +++ lucene/core/src/test/org/apache/lucene/search/TestPositiveScoresOnlyCollector.java @@ -70,7 +70,7 @@ public class TestPositiveScoresOnlyCollector extends LuceneTestCase { } Directory directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), directory); writer.commit(); IndexReader ir = writer.getReader(); writer.close(); diff --git lucene/core/src/test/org/apache/lucene/search/TestPrefixFilter.java lucene/core/src/test/org/apache/lucene/search/TestPrefixFilter.java index 492a647..8823892 100644 --- lucene/core/src/test/org/apache/lucene/search/TestPrefixFilter.java +++ lucene/core/src/test/org/apache/lucene/search/TestPrefixFilter.java @@ -37,7 +37,7 @@ public class TestPrefixFilter extends LuceneTestCase { "/Computers/Mac/One", "/Computers/Mac/Two", "/Computers/Windows"}; - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), directory); for (int i = 0; i < categories.length; i++) { Document doc = new Document(); doc.add(newField("category", categories[i], StringField.TYPE_STORED)); diff --git lucene/core/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java lucene/core/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java index c5cb7ef..56c2d28 100644 --- lucene/core/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java +++ lucene/core/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java @@ -48,7 +48,7 @@ 
public class TestPrefixInBooleanQuery extends LuceneTestCase { @BeforeClass public static void beforeClass() throws Exception { directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(getStaticRandom(), directory); Document doc = new Document(); Field field = newField(FIELD, "meaninglessnames", StringField.TYPE_UNSTORED); diff --git lucene/core/src/test/org/apache/lucene/search/TestPrefixQuery.java lucene/core/src/test/org/apache/lucene/search/TestPrefixQuery.java index 4a84dd3..2179a9a 100644 --- lucene/core/src/test/org/apache/lucene/search/TestPrefixQuery.java +++ lucene/core/src/test/org/apache/lucene/search/TestPrefixQuery.java @@ -38,7 +38,7 @@ public class TestPrefixQuery extends LuceneTestCase { String[] categories = new String[] {"/Computers", "/Computers/Mac", "/Computers/Windows"}; - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), directory); for (int i = 0; i < categories.length; i++) { Document doc = new Document(); doc.add(newField("category", categories[i], StringField.TYPE_STORED)); diff --git lucene/core/src/test/org/apache/lucene/search/TestPrefixRandom.java lucene/core/src/test/org/apache/lucene/search/TestPrefixRandom.java index 6e5d860..4a62154 100644 --- lucene/core/src/test/org/apache/lucene/search/TestPrefixRandom.java +++ lucene/core/src/test/org/apache/lucene/search/TestPrefixRandom.java @@ -51,9 +51,9 @@ public class TestPrefixRandom extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false)) - .setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000))); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.KEYWORD, false)) + .setMaxBufferedDocs(_TestUtil.nextInt(getRandom(), 50, 1000))); Document doc = new Document(); Field field = newField("field", "", StringField.TYPE_UNSTORED); @@ -64,7 +64,7 @@ public class TestPrefixRandom extends LuceneTestCase { final String codec = Codec.getDefault().getName(); int num = codec.equals("Lucene3x") ? 
200 * RANDOM_MULTIPLIER : atLeast(1000); for (int i = 0; i < num; i++) { - field.setStringValue(_TestUtil.randomUnicodeString(random, 10)); + field.setStringValue(_TestUtil.randomUnicodeString(getRandom(), 10)); writer.addDocument(doc); } reader = writer.getReader(); @@ -118,7 +118,7 @@ public void testPrefixes() throws Exception { int num = atLeast(100); for (int i = 0; i < num; i++) - assertSame(_TestUtil.randomUnicodeString(random, 5)); + assertSame(_TestUtil.randomUnicodeString(getRandom(), 5)); } /** check that the # of hits is the same as from a very diff --git lucene/core/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java lucene/core/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java index 4741066..63aed56 100644 --- lucene/core/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java +++ lucene/core/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java @@ -35,7 +35,7 @@ public class TestQueryWrapperFilter extends LuceneTestCase { public void testBasic() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir); Document doc = new Document(); doc.add(newField("field", "value", TextField.TYPE_UNSTORED)); writer.addDocument(doc); @@ -87,14 +87,14 @@ public void testRandom() throws Exception { final Directory d = newDirectory(); - final RandomIndexWriter w = new RandomIndexWriter(random, d); + final RandomIndexWriter w = new RandomIndexWriter(getRandom(), d); w.w.getConfig().setMaxBufferedDocs(17); final int numDocs = atLeast(100); final Set aDocs = new HashSet(); for(int i=0;i terms = new ArrayList(); int num = atLeast(200); for (int i = 0; i < num; i++) { - String s = _TestUtil.randomUnicodeString(random); + String s = _TestUtil.randomUnicodeString(getRandom()); field.setStringValue(s); terms.add(s); writer.addDocument(doc); @@ -143,7 +143,7 @@ public class TestRegexpRandom2 extends LuceneTestCase { // but for preflex codec, the test can be very slow, so use fewer iterations. int num = Codec.getDefault().getName().equals("Lucene3x") ?
100 * RANDOM_MULTIPLIER : atLeast(1000); for (int i = 0; i < num; i++) { - String reg = AutomatonTestUtil.randomRegexp(random); + String reg = AutomatonTestUtil.randomRegexp(getRandom()); if (VERBOSE) { System.out.println("TEST: regexp=" + reg); } diff --git lucene/core/src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java lucene/core/src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java index 21b3076..ae8b576 100644 --- lucene/core/src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java +++ lucene/core/src/test/org/apache/lucene/search/TestScoreCachingWrappingScorer.java @@ -100,7 +100,7 @@ public class TestScoreCachingWrappingScorer extends LuceneTestCase { public void testGetScores() throws Exception { Directory directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), directory); writer.commit(); IndexReader ir = writer.getReader(); writer.close(); diff --git lucene/core/src/test/org/apache/lucene/search/TestScorerPerf.java lucene/core/src/test/org/apache/lucene/search/TestScorerPerf.java index b41b1c7..980342d 100755 --- lucene/core/src/test/org/apache/lucene/search/TestScorerPerf.java +++ lucene/core/src/test/org/apache/lucene/search/TestScorerPerf.java @@ -48,7 +48,7 @@ public class TestScorerPerf extends LuceneTestCase { // Create a dummy index with nothing in it. // This could possibly fail if Lucene starts checking for docid ranges... d = newDirectory(); - IndexWriter iw = new IndexWriter(d, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter iw = new IndexWriter(d, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); iw.addDocument(new Document()); iw.close(); r = IndexReader.open(d); @@ -64,11 +64,11 @@ public class TestScorerPerf extends LuceneTestCase { terms[i] = new Term("f",Character.toString((char)('A'+i))); } - IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE)); + IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setOpenMode(OpenMode.CREATE)); for (int i=0; i=termsInIndex) tnum=termflag.nextClearBit(0); termflag.set(tnum); @@ -259,17 +259,17 @@ public class TestScorerPerf extends LuceneTestCase { int ret=0; long nMatches=0; for (int i=0; i=25) tnum=termflag.nextClearBit(0); termflag.set(tnum); @@ -299,10 +299,10 @@ public class TestScorerPerf extends LuceneTestCase { int ret=0; for (int i=0; i 200) { - Collections.shuffle(priorSearches, random); + Collections.shuffle(priorSearches, getRandom()); priorSearches.subList(100, priorSearches.size()).clear(); } } @@ -272,7 +272,7 @@ public class TestShardSearching extends ShardSearchingTestBase { private PreviousSearchState assertSame(IndexSearcher mockSearcher, NodeState.ShardIndexSearcher shardSearcher, Query q, Sort sort, PreviousSearchState state) throws IOException { - int numHits = _TestUtil.nextInt(random, 1, 100); + int numHits = _TestUtil.nextInt(getRandom(), 1, 100); if (state != null && state.searchAfterLocal == null) { // In addition to what we last searched: numHits += state.numHitsPaged; diff --git lucene/core/src/test/org/apache/lucene/search/TestSimilarity.java lucene/core/src/test/org/apache/lucene/search/TestSimilarity.java index e2021d4..ef9387c 100644 --- lucene/core/src/test/org/apache/lucene/search/TestSimilarity.java +++ 
lucene/core/src/test/org/apache/lucene/search/TestSimilarity.java @@ -54,8 +54,8 @@ public class TestSimilarity extends LuceneTestCase { public void testSimilarity() throws Exception { Directory store = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, store, - newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)) + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), store, + newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())) .setSimilarity(new SimpleSimilarity())); Document d1 = new Document(); diff --git lucene/core/src/test/org/apache/lucene/search/TestSimilarityProvider.java lucene/core/src/test/org/apache/lucene/search/TestSimilarityProvider.java index 4f52f03..ae7d0ba 100644 --- lucene/core/src/test/org/apache/lucene/search/TestSimilarityProvider.java +++ lucene/core/src/test/org/apache/lucene/search/TestSimilarityProvider.java @@ -46,8 +46,8 @@ public class TestSimilarityProvider extends LuceneTestCase { directory = newDirectory(); PerFieldSimilarityWrapper sim = new ExampleSimilarityProvider(); IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)).setSimilarity(sim); - RandomIndexWriter iw = new RandomIndexWriter(random, directory, iwc); + new MockAnalyzer(getRandom())).setSimilarity(sim); + RandomIndexWriter iw = new RandomIndexWriter(getRandom(), directory, iwc); Document doc = new Document(); Field field = newField("foo", "", TextField.TYPE_UNSTORED); doc.add(field); diff --git lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java index fc1782b..860b59d 100755 --- lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java +++ lucene/core/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java @@ -137,7 +137,7 @@ public class TestSloppyPhraseQuery extends LuceneTestCase { query.setSlop(slop); Directory ramDir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, ramDir, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), ramDir, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)); writer.addDocument(doc); IndexReader reader = writer.getReader(); @@ -227,13 +227,13 @@ public class TestSloppyPhraseQuery extends LuceneTestCase { return false; } }); - QueryUtils.check(random, pq, searcher); + QueryUtils.check(getRandom(), pq, searcher); } // LUCENE-3215 public void testSlopWithHoles() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter iw = new RandomIndexWriter(random, dir); + RandomIndexWriter iw = new RandomIndexWriter(getRandom(), dir); FieldType customType = new FieldType(TextField.TYPE_UNSTORED); customType.setOmitNorms(true); Field f = new Field("lyrics", "", customType); @@ -270,7 +270,7 @@ public class TestSloppyPhraseQuery extends LuceneTestCase { String document = "drug druggy drug drug drug"; Directory dir = newDirectory(); - RandomIndexWriter iw = new RandomIndexWriter(random, dir); + RandomIndexWriter iw = new RandomIndexWriter(getRandom(), dir); Document doc = new Document(); doc.add(newField("lyrics", document, new FieldType(TextField.TYPE_UNSTORED))); iw.addDocument(doc); @@ -323,7 +323,7 @@ public class TestSloppyPhraseQuery extends LuceneTestCase { Directory dir = newDirectory(); - RandomIndexWriter iw = new RandomIndexWriter(random, dir); + RandomIndexWriter iw = new RandomIndexWriter(getRandom(), dir); Document doc = 
new Document(); doc.add(newField("lyrics", document, new FieldType(TextField.TYPE_UNSTORED))); iw.addDocument(doc); diff --git lucene/core/src/test/org/apache/lucene/search/TestSort.java lucene/core/src/test/org/apache/lucene/search/TestSort.java index 4681028..666fa3f 100644 --- lucene/core/src/test/org/apache/lucene/search/TestSort.java +++ lucene/core/src/test/org/apache/lucene/search/TestSort.java @@ -117,20 +117,20 @@ public class TestSort extends LuceneTestCase { throws IOException { Directory indexStore = newDirectory(); dirs.add(indexStore); - RandomIndexWriter writer = new RandomIndexWriter(random, indexStore, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), indexStore, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setMergePolicy(newLogMergePolicy())); final DocValues.Type stringDVType; if (dvStringSorted) { // Index sorted - stringDVType = random.nextBoolean() ? DocValues.Type.BYTES_VAR_SORTED : DocValues.Type.BYTES_FIXED_SORTED; + stringDVType = getRandom().nextBoolean() ? DocValues.Type.BYTES_VAR_SORTED : DocValues.Type.BYTES_FIXED_SORTED; } else { // Index non-sorted - if (random.nextBoolean()) { + if (getRandom().nextBoolean()) { // Fixed - stringDVType = random.nextBoolean() ? DocValues.Type.BYTES_FIXED_STRAIGHT : DocValues.Type.BYTES_FIXED_DEREF; + stringDVType = getRandom().nextBoolean() ? DocValues.Type.BYTES_FIXED_STRAIGHT : DocValues.Type.BYTES_FIXED_DEREF; } else { // Var - stringDVType = random.nextBoolean() ? DocValues.Type.BYTES_VAR_STRAIGHT : DocValues.Type.BYTES_VAR_DEREF; + stringDVType = getRandom().nextBoolean() ? DocValues.Type.BYTES_VAR_STRAIGHT : DocValues.Type.BYTES_VAR_DEREF; } } @@ -200,7 +200,7 @@ public class TestSort extends LuceneTestCase { dirs.add(indexStore); IndexWriter writer = new IndexWriter( indexStore, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())). setMergePolicy(newLogMergePolicy(97)) ); FieldType onlyStored = new FieldType(); @@ -279,7 +279,7 @@ public class TestSort extends LuceneTestCase { public int getRandomNumber(final int low, final int high) { - int randInt = (Math.abs(random.nextInt()) % (high - low)) + low; + int randInt = (Math.abs(getRandom().nextInt()) % (high - low)) + low; return randInt; } @@ -307,7 +307,7 @@ public class TestSort extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); - dvStringSorted = random.nextBoolean(); + dvStringSorted = getRandom().nextBoolean(); full = getFullIndex(); searchX = getXIndex(); searchY = getYIndex(); @@ -402,7 +402,7 @@ public class TestSort extends LuceneTestCase { if (dvStringSorted) { // If you index as sorted source you can still sort by // value instead: - return random.nextBoolean() ? SortField.Type.STRING : SortField.Type.STRING_VAL; + return getRandom().nextBoolean() ? 
SortField.Type.STRING : SortField.Type.STRING_VAL; } else { return SortField.Type.STRING_VAL; } @@ -505,7 +505,7 @@ public class TestSort extends LuceneTestCase { private void verifyStringSort(Sort sort) throws Exception { final IndexSearcher searcher = getFullStrings(); - final ScoreDoc[] result = searcher.search(new MatchAllDocsQuery(), null, _TestUtil.nextInt(random, 500, searcher.getIndexReader().maxDoc()), sort).scoreDocs; + final ScoreDoc[] result = searcher.search(new MatchAllDocsQuery(), null, _TestUtil.nextInt(getRandom(), 500, searcher.getIndexReader().maxDoc()), sort).scoreDocs; StringBuilder buff = new StringBuilder(); int n = result.length; String last = null; @@ -795,7 +795,7 @@ public class TestSort extends LuceneTestCase { assertMatches (full, queryG, sort, "ZYXW"); // Do the same for a ParallelMultiSearcher - ExecutorService exec = Executors.newFixedThreadPool(_TestUtil.nextInt(random, 2, 8)); + ExecutorService exec = Executors.newFixedThreadPool(_TestUtil.nextInt(getRandom(), 2, 8)); IndexSearcher parallelSearcher=new IndexSearcher (full.getIndexReader(), exec); sort.setSort (new SortField ("int", SortField.Type.INT), @@ -839,7 +839,7 @@ public class TestSort extends LuceneTestCase { // test a variety of sorts using a parallel multisearcher public void testParallelMultiSort() throws Exception { - ExecutorService exec = Executors.newFixedThreadPool(_TestUtil.nextInt(random, 2, 8)); + ExecutorService exec = Executors.newFixedThreadPool(_TestUtil.nextInt(getRandom(), 2, 8)); IndexSearcher searcher = new IndexSearcher( new MultiReader(searchX.getIndexReader(), searchY.getIndexReader()), exec); @@ -1230,7 +1230,7 @@ public class TestSort extends LuceneTestCase { public void testEmptyStringVsNullStringSort() throws Exception { Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random))); + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); Document doc = new Document(); doc.add(newField("f", "", StringField.TYPE_UNSTORED)); doc.add(newField("t", "1", StringField.TYPE_UNSTORED)); @@ -1255,7 +1255,7 @@ public class TestSort extends LuceneTestCase { public void testLUCENE2142() throws IOException { Directory indexStore = newDirectory(); IndexWriter writer = new IndexWriter(indexStore, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random))); + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); for (int i=0; i<5; i++) { Document doc = new Document(); doc.add (new StringField ("string", "a"+i)); @@ -1277,7 +1277,7 @@ public class TestSort extends LuceneTestCase { public void testCountingCollector() throws Exception { Directory indexStore = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, indexStore); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), indexStore); for (int i=0; i<5; i++) { Document doc = new Document(); doc.add (new StringField ("string", "a"+i)); diff --git lucene/core/src/test/org/apache/lucene/search/TestSubScorerFreqs.java lucene/core/src/test/org/apache/lucene/search/TestSubScorerFreqs.java index 734655c..de9d96e 100644 --- lucene/core/src/test/org/apache/lucene/search/TestSubScorerFreqs.java +++ lucene/core/src/test/org/apache/lucene/search/TestSubScorerFreqs.java @@ -40,7 +40,7 @@ public class TestSubScorerFreqs extends LuceneTestCase { public static void makeIndex() throws Exception { dir = new RAMDirectory(); RandomIndexWriter w = new RandomIndexWriter( - random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new 
MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + getStaticRandom(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getStaticRandom())).setMergePolicy(newLogMergePolicy())); // make sure we have more than one segment occasionally int num = atLeast(31); for (int i = 0; i < num; i++) { diff --git lucene/core/src/test/org/apache/lucene/search/TestTermRangeQuery.java lucene/core/src/test/org/apache/lucene/search/TestTermRangeQuery.java index fed281c..95d4022 100644 --- lucene/core/src/test/org/apache/lucene/search/TestTermRangeQuery.java +++ lucene/core/src/test/org/apache/lucene/search/TestTermRangeQuery.java @@ -240,7 +240,7 @@ public class TestTermRangeQuery extends LuceneTestCase { } private void initializeIndex(String[] values) throws IOException { - initializeIndex(values, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + initializeIndex(values, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)); } private void initializeIndex(String[] values, Analyzer analyzer) throws IOException { @@ -254,7 +254,7 @@ // shouldn't create an analyzer for every doc? private void addDoc(String content) throws IOException { - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setOpenMode(OpenMode.APPEND)); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)).setOpenMode(OpenMode.APPEND)); insertDoc(writer, content); writer.close(); } diff --git lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java index 4015c6a..2258dca 100644 --- lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java +++ lucene/core/src/test/org/apache/lucene/search/TestTermScorer.java @@ -47,8 +47,8 @@ public class TestTermScorer extends LuceneTestCase { super.setUp(); directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)) + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), directory, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())) .setMergePolicy(newLogMergePolicy()) .setSimilarity(new DefaultSimilarity())); for (int i = 0; i < values.length; i++) { diff --git lucene/core/src/test/org/apache/lucene/search/TestTermVectors.java lucene/core/src/test/org/apache/lucene/search/TestTermVectors.java index 362e3fc..2e706dc 100644 --- lucene/core/src/test/org/apache/lucene/search/TestTermVectors.java +++ lucene/core/src/test/org/apache/lucene/search/TestTermVectors.java @@ -46,7 +46,7 @@ public class TestTermVectors extends LuceneTestCase { @BeforeClass public static void beforeClass() throws Exception { directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.SIMPLE, true)).setMergePolicy(newLogMergePolicy())); + RandomIndexWriter writer = new RandomIndexWriter(getStaticRandom(), directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getStaticRandom(), MockTokenizer.SIMPLE, true)).setMergePolicy(newLogMergePolicy())); //writer.setUseCompoundFile(true); //writer.infoStream = System.out; for (int i = 0; i < 1000; i++) { @@ -107,7 +107,7 @@ public class TestTermVectors extends
LuceneTestCase { public void testTermVectorsFieldOrder() throws IOException { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, new MockAnalyzer(random, MockTokenizer.SIMPLE, true)); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir, new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true)); Document doc = new Document(); FieldType ft = new FieldType(TextField.TYPE_STORED); ft.setStoreTermVectors(true); @@ -238,8 +238,8 @@ public class TestTermVectors extends LuceneTestCase { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.SIMPLE, true)) + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true)) .setOpenMode(OpenMode.CREATE) .setMergePolicy(newLogMergePolicy()) .setSimilarity(new DefaultSimilarity())); @@ -261,7 +261,7 @@ public class TestTermVectors extends LuceneTestCase { while (termsEnum.next() != null) { String text = termsEnum.term().utf8ToString(); - docs = _TestUtil.docs(random, termsEnum, MultiFields.getLiveDocs(knownSearcher.reader), docs, true); + docs = _TestUtil.docs(getRandom(), termsEnum, MultiFields.getLiveDocs(knownSearcher.reader), docs, true); while (docs.nextDoc() != DocsEnum.NO_MORE_DOCS) { int docId = docs.docID(); @@ -333,8 +333,8 @@ public class TestTermVectors extends LuceneTestCase { // Test only a few docs having vectors public void testRareVectors() throws IOException { - RandomIndexWriter writer = new RandomIndexWriter(random, directory, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.SIMPLE, true)) + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), directory, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true)) .setOpenMode(OpenMode.CREATE)); if (VERBOSE) { System.out.println("TEST: now add non-vectors"); @@ -380,9 +380,9 @@ public class TestTermVectors extends LuceneTestCase { // In a single doc, for the same field, mix the term // vectors up public void testMixedVectrosVectors() throws IOException { - RandomIndexWriter writer = new RandomIndexWriter(random, directory, + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), directory, newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random, MockTokenizer.SIMPLE, true)).setOpenMode(OpenMode.CREATE)); + new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true)).setOpenMode(OpenMode.CREATE)); Document doc = new Document(); FieldType ft2 = new FieldType(TextField.TYPE_STORED); @@ -448,7 +448,7 @@ public class TestTermVectors extends LuceneTestCase { private IndexWriter createWriter(Directory dir) throws IOException { return new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)).setMaxBufferedDocs(2)); + new MockAnalyzer(getRandom())).setMaxBufferedDocs(2)); } private void createDir(Directory dir) throws IOException { diff --git lucene/core/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java lucene/core/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java index ddddfae..ae8e5d9 100644 --- lucene/core/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java +++ lucene/core/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java @@ -80,7 +80,7 @@ public class TestTimeLimitingCollector extends LuceneTestCase { 
"blueberry pizza", }; directory = newDirectory(); - RandomIndexWriter iw = new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + RandomIndexWriter iw = new RandomIndexWriter(getRandom(), directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setMergePolicy(newLogMergePolicy())); for (int i=0; i test2 - final IndexInput in = dir.openInput("test", newIOContext(random)); + final IndexInput in = dir.openInput("test", newIOContext(getRandom())); - out = dir.createOutput("test2", newIOContext(random)); + out = dir.createOutput("test2", newIOContext(getRandom())); upto = 0; while (upto < size) { - if (random.nextBoolean()) { + if (getRandom().nextBoolean()) { out.writeByte(in.readByte()); upto++; } else { final int chunk = Math.min( - _TestUtil.nextInt(random, 1, bytes.length), size - upto); + _TestUtil.nextInt(getRandom(), 1, bytes.length), size - upto); out.copyBytes(in, chunk); upto += chunk; } @@ -81,16 +81,16 @@ public class TestCopyBytes extends LuceneTestCase { in.close(); // verify - IndexInput in2 = dir.openInput("test2", newIOContext(random)); + IndexInput in2 = dir.openInput("test2", newIOContext(getRandom())); upto = 0; while (upto < size) { - if (random.nextBoolean()) { + if (getRandom().nextBoolean()) { final byte v = in2.readByte(); assertEquals(value(upto), v); upto++; } else { final int limit = Math.min( - _TestUtil.nextInt(random, 1, bytes.length), size - upto); + _TestUtil.nextInt(getRandom(), 1, bytes.length), size - upto); in2.readBytes(bytes, 0, limit); for (int byteIdx = 0; byteIdx < limit; byteIdx++) { assertEquals(value(upto), bytes[byteIdx]); @@ -109,9 +109,9 @@ public class TestCopyBytes extends LuceneTestCase { // LUCENE-3541 public void testCopyBytesWithThreads() throws Exception { - int datalen = _TestUtil.nextInt(random, 101, 10000); + int datalen = _TestUtil.nextInt(getRandom(), 101, 10000); byte data[] = new byte[datalen]; - random.nextBytes(data); + getRandom().nextBytes(data); Directory d = newDirectory(); IndexOutput output = d.createOutput("data", IOContext.DEFAULT); diff --git lucene/core/src/test/org/apache/lucene/store/TestDirectory.java lucene/core/src/test/org/apache/lucene/store/TestDirectory.java index e17cf53..8965845 100644 --- lucene/core/src/test/org/apache/lucene/store/TestDirectory.java +++ lucene/core/src/test/org/apache/lucene/store/TestDirectory.java @@ -31,7 +31,7 @@ public class TestDirectory extends LuceneTestCase { for (Directory dir : dirs) { dir.close(); try { - dir.createOutput("test", newIOContext(random)); + dir.createOutput("test", newIOContext(getRandom())); fail("did not hit expected exception"); } catch (AlreadyClosedException ace) { } @@ -56,7 +56,7 @@ public class TestDirectory extends LuceneTestCase { dir.ensureOpen(); String fname = "foo." + i; String lockname = "foo" + i + ".lck"; - IndexOutput out = dir.createOutput(fname, newIOContext(random)); + IndexOutput out = dir.createOutput(fname, newIOContext(getRandom())); out.writeByte((byte)i); out.close(); @@ -70,7 +70,7 @@ public class TestDirectory extends LuceneTestCase { // closed and will cause a failure to delete the file. 
if (d2 instanceof MMapDirectory) continue; - IndexInput input = d2.openInput(fname, newIOContext(random)); + IndexInput input = d2.openInput(fname, newIOContext(getRandom())); assertEquals((byte)i, input.readByte()); input.close(); } @@ -141,7 +141,7 @@ public class TestDirectory extends LuceneTestCase { private void checkDirectoryFilter(Directory dir) throws IOException { String name = "file"; try { - dir.createOutput(name, newIOContext(random)).close(); + dir.createOutput(name, newIOContext(getRandom())).close(); assertTrue(dir.fileExists(name)); assertTrue(Arrays.asList(dir.listAll()).contains(name)); } finally { @@ -156,7 +156,7 @@ public class TestDirectory extends LuceneTestCase { path.mkdirs(); new File(path, "subdir").mkdirs(); Directory fsDir = new SimpleFSDirectory(path, null); - assertEquals(0, new RAMDirectory(fsDir, newIOContext(random)).listAll().length); + assertEquals(0, new RAMDirectory(fsDir, newIOContext(getRandom())).listAll().length); } finally { _TestUtil.rmDir(path); } @@ -167,7 +167,7 @@ public class TestDirectory extends LuceneTestCase { File path = _TestUtil.getTempDir("testnotdir"); Directory fsDir = new SimpleFSDirectory(path, null); try { - IndexOutput out = fsDir.createOutput("afile", newIOContext(random)); + IndexOutput out = fsDir.createOutput("afile", newIOContext(getRandom())); out.close(); assertTrue(fsDir.fileExists("afile")); try { diff --git lucene/core/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java lucene/core/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java index 0e82e45..4b6c0e0 100644 --- lucene/core/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java +++ lucene/core/src/test/org/apache/lucene/store/TestFileSwitchDirectory.java @@ -43,16 +43,16 @@ public class TestFileSwitchDirectory extends LuceneTestCase { fileExtensions.add(Lucene40StoredFieldsWriter.FIELDS_EXTENSION); fileExtensions.add(Lucene40StoredFieldsWriter.FIELDS_INDEX_EXTENSION); - MockDirectoryWrapper primaryDir = new MockDirectoryWrapper(random, new RAMDirectory()); + MockDirectoryWrapper primaryDir = new MockDirectoryWrapper(getRandom(), new RAMDirectory()); primaryDir.setCheckIndexOnClose(false); // only part of an index - MockDirectoryWrapper secondaryDir = new MockDirectoryWrapper(random, new RAMDirectory()); + MockDirectoryWrapper secondaryDir = new MockDirectoryWrapper(getRandom(), new RAMDirectory()); secondaryDir.setCheckIndexOnClose(false); // only part of an index FileSwitchDirectory fsd = new FileSwitchDirectory(fileExtensions, primaryDir, secondaryDir, true); // for now we wire Lucene40Codec because we rely upon its specific impl IndexWriter writer = new IndexWriter( fsd, - new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())). setMergePolicy(newLogMergePolicy(false)).setCodec(Codec.forName("Lucene40")) ); TestIndexWriterReader.createIndexNoClose(true, "ram", writer); @@ -87,7 +87,7 @@ public class TestFileSwitchDirectory extends LuceneTestCase { Directory a = new SimpleFSDirectory(_TestUtil.getTempDir("foo")); Directory b = new SimpleFSDirectory(_TestUtil.getTempDir("bar")); FileSwitchDirectory switchDir = new FileSwitchDirectory(primaryExtensions, a, b, true); - return new MockDirectoryWrapper(random, switchDir); + return new MockDirectoryWrapper(getRandom(), switchDir); } // LUCENE-3380 -- make sure we get exception if the directory really does not exist. 
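
For context on what the TestFileSwitchDirectory hunks exercise: FileSwitchDirectory routes files whose extension is in the primary set to one delegate Directory and all other files to the second. A short sketch using the same four-argument constructor the test calls; the ".cfs" extension set mirrors the test, the rest is illustrative:

import java.util.Collections;
import java.util.Set;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FileSwitchDirectory;
import org.apache.lucene.store.RAMDirectory;

public class FileSwitchSketch {
  public static void main(String[] args) throws Exception {
    // Files whose extension is in this set go to 'primary';
    // everything else goes to 'secondary'.
    Set<String> primaryExtensions = Collections.singleton("cfs");
    Directory primary = new RAMDirectory();
    Directory secondary = new RAMDirectory();
    // The final 'true' asks the switch directory to close both delegates.
    FileSwitchDirectory dir =
        new FileSwitchDirectory(primaryExtensions, primary, secondary, true);
    dir.close();
  }
}
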
@@ -107,7 +107,7 @@ public class TestFileSwitchDirectory extends LuceneTestCase { Directory dir = newFSSwitchDirectory(Collections.emptySet()); String name = "file"; try { - dir.createOutput(name, newIOContext(random)).close(); + dir.createOutput(name, newIOContext(getRandom())).close(); assertTrue(dir.fileExists(name)); assertTrue(Arrays.asList(dir.listAll()).contains(name)); } finally { @@ -118,12 +118,12 @@ public class TestFileSwitchDirectory extends LuceneTestCase { // LUCENE-3380 test that delegate compound files correctly. public void testCompoundFileAppendTwice() throws IOException { Directory newDir = newFSSwitchDirectory(Collections.singleton("cfs")); - CompoundFileDirectory csw = new CompoundFileDirectory(newDir, "d.cfs", newIOContext(random), true); + CompoundFileDirectory csw = new CompoundFileDirectory(newDir, "d.cfs", newIOContext(getRandom()), true); createSequenceFile(newDir, "d1", (byte) 0, 15); - IndexOutput out = csw.createOutput("d.xyz", newIOContext(random)); + IndexOutput out = csw.createOutput("d.xyz", newIOContext(getRandom())); out.writeInt(0); try { - newDir.copy(csw, "d1", "d1", newIOContext(random)); + newDir.copy(csw, "d1", "d1", newIOContext(getRandom())); fail("file does already exist"); } catch (IOException e) { // @@ -134,7 +134,7 @@ public class TestFileSwitchDirectory extends LuceneTestCase { csw.close(); - CompoundFileDirectory cfr = new CompoundFileDirectory(newDir, "d.cfs", newIOContext(random), false); + CompoundFileDirectory cfr = new CompoundFileDirectory(newDir, "d.cfs", newIOContext(getRandom()), false); assertEquals(1, cfr.listAll().length); assertEquals("d.xyz", cfr.listAll()[0]); cfr.close(); @@ -146,7 +146,7 @@ public class TestFileSwitchDirectory extends LuceneTestCase { * computed as start + offset where offset is the number of the byte. */ private void createSequenceFile(Directory dir, String name, byte start, int size) throws IOException { - IndexOutput os = dir.createOutput(name, newIOContext(random)); + IndexOutput os = dir.createOutput(name, newIOContext(getRandom())); for (int i=0; i < size; i++) { os.writeByte(start); start ++; diff --git lucene/core/src/test/org/apache/lucene/store/TestLockFactory.java lucene/core/src/test/org/apache/lucene/store/TestLockFactory.java index c41ca65..82f90c5 100755 --- lucene/core/src/test/org/apache/lucene/store/TestLockFactory.java +++ lucene/core/src/test/org/apache/lucene/store/TestLockFactory.java @@ -43,14 +43,14 @@ public class TestLockFactory extends LuceneTestCase { // methods are called at the right time, locks are created, etc. 
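
The TestLockFactory hunks that follow all reduce to one invariant: with the default SingleInstanceLockFactory, a second IndexWriter on the same Directory must fail to obtain the write lock, while NoLockFactory lets both writers proceed. A compact restatement of the failing case, built from the same calls the hunks use (the concrete failure is a LockObtainFailedException, an IOException subclass, though the test itself only catches IOException):

import java.io.IOException;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;

public class LockCheckExample extends LuceneTestCase {
  public void testSecondWriterIsBlocked() throws Exception {
    Directory dir = new RAMDirectory(); // default: SingleInstanceLockFactory
    IndexWriter first = new IndexWriter(dir,
        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())));
    try {
      // A second writer on the same Directory must fail to obtain the
      // write lock while the first one is still open.
      new IndexWriter(dir,
          new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())));
      fail("expected a lock failure for the second IndexWriter");
    } catch (IOException expected) {
      // concretely a LockObtainFailedException in this situation
    } finally {
      first.close();
      dir.close();
    }
  }
}
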
public void testCustomLockFactory() throws IOException { - Directory dir = new MockDirectoryWrapper(random, new RAMDirectory()); + Directory dir = new MockDirectoryWrapper(getRandom(), new RAMDirectory()); MockLockFactory lf = new MockLockFactory(); dir.setLockFactory(lf); // Lock prefix should have been set: assertTrue("lock prefix was not set by the RAMDirectory", lf.lockPrefixSet); - IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); // add 100 documents (so that commit lock is used) for (int i = 0; i < 100; i++) { @@ -76,19 +76,19 @@ public class TestLockFactory extends LuceneTestCase { // exceptions raised: // Verify: NoLockFactory allows two IndexWriters public void testRAMDirectoryNoLocking() throws IOException { - Directory dir = new MockDirectoryWrapper(random, new RAMDirectory()); + Directory dir = new MockDirectoryWrapper(getRandom(), new RAMDirectory()); dir.setLockFactory(NoLockFactory.getNoLockFactory()); assertTrue("RAMDirectory.setLockFactory did not take", NoLockFactory.class.isInstance(dir.getLockFactory())); - IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); writer.commit(); // required so the second open succeed // Create a 2nd IndexWriter. This is normally not allowed but it should run through since we're not // using any locks: IndexWriter writer2 = null; try { - writer2 = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); + writer2 = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setOpenMode(OpenMode.APPEND)); } catch (Exception e) { e.printStackTrace(System.out); fail("Should not have hit an IOException with no locking"); @@ -108,12 +108,12 @@ public class TestLockFactory extends LuceneTestCase { assertTrue("RAMDirectory did not use correct LockFactory: got " + dir.getLockFactory(), SingleInstanceLockFactory.class.isInstance(dir.getLockFactory())); - IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); // Create a 2nd IndexWriter. 
This should fail: IndexWriter writer2 = null; try { - writer2 = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); + writer2 = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setOpenMode(OpenMode.APPEND)); fail("Should have hit an IOException with two IndexWriters on default SingleInstanceLockFactory"); } catch (IOException e) { } @@ -151,7 +151,7 @@ public class TestLockFactory extends LuceneTestCase { Directory dir = newFSDirectory(indexDir, lockFactory); // First create a 1 doc index: - IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE)); + IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setOpenMode(OpenMode.CREATE)); addDoc(w); w.close(); @@ -289,7 +289,7 @@ public class TestLockFactory extends LuceneTestCase { IndexWriter writer = null; for(int i=0;i= minTargetSize); } @@ -118,9 +118,9 @@ public class TestArrayUtil extends LuceneTestCase { } private Integer[] createRandomArray(int maxSize) { - final Integer[] a = new Integer[random.nextInt(maxSize) + 1]; + final Integer[] a = new Integer[getRandom().nextInt(maxSize) + 1]; for (int i = 0; i < a.length; i++) { - a[i] = Integer.valueOf(random.nextInt(a.length)); + a[i] = Integer.valueOf(getRandom().nextInt(a.length)); } return a; } @@ -146,9 +146,9 @@ public class TestArrayUtil extends LuceneTestCase { } private Integer[] createSparseRandomArray(int maxSize) { - final Integer[] a = new Integer[random.nextInt(maxSize) + 1]; + final Integer[] a = new Integer[getRandom().nextInt(maxSize) + 1]; for (int i = 0; i < a.length; i++) { - a[i] = Integer.valueOf(random.nextInt(2)); + a[i] = Integer.valueOf(getRandom().nextInt(2)); } return a; } @@ -228,8 +228,8 @@ public class TestArrayUtil extends LuceneTestCase { // so they should always be in order after sorting. // The other half has defined order, but no (-1) value (they should appear after // all above, when sorted). - final boolean equal = random.nextBoolean(); - items[i] = new Item(equal ? (i+1) : -1, equal ? 0 : (random.nextInt(1000)+1)); + final boolean equal = getRandom().nextBoolean(); + items[i] = new Item(equal ? (i+1) : -1, equal ? 
0 : (getRandom().nextInt(1000)+1)); } if (VERBOSE) System.out.println("Before: " + Arrays.toString(items)); diff --git lucene/core/src/test/org/apache/lucene/util/TestByteBlockPool.java lucene/core/src/test/org/apache/lucene/util/TestByteBlockPool.java index 26ea474..4940b97 100644 --- lucene/core/src/test/org/apache/lucene/util/TestByteBlockPool.java +++ lucene/core/src/test/org/apache/lucene/util/TestByteBlockPool.java @@ -35,18 +35,18 @@ public class TestByteBlockPool extends LuceneTestCase { final int numValues = atLeast(100); BytesRef ref = new BytesRef(); for (int i = 0; i < numValues; i++) { - final String value = _TestUtil.randomRealisticUnicodeString(random, + final String value = _TestUtil.randomRealisticUnicodeString(getRandom(), maxLength); list.add(value); ref.copyChars(value); pool.copy(ref); } RAMDirectory dir = new RAMDirectory(); - IndexOutput stream = dir.createOutput("foo.txt", newIOContext(random)); + IndexOutput stream = dir.createOutput("foo.txt", newIOContext(getRandom())); pool.writePool(stream); stream.flush(); stream.close(); - IndexInput input = dir.openInput("foo.txt", newIOContext(random)); + IndexInput input = dir.openInput("foo.txt", newIOContext(getRandom())); assertEquals(pool.byteOffset + pool.byteUpto, stream.length()); BytesRef expected = new BytesRef(); BytesRef actual = new BytesRef(); diff --git lucene/core/src/test/org/apache/lucene/util/TestBytesRef.java lucene/core/src/test/org/apache/lucene/util/TestBytesRef.java index cb5bb53..20503c8 100644 --- lucene/core/src/test/org/apache/lucene/util/TestBytesRef.java +++ lucene/core/src/test/org/apache/lucene/util/TestBytesRef.java @@ -40,7 +40,7 @@ public class TestBytesRef extends LuceneTestCase { public void testFromChars() { for (int i = 0; i < 100; i++) { - String s = _TestUtil.randomUnicodeString(random); + String s = _TestUtil.randomUnicodeString(getRandom()); String s2 = new BytesRef(s).utf8ToString(); assertEquals(s, s2); } diff --git lucene/core/src/test/org/apache/lucene/util/TestBytesRefHash.java lucene/core/src/test/org/apache/lucene/util/TestBytesRefHash.java index 1e0ecd9..30bfa39 100644 --- lucene/core/src/test/org/apache/lucene/util/TestBytesRefHash.java +++ lucene/core/src/test/org/apache/lucene/util/TestBytesRefHash.java @@ -49,13 +49,13 @@ public class TestBytesRefHash extends LuceneTestCase { } private ByteBlockPool newPool(){ - return random.nextBoolean() && pool != null ? pool - : new ByteBlockPool(new RecyclingByteBlockAllocator(ByteBlockPool.BYTE_BLOCK_SIZE, random.nextInt(25))); + return getRandom().nextBoolean() && pool != null ? pool + : new ByteBlockPool(new RecyclingByteBlockAllocator(ByteBlockPool.BYTE_BLOCK_SIZE, getRandom().nextInt(25))); } private BytesRefHash newHash(ByteBlockPool blockPool) { - final int initSize = 2 << 1 + random.nextInt(5); - return random.nextBoolean() ? new BytesRefHash(blockPool) : new BytesRefHash( + final int initSize = 2 << 1 + getRandom().nextInt(5); + return getRandom().nextBoolean() ? 
new BytesRefHash(blockPool) : new BytesRefHash( blockPool, initSize, new BytesRefHash.DirectBytesStartArray(initSize)); } @@ -67,11 +67,11 @@ public class TestBytesRefHash extends LuceneTestCase { BytesRef ref = new BytesRef(); int num = atLeast(2); for (int j = 0; j < num; j++) { - final int mod = 1+random.nextInt(39); + final int mod = 1+getRandom().nextInt(39); for (int i = 0; i < 797; i++) { String str; do { - str = _TestUtil.randomRealisticUnicodeString(random, 1000); + str = _TestUtil.randomRealisticUnicodeString(getRandom(), 1000); } while (str.length() == 0); ref.copyChars(str); int count = hash.size(); @@ -105,7 +105,7 @@ public class TestBytesRefHash extends LuceneTestCase { for (int i = 0; i < 797; i++) { String str; do { - str = _TestUtil.randomRealisticUnicodeString(random, 1000); + str = _TestUtil.randomRealisticUnicodeString(getRandom(), 1000); } while (str.length() == 0); ref.copyChars(str); int count = hash.size(); @@ -144,7 +144,7 @@ public class TestBytesRefHash extends LuceneTestCase { for (int i = 0; i < size; i++) { String str; do { - str = _TestUtil.randomRealisticUnicodeString(random, 1000); + str = _TestUtil.randomRealisticUnicodeString(getRandom(), 1000); } while (str.length() == 0); ref.copyChars(str); final int key = hash.add(ref); @@ -184,7 +184,7 @@ public class TestBytesRefHash extends LuceneTestCase { for (int i = 0; i < 797; i++) { String str; do { - str = _TestUtil.randomRealisticUnicodeString(random, 1000); + str = _TestUtil.randomRealisticUnicodeString(getRandom(), 1000); } while (str.length() == 0); ref.copyChars(str); hash.add(ref); @@ -223,7 +223,7 @@ public class TestBytesRefHash extends LuceneTestCase { for (int i = 0; i < 797; i++) { String str; do { - str = _TestUtil.randomRealisticUnicodeString(random, 1000); + str = _TestUtil.randomRealisticUnicodeString(getRandom(), 1000); } while (str.length() == 0); ref.copyChars(str); int count = hash.size(); @@ -251,9 +251,9 @@ public class TestBytesRefHash extends LuceneTestCase { @Test(expected = MaxBytesLengthExceededException.class) public void testLargeValue() { - int[] sizes = new int[] { random.nextInt(5), - ByteBlockPool.BYTE_BLOCK_SIZE - 33 + random.nextInt(31), - ByteBlockPool.BYTE_BLOCK_SIZE - 1 + random.nextInt(37) }; + int[] sizes = new int[] { getRandom().nextInt(5), + ByteBlockPool.BYTE_BLOCK_SIZE - 33 + getRandom().nextInt(31), + ByteBlockPool.BYTE_BLOCK_SIZE - 1 + getRandom().nextInt(37) }; BytesRef ref = new BytesRef(); for (int i = 0; i < sizes.length; i++) { ref.bytes = new byte[sizes[i]]; @@ -286,7 +286,7 @@ public class TestBytesRefHash extends LuceneTestCase { for (int i = 0; i < 797; i++) { String str; do { - str = _TestUtil.randomRealisticUnicodeString(random, 1000); + str = _TestUtil.randomRealisticUnicodeString(getRandom(), 1000); } while (str.length() == 0); ref.copyChars(str); int count = hash.size(); diff --git lucene/core/src/test/org/apache/lucene/util/TestCharsRef.java lucene/core/src/test/org/apache/lucene/util/TestCharsRef.java index f6d25ce..18e46eb 100644 --- lucene/core/src/test/org/apache/lucene/util/TestCharsRef.java +++ lucene/core/src/test/org/apache/lucene/util/TestCharsRef.java @@ -26,7 +26,7 @@ public class TestCharsRef extends LuceneTestCase { CharsRef utf16[] = new CharsRef[numStrings]; for (int i = 0; i < numStrings; i++) { - String s = _TestUtil.randomUnicodeString(random); + String s = _TestUtil.randomUnicodeString(getRandom()); utf8[i] = new BytesRef(s); utf16[i] = new CharsRef(s); } @@ -44,8 +44,8 @@ public class TestCharsRef extends LuceneTestCase { 
StringBuilder builder = new StringBuilder(); int numStrings = atLeast(10); for (int i = 0; i < numStrings; i++) { - char[] charArray = _TestUtil.randomRealisticUnicodeString(random, 1, 100).toCharArray(); - int offset = random.nextInt(charArray.length); + char[] charArray = _TestUtil.randomRealisticUnicodeString(getRandom(), 1, 100).toCharArray(); + int offset = getRandom().nextInt(charArray.length); int length = charArray.length - offset; builder.append(charArray, offset, length); ref.append(charArray, offset, length); @@ -58,8 +58,8 @@ public class TestCharsRef extends LuceneTestCase { int numIters = atLeast(10); for (int i = 0; i < numIters; i++) { CharsRef ref = new CharsRef(); - char[] charArray = _TestUtil.randomRealisticUnicodeString(random, 1, 100).toCharArray(); - int offset = random.nextInt(charArray.length); + char[] charArray = _TestUtil.randomRealisticUnicodeString(getRandom(), 1, 100).toCharArray(); + int offset = getRandom().nextInt(charArray.length); int length = charArray.length - offset; String str = new String(charArray, offset, length); ref.copyChars(charArray, offset, length); diff --git lucene/core/src/test/org/apache/lucene/util/TestCollectionUtil.java lucene/core/src/test/org/apache/lucene/util/TestCollectionUtil.java index fb9235c..031f6b6 100644 --- lucene/core/src/test/org/apache/lucene/util/TestCollectionUtil.java +++ lucene/core/src/test/org/apache/lucene/util/TestCollectionUtil.java @@ -26,9 +26,9 @@ import java.util.List; public class TestCollectionUtil extends LuceneTestCase { private List createRandomList(int maxSize) { - final Integer[] a = new Integer[random.nextInt(maxSize) + 1]; + final Integer[] a = new Integer[getRandom().nextInt(maxSize) + 1]; for (int i = 0; i < a.length; i++) { - a[i] = Integer.valueOf(random.nextInt(a.length)); + a[i] = Integer.valueOf(getRandom().nextInt(a.length)); } return Arrays.asList(a); } diff --git lucene/core/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java lucene/core/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java index 033427a..4ca1881 100644 --- lucene/core/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java +++ lucene/core/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java @@ -46,8 +46,8 @@ public class TestFieldCacheSanityChecker extends LuceneTestCase { dirA = newDirectory(); dirB = newDirectory(); - IndexWriter wA = new IndexWriter(dirA, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); - IndexWriter wB = new IndexWriter(dirB, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + IndexWriter wA = new IndexWriter(dirA, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); + IndexWriter wB = new IndexWriter(dirB, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); long theLong = Long.MAX_VALUE; double theDouble = Double.MAX_VALUE; diff --git lucene/core/src/test/org/apache/lucene/util/TestFixedBitSet.java lucene/core/src/test/org/apache/lucene/util/TestFixedBitSet.java index 3362694..8e4f50d 100644 --- lucene/core/src/test/org/apache/lucene/util/TestFixedBitSet.java +++ lucene/core/src/test/org/apache/lucene/util/TestFixedBitSet.java @@ -43,7 +43,7 @@ public class TestFixedBitSet extends LuceneTestCase { } void doPrevSetBit(BitSet a, FixedBitSet b) { - int aa = a.size() + random.nextInt(100); + int aa = a.size() + getRandom().nextInt(100); int bb = aa; do { // aa = a.prevSetBit(aa-1); @@ -75,7 +75,7 @@ public class TestFixedBitSet extends LuceneTestCase { DocIdSetIterator 
iterator = b.iterator(); do { aa = a.nextSetBit(aa+1); - bb = (bb < b.length() && random.nextBoolean()) ? iterator.nextDoc() : iterator.advance(bb + 1); + bb = (bb < b.length() && getRandom().nextBoolean()) ? iterator.nextDoc() : iterator.advance(bb + 1); assertEquals(aa == -1 ? DocIdSetIterator.NO_MORE_DOCS : aa, bb); } while (aa>=0); } @@ -85,7 +85,7 @@ public class TestFixedBitSet extends LuceneTestCase { DocIdSetIterator iterator = b.iterator(); do { aa = a.nextSetBit(aa+1); - bb = random.nextBoolean() ? iterator.nextDoc() : iterator.advance(bb + 1); + bb = getRandom().nextBoolean() ? iterator.nextDoc() : iterator.advance(bb + 1); assertEquals(aa == -1 ? DocIdSetIterator.NO_MORE_DOCS : aa, bb); } while (aa>=0); } @@ -95,29 +95,29 @@ public class TestFixedBitSet extends LuceneTestCase { FixedBitSet b0=null; for (int i=0; i0) { - int nOper = random.nextInt(sz); + int nOper = getRandom().nextInt(sz); for (int j=0; j=0); } @@ -113,7 +113,7 @@ public class TestOpenBitSet extends LuceneTestCase { OpenBitSetIterator iterator = new OpenBitSetIterator(b); do { aa = a.nextSetBit(aa+1); - bb = random.nextBoolean() ? iterator.nextDoc() : iterator.advance(bb + 1); + bb = getRandom().nextBoolean() ? iterator.nextDoc() : iterator.advance(bb + 1); assertEquals(aa == -1 ? DocIdSetIterator.NO_MORE_DOCS : aa, bb); } while (aa>=0); } @@ -123,33 +123,33 @@ public class TestOpenBitSet extends LuceneTestCase { OpenBitSet b0=null; for (int i=0; i0) { - int nOper = random.nextInt(sz); + int nOper = getRandom().nextInt(sz); for (int j=0; j>1)+1); + fromIndex = getRandom().nextInt(sz+80); + toIndex = fromIndex + getRandom().nextInt((sz>>1)+1); BitSet aa = (BitSet)a.clone(); aa.flip(fromIndex,toIndex); OpenBitSet bb = (OpenBitSet)b.clone(); bb.flip(fromIndex,toIndex); doIterate(aa,bb, mode); // a problem here is from flip or doIterate - fromIndex = random.nextInt(sz+80); - toIndex = fromIndex + random.nextInt((sz>>1)+1); + fromIndex = getRandom().nextInt(sz+80); + toIndex = fromIndex + getRandom().nextInt((sz>>1)+1); aa = (BitSet)a.clone(); aa.clear(fromIndex,toIndex); bb = (OpenBitSet)b.clone(); bb.clear(fromIndex,toIndex); @@ -198,8 +198,8 @@ public class TestOpenBitSet extends LuceneTestCase { doPrevSetBit(aa,bb); doPrevSetBitLong(aa,bb); - fromIndex = random.nextInt(sz+80); - toIndex = fromIndex + random.nextInt((sz>>1)+1); + fromIndex = getRandom().nextInt(sz+80); + toIndex = fromIndex + getRandom().nextInt((sz>>1)+1); aa = (BitSet)a.clone(); aa.set(fromIndex,toIndex); bb = (OpenBitSet)b.clone(); bb.set(fromIndex,toIndex); diff --git lucene/core/src/test/org/apache/lucene/util/TestPagedBytes.java lucene/core/src/test/org/apache/lucene/util/TestPagedBytes.java index 73e84bd..bd38fb4 100644 --- lucene/core/src/test/org/apache/lucene/util/TestPagedBytes.java +++ lucene/core/src/test/org/apache/lucene/util/TestPagedBytes.java @@ -26,34 +26,34 @@ public class TestPagedBytes extends LuceneTestCase { public void testDataInputOutput() throws Exception { for(int iter=0;iter<5*RANDOM_MULTIPLIER;iter++) { - final PagedBytes p = new PagedBytes(_TestUtil.nextInt(random, 1, 20)); + final PagedBytes p = new PagedBytes(_TestUtil.nextInt(getRandom(), 1, 20)); final DataOutput out = p.getDataOutput(); - final int numBytes = random.nextInt(10000000); + final int numBytes = getRandom().nextInt(10000000); final byte[] answer = new byte[numBytes]; - random.nextBytes(answer); + getRandom().nextBytes(answer); int written = 0; while(written < numBytes) { - if (random.nextInt(10) == 7) { + if (getRandom().nextInt(10) == 7) { 
out.writeByte(answer[written++]); } else { - int chunk = Math.min(random.nextInt(1000), numBytes - written); + int chunk = Math.min(getRandom().nextInt(1000), numBytes - written); out.writeBytes(answer, written, chunk); written += chunk; } } - p.freeze(random.nextBoolean()); + p.freeze(getRandom().nextBoolean()); final DataInput in = p.getDataInput(); final byte[] verify = new byte[numBytes]; int read = 0; while(read < numBytes) { - if (random.nextInt(10) == 7) { + if (getRandom().nextInt(10) == 7) { verify[read++] = in.readByte(); } else { - int chunk = Math.min(random.nextInt(1000), numBytes - read); + int chunk = Math.min(getRandom().nextInt(1000), numBytes - read); in.readBytes(verify, read, chunk); read += chunk; } diff --git lucene/core/src/test/org/apache/lucene/util/TestPriorityQueue.java lucene/core/src/test/org/apache/lucene/util/TestPriorityQueue.java index d36e1d1..eb59441 100644 --- lucene/core/src/test/org/apache/lucene/util/TestPriorityQueue.java +++ lucene/core/src/test/org/apache/lucene/util/TestPriorityQueue.java @@ -33,7 +33,7 @@ public class TestPriorityQueue extends LuceneTestCase { } public void testPQ() throws Exception { - testPQ(atLeast(10000), random); + testPQ(atLeast(10000), getRandom()); } public static void testPQ(int count, Random gen) { diff --git lucene/core/src/test/org/apache/lucene/util/TestRecyclingByteBlockAllocator.java lucene/core/src/test/org/apache/lucene/util/TestRecyclingByteBlockAllocator.java index 2425cf1..e890f55 100644 --- lucene/core/src/test/org/apache/lucene/util/TestRecyclingByteBlockAllocator.java +++ lucene/core/src/test/org/apache/lucene/util/TestRecyclingByteBlockAllocator.java @@ -38,8 +38,8 @@ public class TestRecyclingByteBlockAllocator extends LuceneTestCase { } private RecyclingByteBlockAllocator newAllocator() { - return new RecyclingByteBlockAllocator(1 << (2 + random.nextInt(15)), - random.nextInt(97), new AtomicLong()); + return new RecyclingByteBlockAllocator(1 << (2 + getRandom().nextInt(15)), + getRandom().nextInt(97), new AtomicLong()); } @Test @@ -74,7 +74,7 @@ public class TestRecyclingByteBlockAllocator extends LuceneTestCase { int numIters = atLeast(97); for (int i = 0; i < numIters; i++) { - int num = 1 + random.nextInt(39); + int num = 1 + getRandom().nextInt(39); for (int j = 0; j < num; j++) { block = allocator.getByteBlock(); assertNotNull(block); @@ -84,8 +84,8 @@ public class TestRecyclingByteBlockAllocator extends LuceneTestCase { .bytesUsed()); } byte[][] array = allocated.toArray(new byte[0][]); - int begin = random.nextInt(array.length); - int end = begin + random.nextInt(array.length - begin); + int begin = getRandom().nextInt(array.length); + int end = begin + getRandom().nextInt(array.length - begin); List selected = new ArrayList(); for (int j = begin; j < end; j++) { selected.add(array[j]); @@ -111,7 +111,7 @@ public class TestRecyclingByteBlockAllocator extends LuceneTestCase { int numIters = atLeast(97); for (int i = 0; i < numIters; i++) { - int num = 1 + random.nextInt(39); + int num = 1 + getRandom().nextInt(39); for (int j = 0; j < num; j++) { block = allocator.getByteBlock(); freeButAllocated = Math.max(0, freeButAllocated - 1); @@ -123,8 +123,8 @@ public class TestRecyclingByteBlockAllocator extends LuceneTestCase { } byte[][] array = allocated.toArray(new byte[0][]); - int begin = random.nextInt(array.length); - int end = begin + random.nextInt(array.length - begin); + int begin = getRandom().nextInt(array.length); + int end = begin + getRandom().nextInt(array.length - begin); for (int j = 
begin; j < end; j++) { byte[] b = array[j]; assertTrue(allocated.remove(b)); @@ -135,7 +135,7 @@ public class TestRecyclingByteBlockAllocator extends LuceneTestCase { } // randomly free blocks int numFreeBlocks = allocator.numBufferedBlocks(); - int freeBlocks = allocator.freeBlocks(random.nextInt(7 + allocator + int freeBlocks = allocator.freeBlocks(getRandom().nextInt(7 + allocator .maxBufferedBlocks())); assertEquals(allocator.numBufferedBlocks(), numFreeBlocks - freeBlocks); } diff --git lucene/core/src/test/org/apache/lucene/util/TestSentinelIntSet.java lucene/core/src/test/org/apache/lucene/util/TestSentinelIntSet.java index 4651134..d44d3d7 100644 --- lucene/core/src/test/org/apache/lucene/util/TestSentinelIntSet.java +++ lucene/core/src/test/org/apache/lucene/util/TestSentinelIntSet.java @@ -52,15 +52,15 @@ public class TestSentinelIntSet extends LuceneTestCase { @Test public void testRandom() throws Exception { for (int i=0; i<10000; i++) { - int initSz = random.nextInt(20); - int num = random.nextInt(30); - int maxVal = (random.nextBoolean() ? random.nextInt(50) : random.nextInt(Integer.MAX_VALUE)) + 1; + int initSz = getRandom().nextInt(20); + int num = getRandom().nextInt(30); + int maxVal = (getRandom().nextBoolean() ? getRandom().nextInt(50) : getRandom().nextInt(Integer.MAX_VALUE)) + 1; HashSet a = new HashSet(initSz); SentinelIntSet b = new SentinelIntSet(initSz, -1); for (int j=0; j set = new SetOnce(); SetOnceThread[] threads = new SetOnceThread[10]; for (int i = 0; i < threads.length; i++) { - threads[i] = new SetOnceThread(random); + threads[i] = new SetOnceThread(getRandom()); threads[i].setName("t-" + (i+1)); threads[i].set = set; } diff --git lucene/core/src/test/org/apache/lucene/util/TestSmallFloat.java lucene/core/src/test/org/apache/lucene/util/TestSmallFloat.java index 2ee03c6..eb87a9e 100644 --- lucene/core/src/test/org/apache/lucene/util/TestSmallFloat.java +++ lucene/core/src/test/org/apache/lucene/util/TestSmallFloat.java @@ -114,7 +114,7 @@ public class TestSmallFloat extends LuceneTestCase { // up iterations for more exhaustive test after changing something int num = atLeast(100000); for (int i = 0; i < num; i++) { - float f = Float.intBitsToFloat(random.nextInt()); + float f = Float.intBitsToFloat(getRandom().nextInt()); if (Float.isNaN(f)) continue; // skip NaN byte b1 = orig_floatToByte(f); byte b2 = SmallFloat.floatToByte(f,3,15); diff --git lucene/core/src/test/org/apache/lucene/util/TestTwoPhaseCommitTool.java lucene/core/src/test/org/apache/lucene/util/TestTwoPhaseCommitTool.java index ddbb540..3618934 100644 --- lucene/core/src/test/org/apache/lucene/util/TestTwoPhaseCommitTool.java +++ lucene/core/src/test/org/apache/lucene/util/TestTwoPhaseCommitTool.java @@ -91,13 +91,13 @@ public class TestTwoPhaseCommitTool extends LuceneTestCase { public void testRollback() throws Exception { // tests that rollback is called if failure occurs at any stage - int numObjects = random.nextInt(8) + 3; // between [3, 10] + int numObjects = getRandom().nextInt(8) + 3; // between [3, 10] TwoPhaseCommitImpl[] objects = new TwoPhaseCommitImpl[numObjects]; for (int i = 0; i < objects.length; i++) { - boolean failOnPrepare = random.nextBoolean(); + boolean failOnPrepare = getRandom().nextBoolean(); // we should not hit failures on commit usually - boolean failOnCommit = random.nextDouble() < 0.05; - boolean railOnRollback = random.nextBoolean(); + boolean failOnCommit = getRandom().nextDouble() < 0.05; + boolean railOnRollback = getRandom().nextBoolean(); objects[i] = 
new TwoPhaseCommitImpl(failOnPrepare, failOnCommit, railOnRollback); } @@ -138,11 +138,11 @@ public class TestTwoPhaseCommitTool extends LuceneTestCase { } public void testNullTPCs() throws Exception { - int numObjects = random.nextInt(4) + 3; // between [3, 6] + int numObjects = getRandom().nextInt(4) + 3; // between [3, 6] TwoPhaseCommit[] tpcs = new TwoPhaseCommit[numObjects]; boolean setNull = false; for (int i = 0; i < tpcs.length; i++) { - boolean isNull = random.nextDouble() < 0.3; + boolean isNull = getRandom().nextDouble() < 0.3; if (isNull) { setNull = true; tpcs[i] = null; @@ -153,7 +153,7 @@ public class TestTwoPhaseCommitTool extends LuceneTestCase { if (!setNull) { // none of the TPCs were picked to be null, pick one at random - int idx = random.nextInt(numObjects); + int idx = getRandom().nextInt(numObjects); tpcs[idx] = null; } diff --git lucene/core/src/test/org/apache/lucene/util/TestUnicodeUtil.java lucene/core/src/test/org/apache/lucene/util/TestUnicodeUtil.java index 32d39c0..838212b 100644 --- lucene/core/src/test/org/apache/lucene/util/TestUnicodeUtil.java +++ lucene/core/src/test/org/apache/lucene/util/TestUnicodeUtil.java @@ -90,7 +90,7 @@ public class TestUnicodeUtil extends LuceneTestCase { BytesRef utf8 = new BytesRef(20); int num = atLeast(50000); for (int i = 0; i < num; i++) { - final String s = _TestUtil.randomUnicodeString(random); + final String s = _TestUtil.randomUnicodeString(getRandom()); UnicodeUtil.UTF16toUTF8(s, 0, s.length(), utf8); assertEquals(s.codePointCount(0, s.length()), UnicodeUtil.codePointCount(utf8)); @@ -103,7 +103,7 @@ public class TestUnicodeUtil extends LuceneTestCase { int[] codePoints = new int[20]; int num = atLeast(50000); for (int i = 0; i < num; i++) { - final String s = _TestUtil.randomUnicodeString(random); + final String s = _TestUtil.randomUnicodeString(getRandom()); UnicodeUtil.UTF16toUTF8(s, 0, s.length(), utf8); UnicodeUtil.UTF8toUTF32(utf8, utf32); @@ -170,11 +170,11 @@ public class TestUnicodeUtil extends LuceneTestCase { public void testUTF8UTF16CharsRef() { int num = atLeast(3989); for (int i = 0; i < num; i++) { - String unicode = _TestUtil.randomRealisticUnicodeString(random); + String unicode = _TestUtil.randomRealisticUnicodeString(getRandom()); BytesRef ref = new BytesRef(unicode); - char[] arr = new char[1 + random.nextInt(100)]; - int offset = random.nextInt(arr.length); - int len = random.nextInt(arr.length - offset); + char[] arr = new char[1 + getRandom().nextInt(100)]; + int offset = getRandom().nextInt(arr.length); + int len = getRandom().nextInt(arr.length - offset); CharsRef cRef = new CharsRef(arr, offset, len); UnicodeUtil.UTF8toUTF16(ref, cRef); assertEquals(cRef.toString(), unicode); diff --git lucene/core/src/test/org/apache/lucene/util/TestWeakIdentityMap.java lucene/core/src/test/org/apache/lucene/util/TestWeakIdentityMap.java index 908e918..47af3a3 100644 --- lucene/core/src/test/org/apache/lucene/util/TestWeakIdentityMap.java +++ lucene/core/src/test/org/apache/lucene/util/TestWeakIdentityMap.java @@ -127,7 +127,7 @@ public class TestWeakIdentityMap extends LuceneTestCase { try { for (int t = 0; t < threadCount; t++) { - final Random rnd = new Random(random.nextLong()); + final Random rnd = new Random(getRandom().nextLong()); exec.execute(new Runnable() { public void run() { final int count = atLeast(rnd, 10000); diff --git lucene/core/src/test/org/apache/lucene/util/automaton/TestBasicOperations.java lucene/core/src/test/org/apache/lucene/util/automaton/TestBasicOperations.java index 
87c4efb..9bb1181 100644 --- lucene/core/src/test/org/apache/lucene/util/automaton/TestBasicOperations.java +++ lucene/core/src/test/org/apache/lucene/util/automaton/TestBasicOperations.java @@ -94,7 +94,7 @@ public class TestBasicOperations extends LuceneTestCase { final int ITER2 = atLeast(100); for(int i=0;i(term, NO_OUTPUT)); } - FST fst = new FSTTester(random, dir, inputMode, pairs, outputs, false).doTest(0, 0, false); + FST fst = new FSTTester(getRandom(), dir, inputMode, pairs, outputs, false).doTest(0, 0, false); assertNotNull(fst); assertEquals(22, fst.getNodeCount()); assertEquals(27, fst.getArcCount()); @@ -175,7 +175,7 @@ public class TestFSTs extends LuceneTestCase { for(int idx=0;idx(terms2[idx], (long) idx)); } - final FST fst = new FSTTester(random, dir, inputMode, pairs, outputs, true).doTest(0, 0, false); + final FST fst = new FSTTester(getRandom(), dir, inputMode, pairs, outputs, true).doTest(0, 0, false); assertNotNull(fst); assertEquals(22, fst.getNodeCount()); assertEquals(27, fst.getArcCount()); @@ -187,10 +187,10 @@ public class TestFSTs extends LuceneTestCase { final BytesRef NO_OUTPUT = outputs.getNoOutput(); final List> pairs = new ArrayList>(terms2.length); for(int idx=0;idx(terms2[idx], output)); } - final FST fst = new FSTTester(random, dir, inputMode, pairs, outputs, false).doTest(0, 0, false); + final FST fst = new FSTTester(getRandom(), dir, inputMode, pairs, outputs, false).doTest(0, 0, false); assertNotNull(fst); assertEquals(24, fst.getNodeCount()); assertEquals(30, fst.getArcCount()); @@ -223,7 +223,7 @@ public class TestFSTs extends LuceneTestCase { for(IntsRef term : terms) { pairs.add(new FSTTester.InputOutput(term, NO_OUTPUT)); } - new FSTTester(random, dir, inputMode, pairs, outputs, false).doTest(); + new FSTTester(getRandom(), dir, inputMode, pairs, outputs, false).doTest(); } // PositiveIntOutput (ord) @@ -233,47 +233,47 @@ public class TestFSTs extends LuceneTestCase { for(int idx=0;idx(terms[idx], (long) idx)); } - new FSTTester(random, dir, inputMode, pairs, outputs, true).doTest(); + new FSTTester(getRandom(), dir, inputMode, pairs, outputs, true).doTest(); } // PositiveIntOutput (random monotonically increasing positive number) { - final boolean doShare = random.nextBoolean(); + final boolean doShare = getRandom().nextBoolean(); final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(doShare); final List> pairs = new ArrayList>(terms.length); long lastOutput = 0; for(int idx=0;idx(terms[idx], value)); } - new FSTTester(random, dir, inputMode, pairs, outputs, doShare).doTest(); + new FSTTester(getRandom(), dir, inputMode, pairs, outputs, doShare).doTest(); } // PositiveIntOutput (random positive number) { - final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(random.nextBoolean()); + final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(getRandom().nextBoolean()); final List> pairs = new ArrayList>(terms.length); for(int idx=0;idx(terms[idx], random.nextLong() & Long.MAX_VALUE)); + pairs.add(new FSTTester.InputOutput(terms[idx], getRandom().nextLong() & Long.MAX_VALUE)); } - new FSTTester(random, dir, inputMode, pairs, outputs, false).doTest(); + new FSTTester(getRandom(), dir, inputMode, pairs, outputs, false).doTest(); } // Pair { - final PositiveIntOutputs o1 = PositiveIntOutputs.getSingleton(random.nextBoolean()); - final PositiveIntOutputs o2 = PositiveIntOutputs.getSingleton(random.nextBoolean()); + final PositiveIntOutputs o1 = PositiveIntOutputs.getSingleton(getRandom().nextBoolean()); + final 
PositiveIntOutputs o2 = PositiveIntOutputs.getSingleton(getRandom().nextBoolean()); final PairOutputs outputs = new PairOutputs(o1, o2); final List>> pairs = new ArrayList>>(terms.length); long lastOutput = 0; for(int idx=0;idx>(terms[idx], outputs.newPair((long) idx, value))); } - new FSTTester>(random, dir, inputMode, pairs, outputs, false).doTest(); + new FSTTester>(getRandom(), dir, inputMode, pairs, outputs, false).doTest(); } // Sequence-of-bytes @@ -282,10 +282,10 @@ public class TestFSTs extends LuceneTestCase { final BytesRef NO_OUTPUT = outputs.getNoOutput(); final List> pairs = new ArrayList>(terms.length); for(int idx=0;idx(terms[idx], output)); } - new FSTTester(random, dir, inputMode, pairs, outputs, false).doTest(); + new FSTTester(getRandom(), dir, inputMode, pairs, outputs, false).doTest(); } // Sequence-of-ints @@ -301,7 +301,7 @@ public class TestFSTs extends LuceneTestCase { } pairs.add(new FSTTester.InputOutput(terms[idx], output)); } - new FSTTester(random, dir, inputMode, pairs, outputs, false).doTest(); + new FSTTester(getRandom(), dir, inputMode, pairs, outputs, false).doTest(); } // Up to two positive ints, shared, generally but not @@ -315,15 +315,15 @@ public class TestFSTs extends LuceneTestCase { long lastOutput = 0; for(int idx=0;idx(terms[idx], output)); } - new FSTTester(random, dir, inputMode, pairs, outputs, false).doTest(); + new FSTTester(getRandom(), dir, inputMode, pairs, outputs, false).doTest(); } } @@ -1037,7 +1037,7 @@ public class TestFSTs extends LuceneTestCase { System.out.println("\nTEST: iter " + iter); } for(int inputMode=0;inputMode<2;inputMode++) { - final int numWords = random.nextInt(maxNumWords+1); + final int numWords = getRandom().nextInt(maxNumWords+1); Set termsSet = new HashSet(); IntsRef[] terms = new IntsRef[numWords]; while(termsSet.size() < numWords) { @@ -1051,20 +1051,20 @@ public class TestFSTs extends LuceneTestCase { static String getRandomString() { final String term; - if (random.nextBoolean()) { - term = _TestUtil.randomRealisticUnicodeString(random); + if (getStaticRandom().nextBoolean()) { + term = _TestUtil.randomRealisticUnicodeString(getStaticRandom()); } else { // we want to mix in limited-alphabet symbols so // we get more sharing of the nodes given how few // terms we are testing... 
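
The TestFSTs hunks cycle through many output types; the smallest self-contained case in this patch is the two-term FST from testFinalOutputOnEndState. Restated as a standalone sketch: the nine-argument Builder call and the Util.toUTF32 inputs are copied from the hunks, while the Util.get lookup at the end is an assumption about this era's lookup helper:

import org.apache.lucene.util.IntsRef;
import org.apache.lucene.util.fst.Builder;
import org.apache.lucene.util.fst.FST;
import org.apache.lucene.util.fst.PositiveIntOutputs;
import org.apache.lucene.util.fst.Util;

public class FstOutputSketch {
  public static void main(String[] args) throws Exception {
    PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(true);
    // Arguments follow the nine-argument Builder call in the patch:
    // input type, two prune thresholds (0 = keep everything),
    // suffix-sharing flags and max shared tail length, outputs,
    // no freeze-tail hook, no packing.
    Builder<Long> builder = new Builder<Long>(
        FST.INPUT_TYPE.BYTE4, 0, 0, true, true, Integer.MAX_VALUE,
        outputs, null, false);
    // Inputs must be added in sorted order; these two terms come
    // straight from the testFinalOutputOnEndState hunk.
    builder.add(Util.toUTF32("stat", new IntsRef()), 17L);
    builder.add(Util.toUTF32("station", new IntsRef()), 10L);
    FST<Long> fst = builder.finish();
    // Assumed lookup helper: returns the output stored for the input.
    Long out = Util.get(fst, Util.toUTF32("station", new IntsRef()));
    System.out.println(out); // expected: 10
  }
}
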
- term = simpleRandomString(random); + term = simpleRandomString(getStaticRandom()); } return term; } @Nightly public void testBigSet() throws IOException { - testRandomWords(_TestUtil.nextInt(random, 50000, 60000), 1); + testRandomWords(_TestUtil.nextInt(getRandom(), 50000, 60000), 1); } static String inputToString(int inputMode, IntsRef term) { @@ -1094,9 +1094,9 @@ public class TestFSTs extends LuceneTestCase { Codec.setDefault(_TestUtil.alwaysPostingsFormat(new Lucene40PostingsFormat())); } - final LineFileDocs docs = new LineFileDocs(random); + final LineFileDocs docs = new LineFileDocs(getRandom()); final int RUN_TIME_MSEC = atLeast(500); - final IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(-1).setRAMBufferSizeMB(64); + final IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setMaxBufferedDocs(-1).setRAMBufferSizeMB(64); final File tempDir = _TestUtil.getTempDir("fstlines"); final MockDirectoryWrapper dir = newFSDirectory(tempDir); final IndexWriter writer = new IndexWriter(dir, conf); @@ -1109,13 +1109,13 @@ public class TestFSTs extends LuceneTestCase { } IndexReader r = IndexReader.open(writer, true); writer.close(); - final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(random.nextBoolean()); + final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(getRandom().nextBoolean()); - final boolean doRewrite = random.nextBoolean(); + final boolean doRewrite = getRandom().nextBoolean(); Builder builder = new Builder(FST.INPUT_TYPE.BYTE1, 0, 0, true, true, Integer.MAX_VALUE, outputs, null, doRewrite); - boolean storeOrd = random.nextBoolean(); + boolean storeOrd = getRandom().nextBoolean(); if (VERBOSE) { if (storeOrd) { System.out.println("FST stores ord"); @@ -1165,7 +1165,7 @@ public class TestFSTs extends LuceneTestCase { if (rewriteIter == 1) { if (doRewrite) { // Verify again, with packed FST: - fst = fst.pack(_TestUtil.nextInt(random, 1, 10), _TestUtil.nextInt(random, 0, 10000000)); + fst = fst.pack(_TestUtil.nextInt(getRandom(), 1, 10), _TestUtil.nextInt(getRandom(), 0, 10000000)); } else { break; } @@ -1601,8 +1601,8 @@ public class TestFSTs extends LuceneTestCase { if (VERBOSE) { System.out.println("TEST: cycle=" + cycle); } - RandomIndexWriter w = new RandomIndexWriter(random, dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(IndexWriterConfig.OpenMode.CREATE)); + RandomIndexWriter w = new RandomIndexWriter(getRandom(), dir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setOpenMode(IndexWriterConfig.OpenMode.CREATE)); Document doc = new Document(); Field idField = newField("id", "", StringField.TYPE_UNSTORED); doc.add(idField); @@ -1620,7 +1620,7 @@ public class TestFSTs extends LuceneTestCase { idString = String.format("%07d", id); } else { while(true) { - final String s = Long.toString(random.nextLong()); + final String s = Long.toString(getRandom().nextLong()); if (!allIDs.contains(s)) { idString = s; break; @@ -1651,7 +1651,7 @@ public class TestFSTs extends LuceneTestCase { idString = String.format("%07d", (NUM_IDS + idx)); } else { while(true) { - idString = Long.toString(random.nextLong()); + idString = Long.toString(getRandom().nextLong()); if (!allIDs.contains(idString)) { break; } @@ -1663,7 +1663,7 @@ public class TestFSTs extends LuceneTestCase { // Verify w/ TermQuery for(int iter=0;iter<2*NUM_IDS;iter++) { - final String id = 
allIDsList.get(random.nextInt(allIDsList.size())); + final String id = allIDsList.get(getRandom().nextInt(allIDsList.size())); final boolean exists = !outOfBounds.contains(id); if (VERBOSE) { System.out.println("TEST: TermQuery " + (exists ? "" : "non-exist ") + " id=" + id); @@ -1678,8 +1678,8 @@ public class TestFSTs extends LuceneTestCase { final String nextID; final boolean exists; - if (random.nextBoolean()) { - id = allIDsList.get(random.nextInt(allIDsList.size())); + if (getRandom().nextBoolean()) { + id = allIDsList.get(getRandom().nextInt(allIDsList.size())); exists = !outOfBounds.contains(id); nextID = null; if (VERBOSE) { @@ -1688,7 +1688,7 @@ public class TestFSTs extends LuceneTestCase { } else { // Pick ID between two IDs: exists = false; - final int idv = random.nextInt(NUM_IDS-1); + final int idv = getRandom().nextInt(NUM_IDS-1); if (cycle == 0) { id = String.format("%07da", idv); nextID = String.format("%07d", idv+1); @@ -1701,7 +1701,7 @@ public class TestFSTs extends LuceneTestCase { } } - final boolean useCache = random.nextBoolean(); + final boolean useCache = getRandom().nextBoolean(); if (VERBOSE) { System.out.println(" useCache=" + useCache); } @@ -1736,20 +1736,20 @@ public class TestFSTs extends LuceneTestCase { public void testRandomTermLookup() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(IndexWriterConfig.OpenMode.CREATE)); + RandomIndexWriter w = new RandomIndexWriter(getRandom(), dir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setOpenMode(IndexWriterConfig.OpenMode.CREATE)); Document doc = new Document(); Field f = newField("field", "", StringField.TYPE_UNSTORED); doc.add(f); - final int NUM_TERMS = (int) (1000*RANDOM_MULTIPLIER * (1+random.nextDouble())); + final int NUM_TERMS = (int) (1000*RANDOM_MULTIPLIER * (1+getRandom().nextDouble())); if (VERBOSE) { System.out.println("TEST: NUM_TERMS=" + NUM_TERMS); } final Set allTerms = new HashSet(); while(allTerms.size() < NUM_TERMS) { - allTerms.add(simpleRandomString(random)); + allTerms.add(simpleRandomString(getRandom())); } for(String term : allTerms) { @@ -1769,7 +1769,7 @@ public class TestFSTs extends LuceneTestCase { w.close(); final List allTermsList = new ArrayList(allTerms); - Collections.shuffle(allTermsList, random); + Collections.shuffle(allTermsList, getRandom()); // verify exact lookup for(String term : allTermsList) { @@ -1869,7 +1869,7 @@ public class TestFSTs extends LuceneTestCase { public void testFinalOutputOnEndState() throws Exception { final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(true); - final Builder builder = new Builder(FST.INPUT_TYPE.BYTE4, 2, 0, true, true, Integer.MAX_VALUE, outputs, null, random.nextBoolean()); + final Builder builder = new Builder(FST.INPUT_TYPE.BYTE4, 2, 0, true, true, Integer.MAX_VALUE, outputs, null, getRandom().nextBoolean()); builder.add(Util.toUTF32("stat", new IntsRef()), 17L); builder.add(Util.toUTF32("station", new IntsRef()), 10L); final FST fst = builder.finish(); @@ -1883,7 +1883,7 @@ public class TestFSTs extends LuceneTestCase { public void testInternalFinalState() throws Exception { final PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton(true); - final boolean willRewrite = random.nextBoolean(); + final boolean willRewrite = getRandom().nextBoolean(); final Builder builder = new Builder(FST.INPUT_TYPE.BYTE1, 0, 0, true, true, 
Integer.MAX_VALUE, outputs, null, willRewrite); builder.add(Util.toIntsRef(new BytesRef("stat"), new IntsRef()), outputs.getNoOutput()); builder.add(Util.toIntsRef(new BytesRef("station"), new IntsRef()), outputs.getNoOutput()); @@ -2005,6 +2005,7 @@ public class TestFSTs extends LuceneTestCase { } public void testShortestPathsRandom() throws Exception { + final Random random = getRandom(); int numWords = atLeast(1000); final TreeMap slowCompletor = new TreeMap(); diff --git lucene/core/src/test/org/apache/lucene/util/junitcompat/TestReproduceMessage.java lucene/core/src/test/org/apache/lucene/util/junitcompat/TestReproduceMessage.java index b7b93b8..193d667 100644 --- lucene/core/src/test/org/apache/lucene/util/junitcompat/TestReproduceMessage.java +++ lucene/core/src/test/org/apache/lucene/util/junitcompat/TestReproduceMessage.java @@ -106,49 +106,49 @@ public class TestReproduceMessage extends WithNestedTests { public void testAssumeBeforeClass() throws Exception { type = SoreType.ASSUMPTION; where = SorePoint.BEFORE_CLASS; - Assert.assertTrue(runAndReturnSyserr().contains("NOTE: Assume failed in")); + Assert.assertTrue(runAndReturnSyserr().isEmpty()); } @Test @Ignore public void testAssumeInitializer() throws Exception { type = SoreType.ASSUMPTION; where = SorePoint.INITIALIZER; - Assert.assertTrue(runAndReturnSyserr().contains("NOTE: Assume failed in")); + Assert.assertTrue(runAndReturnSyserr().isEmpty()); } @Test public void testAssumeRule() throws Exception { type = SoreType.ASSUMPTION; where = SorePoint.RULE; - Assert.assertTrue(runAndReturnSyserr().contains("NOTE: Assume failed in")); + Assert.assertTrue(runAndReturnSyserr().isEmpty()); } @Test public void testAssumeBefore() throws Exception { type = SoreType.ASSUMPTION; where = SorePoint.BEFORE; - Assert.assertTrue(runAndReturnSyserr().contains("NOTE: Assume failed in")); + Assert.assertTrue(runAndReturnSyserr().isEmpty()); } @Test public void testAssumeTest() throws Exception { type = SoreType.ASSUMPTION; where = SorePoint.TEST; - Assert.assertTrue(runAndReturnSyserr().contains("NOTE: Assume failed in")); + Assert.assertTrue(runAndReturnSyserr().isEmpty()); } @Test public void testAssumeAfter() throws Exception { type = SoreType.ASSUMPTION; where = SorePoint.AFTER; - Assert.assertTrue(runAndReturnSyserr().contains("NOTE: Assume failed in")); + Assert.assertTrue(runAndReturnSyserr().isEmpty()); } @Test @Ignore public void testAssumeAfterClass() throws Exception { type = SoreType.ASSUMPTION; where = SorePoint.AFTER_CLASS; - Assert.assertTrue(runAndReturnSyserr().contains("NOTE: Assume failed in")); + Assert.assertTrue(runAndReturnSyserr().isEmpty()); } /* @@ -173,32 +173,41 @@ public class TestReproduceMessage extends WithNestedTests { public void testFailureRule() throws Exception { type = SoreType.FAILURE; where = SorePoint.RULE; - Assert.assertTrue(runAndReturnSyserr().contains("NOTE: reproduce with:")); - Assert.assertTrue(Arrays.asList(runAndReturnSyserr().split("\\s")).contains("-Dtestmethod=test")); + + final String syserr = runAndReturnSyserr(); + Assert.assertTrue(syserr.contains("NOTE: reproduce with:")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.filter.method=test")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.filter=*." 
+ Nested.class.getSimpleName())); } @Test public void testFailureBefore() throws Exception { type = SoreType.FAILURE; where = SorePoint.BEFORE; - Assert.assertTrue(runAndReturnSyserr().contains("NOTE: reproduce with:")); - Assert.assertTrue(Arrays.asList(runAndReturnSyserr().split("\\s")).contains("-Dtestmethod=test")); + final String syserr = runAndReturnSyserr(); + Assert.assertTrue(syserr.contains("NOTE: reproduce with:")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.filter.method=test")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.filter=*." + Nested.class.getSimpleName())); } @Test public void testFailureTest() throws Exception { type = SoreType.FAILURE; where = SorePoint.TEST; - Assert.assertTrue(runAndReturnSyserr().contains("NOTE: reproduce with:")); - Assert.assertTrue(Arrays.asList(runAndReturnSyserr().split("\\s")).contains("-Dtestmethod=test")); + final String syserr = runAndReturnSyserr(); + Assert.assertTrue(syserr.contains("NOTE: reproduce with:")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.filter.method=test")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.filter=*." + Nested.class.getSimpleName())); } @Test public void testFailureAfter() throws Exception { type = SoreType.FAILURE; where = SorePoint.AFTER; - Assert.assertTrue(runAndReturnSyserr().contains("NOTE: reproduce with:")); - Assert.assertTrue(Arrays.asList(runAndReturnSyserr().split("\\s")).contains("-Dtestmethod=test")); + final String syserr = runAndReturnSyserr(); + Assert.assertTrue(syserr.contains("NOTE: reproduce with:")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.filter.method=test")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.filter=*." + Nested.class.getSimpleName())); } @Test @Ignore @@ -230,32 +239,40 @@ public class TestReproduceMessage extends WithNestedTests { public void testErrorRule() throws Exception { type = SoreType.ERROR; where = SorePoint.RULE; - Assert.assertTrue(runAndReturnSyserr().contains("NOTE: reproduce with:")); - Assert.assertTrue(Arrays.asList(runAndReturnSyserr().split("\\s")).contains("-Dtestmethod=test")); + final String syserr = runAndReturnSyserr(); + Assert.assertTrue(syserr.contains("NOTE: reproduce with:")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.filter.method=test")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.filter=*." + Nested.class.getSimpleName())); } @Test public void testErrorBefore() throws Exception { type = SoreType.ERROR; where = SorePoint.BEFORE; - Assert.assertTrue(runAndReturnSyserr().contains("NOTE: reproduce with:")); - Assert.assertTrue(Arrays.asList(runAndReturnSyserr().split("\\s")).contains("-Dtestmethod=test")); + final String syserr = runAndReturnSyserr(); + Assert.assertTrue(syserr.contains("NOTE: reproduce with:")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.filter.method=test")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.filter=*." 
+ Nested.class.getSimpleName())); } @Test public void testErrorTest() throws Exception { type = SoreType.ERROR; where = SorePoint.TEST; - Assert.assertTrue(runAndReturnSyserr().contains("NOTE: reproduce with:")); - Assert.assertTrue(Arrays.asList(runAndReturnSyserr().split("\\s")).contains("-Dtestmethod=test")); + final String syserr = runAndReturnSyserr(); + Assert.assertTrue(syserr.contains("NOTE: reproduce with:")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.filter.method=test")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.filter=*." + Nested.class.getSimpleName())); } @Test public void testErrorAfter() throws Exception { type = SoreType.ERROR; where = SorePoint.AFTER; - Assert.assertTrue(runAndReturnSyserr().contains("NOTE: reproduce with:")); - Assert.assertTrue(Arrays.asList(runAndReturnSyserr().split("\\s")).contains("-Dtestmethod=test")); + final String syserr = runAndReturnSyserr(); + Assert.assertTrue(syserr.contains("NOTE: reproduce with:")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.filter.method=test")); + Assert.assertTrue(Arrays.asList(syserr.split("\\s")).contains("-Dtests.filter=*." + Nested.class.getSimpleName())); } @Test @Ignore @@ -269,8 +286,8 @@ public class TestReproduceMessage extends WithNestedTests { JUnitCore.runClasses(Nested.class); String err = getSysErr(); - //super.prevSysErr.println("Type: " + type + ", point: " + where + " resulted in:\n" + err); - //super.prevSysErr.println("---"); + // super.prevSysErr.println("Type: " + type + ", point: " + where + " resulted in:\n" + err); + // super.prevSysErr.println("---"); return err; } } diff --git lucene/core/src/test/org/apache/lucene/util/packed/TestPackedInts.java lucene/core/src/test/org/apache/lucene/util/packed/TestPackedInts.java index 2f7605d..1ce80fc 100644 --- lucene/core/src/test/org/apache/lucene/util/packed/TestPackedInts.java +++ lucene/core/src/test/org/apache/lucene/util/packed/TestPackedInts.java @@ -51,16 +51,16 @@ public class TestPackedInts extends LuceneTestCase { for (int iter = 0; iter < num; iter++) { long ceil = 2; for(int nbits=1;nbits<63;nbits++) { - final int valueCount = 100+random.nextInt(500); + final int valueCount = 100+getRandom().nextInt(500); final Directory d = newDirectory(); - IndexOutput out = d.createOutput("out.bin", newIOContext(random)); + IndexOutput out = d.createOutput("out.bin", newIOContext(getRandom())); PackedInts.Writer w = PackedInts.getWriter( out, valueCount, nbits); final long[] values = new long[valueCount]; for(int i=0;i map = new HashMap(); // create a map up front. @@ -267,7 +267,7 @@ public abstract class CollationTestBase extends LuceneTestCase { // and ensure they are the same as the ones we produced in serial fashion. 
for (int i = 0; i < numTestPoints; i++) { - String term = _TestUtil.randomSimpleString(random); + String term = _TestUtil.randomSimpleString(getRandom()); TokenStream ts = analyzer.tokenStream("fake", new StringReader(term)); TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class); BytesRef bytes = termAtt.getBytesRef(); diff --git lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java index cc9e0bc..3d7dd67 100644 --- lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java +++ lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java @@ -138,16 +138,16 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas // Occasional longish pause if running // nightly - if (LuceneTestCase.TEST_NIGHTLY && random.nextInt(6) == 3) { + if (LuceneTestCase.TEST_NIGHTLY && getRandom().nextInt(6) == 3) { if (VERBOSE) { System.out.println(Thread.currentThread().getName() + ": now long sleep"); } - Thread.sleep(_TestUtil.nextInt(random, 50, 500)); + Thread.sleep(_TestUtil.nextInt(getRandom(), 50, 500)); } // Rate limit ingest rate: - if (random.nextInt(7) == 5) { - Thread.sleep(_TestUtil.nextInt(random, 1, 10)); + if (getRandom().nextInt(7) == 5) { + Thread.sleep(_TestUtil.nextInt(getRandom(), 1, 10)); if (VERBOSE) { System.out.println(Thread.currentThread().getName() + ": done sleep"); } @@ -160,21 +160,21 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas // Maybe add randomly named field final String addedField; - if (random.nextBoolean()) { - addedField = "extra" + random.nextInt(40); + if (getRandom().nextBoolean()) { + addedField = "extra" + getRandom().nextInt(40); doc.add(newField(addedField, "a random field", TextField.TYPE_STORED)); } else { addedField = null; } - if (random.nextBoolean()) { + if (getRandom().nextBoolean()) { - if (random.nextBoolean()) { + if (getRandom().nextBoolean()) { // Add/update doc block: final String packID; final SubDocs delSubDocs; - if (toDeleteSubDocs.size() > 0 && random.nextBoolean()) { - delSubDocs = toDeleteSubDocs.get(random.nextInt(toDeleteSubDocs.size())); + if (toDeleteSubDocs.size() > 0 && getRandom().nextBoolean()) { + delSubDocs = toDeleteSubDocs.get(getRandom().nextInt(toDeleteSubDocs.size())); assert !delSubDocs.deleted; toDeleteSubDocs.remove(delSubDocs); // Update doc block, replacing prior packID @@ -195,7 +195,7 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas docsList.add(_TestUtil.cloneDocument(doc)); docIDs.add(doc.get("docid")); - final int maxDocCount = _TestUtil.nextInt(random, 1, 10); + final int maxDocCount = _TestUtil.nextInt(getRandom(), 1, 10); while(docsList.size() < maxDocCount) { doc = docs.nextDoc(); if (doc == null) { @@ -224,7 +224,7 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas } doc.removeField("packID"); - if (random.nextInt(5) == 2) { + if (getRandom().nextInt(5) == 2) { if (VERBOSE) { System.out.println(Thread.currentThread().getName() + ": buffer del id:" + packID); } @@ -240,7 +240,7 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas addDocument(new Term("docid", docid), doc); addCount.getAndIncrement(); - if (random.nextInt(5) == 3) { + if (getRandom().nextInt(5) == 3) { if (VERBOSE) { 
System.out.println(Thread.currentThread().getName() + ": buffer del id:" + doc.get("docid")); } @@ -259,7 +259,7 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas updateDocument(new Term("docid", docid), doc); addCount.getAndIncrement(); - if (random.nextInt(5) == 3) { + if (getRandom().nextInt(5) == 3) { if (VERBOSE) { System.out.println(Thread.currentThread().getName() + ": buffer del id:" + doc.get("docid")); } @@ -267,7 +267,7 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas } } - if (random.nextInt(30) == 17) { + if (getRandom().nextInt(30) == 17) { if (VERBOSE) { System.out.println(Thread.currentThread().getName() + ": apply " + toDeleteIDs.size() + " deletes"); } @@ -322,7 +322,7 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas } protected void runSearchThreads(final long stopTimeMS) throws Exception { - final int numThreads = _TestUtil.nextInt(random, 1, 5); + final int numThreads = _TestUtil.nextInt(getRandom(), 1, 5); final Thread[] searchThreads = new Thread[numThreads]; final AtomicInteger totHits = new AtomicInteger(); @@ -357,7 +357,7 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas trigger = 1; } else { trigger = totTermCount.get()/10; - shift = random.nextInt(trigger); + shift = getRandom().nextInt(trigger); } BytesRef term = termsEnum.next(); if (term == null) { @@ -372,7 +372,7 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas } else { trigger = totTermCount.get()/10; //System.out.println("trigger " + trigger); - shift = random.nextInt(trigger); + shift = getRandom().nextInt(trigger); } termsEnum.seekCeil(new BytesRef("")); continue; @@ -432,11 +432,11 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas final long t0 = System.currentTimeMillis(); - final LineFileDocs docs = new LineFileDocs(random); + final LineFileDocs docs = new LineFileDocs(getRandom()); final File tempDir = _TestUtil.getTempDir(testName); dir = newFSDirectory(tempDir); ((MockDirectoryWrapper) dir).setCheckIndexOnClose(false); // don't double-checkIndex, we do it ourselves. - final IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)). + final IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())). setInfoStream(new FailOnNonBulkMergesInfoStream()); if (LuceneTestCase.TEST_NIGHTLY) { @@ -482,11 +482,11 @@ public abstract class ThreadedIndexingAndSearchingTestCase extends LuceneTestCas writer = new IndexWriter(dir, conf); _TestUtil.reduceOpenFiles(writer); - final ExecutorService es = random.nextBoolean() ? null : Executors.newCachedThreadPool(new NamedThreadFactory(testName)); + final ExecutorService es = getRandom().nextBoolean() ? null : Executors.newCachedThreadPool(new NamedThreadFactory(testName)); doAfterWriter(es); - final int NUM_INDEX_THREADS = _TestUtil.nextInt(random, 2, 4); + final int NUM_INDEX_THREADS = _TestUtil.nextInt(getRandom(), 2, 4); final int RUN_TIME_SEC = LuceneTestCase.TEST_NIGHTLY ? 
300 : RANDOM_MULTIPLIER; diff --git lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java index 0665760..582a0d8 100644 --- lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java +++ lucene/test-framework/src/java/org/apache/lucene/search/ShardSearchingTestBase.java @@ -518,29 +518,29 @@ public abstract class ShardSearchingTestBase extends LuceneTestCase { @Override public void run() { try { - final LineFileDocs docs = new LineFileDocs(random); + final LineFileDocs docs = new LineFileDocs(getRandom()); int numDocs = 0; while (System.nanoTime() < endTimeNanos) { - final int what = random.nextInt(3); - final NodeState node = nodes[random.nextInt(nodes.length)]; + final int what = getRandom().nextInt(3); + final NodeState node = nodes[getRandom().nextInt(nodes.length)]; if (numDocs == 0 || what == 0) { node.writer.addDocument(docs.nextDoc()); numDocs++; } else if (what == 1) { - node.writer.updateDocument(new Term("docid", ""+random.nextInt(numDocs)), + node.writer.updateDocument(new Term("docid", ""+getRandom().nextInt(numDocs)), docs.nextDoc()); numDocs++; } else { - node.writer.deleteDocuments(new Term("docid", ""+random.nextInt(numDocs))); + node.writer.deleteDocuments(new Term("docid", ""+getRandom().nextInt(numDocs))); } // TODO: doc blocks too - if (random.nextInt(17) == 12) { + if (getRandom().nextInt(17) == 12) { node.writer.commit(); } - if (random.nextInt(17) == 12) { - nodes[random.nextInt(nodes.length)].reopen(); + if (getRandom().nextInt(17) == 12) { + nodes[getRandom().nextInt(nodes.length)].reopen(); } } } catch (Throwable t) { @@ -563,7 +563,7 @@ public abstract class ShardSearchingTestBase extends LuceneTestCase { nodes = new NodeState[numNodes]; for(int nodeID=0;nodeID divisor) { - throw new BuildException("part must be <= divisor"); - } - } - - @Override - public boolean isSelected(File dir, String name, File path) { - counter = counter % divisor + 1; - return counter == part; - } -} diff --git lucene/test-framework/src/java/org/apache/lucene/util/LuceneJUnitResultFormatter.java lucene/test-framework/src/java/org/apache/lucene/util/LuceneJUnitResultFormatter.java deleted file mode 100644 index c67b9bc..0000000 --- lucene/test-framework/src/java/org/apache/lucene/util/LuceneJUnitResultFormatter.java +++ /dev/null @@ -1,294 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.lucene.util; - -import java.io.ByteArrayOutputStream; -import java.io.File; -import java.io.IOException; -import java.io.OutputStream; -import java.text.NumberFormat; -import java.util.logging.LogManager; - -import junit.framework.AssertionFailedError; -import junit.framework.Test; - -import org.apache.lucene.store.LockReleaseFailedException; -import org.apache.lucene.store.NativeFSLockFactory; -import org.apache.tools.ant.taskdefs.optional.junit.JUnitResultFormatter; -import org.apache.tools.ant.taskdefs.optional.junit.JUnitTest; -import org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner; -import org.apache.tools.ant.util.FileUtils; -import org.apache.tools.ant.util.StringUtils; -import org.junit.Ignore; - -/** - * Just like BriefJUnitResultFormatter "brief" bundled with ant, - * except all formatted text is buffered until the test suite is finished. - * At this point, the output is written at once in synchronized fashion. - * This way tests can run in parallel without interleaving output. - */ -public class LuceneJUnitResultFormatter implements JUnitResultFormatter { - private static final double ONE_SECOND = 1000.0; - - private static final NativeFSLockFactory lockFactory; - - /** Where to write the log to. */ - private OutputStream out; - - /** Formatter for timings. */ - private NumberFormat numberFormat = NumberFormat.getInstance(); - - /** Output suite has written to System.out */ - private String systemOutput = null; - - /** Output suite has written to System.err */ - private String systemError = null; - - /** Buffer output until the end of the test */ - private ByteArrayOutputStream sb; // use a BOS for our mostly ascii-output - - private static final org.apache.lucene.store.Lock lock; - - static { - File lockDir = new File( - System.getProperty("tests.lockdir", System.getProperty("java.io.tmpdir")), - "lucene_junit_lock"); - lockDir.mkdirs(); - if (!lockDir.exists()) { - throw new RuntimeException("Could not make Lock directory:" + lockDir); - } - try { - lockFactory = new NativeFSLockFactory(lockDir); - lock = lockFactory.makeLock("junit_lock"); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - /** Constructor for LuceneJUnitResultFormatter. */ - public LuceneJUnitResultFormatter() { - } - - /** - * Sets the stream the formatter is supposed to write its results to. - * @param out the output stream to write to - */ - public void setOutput(OutputStream out) { - this.out = out; - } - - /** - * @see JUnitResultFormatter#setSystemOutput(String) - */ - /** {@inheritDoc}. */ - public void setSystemOutput(String out) { - systemOutput = out; - } - - /** - * @see JUnitResultFormatter#setSystemError(String) - */ - /** {@inheritDoc}. */ - public void setSystemError(String err) { - systemError = err; - } - - - /** - * The whole testsuite started. - * @param suite the test suite - */ - public synchronized void startTestSuite(JUnitTest suite) { - if (out == null) { - return; // Quick return - no output do nothing. - } - sb = new ByteArrayOutputStream(); // don't reuse, so its gc'ed - try { - LogManager.getLogManager().readConfiguration(); - } catch (Exception e) {} - append("Testsuite: "); - append(suite.getName()); - append(StringUtils.LINE_SEP); - } - - /** - * The whole testsuite ended. 
- * @param suite the test suite - */ - public synchronized void endTestSuite(JUnitTest suite) { - append("Tests run: "); - append(suite.runCount()); - append(", Failures: "); - append(suite.failureCount()); - append(", Errors: "); - append(suite.errorCount()); - append(", Time elapsed: "); - append(numberFormat.format(suite.getRunTime() / ONE_SECOND)); - append(" sec"); - append(StringUtils.LINE_SEP); - append(StringUtils.LINE_SEP); - - // append the err and output streams to the log - if (systemOutput != null && systemOutput.length() > 0) { - append("------------- Standard Output ---------------") - .append(StringUtils.LINE_SEP) - .append(systemOutput) - .append("------------- ---------------- ---------------") - .append(StringUtils.LINE_SEP); - } - - // HACK: junit gives us no way to do this in LuceneTestCase - try { - Class clazz = Class.forName(suite.getName()); - Ignore ignore = clazz.getAnnotation(Ignore.class); - if (ignore != null) { - if (systemError == null) systemError = ""; - systemError += "NOTE: Ignoring test class '" + clazz.getSimpleName() + "': " - + ignore.value() + StringUtils.LINE_SEP; - } - } catch (ClassNotFoundException e) { /* no problem */ } - // END HACK - - if (systemError != null && systemError.length() > 0) { - append("------------- Standard Error -----------------") - .append(StringUtils.LINE_SEP) - .append(systemError) - .append("------------- ---------------- ---------------") - .append(StringUtils.LINE_SEP); - } - - if (out != null) { - try { - lock.obtain(5000); - try { - sb.writeTo(out); - out.flush(); - } finally { - try { - lock.release(); - } catch(LockReleaseFailedException e) { - // well lets pretend its released anyway - } - } - } catch (IOException e) { - throw new RuntimeException("unable to write results", e); - } finally { - if (out != System.out && out != System.err) { - FileUtils.close(out); - } - } - } - } - - /** - * A test started. - * @param test a test - */ - public void startTest(Test test) { - } - - /** - * A test ended. - * @param test a test - */ - public void endTest(Test test) { - } - - /** - * Interface TestListener for JUnit <= 3.4. - * - *
<p>
A Test failed. - * @param test a test - * @param t the exception thrown by the test - */ - public void addFailure(Test test, Throwable t) { - formatError("\tFAILED", test, t); - } - - /** - * Interface TestListener for JUnit > 3.4. - * - *
<p>
A Test failed. - * @param test a test - * @param t the assertion failed by the test - */ - public void addFailure(Test test, AssertionFailedError t) { - addFailure(test, (Throwable) t); - } - - /** - * A test caused an error. - * @param test a test - * @param error the error thrown by the test - */ - public void addError(Test test, Throwable error) { - formatError("\tCaused an ERROR", test, error); - } - - /** - * Format the test for printing.. - * @param test a test - * @return the formatted testname - */ - protected String formatTest(Test test) { - if (test == null) { - return "Null Test: "; - } else { - return "Testcase: " + test.toString() + ":"; - } - } - - /** - * Format an error and print it. - * @param type the type of error - * @param test the test that failed - * @param error the exception that the test threw - */ - protected synchronized void formatError(String type, Test test, - Throwable error) { - if (test != null) { - endTest(test); - } - - append(formatTest(test) + type); - append(StringUtils.LINE_SEP); - append(error.getMessage()); - append(StringUtils.LINE_SEP); - String strace = JUnitTestRunner.getFilteredTrace(error); - append(strace); - append(StringUtils.LINE_SEP); - append(StringUtils.LINE_SEP); - } - - public LuceneJUnitResultFormatter append(String s) { - if (s == null) - s = "(null)"; - try { - sb.write(s.getBytes()); // intentionally use default charset, its a console. - } catch (IOException e) { - throw new RuntimeException(e); - } - return this; - } - - public LuceneJUnitResultFormatter append(long l) { - return append(Long.toString(l)); - } -} - diff --git lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java index 808de44..15fe611 100644 --- lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java +++ lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java @@ -147,23 +147,16 @@ public abstract class LuceneTestCase extends Assert { */ public static final Version TEST_VERSION_CURRENT = Version.LUCENE_40; - /** - * If this is set, it is the only method that should run. - */ - static final String TEST_METHOD; - /** Create indexes in this directory, optimally use a subdir, named after the test */ public static final File TEMP_DIR; static { - String method = System.getProperty("testmethod", "").trim(); - TEST_METHOD = method.length() == 0 ? 
null : method; String s = System.getProperty("tempDir", System.getProperty("java.io.tmpdir")); if (s == null) throw new RuntimeException("To run tests, you need to define system property 'tempDir' or 'java.io.tmpdir'."); TEMP_DIR = new File(s); TEMP_DIR.mkdirs(); } - + /** set of directories we created, in afterclass we try to clean these up */ private static final Map tempDirs = Collections.synchronizedMap(new HashMap()); @@ -184,8 +177,6 @@ public abstract class LuceneTestCase extends Assert { public static final int TEST_ITER = Integer.parseInt(System.getProperty("tests.iter", "1")); /** Get the minimum number of times to run tests until a failure happens */ public static final int TEST_ITER_MIN = Integer.parseInt(System.getProperty("tests.iter.min", Integer.toString(TEST_ITER))); - /** Get the random seed for tests */ - public static final String TEST_SEED = System.getProperty("tests.seed", "random"); /** whether or not @nightly tests should run */ public static final boolean TEST_NIGHTLY = Boolean.parseBoolean(System.getProperty("tests.nightly", "false")); /** whether or not @weekly tests should run */ @@ -263,19 +254,62 @@ public abstract class LuceneTestCase extends Assert { @Deprecated static List testClassesRun = new ArrayList(); - private static void initRandom() { - assert !random.initialized; - staticSeed = "random".equals(TEST_SEED) ? seedRand.nextLong() : ThreeLongs.fromString(TEST_SEED).l1; - random.setSeed(staticSeed); - random.initialized = true; + /** + * Per-instance random. Associated with a single test case. Can be acquired from + * {@link Before} hook methods and test cases. + */ + private volatile RandomNoSetSeed instanceRandom; + + /** + * Per-class random. Associated with a suite. Can be acquired from + * {@link BeforeClass} hook methods and test cases. + */ + private volatile static RandomNoSetSeed staticRandom; + + /** + * Returns the "static" context {@link Random}. A static context is marked as being + * between {@link BeforeClass} and {@link AfterClass} methods. In particular + * any of the behaviors below will result in an exception: + *

<ul>
+ *   <li>calling this method from a static class initializer (static field initializer
+ *   counts as one!). This is bad because class initializers are outside of {@link Runner}
+ *   control;</li>
+ *   <li>using the {@link Random} returned by this method outside of its original context
+ *   (storing the reference and reusing it for another suite).</li>
+ *   </ul>
+ */ + protected static Random getStaticRandom() { + Random rnd = staticRandom; + if (rnd == null) { + throw new RuntimeException("Static random uninitialized. Out-of-context call most likely."); + } + return rnd; + } + + /** + * Returns the "instance" context {@link Random}. An instance context is marked as being + * between {@link Before} and {@link After} methods. In particular + * any of the behaviors below will result in an exception: + *
<ul>
+ *   <li>using the {@link Random} returned by this method outside of its original context
+ *   (storing the reference and reusing it for another test or suite).</li>
+ *   </ul>
  • + * + */ + protected Random getRandom() { + Random rnd = instanceRandom; + if (rnd == null) { + throw new RuntimeException("Instance random uninitialized. Out-of-context call most likely."); + } + return rnd; } - + @Deprecated private static boolean icuTested = false; @BeforeClass public static void beforeClassLuceneTestCaseJ4() { - initRandom(); + // Initialize static random. We will reuse the runner's seed to be repeatable. + staticRandom = new RandomNoSetSeed(LuceneTestCaseRunner.getNewRandom()); + tempDirs.clear(); stores = Collections.synchronizedMap(new IdentityHashMap()); @@ -285,7 +319,7 @@ public abstract class LuceneTestCase extends Assert { if (System.getProperty("solr.directoryFactory") == null) { System.setProperty("solr.directoryFactory", "org.apache.solr.core.MockDirectoryFactory"); } - + // if verbose: print some debugging stuff about which codecs are loaded if (VERBOSE) { Set codecs = Codec.availableCodecs(); @@ -298,14 +332,13 @@ public abstract class LuceneTestCase extends Assert { System.out.println("Loaded postingsFormat: '" + postingsFormat + "': " + PostingsFormat.forName(postingsFormat).getClass().getName()); } } - + savedInfoStream = InfoStream.getDefault(); + final boolean v = getStaticRandom().nextBoolean(); if (INFOSTREAM) { - // consume random for consistency - random.nextBoolean(); InfoStream.setDefault(new PrintStreamInfoStream(System.out)); } else { - if (random.nextBoolean()) { + if (v) { InfoStream.setDefault(new NullInfoStream()); } } @@ -313,7 +346,7 @@ public abstract class LuceneTestCase extends Assert { PREFLEX_IMPERSONATION_IS_ACTIVE = false; savedCodec = Codec.getDefault(); final Codec codec; - int randomVal = random.nextInt(10); + int randomVal = getStaticRandom().nextInt(10); if ("Lucene3x".equals(TEST_CODEC) || ("random".equals(TEST_CODEC) && randomVal < 2)) { // preflex-only setup codec = Codec.forName("Lucene3x"); @@ -326,7 +359,7 @@ public abstract class LuceneTestCase extends Assert { } else if (!"random".equals(TEST_CODEC)) { codec = Codec.forName(TEST_CODEC); } else if ("random".equals(TEST_POSTINGSFORMAT)) { - codec = new RandomCodec(random, useNoMemoryExpensiveCodec); + codec = new RandomCodec(getStaticRandom(), useNoMemoryExpensiveCodec); } else { codec = new Lucene40Codec() { private final PostingsFormat format = PostingsFormat.forName(TEST_POSTINGSFORMAT); @@ -361,20 +394,20 @@ public abstract class LuceneTestCase extends Assert { } // END hack - locale = TEST_LOCALE.equals("random") ? randomLocale(random) : localeForName(TEST_LOCALE); + locale = TEST_LOCALE.equals("random") ? randomLocale(getStaticRandom()) : localeForName(TEST_LOCALE); Locale.setDefault(locale); savedTimeZone = TimeZone.getDefault(); - timeZone = TEST_TIMEZONE.equals("random") ? randomTimeZone(random) : TimeZone.getTimeZone(TEST_TIMEZONE); + timeZone = TEST_TIMEZONE.equals("random") ? randomTimeZone(getStaticRandom()) : TimeZone.getTimeZone(TEST_TIMEZONE); TimeZone.setDefault(timeZone); - similarity = random.nextBoolean() ? new DefaultSimilarity() : new RandomSimilarityProvider(random); + similarity = getStaticRandom().nextBoolean() ? new DefaultSimilarity() : new RandomSimilarityProvider(getStaticRandom()); testsFailed = false; } @AfterClass public static void afterClassLuceneTestCaseJ4() { Throwable problem = null; - - if (! "false".equals(TEST_CLEAN_THREADS)) { + + if (!"false".equals(TEST_CLEAN_THREADS)) { int rogueThreads = threadCleanup("test class"); if (rogueThreads > 0) { // TODO: fail here once the leaks are fixed. 
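
The getStaticRandom()/getRandom() pair above splits randomness into a suite-scoped source (valid between @BeforeClass and @AfterClass) and a test-scoped source (valid between @Before and @After). A minimal sketch of the intended call pattern, assuming a hypothetical test class that is not part of this patch:

    import java.util.Random;
    import org.junit.BeforeClass;
    import org.junit.Test;
    import org.apache.lucene.util.LuceneTestCase;

    public class TestScopedRandomness extends LuceneTestCase {
      private static int suiteBound;

      @BeforeClass
      public static void pickSuiteBound() {
        // Static context: only legal between @BeforeClass and @AfterClass.
        suiteBound = 1 + getStaticRandom().nextInt(100);
      }

      @Test
      public void testDrawsFromInstanceRandom() {
        // Instance context: only legal between @Before and @After. Do not store
        // the reference; the patch marks the instance Random dead after the test.
        Random rnd = getRandom();
        assertTrue(rnd.nextInt(suiteBound) < suiteBound);
      }
    }

Storing either reference in a field and reusing it from another test or suite trips the liveness check in RandomNoSetSeed further down in this patch.
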
@@ -419,16 +452,16 @@ public abstract class LuceneTestCase extends Assert { if (VERBOSE || testsFailed || problem != null) { printDebuggingInformation(codecDescription); } - - // reset seed - random.setSeed(0L); - random.initialized = false; - + + // Clear static random. + staticRandom.setDead(); + staticRandom = null; + if (problem != null) { throw new RuntimeException(problem); } } - + /** print some useful debugging information about the environment */ private static void printDebuggingInformation(String codecDescription) { System.err.println("NOTE: test params are: codec=" + codecDescription + @@ -507,9 +540,8 @@ public abstract class LuceneTestCase extends Assert { starting(description); try { base.evaluate(); - } catch (AssumptionViolatedException e) { - assumptionIgnored(e, description); - throw e; + } catch (AssumptionViolatedException t) { + throw t; } catch (Throwable t) { failed(t, description); throw t; @@ -520,17 +552,6 @@ public abstract class LuceneTestCase extends Assert { }; } - private void assumptionIgnored(AssumptionViolatedException e, Description description) { - System.err.print("NOTE: Assume failed in '" + description.getDisplayName() + "' (ignored):"); - if (VERBOSE) { - System.err.println(); - e.printStackTrace(System.err); - } else { - System.err.print(" "); - System.err.println(e.getMessage()); - } - } - private void failed(Throwable e, Description description) { testsFailed = true; reportAdditionalFailureInfo(); @@ -594,27 +615,31 @@ public abstract class LuceneTestCase extends Assert { return new Statement() { @Override public void evaluate() throws Throwable { - setUpInternal(); - // We simulate the previous behavior of @Before in that - // if any statement below us fails, we just propagate the original - // exception and do not call tearDownInternal. + // Set up instance Random. + instanceRandom = new RandomNoSetSeed(LuceneTestCaseRunner.getNewRandom()); - // TODO: [DW] should this really be this way? We could use - // JUnit's MultipleFailureException and propagate both? - base.evaluate(); - tearDownInternal(); + try { + setUpInternal(); + // We simulate the previous behavior of @Before in that + // if any statement below us fails, we just propagate the original + // exception and do not call tearDownInternal. + // TODO: [DW] should this really be this way? We could use + // JUnit's MultipleFailureException and propagate both? + base.evaluate(); + tearDownInternal(); + } finally { + instanceRandom.setDead(); + instanceRandom = null; + } } }; } } - + /** * Setup before the tests. */ private final void setUpInternal() throws Exception { - seed = "random".equals(TEST_SEED) ? seedRand.nextLong() : ThreeLongs.fromString(TEST_SEED).l2; - random.setSeed(seed); - savedUncaughtExceptionHandler = Thread.getDefaultUncaughtExceptionHandler(); Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() { public void uncaughtException(Thread t, Throwable e) { @@ -649,12 +674,9 @@ public abstract class LuceneTestCase extends Assert { if (useNoMemoryExpensiveCodec) { String defFormat = _TestUtil.getPostingsFormat("thisCodeMakesAbsolutelyNoSenseCanWeDeleteIt"); - // Stupid: assumeFalse in setUp() does not print any information, because - // TestWatchman does not watch test during setUp() - getName() is also not defined... 
- // => print info directly and use assume without message: if ("SimpleText".equals(defFormat) || "Memory".equals(defFormat)) { - System.err.println("NOTE: A test method in " + getClass().getSimpleName() + " was ignored, as it uses too much memory with " + defFormat + "."); - Assume.assumeTrue(false); + assumeTrue("NOTE: A test method in " + getClass().getSimpleName() + + " was ignored, as it uses too much memory with " + defFormat + ".", false); } } } @@ -929,7 +951,7 @@ public abstract class LuceneTestCase extends Assert { } public static int atLeast(int i) { - return atLeast(random, i); + return atLeast(getStaticRandom(), i); } /** @@ -946,7 +968,7 @@ public abstract class LuceneTestCase extends Assert { } public static boolean rarely() { - return rarely(random); + return rarely(getStaticRandom()); } public static boolean usually(Random random) { @@ -954,7 +976,7 @@ public abstract class LuceneTestCase extends Assert { } public static boolean usually() { - return usually(random); + return usually(getStaticRandom()); } public static void assumeTrue(String msg, boolean b) { @@ -1006,7 +1028,7 @@ public abstract class LuceneTestCase extends Assert { /** create a new index writer config with random defaults */ public static IndexWriterConfig newIndexWriterConfig(Version v, Analyzer a) { - return newIndexWriterConfig(random, v, a); + return newIndexWriterConfig(getStaticRandom(), v, a); } /** create a new index writer config with random defaults using the specified random */ @@ -1060,11 +1082,11 @@ public abstract class LuceneTestCase extends Assert { } public static LogMergePolicy newLogMergePolicy() { - return newLogMergePolicy(random); + return newLogMergePolicy(getStaticRandom()); } public static TieredMergePolicy newTieredMergePolicy() { - return newTieredMergePolicy(random); + return newTieredMergePolicy(getStaticRandom()); } public static LogMergePolicy newLogMergePolicy(Random r) { @@ -1136,7 +1158,7 @@ public abstract class LuceneTestCase extends Assert { * overwritten. */ public static MockDirectoryWrapper newDirectory() throws IOException { - return newDirectory(random); + return newDirectory(getStaticRandom()); } /** @@ -1157,7 +1179,7 @@ public abstract class LuceneTestCase extends Assert { * information. */ public static MockDirectoryWrapper newDirectory(Directory d) throws IOException { - return newDirectory(random, d); + return newDirectory(getStaticRandom(), d); } /** Returns a new FSDirectory instance over the given file, which must be a folder. 
*/ @@ -1169,7 +1191,7 @@ public abstract class LuceneTestCase extends Assert { public static MockDirectoryWrapper newFSDirectory(File f, LockFactory lf) throws IOException { String fsdirClass = TEST_DIRECTORY; if (fsdirClass.equals("random")) { - fsdirClass = FS_DIRECTORIES[random.nextInt(FS_DIRECTORIES.length)]; + fsdirClass = FS_DIRECTORIES[getStaticRandom().nextInt(FS_DIRECTORIES.length)]; } Class clazz; @@ -1178,12 +1200,13 @@ public abstract class LuceneTestCase extends Assert { clazz = CommandLineUtil.loadFSDirectoryClass(fsdirClass); } catch (ClassCastException e) { // TEST_DIRECTORY is not a sub-class of FSDirectory, so draw one at random - fsdirClass = FS_DIRECTORIES[random.nextInt(FS_DIRECTORIES.length)]; + fsdirClass = FS_DIRECTORIES[getStaticRandom().nextInt(FS_DIRECTORIES.length)]; clazz = CommandLineUtil.loadFSDirectoryClass(fsdirClass); } Directory fsdir = newFSDirectoryImpl(clazz, f); - MockDirectoryWrapper dir = new MockDirectoryWrapper(random, maybeNRTWrap(random, fsdir)); + MockDirectoryWrapper dir = new MockDirectoryWrapper( + getStaticRandom(), maybeNRTWrap(getStaticRandom(), fsdir)); if (lf != null) { dir.setLockFactory(lf); } @@ -1220,7 +1243,7 @@ public abstract class LuceneTestCase extends Assert { } public static Field newField(String name, String value, FieldType type) { - return newField(random, name, value, type); + return newField(getStaticRandom(), name, value, type); } public static Field newField(Random random, String name, String value, FieldType type) { @@ -1357,13 +1380,14 @@ public abstract class LuceneTestCase extends Assert { * with one that returns null for getSequentialSubReaders. */ public static IndexSearcher newSearcher(IndexReader r, boolean maybeWrap) throws IOException { + Random rnd = getStaticRandom(); if (usually()) { if (maybeWrap && rarely()) { r = SlowCompositeReaderWrapper.wrap(r); } if (maybeWrap && rarely()) { // just wrap as MultiReader/ParallelXReader with one subreader - if (random.nextBoolean()) { + if (getStaticRandom().nextBoolean()) { r = (r instanceof AtomicReader) ? new ParallelAtomicReader((AtomicReader) r) : new ParallelCompositeReader((CompositeReader) r); @@ -1371,13 +1395,13 @@ public abstract class LuceneTestCase extends Assert { r = new MultiReader(r); } } - IndexSearcher ret = random.nextBoolean() ? new AssertingIndexSearcher(random, r) : new AssertingIndexSearcher(random, r.getTopReaderContext()); + IndexSearcher ret = getStaticRandom().nextBoolean() ? new AssertingIndexSearcher(getStaticRandom(), r) : new AssertingIndexSearcher(getStaticRandom(), r.getTopReaderContext()); ret.setSimilarity(similarity); return ret; } else { int threads = 0; - final ExecutorService ex = (random.nextBoolean()) ? null - : Executors.newFixedThreadPool(threads = _TestUtil.nextInt(random, 1, 8), + final ExecutorService ex = (rnd.nextBoolean()) ? null + : Executors.newFixedThreadPool(threads = _TestUtil.nextInt(rnd, 1, 8), new NamedThreadFactory("LuceneTestCase")); if (ex != null) { if (VERBOSE) { @@ -1390,14 +1414,14 @@ public abstract class LuceneTestCase extends Assert { } }); } - IndexSearcher ret = random.nextBoolean() - ? new AssertingIndexSearcher(random, r, ex) - : new AssertingIndexSearcher(random, r.getTopReaderContext(), ex); + IndexSearcher ret = rnd.nextBoolean() + ? 
new AssertingIndexSearcher(rnd, r, ex) + : new AssertingIndexSearcher(rnd, r.getTopReaderContext(), ex); ret.setSimilarity(similarity); return ret; } } - + static void shutdownExecutorService(ExecutorService ex) { if (ex != null) { ex.shutdown(); @@ -1428,15 +1452,16 @@ public abstract class LuceneTestCase extends Assert { // We get here from InterceptTestCaseEvents on the 'failed' event.... public static void reportPartialFailureInfo() { - System.err.println("NOTE: reproduce with (hopefully): ant test -Dtestcase=" + testClassesRun.get(testClassesRun.size()-1) - + " -Dtests.seed=" + new ThreeLongs(staticSeed, 0L, LuceneTestCaseRunner.runnerSeed) + System.err.println("NOTE: reproduce with (hopefully): ant test -Dtests.filter=*." + testClassesRun.get(testClassesRun.size()-1) + + " -Dtests.seed=" + LuceneTestCaseRunner.getMasterSeed() + reproduceWithExtraParams()); } // We get here from InterceptTestCaseEvents on the 'failed' event.... public void reportAdditionalFailureInfo() { - System.err.println("NOTE: reproduce with: ant test -Dtestcase=" + getClass().getSimpleName() - + " -Dtestmethod=" + getName() + " -Dtests.seed=" + new ThreeLongs(staticSeed, seed, LuceneTestCaseRunner.runnerSeed) + System.err.println("NOTE: reproduce with: ant test -Dtests.filter=*." + getClass().getSimpleName() + + " -Dtests.filter.method=" + getName() + + " -Dtests.seed=" + LuceneTestCaseRunner.getMasterSeed() + reproduceWithExtraParams()); } @@ -1484,14 +1509,6 @@ public abstract class LuceneTestCase extends Assert { // initialized by the TestRunner static boolean useNoMemoryExpensiveCodec; - - // recorded seed: for beforeClass - private static long staticSeed; - // seed for individual test methods, changed in @before - private long seed; - - static final Random seedRand = new Random(); - protected static final SmartRandom random = new SmartRandom(0); private String name = ""; diff --git lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCaseRunner.java lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCaseRunner.java index 322acd2..c57b360 100644 --- lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCaseRunner.java +++ lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCaseRunner.java @@ -17,6 +17,13 @@ package org.apache.lucene.util; * limitations under the License. 
*/ +import static org.apache.lucene.util.LuceneTestCase.TEST_ITER; +import static org.apache.lucene.util.LuceneTestCase.TEST_ITER_MIN; +import static org.apache.lucene.util.LuceneTestCase.TEST_NIGHTLY; +import static org.apache.lucene.util.LuceneTestCase.TEST_SLOW; +import static org.apache.lucene.util.LuceneTestCase.TEST_WEEKLY; +import static org.apache.lucene.util.LuceneTestCase.VERBOSE; + import java.lang.annotation.Annotation; import java.lang.reflect.Method; import java.lang.reflect.Modifier; @@ -27,14 +34,11 @@ import java.util.List; import java.util.Random; import org.apache.lucene.util.LuceneTestCase.Nightly; -import org.apache.lucene.util.LuceneTestCase.Weekly; import org.apache.lucene.util.LuceneTestCase.Slow; import org.apache.lucene.util.LuceneTestCase.UseNoMemoryExpensiveCodec; +import org.apache.lucene.util.LuceneTestCase.Weekly; import org.junit.Ignore; import org.junit.Test; -import org.junit.runner.Description; -import org.junit.runner.manipulation.Filter; -import org.junit.runner.manipulation.NoTestsRemainException; import org.junit.runner.notification.Failure; import org.junit.runner.notification.RunListener; import org.junit.runner.notification.RunNotifier; @@ -42,32 +46,37 @@ import org.junit.runners.BlockJUnit4ClassRunner; import org.junit.runners.model.FrameworkMethod; import org.junit.runners.model.InitializationError; -// please don't reorganize these into a wildcard! -import static org.apache.lucene.util.LuceneTestCase.TEST_ITER; -import static org.apache.lucene.util.LuceneTestCase.TEST_ITER_MIN; -import static org.apache.lucene.util.LuceneTestCase.TEST_METHOD; -import static org.apache.lucene.util.LuceneTestCase.TEST_SEED; -import static org.apache.lucene.util.LuceneTestCase.TEST_NIGHTLY; -import static org.apache.lucene.util.LuceneTestCase.TEST_WEEKLY; -import static org.apache.lucene.util.LuceneTestCase.TEST_SLOW; -import static org.apache.lucene.util.LuceneTestCase.VERBOSE; - -/** optionally filters the tests to be run by TEST_METHOD */ public class LuceneTestCaseRunner extends BlockJUnit4ClassRunner { - private List testMethods; - static final long runnerSeed; + private final static char [] HEX = "0123456789ABCDEF".toCharArray(); + + private static final long runnerSeed; static { - runnerSeed = "random".equals(TEST_SEED) ? LuceneTestCase.seedRand.nextLong() : ThreeLongs.fromString(TEST_SEED).l3; + // Check for junit4-passed seed first. 
+ String TEST_SEED = System.getProperty("junit4.seed"); + if (TEST_SEED == null) { + TEST_SEED = System.getProperty("tests.seed"); + } + + if (TEST_SEED != null) { + if (TEST_SEED.length() == 0) { + throw new RuntimeException("Seed should be a hex long: " + TEST_SEED); + } + runnerSeed = parseSeed(TEST_SEED); + } else { + runnerSeed = new Random().nextLong(); + } } - + + private List testMethods; + @Override protected List computeTestMethods() { if (testMethods != null) return testMethods; Random r = new Random(runnerSeed); - + LuceneTestCase.testClassesRun.add(getTestClass().getJavaClass().getSimpleName()); testMethods = new ArrayList(); for (Method m : getTestClass().getJavaClass().getMethods()) { @@ -185,23 +194,49 @@ public class LuceneTestCaseRunner extends BlockJUnit4ClassRunner { System.err.println("NOTE: Using no memory expensive codecs (Memory, SimpleText) for " + clazz.getSimpleName() + "."); } - - // evil we cannot init our random here, because super() calls computeTestMethods!!!!; - Filter f = new Filter() { - - @Override - public String describe() { return "filters according to TEST_METHOD"; } - - @Override - public boolean shouldRun(Description d) { - return TEST_METHOD == null || d.getMethodName().equals(TEST_METHOD); - } - }; - - try { - f.apply(this); - } catch (NoTestsRemainException e) { - throw new RuntimeException(e); + } + + /** + * Provides a constant random off the runner's seed. + */ + static Random getNewRandom() { + return new Random(runnerSeed); + } + + /** + * Parse a single seed. The seed needs to be cleaned up from any surrounding characters. + */ + private static long parseSeed(String seed) { + long result = 0; + for (char chr : seed.toCharArray()) { + chr = Character.toLowerCase(chr); + result = result << 4; + if (chr >= '0' && chr <= '9') + result |= (chr - '0'); + else if (chr >= 'a' && chr <= 'f') + result |= (chr - 'a' + 10); + else + throw new IllegalArgumentException("Expected hexadecimal seed: " + seed); } + return result; + } + + /** + * Format a single seed. + */ + public static String formatSeed(long seed) { + StringBuilder b = new StringBuilder(); + do { + b.append(HEX[(int) (seed & 0xF)]); + seed = seed >>> 4; + } while (seed != 0); + return b.reverse().toString(); + } + + /** + * Format master seed. + */ + static String getMasterSeed() { + return formatSeed(runnerSeed); } } diff --git lucene/test-framework/src/java/org/apache/lucene/util/RandomNoSetSeed.java lucene/test-framework/src/java/org/apache/lucene/util/RandomNoSetSeed.java new file mode 100644 index 0000000..ab338d1 --- /dev/null +++ lucene/test-framework/src/java/org/apache/lucene/util/RandomNoSetSeed.java @@ -0,0 +1,123 @@ +package org.apache.lucene.util; + +import java.util.Random; + +/** + * A random with a delegate, preventing calls to {@link Random#setSeed(long)} and + * permitting end-of-lifecycle markers. + */ +@SuppressWarnings("serial") +final class RandomNoSetSeed extends Random { + private final Random delegate; + + /** + * If false, the object is dead. Any calls to any method will result + * in an exception. 
+ */ + private volatile boolean alive = true; + + void setDead() { + alive = false; + } + + public RandomNoSetSeed(Random delegate) { + super(0); + this.delegate = delegate; + } + + @Override + protected int next(int bits) { + throw new RuntimeException("Shouldn't be reachable."); + } + + @Override + public boolean nextBoolean() { + checkAlive(); + return delegate.nextBoolean(); + } + + @Override + public void nextBytes(byte[] bytes) { + checkAlive(); + delegate.nextBytes(bytes); + } + + @Override + public double nextDouble() { + checkAlive(); + return delegate.nextDouble(); + } + + @Override + public float nextFloat() { + checkAlive(); + return delegate.nextFloat(); + } + + @Override + public double nextGaussian() { + checkAlive(); + return delegate.nextGaussian(); + } + + @Override + public int nextInt() { + checkAlive(); + return delegate.nextInt(); + } + + @Override + public int nextInt(int n) { + checkAlive(); + return delegate.nextInt(n); + } + + @Override + public long nextLong() { + checkAlive(); + return delegate.nextLong(); + } + + @Override + public void setSeed(long seed) { + // This is an interesting case of observing uninitialized object from an instance method + // (this method is called from the superclass constructor). We allow it. + if (seed == 0 && delegate == null) { + return; + } + + throw new RuntimeException( + RandomNoSetSeed.class.getSimpleName() + + " prevents changing the seed of its random generators to assure repeatability" + + " of tests. If you need a mutable instance of Random, create a new instance," + + " preferably with the initial seed acquired from this Random instance."); + } + + @Override + public String toString() { + checkAlive(); + return delegate.toString(); + } + + @Override + public boolean equals(Object obj) { + checkAlive(); + return delegate.equals(obj); + } + + @Override + public int hashCode() { + checkAlive(); + return delegate.hashCode(); + } + + /** + * Check the liveness status. + */ + private void checkAlive() { + if (!alive) { + throw new RuntimeException("This Random is dead. Do not store references to " + + "Random instances, acquire an instance when you need one."); + } + } +} diff --git lucene/test-framework/src/java/org/apache/lucene/util/SmartRandom.java lucene/test-framework/src/java/org/apache/lucene/util/SmartRandom.java deleted file mode 100644 index 8e92ba2..0000000 --- lucene/test-framework/src/java/org/apache/lucene/util/SmartRandom.java +++ /dev/null @@ -1,43 +0,0 @@ -package org.apache.lucene.util; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import java.util.Random; - -/** - * A random that tracks if its been initialized properly, - * and throws an exception if it hasn't. 
- */ -public class SmartRandom extends Random { - boolean initialized; - - SmartRandom(long seed) { - super(seed); - } - - @Override - protected int next(int bits) { - if (!initialized) { - System.err.println("!!! WARNING: test is using random from static initializer !!!"); - Thread.dumpStack(); - // I wish, but it causes JRE crashes - // throw new IllegalStateException("you cannot use this random from a static initializer in your test"); - } - return super.next(bits); - } -} diff --git lucene/test-framework/src/java/org/apache/lucene/util/ThreeLongs.java lucene/test-framework/src/java/org/apache/lucene/util/ThreeLongs.java deleted file mode 100644 index 8911341..0000000 --- lucene/test-framework/src/java/org/apache/lucene/util/ThreeLongs.java +++ /dev/null @@ -1,46 +0,0 @@ -package org.apache.lucene.util; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** helper class for a random seed that is really 3 random seeds: - *
<ol>
- *   <li> The test class's random seed: this is what the test sees in its beforeClass methods </li>
- *   <li> The test method's random seed: this is what the test method sees starting in its befores </li>
- *   <li> The test runner's random seed (controls the shuffling of test methods) </li>
- *   </ol>
    - */ -class ThreeLongs { - public final long l1, l2, l3; - - public ThreeLongs(long l1, long l2, long l3) { - this.l1 = l1; - this.l2 = l2; - this.l3 = l3; - } - - @Override - public String toString() { - return Long.toString(l1, 16) + ":" + Long.toString(l2, 16) + ":" + Long.toString(l3, 16); - } - - public static ThreeLongs fromString(String s) { - String parts[] = s.split(":"); - assert parts.length == 3; - return new ThreeLongs(Long.parseLong(parts[0], 16), Long.parseLong(parts[1], 16), Long.parseLong(parts[2], 16)); - } -} diff --git lucene/tools/junit4/tests-timehints.txt lucene/tools/junit4/tests-timehints.txt new file mode 100644 index 0000000..2ab700e --- /dev/null +++ lucene/tools/junit4/tests-timehints.txt @@ -0,0 +1,962 @@ +org.apache.lucene.demo.TestDemo=531,969,964,985 +org.apache.lucene.search.highlight.HighlighterPhraseTest=228,161,162,229 +org.apache.lucene.search.highlight.HighlighterTest=1405,1161,1142,1411 +org.apache.lucene.search.highlight.OffsetLimitTokenFilterTest=11,12,10,11 +org.apache.lucene.search.highlight.TokenSourcesTest=214,193,158,165 +org.apache.lucene.search.highlight.custom.HighlightCustomQueryTest=11,7,11,8 +org.apache.lucene.search.vectorhighlight.BreakIteratorBoundaryScannerTest=21,21,17,19 +org.apache.lucene.search.vectorhighlight.FieldPhraseListTest=293,255,256,282 +org.apache.lucene.search.vectorhighlight.FieldQueryTest=707,593,614,664 +org.apache.lucene.search.vectorhighlight.FieldTermStackTest=730,744,777,664 +org.apache.lucene.search.vectorhighlight.IndexTimeSynonymTest=373,406,389,379 +org.apache.lucene.search.vectorhighlight.ScoreOrderFragmentsBuilderTest=564,574,482,480 +org.apache.lucene.search.vectorhighlight.SimpleBoundaryScannerTest=8,7,8,10 +org.apache.lucene.search.vectorhighlight.SimpleFragListBuilderTest=426,367,392,343 +org.apache.lucene.search.vectorhighlight.SimpleFragmentsBuilderTest=660,713,774,800 +org.apache.lucene.search.vectorhighlight.SingleFragListBuilderTest=118,86,116,73 +org.apache.lucene.index.memory.MemoryIndexTest=4979,4492,4902,4646 +org.apache.lucene.index.TestIndexSplitter=1331,1457,1517,1413 +org.apache.lucene.index.TestMultiPassIndexSplitter=742,711,832,773 +org.apache.lucene.index.TestPKIndexSplitter=937,882,857,788 +org.apache.lucene.misc.SweetSpotSimilarityTest=315,312,308,336 +org.apache.lucene.misc.TestHighFreqTerms=1217,397,527,636 +org.apache.lucene.sandbox.queries.DuplicateFilterTest=678,606,654,565 +org.apache.lucene.sandbox.queries.FuzzyLikeThisQueryTest=542,701,598,600 +org.apache.lucene.sandbox.queries.TestSlowCollationMethods=1471,1366,1464,1921 +org.apache.lucene.sandbox.queries.regex.TestJakartaRegexpCapabilities=272,222,211,234 +org.apache.lucene.sandbox.queries.regex.TestRegexQuery=479,531,452,439 +org.apache.lucene.sandbox.queries.regex.TestSpanRegexQuery=65,95,93,49 +org.apache.lucene.spatial.DistanceUtilsTest=33,23,28,36 +org.apache.lucene.spatial.geohash.TestGeoHashUtils=209,257,250,230 +org.apache.lucene.spatial.geometry.TestDistanceUnits=232,217,264,202 +org.apache.lucene.spatial.tier.TestCartesian=1039,999,1060,1061 +org.apache.lucene.spatial.tier.TestDistance=495,539,510,529 +org.apache.lucene.spatial.tier.projections.SinusoidalProjectorTest=6,10,15,26 +org.apache.lucene.TestAssertions=5,4,3,4 +org.apache.lucene.TestDemo=72,9,9,16 +org.apache.lucene.TestExternalCodecs=299,136,69,126 +org.apache.lucene.TestMergeSchedulerExternal=278,539,590,305 +org.apache.lucene.TestSearch=22,44,71,17 +org.apache.lucene.TestSearchForDuplicates=113,98,55,115 
+org.apache.lucene.analysis.TestCachingTokenFilter=7,32,36,9 +org.apache.lucene.analysis.TestMockAnalyzer=1698,2243,5217,1473 +org.apache.lucene.analysis.TestMockCharFilter=5,4,4,5 +org.apache.lucene.analysis.TestNumericTokenStream=15,4,6,7 +org.apache.lucene.analysis.TestToken=3168,1751,1631,1751 +org.apache.lucene.analysis.tokenattributes.TestCharTermAttributeImpl=964,788,1020,653 +org.apache.lucene.analysis.tokenattributes.TestSimpleAttributeImpl=5,5,3,4 +org.apache.lucene.codecs.appending.TestAppendingCodec=18,55,26,28 +org.apache.lucene.codecs.intblock.TestIntBlockCodec=9,12,9,9 +org.apache.lucene.codecs.lucene3x.TestImpersonation=2,2,2,3 +org.apache.lucene.codecs.lucene3x.TestSurrogates=735,321,102,370 +org.apache.lucene.codecs.lucene3x.TestTermInfosReaderIndex=170,334,143,293 +org.apache.lucene.codecs.lucene40.TestBitVector=653,739,937,911 +org.apache.lucene.codecs.lucene40.TestDocValues=462,425,353,286 +org.apache.lucene.codecs.lucene40.TestReuseDocsEnum=554,620,247,460 +org.apache.lucene.codecs.perfield.TestPerFieldPostingsFormat=1142,741,765,1638 +org.apache.lucene.codecs.pulsing.Test10KPulsings=4,4,2,2 +org.apache.lucene.codecs.pulsing.TestPulsingReuse=10,53,10,9 +org.apache.lucene.document.TestBinaryDocument=12,11,10,11 +org.apache.lucene.document.TestDateTools=9,11,18,19 +org.apache.lucene.document.TestDocument=191,36,36,451 +org.apache.lucene.index.Test2BPostings=1,1,1,2 +org.apache.lucene.index.Test2BTerms=2,3,2,2 +org.apache.lucene.index.TestAddIndexes=5656,8210,11930,11938 +org.apache.lucene.index.TestAtomicUpdate=2803,3435,1655,3604 +org.apache.lucene.index.TestBackwardsCompatibility=6788,6173,6638,7781 +org.apache.lucene.index.TestBinaryTerms=28,52,27,65 +org.apache.lucene.index.TestByteSlices=2116,2102,2677,2579 +org.apache.lucene.index.TestCheckIndex=23,23,13,414 +org.apache.lucene.index.TestCodecs=342,342,191,774 +org.apache.lucene.index.TestCompoundFile=2800,4008,4823,2880 +org.apache.lucene.index.TestConcurrentMergeScheduler=804,1182,1384,3413 +org.apache.lucene.index.TestConsistentFieldNumbers=802,567,391,346 +org.apache.lucene.index.TestCrash=441,624,261,491 +org.apache.lucene.index.TestCrashCausesCorruptIndex=157,240,144,213 +org.apache.lucene.index.TestCustomNorms=711,368,588,216 +org.apache.lucene.index.TestDeletionPolicy=8116,4136,5647,4380 +org.apache.lucene.index.TestDirectoryReader=1986,5251,1396,723 +org.apache.lucene.index.TestDoc=1034,284,340,315 +org.apache.lucene.index.TestDocCount=34,58,33,33 +org.apache.lucene.index.TestDocTermOrds=149,266,156,160 +org.apache.lucene.index.TestDocValuesIndexing=5176,2982,215,3811 +org.apache.lucene.index.TestDocsAndPositions=1747,1950,1671,1495 +org.apache.lucene.index.TestDocumentWriter=185,617,472,256 +org.apache.lucene.index.TestDocumentsWriterDeleteQueue=367,923,523,375 +org.apache.lucene.index.TestDuelingCodecs=4296,9589,3472,5808 +org.apache.lucene.index.TestFieldInfos=6,6,4,4 +org.apache.lucene.index.TestFieldsReader=750,7225,569,600 +org.apache.lucene.index.TestFilterIndexReader=24,16,55,19 +org.apache.lucene.index.TestFlex=308,764,1203,291 +org.apache.lucene.index.TestFlushByRamOrCountsPolicy=7133,12285,6734,7873 +org.apache.lucene.index.TestForTooMuchCloning=185,316,329,124 +org.apache.lucene.index.TestForceMergeForever=756,2368,739,425 +org.apache.lucene.index.TestIndexCommit=2,3,17,2 +org.apache.lucene.index.TestIndexFileDeleter=29,109,24,29 +org.apache.lucene.index.TestIndexInput=3,3,5,3 +org.apache.lucene.index.TestIndexReader=1960,2129,2413,1600 
+org.apache.lucene.index.TestIndexReaderReopen=1864,2585,2300,1574 +org.apache.lucene.index.TestIndexWriter=6282,11908,12177,8100 +org.apache.lucene.index.TestIndexWriterCommit=2366,1676,2636,2646 +org.apache.lucene.index.TestIndexWriterConfig=7,7,7,12 +org.apache.lucene.index.TestIndexWriterDelete=2281,2558,1979,3413 +org.apache.lucene.index.TestIndexWriterExceptions=3568,4436,4135,3765 +org.apache.lucene.index.TestIndexWriterForceMerge=2330,2051,2361,2155 +org.apache.lucene.index.TestIndexWriterLockRelease=10,8,101,6 +org.apache.lucene.index.TestIndexWriterMergePolicy=3397,2502,5830,3046 +org.apache.lucene.index.TestIndexWriterMerging=2561,3549,5304,4152 +org.apache.lucene.index.TestIndexWriterNRTIsCurrent=634,556,680,1152 +org.apache.lucene.index.TestIndexWriterOnDiskFull=867,1106,335,2372 +org.apache.lucene.index.TestIndexWriterOnJRECrash=3,4,3,3 +org.apache.lucene.index.TestIndexWriterReader=6795,15686,11066,16046 +org.apache.lucene.index.TestIndexWriterUnicode=1721,2844,1810,2625 +org.apache.lucene.index.TestIndexWriterWithThreads=7079,4587,9820,5817 +org.apache.lucene.index.TestIndexableField=382,1181,634,1124 +org.apache.lucene.index.TestIsCurrent=11,12,7,13 +org.apache.lucene.index.TestLazyProxSkipping=47,750,403,549 +org.apache.lucene.index.TestLongPostings=1844,3316,1490,1450 +org.apache.lucene.index.TestMaxTermFrequency=1081,1055,573,974 +org.apache.lucene.index.TestMixedCodecs=1293,1986,1292,9 +org.apache.lucene.index.TestMultiFields=268,88,179,1371 +org.apache.lucene.index.TestMultiLevelSkipList=341,117,186,268 +org.apache.lucene.index.TestMultiReader=687,686,580,568 +org.apache.lucene.index.TestNRTReaderWithThreads=1493,2447,3215,1192 +org.apache.lucene.index.TestNRTThreads=5816,4106,3114,48 +org.apache.lucene.index.TestNeverDelete=2035,2120,1066,1972 +org.apache.lucene.index.TestNewestSegment=5,5,5,4 +org.apache.lucene.index.TestNoDeletionPolicy=297,64,59,58 +org.apache.lucene.index.TestNoMergePolicy=5,7,6,5 +org.apache.lucene.index.TestNoMergeScheduler=5,5,4,5 +org.apache.lucene.index.TestNorms=5100,1844,6351,2084 +org.apache.lucene.index.TestOmitNorms=1194,1404,2306,3802 +org.apache.lucene.index.TestOmitPositions=369,229,82,130 +org.apache.lucene.index.TestOmitTf=1176,841,685,1187 +org.apache.lucene.index.TestParallelReader=115,87,70,204 +org.apache.lucene.index.TestParallelReaderEmptyIndex=39,64,56,31 +org.apache.lucene.index.TestParallelTermEnum=37,54,11,18 +org.apache.lucene.index.TestPayloadProcessorProvider=607,266,359,738 +org.apache.lucene.index.TestPayloads=95,696,61,67 +org.apache.lucene.index.TestPerSegmentDeletes=32,35,37,56 +org.apache.lucene.index.TestPersistentSnapshotDeletionPolicy=3713,3673,2795,2904 +org.apache.lucene.index.TestPostingsOffsets=39,33,1867,854 +org.apache.lucene.index.TestPrefixCodedTerms=357,210,411,646 +org.apache.lucene.index.TestRandomStoredFields=931,1020,3080,967 +org.apache.lucene.index.TestReaderClosed=6,33,7,6 +org.apache.lucene.index.TestRollback=9,8,10,13 +org.apache.lucene.index.TestRollingUpdates=2867,832,724,1204 +org.apache.lucene.index.TestSameTokenSamePosition=64,34,30,43 +org.apache.lucene.index.TestSegmentMerger=348,472,269,819 +org.apache.lucene.index.TestSegmentReader=926,1133,1043,2495 +org.apache.lucene.index.TestSegmentTermDocs=7228,843,10592,5894 +org.apache.lucene.index.TestSegmentTermEnum=24,33,18,18 +org.apache.lucene.index.TestSizeBoundedForceMerge=154,153,40,182 +org.apache.lucene.index.TestSnapshotDeletionPolicy=1564,1644,1400,1354 +org.apache.lucene.index.TestStressAdvance=2038,1563,2260,1950 
+org.apache.lucene.index.TestStressIndexing=2026,2479,1748,1294 +org.apache.lucene.index.TestStressIndexing2=379,453,362,552 +org.apache.lucene.index.TestStressNRT=967,1891,309,21646 +org.apache.lucene.index.TestSumDocFreq=130,126,157,42 +org.apache.lucene.index.TestTerm=2,3,2,3 +org.apache.lucene.index.TestTermVectorsReader=42,54,69,79 +org.apache.lucene.index.TestTermVectorsWriter=120,453,140,140 +org.apache.lucene.index.TestTermdocPerf=2,4,3,2 +org.apache.lucene.index.TestTermsEnum=4912,5669,5943,5380 +org.apache.lucene.index.TestTermsEnum2=1406,1819,3517,2180 +org.apache.lucene.index.TestThreadedForceMerge=762,750,1501,1700 +org.apache.lucene.index.TestTieredMergePolicy=2463,1015,1707,1465 +org.apache.lucene.index.TestTransactionRollback=112,115,119,200 +org.apache.lucene.index.TestTransactions=623,800,718,779 +org.apache.lucene.index.TestTypePromotion=3549,34,2525,35 +org.apache.lucene.index.TestUniqueTermCount=29,42,18,25 +org.apache.lucene.search.MultiCollectorTest=5,4,6,6 +org.apache.lucene.search.TestAutomatonQuery=320,204,62,61 +org.apache.lucene.search.TestAutomatonQueryUnicode=13,9,13,19 +org.apache.lucene.search.TestBoolean2=4798,1549,5126,5695 +org.apache.lucene.search.TestBooleanMinShouldMatch=427,1121,631,1473 +org.apache.lucene.search.TestBooleanOr=330,573,679,1027 +org.apache.lucene.search.TestBooleanQuery=135,100,289,120 +org.apache.lucene.search.TestBooleanScorer=61,14,21,28 +org.apache.lucene.search.TestCachingCollector=10,11,11,13 +org.apache.lucene.search.TestCachingWrapperFilter=103,78,25,31 +org.apache.lucene.search.TestComplexExplanations=1822,983,1373,1172 +org.apache.lucene.search.TestComplexExplanationsOfNonMatches=77,37,35,38 +org.apache.lucene.search.TestConstantScoreQuery=34,13,21,12 +org.apache.lucene.search.TestCustomSearcherSort=683,812,843,990 +org.apache.lucene.search.TestDateFilter=28,15,18,23 +org.apache.lucene.search.TestDateSort=22,13,15,11 +org.apache.lucene.search.TestDisjunctionMaxQuery=2407,930,1474,1290 +org.apache.lucene.search.TestDocBoost=8,7,8,13 +org.apache.lucene.search.TestDocIdSet=25,8,12,12 +org.apache.lucene.search.TestDocValuesScoring=36,26,10,44 +org.apache.lucene.search.TestElevationComparator=18,35,18,25 +org.apache.lucene.search.TestExplanations=12,10,9,7 +org.apache.lucene.search.TestFieldCache=3264,786,664,502 +org.apache.lucene.search.TestFieldCacheRangeFilter=497,690,196,660 +org.apache.lucene.search.TestFieldCacheRewriteMethod=1266,875,1095,1141 +org.apache.lucene.search.TestFieldCacheTermsFilter=12,13,10,10 +org.apache.lucene.search.TestFieldValueFilter=31,27,36,28 +org.apache.lucene.search.TestFilteredQuery=240,304,323,432 +org.apache.lucene.search.TestFilteredSearch=33,25,41,39 +org.apache.lucene.search.TestFuzzyQuery=154,104,57,100 +org.apache.lucene.search.TestFuzzyQuery2=1099,450,1117,514 +org.apache.lucene.search.TestMatchAllDocsQuery=11,9,33,15 +org.apache.lucene.search.TestMultiPhraseQuery=504,833,826,791 +org.apache.lucene.search.TestMultiTermConstantScore=426,186,137,124 +org.apache.lucene.search.TestMultiTermQueryRewrites=24,22,15,45 +org.apache.lucene.search.TestMultiThreadTermVectors=1232,1360,1148,1634 +org.apache.lucene.search.TestMultiValuedNumericRangeQuery=1131,990,2837,817 +org.apache.lucene.search.TestNGramPhraseQuery=5,4,4,4 +org.apache.lucene.search.TestNRTManager=2556,60,5400,5146 +org.apache.lucene.search.TestNot=18,8,14,8 +org.apache.lucene.search.TestNumericRangeQuery32=3513,6453,5145,5126 +org.apache.lucene.search.TestNumericRangeQuery64=17221,12483,22047,7784 
+org.apache.lucene.search.TestPhrasePrefixQuery=8,13,30,7 +org.apache.lucene.search.TestPhraseQuery=3164,6474,5922,4556 +org.apache.lucene.search.TestPositionIncrement=15,19,13,17 +org.apache.lucene.search.TestPositiveScoresOnlyCollector=5,4,6,17 +org.apache.lucene.search.TestPrefixFilter=6,8,6,16 +org.apache.lucene.search.TestPrefixInBooleanQuery=756,635,1199,646 +org.apache.lucene.search.TestPrefixQuery=8,9,8,32 +org.apache.lucene.search.TestPrefixRandom=1826,3314,769,990 +org.apache.lucene.search.TestQueryWrapperFilter=966,320,1226,3325 +org.apache.lucene.search.TestRegexpQuery=46,81,45,47 +org.apache.lucene.search.TestRegexpRandom=110,84,162,164 +org.apache.lucene.search.TestRegexpRandom2=2429,5399,4815,5980 +org.apache.lucene.search.TestScoreCachingWrappingScorer=6,5,4,6 +org.apache.lucene.search.TestScorerPerf=574,1215,1022,661 +org.apache.lucene.search.TestSearchAfter=79,43,86,101 +org.apache.lucene.search.TestSearchWithThreads=1482,2318,2046,4084 +org.apache.lucene.search.TestSearcherManager=2658,9519,4796,2247 +org.apache.lucene.search.TestShardSearching=6964,4585,3932,5770 +org.apache.lucene.search.TestSimilarity=9,11,8,17 +org.apache.lucene.search.TestSimilarityProvider=11,9,6,11 +org.apache.lucene.search.TestSimpleExplanations=683,855,736,3021 +org.apache.lucene.search.TestSimpleExplanationsOfNonMatches=67,60,139,441 +org.apache.lucene.search.TestSloppyPhraseQuery=3752,3070,3444,3542 +org.apache.lucene.search.TestSort=2189,2422,3173,2332 +org.apache.lucene.search.TestSubScorerFreqs=16,18,15,36 +org.apache.lucene.search.TestTermRangeFilter=565,778,1616,415 +org.apache.lucene.search.TestTermRangeQuery=45,55,71,65 +org.apache.lucene.search.TestTermScorer=19,19,47,19 +org.apache.lucene.search.TestTermVectors=252,408,547,312 +org.apache.lucene.search.TestTimeLimitingCollector=4680,4452,3286,2701 +org.apache.lucene.search.TestTopDocsCollector=55,55,37,95 +org.apache.lucene.search.TestTopDocsMerge=3992,4115,1374,3679 +org.apache.lucene.search.TestTopScoreDocCollector=5,7,7,9 +org.apache.lucene.search.TestWildcard=257,91,61,69 +org.apache.lucene.search.TestWildcardRandom=93,78,98,92 +org.apache.lucene.search.payloads.TestPayloadExplanations=1410,324,339,314 +org.apache.lucene.search.payloads.TestPayloadNearQuery=759,613,120,1657 +org.apache.lucene.search.payloads.TestPayloadTermQuery=463,1159,2769,695 +org.apache.lucene.search.similarities.TestSimilarity2=132,71,101,125 +org.apache.lucene.search.similarities.TestSimilarityBase=2059,1801,1740,1474 +org.apache.lucene.search.spans.TestBasics=9064,7511,8748,9945 +org.apache.lucene.search.spans.TestFieldMaskingSpanQuery=190,1213,467,273 +org.apache.lucene.search.spans.TestNearSpansOrdered=82,92,47,87 +org.apache.lucene.search.spans.TestPayloadSpans=755,677,1262,515 +org.apache.lucene.search.spans.TestSpanExplanations=582,2817,2582,816 +org.apache.lucene.search.spans.TestSpanExplanationsOfNonMatches=51,28,59,75 +org.apache.lucene.search.spans.TestSpanFirstQuery=44,7,9,11 +org.apache.lucene.search.spans.TestSpanMultiTermQueryWrapper=63,50,71,37 +org.apache.lucene.search.spans.TestSpans=1628,1823,2607,2122 +org.apache.lucene.search.spans.TestSpansAdvanced=24,59,25,156 +org.apache.lucene.search.spans.TestSpansAdvanced2=85,104,108,145 +org.apache.lucene.store.TestBufferedIndexInput=2269,1131,5331,3833 +org.apache.lucene.store.TestByteArrayDataInput=3,4,3,2 +org.apache.lucene.store.TestCopyBytes=558,757,556,650 +org.apache.lucene.store.TestDirectory=28,24,29,27 +org.apache.lucene.store.TestFileSwitchDirectory=31,36,73,34 
+org.apache.lucene.store.TestHugeRamFile=859,794,2281,654 +org.apache.lucene.store.TestLock=13,12,13,22 +org.apache.lucene.store.TestLockFactory=1039,1025,1061,1237 +org.apache.lucene.store.TestMultiMMap=5453,4687,3883,5281 +org.apache.lucene.store.TestNRTCachingDirectory=3631,661,3190,2063 +org.apache.lucene.store.TestRAMDirectory=2038,2258,2197,2227 +org.apache.lucene.store.TestWindowsMMap=280,135,280,185 +org.apache.lucene.util.TestArrayUtil=2332,2353,2346,3829 +org.apache.lucene.util.TestAttributeSource=6,7,10,10 +org.apache.lucene.util.TestBitUtil=3,3,3,4 +org.apache.lucene.util.TestByteBlockPool=3,3,2,3 +org.apache.lucene.util.TestBytesRef=4,8,4,4 +org.apache.lucene.util.TestBytesRefHash=65,50,33,122 +org.apache.lucene.util.TestCharsRef=13,12,14,13 +org.apache.lucene.util.TestCloseableThreadLocal=4,4,4,4 +org.apache.lucene.util.TestCollectionUtil=1772,1724,1887,1494 +org.apache.lucene.util.TestDoubleBarrelLRUCache=1007,1008,1009,1006 +org.apache.lucene.util.TestFieldCacheSanityChecker=560,559,414,759 +org.apache.lucene.util.TestFixedBitSet=627,562,870,559 +org.apache.lucene.util.TestIOUtils=4,3,4,5 +org.apache.lucene.util.TestIndexableBinaryStringTools=129,352,166,126 +org.apache.lucene.util.TestJUnitRuleOrder=4,3,2,2 +org.apache.lucene.util.TestNamedSPILoader=3,5,2,5 +org.apache.lucene.util.TestNumericUtils=1288,1565,1011,910 +org.apache.lucene.util.TestOpenBitSet=1607,2194,2600,2196 +org.apache.lucene.util.TestPagedBytes=537,295,199,387 +org.apache.lucene.util.TestPriorityQueue=14,25,61,16 +org.apache.lucene.util.TestRamUsageEstimator=4,2,2,3 +org.apache.lucene.util.TestRecyclingByteBlockAllocator=17,18,26,29 +org.apache.lucene.util.TestSentinelIntSet=132,106,108,113 +org.apache.lucene.util.TestSetOnce=18,39,15,16 +org.apache.lucene.util.TestSetupTeardownMethods=24,26,32,25 +org.apache.lucene.util.TestSmallFloat=27,34,31,29 +org.apache.lucene.util.TestSortedVIntList=21,21,22,27 +org.apache.lucene.util.TestTwoPhaseCommitTool=8,8,5,8 +org.apache.lucene.util.TestUnicodeUtil=257,256,314,244 +org.apache.lucene.util.TestVersion=2,2,2,4 +org.apache.lucene.util.TestVersionComparator=2,3,4,3 +org.apache.lucene.util.TestVirtualMethod=6,6,5,4 +org.apache.lucene.util.TestWeakIdentityMap=531,692,1114,445 +org.apache.lucene.util.automaton.TestBasicOperations=161,81,130,142 +org.apache.lucene.util.automaton.TestCompiledAutomaton=153,430,39,160 +org.apache.lucene.util.automaton.TestDeterminism=578,683,836,1021 +org.apache.lucene.util.automaton.TestDeterminizeLexicon=1174,1123,591,627 +org.apache.lucene.util.automaton.TestLevenshteinAutomata=662,800,596,463 +org.apache.lucene.util.automaton.TestMinimize=1910,2067,2633,1573 +org.apache.lucene.util.automaton.TestSpecialOperations=55,91,106,102 +org.apache.lucene.util.automaton.TestUTF32ToUTF8=1485,1659,2310,788 +org.apache.lucene.util.fst.TestFSTs=11876,220,10497,11092 +org.apache.lucene.util.packed.TestPackedInts=3696,2679,2803,3121 +org.apache.lucene.analysis.ar.TestArabicAnalyzer=861,315,404,472 +org.apache.lucene.analysis.ar.TestArabicLetterTokenizer=6,6,6,7 +org.apache.lucene.analysis.ar.TestArabicNormalizationFilter=22,23,37,21 +org.apache.lucene.analysis.ar.TestArabicStemFilter=39,41,36,102 +org.apache.lucene.analysis.bg.TestBulgarianAnalyzer=650,495,676,367 +org.apache.lucene.analysis.bg.TestBulgarianStemmer=24,17,19,33 +org.apache.lucene.analysis.br.TestBrazilianStemmer=1001,1049,988,999 +org.apache.lucene.analysis.ca.TestCatalanAnalyzer=706,685,584,549 +org.apache.lucene.analysis.charfilter.HTMLStripCharFilterTest=2695,3133,3300,3025 
+org.apache.lucene.analysis.charfilter.TestCharFilter=7,7,7,8 +org.apache.lucene.analysis.charfilter.TestMappingCharFilter=570,743,803,793 +org.apache.lucene.analysis.cjk.TestCJKAnalyzer=3532,3604,4338,4222 +org.apache.lucene.analysis.cjk.TestCJKTokenizer=2089,1496,1885,2154 +org.apache.lucene.analysis.cjk.TestCJKWidthFilter=457,274,281,236 +org.apache.lucene.analysis.cn.TestChineseTokenizer=758,533,984,508 +org.apache.lucene.analysis.commongrams.CommonGramsFilterTest=1368,1167,1111,1160 +org.apache.lucene.analysis.compound.TestCompoundWordTokenFilter=3824,3623,3616,3465 +org.apache.lucene.analysis.core.TestAnalyzers=1354,723,1030,1309 +org.apache.lucene.analysis.core.TestClassicAnalyzer=585,642,663,495 +org.apache.lucene.analysis.core.TestDuelingAnalyzers=842,877,836,701 +org.apache.lucene.analysis.core.TestKeywordAnalyzer=835,1197,1526,872 +org.apache.lucene.analysis.core.TestStandardAnalyzer=2581,2360,1844,2154 +org.apache.lucene.analysis.core.TestStopAnalyzer=269,74,93,121 +org.apache.lucene.analysis.core.TestStopFilter=10,12,10,13 +org.apache.lucene.analysis.core.TestTypeTokenFilter=6,7,25,6 +org.apache.lucene.analysis.core.TestUAX29URLEmailTokenizer=788,437,726,745 +org.apache.lucene.analysis.cz.TestCzechAnalyzer=434,376,556,537 +org.apache.lucene.analysis.cz.TestCzechStemmer=22,19,15,17 +org.apache.lucene.analysis.da.TestDanishAnalyzer=3705,3360,4310,2629 +org.apache.lucene.analysis.de.TestGermanAnalyzer=742,653,578,594 +org.apache.lucene.analysis.de.TestGermanLightStemFilter=387,433,686,613 +org.apache.lucene.analysis.de.TestGermanMinimalStemFilter=355,651,748,619 +org.apache.lucene.analysis.de.TestGermanNormalizationFilter=216,449,507,273 +org.apache.lucene.analysis.de.TestGermanStemFilter=3040,2483,2514,2397 +org.apache.lucene.analysis.el.GreekAnalyzerTest=933,990,971,862 +org.apache.lucene.analysis.el.TestGreekStemmer=30,59,36,43 +org.apache.lucene.analysis.en.TestEnglishAnalyzer=1166,762,1395,1258 +org.apache.lucene.analysis.en.TestEnglishMinimalStemFilter=337,397,243,348 +org.apache.lucene.analysis.en.TestKStemmer=928,1149,1222,816 +org.apache.lucene.analysis.en.TestPorterStemFilter=375,325,463,289 +org.apache.lucene.analysis.es.TestSpanishAnalyzer=357,431,434,386 +org.apache.lucene.analysis.es.TestSpanishLightStemFilter=326,451,277,329 +org.apache.lucene.analysis.eu.TestBasqueAnalyzer=585,425,1382,823 +org.apache.lucene.analysis.fa.TestPersianAnalyzer=513,670,485,314 +org.apache.lucene.analysis.fa.TestPersianNormalizationFilter=11,13,13,11 +org.apache.lucene.analysis.fi.TestFinnishAnalyzer=792,900,685,939 +org.apache.lucene.analysis.fi.TestFinnishLightStemFilter=500,839,613,438 +org.apache.lucene.analysis.fr.TestElision=4,5,5,4 +org.apache.lucene.analysis.fr.TestFrenchAnalyzer=668,1074,489,479 +org.apache.lucene.analysis.fr.TestFrenchLightStemFilter=643,418,493,555 +org.apache.lucene.analysis.fr.TestFrenchMinimalStemFilter=3714,2525,4491,2535 +org.apache.lucene.analysis.gl.TestGalicianAnalyzer=918,958,1136,960 +org.apache.lucene.analysis.gl.TestGalicianMinimalStemFilter=673,706,640,369 +org.apache.lucene.analysis.gl.TestGalicianStemFilter=438,322,578,285 +org.apache.lucene.analysis.hi.TestHindiAnalyzer=726,636,654,612 +org.apache.lucene.analysis.hi.TestHindiNormalizer=10,10,12,11 +org.apache.lucene.analysis.hi.TestHindiStemmer=11,9,10,10 +org.apache.lucene.analysis.hu.TestHungarianAnalyzer=742,767,715,697 +org.apache.lucene.analysis.hu.TestHungarianLightStemFilter=572,686,989,735 +org.apache.lucene.analysis.hunspell.HunspellDictionaryTest=12,12,12,14 
+org.apache.lucene.analysis.hunspell.HunspellStemFilterTest=1327,1212,1648,1306 +org.apache.lucene.analysis.hunspell.HunspellStemmerTest=48,19,18,14 +org.apache.lucene.analysis.hy.TestArmenianAnalyzer=1002,487,903,580 +org.apache.lucene.analysis.id.TestIndonesianAnalyzer=421,491,665,536 +org.apache.lucene.analysis.id.TestIndonesianStemmer=13,12,13,14 +org.apache.lucene.analysis.in.TestIndicNormalizer=14,14,14,9 +org.apache.lucene.analysis.it.TestItalianAnalyzer=1176,1091,1033,1204 +org.apache.lucene.analysis.it.TestItalianLightStemFilter=427,268,449,493 +org.apache.lucene.analysis.lv.TestLatvianAnalyzer=655,605,760,451 +org.apache.lucene.analysis.lv.TestLatvianStemmer=35,20,29,26 +org.apache.lucene.analysis.miscellaneous.PatternAnalyzerTest=882,936,1032,832 +org.apache.lucene.analysis.miscellaneous.TestASCIIFoldingFilter=416,454,442,385 +org.apache.lucene.analysis.miscellaneous.TestCapitalizationFilter=294,479,376,593 +org.apache.lucene.analysis.miscellaneous.TestEmptyTokenStream=5,4,6,3 +org.apache.lucene.analysis.miscellaneous.TestHyphenatedWordsFilter=813,427,434,750 +org.apache.lucene.analysis.miscellaneous.TestKeepWordFilter=227,209,158,208 +org.apache.lucene.analysis.miscellaneous.TestKeywordMarkerFilter=7,6,5,5 +org.apache.lucene.analysis.miscellaneous.TestLengthFilter=21,4,5,6 +org.apache.lucene.analysis.miscellaneous.TestLimitTokenCountAnalyzer=129,152,173,190 +org.apache.lucene.analysis.miscellaneous.TestPerFieldAnalzyerWrapper=7,7,6,26 +org.apache.lucene.analysis.miscellaneous.TestPrefixAndSuffixAwareTokenFilter=18,39,26,12 +org.apache.lucene.analysis.miscellaneous.TestPrefixAwareTokenFilter=7,8,5,8 +org.apache.lucene.analysis.miscellaneous.TestRemoveDuplicatesTokenFilter=1223,1192,1311,1130 +org.apache.lucene.analysis.miscellaneous.TestSingleTokenTokenFilter=5,4,20,4 +org.apache.lucene.analysis.miscellaneous.TestStemmerOverrideFilter=4,4,4,4 +org.apache.lucene.analysis.miscellaneous.TestTrimFilter=572,423,380,498 +org.apache.lucene.analysis.miscellaneous.TestWordDelimiterFilter=3187,2709,3407,2942 +org.apache.lucene.analysis.ngram.EdgeNGramTokenFilterTest=3660,4136,3892,2591 +org.apache.lucene.analysis.ngram.EdgeNGramTokenizerTest=1927,2088,2489,1295 +org.apache.lucene.analysis.ngram.NGramTokenFilterTest=5941,4022,5224,5217 +org.apache.lucene.analysis.ngram.NGramTokenizerTest=12649,13480,11157,11566 +org.apache.lucene.analysis.nl.TestDutchStemmer=1493,1169,1590,1109 +org.apache.lucene.analysis.no.TestNorwegianAnalyzer=686,594,827,894 +org.apache.lucene.analysis.path.TestPathHierarchyTokenizer=674,631,649,565 +org.apache.lucene.analysis.path.TestReversePathHierarchyTokenizer=488,376,567,332 +org.apache.lucene.analysis.pattern.TestPatternReplaceCharFilter=1486,1425,1646,1598 +org.apache.lucene.analysis.pattern.TestPatternReplaceFilter=1159,1067,824,1219 +org.apache.lucene.analysis.pattern.TestPatternTokenizer=943,1426,1328,1555 +org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilterTest=55,14,16,19 +org.apache.lucene.analysis.payloads.NumericPayloadTokenFilterTest=5,7,4,5 +org.apache.lucene.analysis.payloads.TokenOffsetPayloadTokenFilterTest=6,4,7,5 +org.apache.lucene.analysis.payloads.TypeAsPayloadTokenFilterTest=46,28,33,35 +org.apache.lucene.analysis.position.PositionFilterTest=17,10,13,14 +org.apache.lucene.analysis.pt.TestPortugueseAnalyzer=497,415,382,407 +org.apache.lucene.analysis.pt.TestPortugueseLightStemFilter=585,664,657,584 +org.apache.lucene.analysis.pt.TestPortugueseMinimalStemFilter=525,504,624,669 
+org.apache.lucene.analysis.pt.TestPortugueseStemFilter=1264,1229,1397,1195 +org.apache.lucene.analysis.query.QueryAutoStopWordAnalyzerTest=206,272,382,330 +org.apache.lucene.analysis.reverse.TestReverseStringFilter=525,278,426,417 +org.apache.lucene.analysis.ro.TestRomanianAnalyzer=966,597,972,684 +org.apache.lucene.analysis.ru.TestRussianAnalyzer=612,725,520,454 +org.apache.lucene.analysis.ru.TestRussianLetterTokenizer=6,7,10,10 +org.apache.lucene.analysis.ru.TestRussianLightStemFilter=438,567,485,602 +org.apache.lucene.analysis.shingle.ShingleAnalyzerWrapperTest=657,645,557,594 +org.apache.lucene.analysis.shingle.ShingleFilterTest=732,782,903,813 +org.apache.lucene.analysis.sinks.DateRecognizerSinkTokenizerTest=40,20,44,15 +org.apache.lucene.analysis.sinks.TestTeeSinkTokenFilter=101,70,174,32 +org.apache.lucene.analysis.sinks.TokenRangeSinkTokenizerTest=3,6,4,6 +org.apache.lucene.analysis.sinks.TokenTypeSinkTokenizerTest=48,10,6,17 +org.apache.lucene.analysis.snowball.TestSnowball=44,97,41,166 +org.apache.lucene.analysis.snowball.TestSnowballVocab=3614,3519,3640,3611 +org.apache.lucene.analysis.sv.TestSwedishAnalyzer=576,1216,1640,697 +org.apache.lucene.analysis.sv.TestSwedishLightStemFilter=372,328,682,377 +org.apache.lucene.analysis.synonym.TestSolrSynonymParser=16,31,15,33 +org.apache.lucene.analysis.synonym.TestSynonymMapFilter=1614,1807,1181,1745 +org.apache.lucene.analysis.synonym.TestWordnetSynonymParser=7,36,35,7 +org.apache.lucene.analysis.th.TestThaiAnalyzer=477,454,489,434 +org.apache.lucene.analysis.tr.TestTurkishAnalyzer=656,857,623,697 +org.apache.lucene.analysis.tr.TestTurkishLowerCaseFilter=7,7,6,7 +org.apache.lucene.analysis.util.TestCharArrayIterator=179,262,165,197 +org.apache.lucene.analysis.util.TestCharArrayMap=70,133,99,92 +org.apache.lucene.analysis.util.TestCharArraySet=43,42,52,41 +org.apache.lucene.analysis.util.TestCharTokenizers=880,1051,979,734 +org.apache.lucene.analysis.util.TestCharacterUtils=17,16,13,13 +org.apache.lucene.analysis.util.TestSegmentingTokenizerBase=705,748,931,1103 +org.apache.lucene.analysis.util.TestWordlistLoader=7,6,13,7 +org.apache.lucene.analysis.wikipedia.WikipediaTokenizerTest=766,706,816,649 +org.apache.lucene.collation.TestCollationKeyAnalyzer=1456,1241,790,206 +org.apache.lucene.collation.TestCollationKeyFilter=478,425,596,668 +org.apache.lucene.analysis.icu.TestICUFoldingFilter=2056,2573,1483,2241 +org.apache.lucene.analysis.icu.TestICUNormalizer2Filter=2187,2335,1831,1592 +org.apache.lucene.analysis.icu.TestICUTransformFilter=2678,4159,3243,3388 +org.apache.lucene.analysis.icu.segmentation.TestCharArrayIterator=26,53,21,19 +org.apache.lucene.analysis.icu.segmentation.TestICUTokenizer=2399,3110,2330,2314 +org.apache.lucene.analysis.icu.segmentation.TestLaoBreakIterator=79,36,111,148 +org.apache.lucene.analysis.icu.segmentation.TestWithCJKBigramFilter=673,707,760,667 +org.apache.lucene.collation.TestICUCollationKeyAnalyzer=1340,681,3304,1934 +org.apache.lucene.collation.TestICUCollationKeyFilter=1100,1118,1489,927 +org.apache.lucene.analysis.kuromoji.SegmenterTest=319,248,268,310 +org.apache.lucene.analysis.kuromoji.TestExtendedMode=1506,1174,1537,1780 +org.apache.lucene.analysis.kuromoji.TestKuromojiAnalyzer=3668,3597,3703,3410 +org.apache.lucene.analysis.kuromoji.TestKuromojiBaseFormFilter=2908,3299,3001,2800 +org.apache.lucene.analysis.kuromoji.TestKuromojiTokenizer=4494,4640,4435,4159 +org.apache.lucene.analysis.kuromoji.TestSearchMode=494,580,501,520 
+org.apache.lucene.analysis.kuromoji.dict.TestTokenInfoDictionary=1406,1319,1249,1180 +org.apache.lucene.analysis.kuromoji.dict.UserDictionaryTest=276,258,257,232 +org.apache.lucene.analysis.kuromoji.util.TestToStringUtil=17,14,9,24 +org.apache.lucene.analysis.morfologik.TestMorfologikAnalyzer=1490,1559,1676,2357 +org.apache.lucene.analysis.phonetic.DoubleMetaphoneFilterTest=1586,1861,1666,1821 +org.apache.lucene.analysis.phonetic.TestBeiderMorseFilter=952,792,1084,945 +org.apache.lucene.analysis.phonetic.TestPhoneticFilter=2858,2913,3046,2568 +org.apache.lucene.analysis.cn.smart.TestSmartChineseAnalyzer=2786,2596,2856,2701 +org.apache.lucene.analysis.pl.TestPolishAnalyzer=1618,1484,1492,1569 +org.egothor.stemmer.TestCompile=239,269,209,186 +org.egothor.stemmer.TestStemmer=199,210,224,227 +org.apache.lucene.benchmark.byTask.TestPerfTasksLogic=6924,7297,7622,7434 +org.apache.lucene.benchmark.byTask.TestPerfTasksParse=173,110,230,139 +org.apache.lucene.benchmark.byTask.feeds.DocMakerTest=319,392,327,275 +org.apache.lucene.benchmark.byTask.feeds.LineDocSourceTest=1097,1408,2193,2224 +org.apache.lucene.benchmark.byTask.feeds.TrecContentSourceTest=114,110,89,116 +org.apache.lucene.benchmark.byTask.feeds.demohtml.TestHtmlParser=261,254,244,282 +org.apache.lucene.benchmark.byTask.tasks.CreateIndexTaskTest=607,591,631,512 +org.apache.lucene.benchmark.byTask.tasks.PerfTaskTest=194,212,224,182 +org.apache.lucene.benchmark.byTask.tasks.SearchWithSortTaskTest=75,109,86,103 +org.apache.lucene.benchmark.byTask.tasks.WriteLineDocTaskTest=443,616,929,364 +org.apache.lucene.benchmark.byTask.tasks.alt.AltPackageTaskTest=83,68,80,104 +org.apache.lucene.benchmark.byTask.utils.StreamUtilsTest=406,412,380,379 +org.apache.lucene.benchmark.byTask.utils.TestConfig=189,190,179,208 +org.apache.lucene.benchmark.quality.TestQualityRun=3117,1965,3256,3455 +org.apache.lucene.facet.enhancements.EnhancementsPayloadIteratorTest=59,40,66,274 +org.apache.lucene.facet.enhancements.TwoEnhancementsTest=115,79,203,89 +org.apache.lucene.facet.enhancements.association.AssociationPropertyTest=8,7,6,8 +org.apache.lucene.facet.enhancements.association.CustomAssociationPropertyTest=33,40,42,28 +org.apache.lucene.facet.enhancements.params.DefaultEnhancementsIndexingParamsTest=4,7,4,5 +org.apache.lucene.facet.example.TestAdaptiveExample=52,30,41,67 +org.apache.lucene.facet.example.TestAssociationExample=234,368,263,249 +org.apache.lucene.facet.example.TestMultiCLExample=167,145,137,271 +org.apache.lucene.facet.example.TestSimpleExample=54,56,71,78 +org.apache.lucene.facet.index.CategoryContainerTest=28,224,33,48 +org.apache.lucene.facet.index.CategoryListPayloadStreamTest=5,5,5,5 +org.apache.lucene.facet.index.FacetsPayloadProcessorProviderTest=593,929,672,587 +org.apache.lucene.facet.index.attributes.CategoryAttributeImplTest=7,10,8,8 +org.apache.lucene.facet.index.attributes.CategoryAttributesIterableTest=4,5,4,5 +org.apache.lucene.facet.index.categorypolicy.OrdinalPolicyTest=370,1572,853,415 +org.apache.lucene.facet.index.categorypolicy.PathPolicyTest=1568,547,899,708 +org.apache.lucene.facet.index.params.CategoryListParamsTest=6,7,7,7 +org.apache.lucene.facet.index.params.DefaultFacetIndexingParamsTest=9,7,7,5 +org.apache.lucene.facet.index.params.PerDimensionIndexingParamsTest=16,17,18,15 +org.apache.lucene.facet.index.streaming.CategoryAttributesStreamTest=9,8,12,9 +org.apache.lucene.facet.index.streaming.CategoryParentsStreamTest=98,129,127,166 +org.apache.lucene.facet.index.streaming.CategoryTokenizerTest=311,115,29,28 
+org.apache.lucene.facet.search.AdaptiveAccumulatorTest=22499,11832,13654,10852 +org.apache.lucene.facet.search.CategoryListIteratorTest=307,413,284,289 +org.apache.lucene.facet.search.DrillDownTest=88,94,148,94 +org.apache.lucene.facet.search.SamplingWrapperTest=23585,14646,14012,15905 +org.apache.lucene.facet.search.TestCategoryListCache=57,58,59,123 +org.apache.lucene.facet.search.TestFacetArrays=3,3,2,3 +org.apache.lucene.facet.search.TestFacetsAccumulatorWithComplement=198,186,249,181 +org.apache.lucene.facet.search.TestMultipleCategoryLists=574,343,326,354 +org.apache.lucene.facet.search.TestScoredDocIdCollector=125,104,59,68 +org.apache.lucene.facet.search.TestTopKInEachNodeResultHandler=2707,605,816,972 +org.apache.lucene.facet.search.TestTopKResultsHandler=298,565,254,1042 +org.apache.lucene.facet.search.TestTopKResultsHandlerRandom=9714,7837,8206,14089 +org.apache.lucene.facet.search.TestTotalFacetCounts=287,1307,202,271 +org.apache.lucene.facet.search.TestTotalFacetCountsCache=1568,3359,3670,3162 +org.apache.lucene.facet.search.association.AssociationsFacetRequestTest=303,112,112,246 +org.apache.lucene.facet.search.params.FacetRequestTest=63,47,43,74 +org.apache.lucene.facet.search.params.FacetSearchParamsTest=32,307,40,45 +org.apache.lucene.facet.search.params.MultiIteratorsPerCLParamsTest=56,56,108,71 +org.apache.lucene.facet.search.sampling.SamplingAccumulatorTest=17217,17997,16760,19828 +org.apache.lucene.facet.taxonomy.TestCategoryPath=296,287,268,268 +org.apache.lucene.facet.taxonomy.TestTaxonomyCombined=1924,2478,1944,2179 +org.apache.lucene.facet.taxonomy.directory.TestAddTaxonomies=592,571,2415,1608 +org.apache.lucene.facet.taxonomy.directory.TestDirectoryTaxonomyReader=905,996,835,943 +org.apache.lucene.facet.taxonomy.directory.TestDirectoryTaxonomyWriter=172,110,89,276 +org.apache.lucene.facet.taxonomy.directory.TestIndexClose=2324,3257,2841,2855 +org.apache.lucene.facet.taxonomy.writercache.cl2o.TestCharBlockArray=1094,1019,1165,1022 +org.apache.lucene.facet.taxonomy.writercache.cl2o.TestCompactLabelToOrdinal=891,813,1331,836 +org.apache.lucene.facet.util.TestScoredDocIDsUtils=1435,1312,1768,993 +org.apache.lucene.util.UnsafeByteArrayInputStreamTest=233,253,272,218 +org.apache.lucene.util.UnsafeByteArrayOutputStreamTest=18,21,18,23 +org.apache.lucene.util.Vint8Test=12,9,12,15 +org.apache.lucene.util.collections.ArrayHashMapTest=97,100,33,89 +org.apache.lucene.util.collections.FloatToObjectMapTest=32,35,31,31 +org.apache.lucene.util.collections.IntArrayTest=6,6,8,12 +org.apache.lucene.util.collections.IntHashSetTest=81,59,62,53 +org.apache.lucene.util.collections.IntToDoubleMapTest=70,70,68,66 +org.apache.lucene.util.collections.IntToIntMapTest=49,42,34,34 +org.apache.lucene.util.collections.IntToObjectMapTest=165,28,73,61 +org.apache.lucene.util.collections.ObjectToFloatMapTest=68,57,79,72 +org.apache.lucene.util.collections.ObjectToIntMapTest=41,36,44,57 +org.apache.lucene.util.collections.TestLRUHashMap=8,4,5,4 +org.apache.lucene.util.encoding.EncodingTest=221,197,174,203 +org.apache.lucene.search.grouping.AllGroupHeadsCollectorTest=3657,3132,4805,4304 +org.apache.lucene.search.grouping.AllGroupsCollectorTest=456,430,426,444 +org.apache.lucene.search.grouping.TestGrouping=5285,5376,5745,5765 +org.apache.lucene.search.join.TestBlockJoin=3159,4003,2733,3472 +org.apache.lucene.search.join.TestJoinUtil=5935,6234,6195,7757 +org.apache.lucene.queries.BooleanFilterTest=908,1097,970,728 +org.apache.lucene.queries.BoostingQueryTest=216,201,213,197 
+org.apache.lucene.queries.ChainedFilterTest=1250,1188,1264,1057 +org.apache.lucene.queries.TermsFilterTest=501,684,516,487 +org.apache.lucene.queries.TestCustomScoreQuery=1913,2408,3202,2145 +org.apache.lucene.queries.function.TestFieldScoreQuery=515,402,500,420 +org.apache.lucene.queries.function.TestOrdValues=563,644,648,670 +org.apache.lucene.queries.mlt.TestMoreLikeThis=104,46,74,103 +org.apache.lucene.queryparser.analyzing.TestAnalyzingQueryParser=24,16,19,26 +org.apache.lucene.queryparser.classic.TestMultiAnalyzer=20,28,20,16 +org.apache.lucene.queryparser.classic.TestMultiFieldQueryParser=270,287,289,181 +org.apache.lucene.queryparser.classic.TestMultiPhraseQueryParsing=6,5,8,11 +org.apache.lucene.queryparser.classic.TestQueryParser=1152,998,1010,1173 +org.apache.lucene.queryparser.complexPhrase.TestComplexPhraseQuery=126,121,93,286 +org.apache.lucene.queryparser.ext.TestExtendableQueryParser=611,526,511,566 +org.apache.lucene.queryparser.ext.TestExtensions=35,66,36,55 +org.apache.lucene.queryparser.flexible.core.builders.TestQueryTreeBuilder=45,8,7,9 +org.apache.lucene.queryparser.flexible.core.nodes.TestQueryNode=210,263,231,222 +org.apache.lucene.queryparser.flexible.messages.TestNLS=56,23,30,33 +org.apache.lucene.queryparser.flexible.precedence.TestPrecedenceQueryParser=387,476,528,392 +org.apache.lucene.queryparser.flexible.spans.TestSpanQueryParser=18,17,46,23 +org.apache.lucene.queryparser.flexible.spans.TestSpanQueryParserSimpleSample=9,27,12,10 +org.apache.lucene.queryparser.flexible.standard.TestMultiAnalyzerQPHelper=23,74,20,22 +org.apache.lucene.queryparser.flexible.standard.TestMultiFieldQPHelper=198,96,96,116 +org.apache.lucene.queryparser.flexible.standard.TestNumericQueryParser=556,462,506,612 +org.apache.lucene.queryparser.flexible.standard.TestQPHelper=763,803,1054,799 +org.apache.lucene.queryparser.surround.query.SrndQueryTest=461,516,420,424 +org.apache.lucene.queryparser.surround.query.Test01Exceptions=302,221,283,220 +org.apache.lucene.queryparser.surround.query.Test02Boolean=396,389,336,338 +org.apache.lucene.queryparser.surround.query.Test03Distance=1024,958,1189,1165 +org.apache.lucene.queryparser.xml.TestParser=1149,1577,1520,1789 +org.apache.lucene.queryparser.xml.TestQueryTemplateManager=401,293,332,279 +org.apache.lucene.queryparser.xml.builders.TestNumericRangeFilterBuilder=93,112,127,115 +org.apache.lucene.queryparser.xml.builders.TestNumericRangeQueryBuilder=62,56,66,65 +org.apache.lucene.search.spell.TestDirectSpellChecker=544,604,516,663 +org.apache.lucene.search.spell.TestJaroWinklerDistance=7,10,8,7 +org.apache.lucene.search.spell.TestLevenshteinDistance=6,10,12,9 +org.apache.lucene.search.spell.TestLuceneDictionary=760,641,666,766 +org.apache.lucene.search.spell.TestNGramDistance=21,21,17,24 +org.apache.lucene.search.spell.TestPlainTextDictionary=356,296,269,324 +org.apache.lucene.search.spell.TestSpellChecker=3390,4686,4593,4629 +org.apache.lucene.search.suggest.LookupBenchmarkTest=14,9,14,12 +org.apache.lucene.search.suggest.PersistenceTest=41,38,43,42 +org.apache.lucene.search.suggest.fst.BytesRefSortersTest=324,331,295,288 +org.apache.lucene.search.suggest.fst.FSTCompletionTest=624,630,702,584 +org.apache.lucene.search.suggest.fst.FloatMagicTest=9,12,15,10 +org.apache.lucene.search.suggest.fst.TestSort=3757,4325,4061,4481 +org.apache.solr.analysis.TestICUCollationKeyFilterFactory=1033,876,858,854 +org.apache.solr.analysis.TestICUCollationKeyRangeQueries=278,273,311,236 +org.apache.solr.analysis.TestICUFoldingFilterFactory=319,291,301,285 
+org.apache.solr.analysis.TestICUNormalizer2FilterFactory=334,323,311,272 +org.apache.solr.analysis.TestICUTokenizerFactory=156,150,156,493 +org.apache.solr.analysis.TestICUTransformFilterFactory=984,935,811,837 +org.apache.solr.analysis.TestSmartChineseFactories=441,474,430,450 +org.apache.solr.analysis.TestStempelPolishStemFilterFactory=235,151,176,157 +org.apache.solr.schema.TestICUCollationField=2491,2367,2309,232 +org.apache.solr.schema.TestICUCollationFieldOptions=2065,1893,1916,1745 +org.apache.solr.handler.extraction.ExtractingRequestHandlerTest=3112,3082,3114,3071 +org.apache.solr.handler.clustering.ClusteringComponentTest=2498,2606,2458,2459 +org.apache.solr.handler.clustering.DistributedClusteringComponentTest=6889,9315,8222,5929 +org.apache.solr.handler.clustering.carrot2.CarrotClusteringEngineTest=2927,3022,2956,2837 +org.apache.solr.handler.dataimport.TestMailEntityProcessor=200,196,203,209 +org.apache.solr.handler.dataimport.TestTikaEntityProcessor=2454,2286,2330,2375 +org.apache.solr.handler.dataimport.TestCachedSqlEntityProcessor=35,32,16,31 +org.apache.solr.handler.dataimport.TestClobTransformer=5,7,5,5 +org.apache.solr.handler.dataimport.TestContentStreamDataSource=1990,2014,1987,2000 +org.apache.solr.handler.dataimport.TestContextImpl=6,5,7,6 +org.apache.solr.handler.dataimport.TestDataConfig=149,146,149,142 +org.apache.solr.handler.dataimport.TestDateFormatTransformer=6,6,5,5 +org.apache.solr.handler.dataimport.TestDocBuilder=17,16,16,16 +org.apache.solr.handler.dataimport.TestDocBuilder2=301,300,292,289 +org.apache.solr.handler.dataimport.TestDocBuilderThreaded=1341,1346,1369,1311 +org.apache.solr.handler.dataimport.TestEntityProcessorBase=5,5,5,4 +org.apache.solr.handler.dataimport.TestEphemeralCache=159,156,150,152 +org.apache.solr.handler.dataimport.TestErrorHandling=261,246,239,237 +org.apache.solr.handler.dataimport.TestEvaluatorBag=6,6,6,7 +org.apache.solr.handler.dataimport.TestFieldReader=18,16,15,16 +org.apache.solr.handler.dataimport.TestFileListEntityProcessor=45,46,46,45 +org.apache.solr.handler.dataimport.TestJdbcDataSource=43,39,39,39 +org.apache.solr.handler.dataimport.TestLineEntityProcessor=7,7,7,6 +org.apache.solr.handler.dataimport.TestNumberFormatTransformer=13,15,13,13 +org.apache.solr.handler.dataimport.TestPlainTextEntityProcessor=7,7,7,7 +org.apache.solr.handler.dataimport.TestRegexTransformer=6,15,10,5 +org.apache.solr.handler.dataimport.TestScriptTransformer=98,43,44,44 +org.apache.solr.handler.dataimport.TestSolrEntityProcessorEndToEnd=7135,7080,6979,7025 +org.apache.solr.handler.dataimport.TestSolrEntityProcessorUnit=9,8,9,9 +org.apache.solr.handler.dataimport.TestSortedMapBackedCache=27,25,26,24 +org.apache.solr.handler.dataimport.TestSqlEntityProcessor=11,9,9,9 +org.apache.solr.handler.dataimport.TestSqlEntityProcessor2=260,258,243,264 +org.apache.solr.handler.dataimport.TestSqlEntityProcessorDelta=579,574,585,581 +org.apache.solr.handler.dataimport.TestSqlEntityProcessorDelta2=612,632,477,460 +org.apache.solr.handler.dataimport.TestSqlEntityProcessorDelta3=328,332,324,320 +org.apache.solr.handler.dataimport.TestSqlEntityProcessorDeltaPrefixedPk=280,269,274,311 +org.apache.solr.handler.dataimport.TestTemplateString=3,3,2,4 +org.apache.solr.handler.dataimport.TestTemplateTransformer=3,3,4,3 +org.apache.solr.handler.dataimport.TestThreaded=179,180,179,178 +org.apache.solr.handler.dataimport.TestURLDataSource=4,6,4,3 +org.apache.solr.handler.dataimport.TestVariableResolver=13,11,13,12 
+org.apache.solr.handler.dataimport.TestXPathEntityProcessor=413,390,397,423 +org.apache.solr.handler.dataimport.TestXPathRecordReader=36,36,35,36 +org.apache.solr.update.processor.LangDetectLanguageIdentifierUpdateProcessorFactoryTest=2033,2090,2026,2078 +org.apache.solr.update.processor.TikaLanguageIdentifierUpdateProcessorFactoryTest=2267,2271,2217,2231 +org.apache.solr.uima.processor.UIMAUpdateRequestProcessorTest=3940,3844,4066,3891 +org.apache.solr.velocity.VelocityResponseWriterTest=975,972,968,978 +org.apache.solr.BasicFunctionalityTest=1500,1334,1232,1102 +org.apache.solr.ConvertedLegacyTest=2435,1872,2131,2620 +org.apache.solr.DisMaxRequestHandlerTest=459,523,446,603 +org.apache.solr.EchoParamsTest=93,93,91,81 +org.apache.solr.MinimalSchemaTest=173,151,162,151 +org.apache.solr.OutputWriterTest=193,374,199,230 +org.apache.solr.SampleTest=220,221,233,302 +org.apache.solr.SolrInfoMBeanTest=1481,434,1136,1501 +org.apache.solr.TestDistributedGrouping=14182,13937,13655,14416 +org.apache.solr.TestDistributedSearch=16279,14823,15468,15004 +org.apache.solr.TestGroupingSearch=4797,5620,5990,6053 +org.apache.solr.TestJoin=7374,7450,7478,6796 +org.apache.solr.TestPluginEnable=71,54,56,58 +org.apache.solr.TestSolrCoreProperties=109,183,86,212 +org.apache.solr.TestTrie=2342,2345,2368,2558 +org.apache.solr.analysis.CommonGramsFilterFactoryTest=21,8,11,20 +org.apache.solr.analysis.CommonGramsQueryFilterFactoryTest=6,7,6,8 +org.apache.solr.analysis.DoubleMetaphoneFilterFactoryTest=11,13,7,12 +org.apache.solr.analysis.LegacyHTMLStripCharFilterTest=194,88,98,159 +org.apache.solr.analysis.LengthFilterTest=6,5,6,7 +org.apache.solr.analysis.SnowballPorterFilterFactoryTest=10,11,12,9 +org.apache.solr.analysis.TestArabicFilters=8,10,11,6 +org.apache.solr.analysis.TestBeiderMorseFilterFactory=331,303,237,330 +org.apache.solr.analysis.TestBrazilianStemFilterFactory=6,4,7,17 +org.apache.solr.analysis.TestBulgarianStemFilterFactory=3,3,4,3 +org.apache.solr.analysis.TestCJKBigramFilterFactory=4,4,3,4 +org.apache.solr.analysis.TestCJKTokenizerFactory=4,9,5,8 +org.apache.solr.analysis.TestCJKWidthFilterFactory=3,5,3,4 +org.apache.solr.analysis.TestCapitalizationFilterFactory=7,12,8,10 +org.apache.solr.analysis.TestChineseFilterFactory=4,3,3,2 +org.apache.solr.analysis.TestChineseTokenizerFactory=6,5,5,6 +org.apache.solr.analysis.TestCollationKeyFilterFactory=118,93,109,109 +org.apache.solr.analysis.TestCollationKeyRangeQueries=149,105,164,105 +org.apache.solr.analysis.TestCzechStemFilterFactory=4,3,3,2 +org.apache.solr.analysis.TestDelimitedPayloadTokenFilterFactory=4,6,4,4 +org.apache.solr.analysis.TestDictionaryCompoundWordTokenFilterFactory=15,5,5,8 +org.apache.solr.analysis.TestElisionFilterFactory=5,8,6,5 +org.apache.solr.analysis.TestEnglishMinimalStemFilterFactory=2,2,3,3 +org.apache.solr.analysis.TestFinnishLightStemFilterFactory=10,6,5,9 +org.apache.solr.analysis.TestFrenchLightStemFilterFactory=7,3,8,8 +org.apache.solr.analysis.TestFrenchMinimalStemFilterFactory=3,6,4,3 +org.apache.solr.analysis.TestGalicianMinimalStemFilterFactory=13,14,14,16 +org.apache.solr.analysis.TestGalicianStemFilterFactory=22,20,27,23 +org.apache.solr.analysis.TestGermanLightStemFilterFactory=5,3,6,3 +org.apache.solr.analysis.TestGermanMinimalStemFilterFactory=4,3,3,3 +org.apache.solr.analysis.TestGermanNormalizationFilterFactory=3,2,3,4 +org.apache.solr.analysis.TestGermanStemFilterFactory=5,4,3,4 +org.apache.solr.analysis.TestGreekLowerCaseFilterFactory=3,2,3,2 +org.apache.solr.analysis.TestGreekStemFilterFactory=6,5,4,5 
+org.apache.solr.analysis.TestHTMLStripCharFilterFactory=51,6,9,6 +org.apache.solr.analysis.TestHindiFilters=12,7,8,8 +org.apache.solr.analysis.TestHungarianLightStemFilterFactory=13,3,7,12 +org.apache.solr.analysis.TestHunspellStemFilterFactory=7,8,7,8 +org.apache.solr.analysis.TestHyphenationCompoundWordTokenFilterFactory=80,79,74,84 +org.apache.solr.analysis.TestIndonesianStemFilterFactory=6,4,3,7 +org.apache.solr.analysis.TestItalianLightStemFilterFactory=2,3,4,3 +org.apache.solr.analysis.TestKStemFilterFactory=103,98,100,109 +org.apache.solr.analysis.TestKeepFilterFactory=7,5,7,5 +org.apache.solr.analysis.TestKeywordMarkerFilterFactory=7,5,8,7 +org.apache.solr.analysis.TestKuromojiBaseFormFilterFactory=385,235,442,485 +org.apache.solr.analysis.TestKuromojiPartOfSpeechStopFilterFactory=206,201,195,216 +org.apache.solr.analysis.TestKuromojiTokenizerFactory=324,299,13,213 +org.apache.solr.analysis.TestLatvianStemFilterFactory=11,8,32,14 +org.apache.solr.analysis.TestLuceneMatchVersion=154,134,217,136 +org.apache.solr.analysis.TestMappingCharFilterFactory=3,3,2,4 +org.apache.solr.analysis.TestMultiWordSynonyms=12,10,11,9 +org.apache.solr.analysis.TestNGramFilters=12,18,12,12 +org.apache.solr.analysis.TestPatternReplaceCharFilterFactory=16,5,8,16 +org.apache.solr.analysis.TestPatternReplaceFilterFactory=6,5,5,6 +org.apache.solr.analysis.TestPatternTokenizerFactory=5,5,5,6 +org.apache.solr.analysis.TestPersianNormalizationFilterFactory=5,5,5,5 +org.apache.solr.analysis.TestPhoneticFilterFactory=20120,5958,14344,15767 +org.apache.solr.analysis.TestPorterStemFilterFactory=18,4,4,6 +org.apache.solr.analysis.TestPortugueseLightStemFilterFactory=8,7,8,8 +org.apache.solr.analysis.TestPortugueseMinimalStemFilterFactory=16,11,22,18 +org.apache.solr.analysis.TestPortugueseStemFilterFactory=8,8,11,15 +org.apache.solr.analysis.TestRemoveDuplicatesTokenFilterFactory=2,3,2,1 +org.apache.solr.analysis.TestReverseStringFilterFactory=3,3,3,2 +org.apache.solr.analysis.TestReversedWildcardFilterFactory=464,393,542,591 +org.apache.solr.analysis.TestRussianFilters=4,5,3,3 +org.apache.solr.analysis.TestRussianLightStemFilterFactory=10,11,8,8 +org.apache.solr.analysis.TestShingleFilterFactory=20,15,14,23 +org.apache.solr.analysis.TestSlowSynonymFilter=18,12,17,14 +org.apache.solr.analysis.TestSpanishLightStemFilterFactory=7,4,8,4 +org.apache.solr.analysis.TestStandardFactories=15,27,19,15 +org.apache.solr.analysis.TestStemmerOverrideFilterFactory=5,7,4,4 +org.apache.solr.analysis.TestStopFilterFactory=4,7,5,5 +org.apache.solr.analysis.TestSwedishLightStemFilterFactory=5,3,8,8 +org.apache.solr.analysis.TestSynonymFilterFactory=17,10,14,10 +org.apache.solr.analysis.TestSynonymMap=8,8,7,8 +org.apache.solr.analysis.TestThaiWordFilterFactory=19,29,28,24 +org.apache.solr.analysis.TestTrimFilterFactory=10,3,6,3 +org.apache.solr.analysis.TestTurkishLowerCaseFilterFactory=4,4,8,4 +org.apache.solr.analysis.TestTypeTokenFilterFactory=12,7,8,7 +org.apache.solr.analysis.TestUAX29URLEmailTokenizerFactory=51,46,48,67 +org.apache.solr.analysis.TestWikipediaTokenizerFactory=7,6,6,9 +org.apache.solr.analysis.TestWordDelimiterFilterFactory=909,900,822,830 +org.apache.solr.cloud.BasicDistributedZkTest=23174,23981,24200,23322 +org.apache.solr.cloud.BasicZkTest=9193,9055,9447,10470 +org.apache.solr.cloud.ChaosMonkeyNothingIsSafeTest=1,0,1,1 +org.apache.solr.cloud.ChaosMonkeySafeLeaderTest=1,0,0,1 +org.apache.solr.cloud.CloudStateTest=5,4,4,3 +org.apache.solr.cloud.CloudStateUpdateTest=11657,12138,13721,13338 
+org.apache.solr.cloud.FullSolrCloudDistribCmdsTest=52827,44720,50511,51658 +org.apache.solr.cloud.FullSolrCloudTest=0,1,1,1 +org.apache.solr.cloud.LeaderElectionIntegrationTest=25451,24310,23188,23681 +org.apache.solr.cloud.LeaderElectionTest=34997,20373,17821,24752 +org.apache.solr.cloud.OverseerTest=29472,28619,29386,29074 +org.apache.solr.cloud.RecoveryZkTest=25932,27672,26705,26491 +org.apache.solr.cloud.TestHashPartitioner=4678,5325,5437,4308 +org.apache.solr.cloud.ZkControllerTest=7251,7097,7069,7015 +org.apache.solr.cloud.ZkNodePropsTest=10,12,9,12 +org.apache.solr.cloud.ZkSolrClientTest=15964,15673,15688,15912 +org.apache.solr.core.AlternateDirectoryTest=396,309,349,472 +org.apache.solr.core.IndexReaderFactoryTest=470,506,635,452 +org.apache.solr.core.PluginInfoTest=24,34,61,34 +org.apache.solr.core.RAMDirectoryFactoryTest=2,2,3,3 +org.apache.solr.core.RequestHandlersTest=409,648,631,717 +org.apache.solr.core.ResourceLoaderTest=24,24,29,25 +org.apache.solr.core.SOLR749Test=299,277,271,341 +org.apache.solr.core.SolrCoreTest=2185,2198,2247,2191 +org.apache.solr.core.TestArbitraryIndexDir=704,441,457,481 +org.apache.solr.core.TestBadConfig=4,6,5,7 +org.apache.solr.core.TestCodecSupport=68,73,71,84 +org.apache.solr.core.TestConfig=109,108,108,103 +org.apache.solr.core.TestCoreContainer=1266,1268,1275,1212 +org.apache.solr.core.TestJmxIntegration=1745,1683,1579,1663 +org.apache.solr.core.TestJmxMonitoredMap=690,76,184,158 +org.apache.solr.core.TestLegacyMergeSchedulerPolicyConfig=310,315,294,393 +org.apache.solr.core.TestMergePolicyConfig=292,332,284,297 +org.apache.solr.core.TestPropInject=711,699,619,723 +org.apache.solr.core.TestPropInjectDefaults=384,418,445,564 +org.apache.solr.core.TestQuerySenderListener=450,306,352,303 +org.apache.solr.core.TestQuerySenderNoQuery=357,327,304,349 +org.apache.solr.core.TestSolrDeletionPolicy1=609,667,606,668 +org.apache.solr.core.TestSolrDeletionPolicy2=1273,364,846,1457 +org.apache.solr.core.TestSolrXMLSerializer=46,64,57,46 +org.apache.solr.core.TestXIncludeConfig=339,298,320,315 +org.apache.solr.handler.BinaryUpdateRequestHandlerTest=414,454,349,401 +org.apache.solr.handler.CSVRequestHandlerTest=1187,377,944,1423 +org.apache.solr.handler.DocumentAnalysisRequestHandlerTest=732,823,727,751 +org.apache.solr.handler.FieldAnalysisRequestHandlerTest=354,363,459,415 +org.apache.solr.handler.JsonLoaderTest=359,410,340,520 +org.apache.solr.handler.MoreLikeThisHandlerTest=2497,2699,2495,2473 +org.apache.solr.handler.StandardRequestHandlerTest=1569,1623,1609,1603 +org.apache.solr.handler.TestCSVLoader=589,540,486,489 +org.apache.solr.handler.TestReplicationHandler=22484,24030,23080,23374 +org.apache.solr.handler.XmlUpdateRequestHandlerTest=569,503,731,612 +org.apache.solr.handler.XsltUpdateRequestHandlerTest=856,863,892,892 +org.apache.solr.handler.admin.CoreAdminHandlerTest=753,871,795,993 +org.apache.solr.handler.admin.LukeRequestHandlerTest=1118,1473,1204,1189 +org.apache.solr.handler.admin.ShowFileRequestHandlerTest=1335,1137,1149,1356 +org.apache.solr.handler.admin.SystemInfoHandlerTest=1,1,1,2 +org.apache.solr.handler.component.BadComponentTest=426,401,526,568 +org.apache.solr.handler.component.DebugComponentTest=503,575,573,821 +org.apache.solr.handler.component.DistributedSpellCheckComponentTest=9117,11526,10039,11602 +org.apache.solr.handler.component.DistributedTermsComponentTest=10076,9811,9628,9013 +org.apache.solr.handler.component.QueryElevationComponentTest=4732,4506,4729,4419 
+org.apache.solr.handler.component.SearchHandlerTest=495,386,531,403 +org.apache.solr.handler.component.SpellCheckComponentTest=5842,4847,3472,3600 +org.apache.solr.handler.component.StatsComponentTest=1715,2068,1992,1701 +org.apache.solr.handler.component.TermVectorComponentTest=484,410,388,375 +org.apache.solr.handler.component.TermsComponentTest=1595,608,1169,2046 +org.apache.solr.highlight.FastVectorHighlighterTest=647,632,609,492 +org.apache.solr.highlight.HighlighterConfigTest=353,344,288,583 +org.apache.solr.highlight.HighlighterTest=1014,851,980,1013 +org.apache.solr.request.JSONWriterTest=427,442,382,731 +org.apache.solr.request.SimpleFacetsTest=3676,4663,4225,3916 +org.apache.solr.request.TestBinaryResponseWriter=664,787,674,977 +org.apache.solr.request.TestFaceting=6576,7198,7028,7421 +org.apache.solr.request.TestRemoteStreaming=892,893,819,1245 +org.apache.solr.request.TestWriterPerf=1503,507,1236,1449 +org.apache.solr.response.TestCSVResponseWriter=352,599,511,405 +org.apache.solr.response.TestPHPSerializedResponseWriter=524,533,767,544 +org.apache.solr.schema.BadIndexSchemaTest=1999,1993,1930,2216 +org.apache.solr.schema.CopyFieldTest=1023,291,814,704 +org.apache.solr.schema.DateFieldTest=16,17,12,10 +org.apache.solr.schema.IndexSchemaRuntimeFieldTest=442,420,368,327 +org.apache.solr.schema.IndexSchemaTest=439,449,470,522 +org.apache.solr.schema.MultiTermTest=460,184,365,495 +org.apache.solr.schema.NotRequiredUniqueKeyTest=123,121,118,121 +org.apache.solr.schema.NumericFieldsTest=257,307,264,244 +org.apache.solr.schema.PolyFieldTest=619,822,894,731 +org.apache.solr.schema.RequiredFieldsTest=463,518,490,531 +org.apache.solr.schema.TestBinaryField=212,224,229,231 +org.apache.solr.schema.TestCollationField=9,296,454,380 +org.apache.solr.schema.TestOmitPositions=554,582,629,823 +org.apache.solr.schema.UUIDFieldTest=253,254,266,286 +org.apache.solr.search.QueryParsingTest=378,345,418,480 +org.apache.solr.search.SpatialFilterTest=644,862,652,855 +org.apache.solr.search.TestDocSet=687,625,601,795 +org.apache.solr.search.TestExtendedDismaxParser=419,477,476,493 +org.apache.solr.search.TestFastLRUCache=43,55,27,57 +org.apache.solr.search.TestFiltering=1695,1762,2065,2350 +org.apache.solr.search.TestFoldingMultitermQuery=701,509,667,509 +org.apache.solr.search.TestIndexSearcher=565,528,620,558 +org.apache.solr.search.TestLFUCache=300,332,292,358 +org.apache.solr.search.TestLRUCache=6,6,6,6 +org.apache.solr.search.TestPseudoReturnFields=1054,1145,1323,1178 +org.apache.solr.search.TestQueryTypes=558,723,610,554 +org.apache.solr.search.TestQueryUtils=486,434,408,539 +org.apache.solr.search.TestRangeQuery=3052,3155,3319,3579 +org.apache.solr.search.TestRealTimeGet=38908,10904,37602,36269 +org.apache.solr.search.TestRecovery=13035,4203,10753,13229 +org.apache.solr.search.TestSearchPerf=205,330,243,197 +org.apache.solr.search.TestSolrQueryParser=444,349,322,549 +org.apache.solr.search.TestSort=2493,2740,2523,2678 +org.apache.solr.search.TestSurroundQueryParser=1635,430,1089,1417 +org.apache.solr.search.TestValueSourceCache=501,499,652,558 +org.apache.solr.search.function.SortByFunctionTest=953,813,978,866 +org.apache.solr.search.function.TestFunctionQuery=2923,3714,2794,5056 +org.apache.solr.search.function.distance.DistanceFunctionTest=894,706,634,596 +org.apache.solr.search.similarities.TestBM25SimilarityFactory=115,131,107,107 +org.apache.solr.search.similarities.TestDFRSimilarityFactory=67,76,112,96 +org.apache.solr.search.similarities.TestDefaultSimilarityFactory=187,75,170,158 
+org.apache.solr.search.similarities.TestIBSimilarityFactory=234,68,122,177 +org.apache.solr.search.similarities.TestLMDirichletSimilarityFactory=1272,1300,1120,1175 +org.apache.solr.search.similarities.TestLMJelinekMercerSimilarityFactory=68,55,57,58 +org.apache.solr.search.similarities.TestPerFieldSimilarity=70,74,71,85 +org.apache.solr.servlet.CacheHeaderTest=682,714,660,824 +org.apache.solr.servlet.DirectSolrConnectionTest=154,168,168,294 +org.apache.solr.servlet.NoCacheHeaderTest=775,770,915,713 +org.apache.solr.servlet.SolrRequestParserTest=1732,1613,1548,2117 +org.apache.solr.spelling.DirectSolrSpellCheckerTest=1679,488,1159,1079 +org.apache.solr.spelling.FileBasedSpellCheckerTest=468,672,574,488 +org.apache.solr.spelling.IndexBasedSpellCheckerTest=1367,1364,1401,1352 +org.apache.solr.spelling.SpellCheckCollatorTest=889,708,912,964 +org.apache.solr.spelling.SpellPossibilityIteratorTest=38,37,33,34 +org.apache.solr.spelling.SpellingQueryConverterTest=6,6,7,9 +org.apache.solr.spelling.suggest.SuggesterFSTTest=2825,2835,2856,2738 +org.apache.solr.spelling.suggest.SuggesterTSTTest=632,855,796,879 +org.apache.solr.spelling.suggest.SuggesterTest=1980,605,1516,1304 +org.apache.solr.update.AutoCommitTest=9271,9145,9368,9550 +org.apache.solr.update.DirectUpdateHandlerOptimizeTest=500,411,403,723 +org.apache.solr.update.DirectUpdateHandlerTest=3903,2586,3609,5868 +org.apache.solr.update.DocumentBuilderTest=966,621,903,1161 +org.apache.solr.update.PeerSyncTest=3435,3504,3263,3222 +org.apache.solr.update.SoftAutoCommitTest=10112,9973,10083,9950 +org.apache.solr.update.SolrCmdDistributorTest=1604,1613,1471,1342 +org.apache.solr.update.TestIndexingPerformance=1260,530,1222,1755 +org.apache.solr.update.UpdateParamsTest=332,354,372,530 +org.apache.solr.update.processor.FieldMutatingUpdateProcessorTest=608,722,527,650 +org.apache.solr.update.processor.SignatureUpdateProcessorFactoryTest=1282,1245,1174,1191 +org.apache.solr.update.processor.UniqFieldsUpdateProcessorFactoryTest=940,365,1044,1078 +org.apache.solr.update.processor.UpdateRequestProcessorFactoryTest=378,332,375,381 +org.apache.solr.util.DateMathParserTest=38,37,42,40 +org.apache.solr.util.PrimUtilsTest=27,29,24,27 +org.apache.solr.util.SolrPluginUtilsTest=517,533,500,522 +org.apache.solr.util.TestNumberUtils=210,250,290,354 +org.apache.solr.util.TestUtils=7,8,8,9 +org.apache.solr.client.solrj.SolrExampleBinaryTest=7643,7512,6962,7091 +org.apache.solr.client.solrj.SolrExceptionTest=367,353,311,358 +org.apache.solr.client.solrj.SolrQueryTest=16,13,16,14 +org.apache.solr.client.solrj.TestBatchUpdate=4209,4045,4004,4079 +org.apache.solr.client.solrj.TestLBHttpSolrServer=8879,9830,9004,7846 +org.apache.solr.client.solrj.beans.TestDocumentObjectBinder=26,31,36,36 +org.apache.solr.client.solrj.embedded.JettyWebappTest=4743,4566,4392,4602 +org.apache.solr.client.solrj.embedded.LargeVolumeBinaryJettyTest=847,976,1062,825 +org.apache.solr.client.solrj.embedded.LargeVolumeEmbeddedTest=1221,1333,1358,1091 +org.apache.solr.client.solrj.embedded.LargeVolumeJettyTest=1319,1275,1364,1296 +org.apache.solr.client.solrj.embedded.MergeIndexesEmbeddedTest=717,775,734,667 +org.apache.solr.client.solrj.embedded.MultiCoreEmbeddedTest=420,415,367,381 +org.apache.solr.client.solrj.embedded.MultiCoreExampleJettyTest=735,952,968,943 +org.apache.solr.client.solrj.embedded.SolrExampleEmbeddedTest=4953,4762,4825,4703 +org.apache.solr.client.solrj.embedded.SolrExampleJettyTest=4491,4580,4337,4405 
+org.apache.solr.client.solrj.embedded.SolrExampleStreamingBinaryTest=9544,9591,9796,9684 +org.apache.solr.client.solrj.embedded.SolrExampleStreamingTest=10177,9896,10353,10507 +org.apache.solr.client.solrj.embedded.TestEmbeddedSolrServer=349,546,621,406 +org.apache.solr.client.solrj.embedded.TestSolrProperties=403,366,400,357 +org.apache.solr.client.solrj.request.TestUpdateRequestCodec=8,6,8,6 +org.apache.solr.client.solrj.response.AnlysisResponseBaseTest=4,5,5,5 +org.apache.solr.client.solrj.response.DocumentAnalysisResponseTest=5,5,6,7 +org.apache.solr.client.solrj.response.FacetFieldTest=4,4,4,6 +org.apache.solr.client.solrj.response.FieldAnalysisResponseTest=7,3,3,4 +org.apache.solr.client.solrj.response.QueryResponseTest=29,40,31,29 +org.apache.solr.client.solrj.response.TermsResponseTest=2499,2741,2620,2502 +org.apache.solr.client.solrj.response.TestSpellCheckResponse=1118,1091,1060,985 +org.apache.solr.client.solrj.util.ClientUtilsTest=4,5,7,2 +org.apache.solr.common.SolrDocumentTest=10,15,19,9 +org.apache.solr.common.params.ModifiableSolrParamsTest=9,9,10,11 +org.apache.solr.common.params.SolrParamTest=217,202,212,222 +org.apache.solr.common.util.ContentStreamTest=220,287,387,183 +org.apache.solr.common.util.DOMUtilTest=16,20,16,18 +org.apache.solr.common.util.FileUtilsTest=3,3,3,4 +org.apache.solr.common.util.IteratorChainTest=11,9,12,14 +org.apache.solr.common.util.NamedListTest=8,7,3,3 +org.apache.solr.common.util.TestFastInputStream=7,6,22,4 +org.apache.solr.common.util.TestHash=44,65,67,53 +org.apache.solr.common.util.TestJavaBinCodec=110,127,295,157 +org.apache.solr.common.util.TestNamedListCodec=958,817,1042,878 +org.apache.solr.common.util.TestSystemIdResolver=4,6,4,4 +org.apache.solr.common.util.TestXMLEscaping=10,9,12,9 diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java index 66fe05b..9a880a9 100644 --- modules/analysis/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java +++ modules/analysis/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java @@ -96,6 +96,6 @@ public class TestArabicAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new ArabicAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(getRandom(), new ArabicAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianAnalyzer.java index 9296e31..8eb7634 100644 --- modules/analysis/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianAnalyzer.java +++ modules/analysis/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianAnalyzer.java @@ -76,6 +76,6 @@ public class TestBulgarianAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new BulgarianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(getRandom(), new BulgarianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemmer.java modules/analysis/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemmer.java index 
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java
index 66fe05b..9a880a9 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java
@@ -96,6 +96,6 @@ public class TestArabicAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new ArabicAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new ArabicAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianAnalyzer.java
index 9296e31..8eb7634 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianAnalyzer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianAnalyzer.java
@@ -76,6 +76,6 @@ public class TestBulgarianAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new BulgarianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new BulgarianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemmer.java modules/analysis/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemmer.java
index 5d3b42e..177af4b 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemmer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemmer.java
@@ -160,6 +160,6 @@ public class TestBrazilianStemmer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new BrazilianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new BrazilianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 }
\ No newline at end of file
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/ca/TestCatalanAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/ca/TestCatalanAnalyzer.java
index 2680819..7435dc0 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/ca/TestCatalanAnalyzer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/ca/TestCatalanAnalyzer.java
@@ -58,6 +58,6 @@ public class TestCatalanAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new CatalanAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new CatalanAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 }
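From here on the hunks are mechanical: every direct read of the shared random field becomes a call to getRandom(). The accessor itself is not part of this patch; assuming the goal is a per-thread Random derived from one reproducible master seed, a plausible shape is sketched below (all names hypothetical).

    // Hypothetical sketch of the accessor these hunks migrate to; the real
    // LuceneTestCase implementation is not shown in this patch.
    import java.util.Random;

    public abstract class RandomizedTestBase {
      // One master seed per run keeps failures reproducible from a reported seed.
      private static final long MASTER_SEED =
          Long.getLong("tests.seed", new Random().nextLong());

      // Each test thread derives its own Random from the master seed, so
      // parallel threads never share unsynchronized Random state.
      private static final ThreadLocal<Random> PER_THREAD = new ThreadLocal<Random>() {
        @Override
        protected Random initialValue() {
          return new Random(MASTER_SEED ^ Thread.currentThread().getId());
        }
      };

      public static Random getRandom() {
        return PER_THREAD.get();
      }
    }

Under that assumption, checkRandomData(getRandom(), ...) stays reproducible from a single seed while remaining safe when suites run on several worker threads.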
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java modules/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java
index 6b89fae..9c694e2 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterTest.java
@@ -508,7 +508,7 @@ public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase {
     };
     int numRounds = RANDOM_MULTIPLIER * 10000;
-    checkRandomData(random, analyzer, numRounds);
+    checkRandomData(getRandom(), analyzer, numRounds);
   }
 
   public void testServerSideIncludes() throws Exception {
@@ -777,7 +777,7 @@ public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase {
 
   public void testRandomBrokenHTML() throws Exception {
     int maxNumElements = 10000;
-    String text = _TestUtil.randomHtmlishString(random, maxNumElements);
+    String text = _TestUtil.randomHtmlishString(getRandom(), maxNumElements);
     Reader reader = new HTMLStripCharFilter
       (CharReader.get(new StringReader(text)));
     while (reader.read() != -1);
@@ -789,11 +789,11 @@ public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase {
     int maxNumWords = 10000;
     int minWordLength = 3;
     int maxWordLength = 20;
-    int numWords = _TestUtil.nextInt(random, minNumWords, maxNumWords);
-    switch (_TestUtil.nextInt(random, 0, 4)) {
+    int numWords = _TestUtil.nextInt(getRandom(), minNumWords, maxNumWords);
+    switch (_TestUtil.nextInt(getRandom(), 0, 4)) {
       case 0: {
         for (int wordNum = 0 ; wordNum < numWords ; ++wordNum) {
-          text.append(_TestUtil.randomUnicodeString(random, maxWordLength));
+          text.append(_TestUtil.randomUnicodeString(getRandom(), maxWordLength));
           text.append(' ');
         }
         break;
@@ -801,14 +801,14 @@ public class HTMLStripCharFilterTest extends BaseTokenStreamTestCase {
       case 1: {
        for (int wordNum = 0 ; wordNum < numWords ; ++wordNum) {
          text.append(_TestUtil.randomRealisticUnicodeString
-             (random, minWordLength, maxWordLength));
+             (getRandom(), minWordLength, maxWordLength));
          text.append(' ');
        }
        break;
      }
      default: { // ASCII 50% of the time
        for (int wordNum = 0 ; wordNum < numWords ; ++wordNum) {
-         text.append(_TestUtil.randomSimpleString(random));
+         text.append(_TestUtil.randomSimpleString(getRandom()));
          text.append(' ');
        }
      }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestMappingCharFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestMappingCharFilter.java
index 9740baf..3c7af77 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestMappingCharFilter.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestMappingCharFilter.java
@@ -188,6 +188,6 @@ public class TestMappingCharFilter extends BaseTokenStreamTestCase {
     };
     int numRounds = RANDOM_MULTIPLIER * 10000;
-    checkRandomData(random, analyzer, numRounds);
+    checkRandomData(getRandom(), analyzer, numRounds);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKAnalyzer.java
index 75d2147..9519512 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKAnalyzer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKAnalyzer.java
@@ -270,6 +270,6 @@ public class TestCJKAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new CJKAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new CJKAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKTokenizer.java modules/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKTokenizer.java
index b391088..88c4135 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKTokenizer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKTokenizer.java
@@ -276,6 +276,6 @@ public class TestCJKTokenizer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new CJKAnalyzer(Version.LUCENE_30), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new CJKAnalyzer(Version.LUCENE_30), 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKWidthFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKWidthFilter.java
index 4a2efe3..58399c6 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKWidthFilter.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKWidthFilter.java
@@ -62,6 +62,6 @@ public class TestCJKWidthFilter extends BaseTokenStreamTestCase {
   }
 
   public void testRandomData() throws IOException {
-    checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), analyzer, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/cn/TestChineseTokenizer.java modules/analysis/common/src/test/org/apache/lucene/analysis/cn/TestChineseTokenizer.java
index 29d1aeb..59542aa 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/cn/TestChineseTokenizer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/cn/TestChineseTokenizer.java
@@ -120,7 +120,7 @@ public class TestChineseTokenizer extends BaseTokenStreamTestCase
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new ChineseAnalyzer(), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new ChineseAnalyzer(), 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/commongrams/CommonGramsFilterTest.java modules/analysis/common/src/test/org/apache/lucene/analysis/commongrams/CommonGramsFilterTest.java
index 56ff278..e21ef0f 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/commongrams/CommonGramsFilterTest.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/commongrams/CommonGramsFilterTest.java
@@ -319,7 +319,7 @@ public class CommonGramsFilterTest extends BaseTokenStreamTestCase {
       }
     };
-    checkRandomData(random, a, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), a, 10000*RANDOM_MULTIPLIER);
 
     Analyzer b = new Analyzer() {
@@ -331,6 +331,6 @@ public class CommonGramsFilterTest extends BaseTokenStreamTestCase {
       }
     };
-    checkRandomData(random, b, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), b, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java
index eba1d1f..7e2348a 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java
@@ -346,7 +346,7 @@ public class TestCompoundWordTokenFilter extends BaseTokenStreamTestCase {
         return new TokenStreamComponents(tokenizer, new DictionaryCompoundWordTokenFilter(TEST_VERSION_CURRENT, tokenizer, dict));
       }
     };
-    checkRandomData(random, a, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), a, 10000*RANDOM_MULTIPLIER);
 
     InputSource is = new InputSource(getClass().getResource("da_UTF8.xml").toExternalForm());
     final HyphenationTree hyphenator = HyphenationCompoundWordTokenFilter.getHyphenationTree(is);
@@ -359,6 +359,6 @@ public class TestCompoundWordTokenFilter extends BaseTokenStreamTestCase {
         return new TokenStreamComponents(tokenizer, filter);
       }
     };
-    checkRandomData(random, b, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), b, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestAnalyzers.java modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestAnalyzers.java
index 3f3974d..9d8ae45 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestAnalyzers.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestAnalyzers.java
@@ -210,9 +210,9 @@ public class TestAnalyzers extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
-    checkRandomData(random, new SimpleAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
-    checkRandomData(random, new StopAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new SimpleAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new StopAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestClassicAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestClassicAnalyzer.java
index 5ad0358..e26b16f 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestClassicAnalyzer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestClassicAnalyzer.java
@@ -313,6 +313,6 @@ public class TestClassicAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new ClassicAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new ClassicAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestDuelingAnalyzers.java modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestDuelingAnalyzers.java
index 8b5c691..e5f79d2 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestDuelingAnalyzers.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestDuelingAnalyzers.java
@@ -63,7 +63,7 @@ public class TestDuelingAnalyzers extends LuceneTestCase {
   }
 
   public void testLetterAscii() throws Exception {
-    Analyzer left = new MockAnalyzer(random, jvmLetter, false);
+    Analyzer left = new MockAnalyzer(getRandom(), jvmLetter, false);
     Analyzer right = new Analyzer() {
       @Override
       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
@@ -72,14 +72,14 @@ public class TestDuelingAnalyzers extends LuceneTestCase {
       }
     };
     for (int i = 0; i < 10000; i++) {
-      String s = _TestUtil.randomSimpleString(random);
+      String s = _TestUtil.randomSimpleString(getRandom());
       assertEquals(s, left.tokenStream("foo", new StringReader(s)),
                    right.tokenStream("foo", new StringReader(s)));
     }
   }
 
   public void testLetterUnicode() throws Exception {
-    Analyzer left = new MockAnalyzer(random, jvmLetter, false);
+    Analyzer left = new MockAnalyzer(getRandom(), jvmLetter, false);
     Analyzer right = new Analyzer() {
       @Override
       protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
@@ -88,7 +88,7 @@ public class TestDuelingAnalyzers extends LuceneTestCase {
       }
     };
     for (int i = 0; i < 10000; i++) {
-      String s = _TestUtil.randomUnicodeString(random);
+      String s = _TestUtil.randomUnicodeString(getRandom());
       assertEquals(s, left.tokenStream("foo", new StringReader(s)),
                    right.tokenStream("foo", new StringReader(s)));
     }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java
index 5f9f656..3560a01 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java
@@ -95,7 +95,7 @@ public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
     writer.close();
 
     IndexReader reader = IndexReader.open(dir);
-    DocsEnum td = _TestUtil.docs(random,
+    DocsEnum td = _TestUtil.docs(getRandom(),
                                  reader,
                                  "partnum",
                                  new BytesRef("Q36"),
@@ -103,7 +103,7 @@ public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
                                  null,
                                  false);
     assertTrue(td.nextDoc() != DocsEnum.NO_MORE_DOCS);
-    td = _TestUtil.docs(random,
+    td = _TestUtil.docs(getRandom(),
                         reader,
                         "partnum",
                         new BytesRef("Q37"),
@@ -125,6 +125,6 @@ public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new KeywordAnalyzer(), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new KeywordAnalyzer(), 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestStandardAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestStandardAnalyzer.java
index fa1a638..947f8bc 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestStandardAnalyzer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestStandardAnalyzer.java
@@ -240,6 +240,6 @@ public class TestStandardAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new StandardAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new StandardAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailTokenizer.java modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailTokenizer.java
index 1af100c..9469ff3 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailTokenizer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailTokenizer.java
@@ -447,6 +447,6 @@ public class TestUAX29URLEmailTokenizer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, a, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), a, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/cz/TestCzechAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/cz/TestCzechAnalyzer.java
index d728bc8..29253f2 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/cz/TestCzechAnalyzer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/cz/TestCzechAnalyzer.java
@@ -70,6 +70,6 @@ public class TestCzechAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new CzechAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new CzechAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/da/TestDanishAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/da/TestDanishAnalyzer.java
index d39409a..56d8761 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/da/TestDanishAnalyzer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/da/TestDanishAnalyzer.java
@@ -51,6 +51,6 @@ public class TestDanishAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new DanishAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new DanishAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanAnalyzer.java
index 0565e7b..4bc35f6 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanAnalyzer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanAnalyzer.java
@@ -66,6 +66,6 @@ public class TestGermanAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new GermanAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new GermanAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanLightStemFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanLightStemFilter.java
index 3c3528d..1f0c8c3 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanLightStemFilter.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanLightStemFilter.java
@@ -47,6 +47,6 @@ public class TestGermanLightStemFilter extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), analyzer, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanMinimalStemFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanMinimalStemFilter.java
index cc1a669..49a0890 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanMinimalStemFilter.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanMinimalStemFilter.java
@@ -59,6 +59,6 @@ public class TestGermanMinimalStemFilter extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), analyzer, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanNormalizationFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanNormalizationFilter.java
index 4bdeaac..9fe1088 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanNormalizationFilter.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanNormalizationFilter.java
@@ -63,6 +63,6 @@ public class TestGermanNormalizationFilter extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), analyzer, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanStemFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanStemFilter.java
index 27e9a84..d3361ab 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanStemFilter.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanStemFilter.java
@@ -59,6 +59,6 @@ public class TestGermanStemFilter extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), analyzer, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/el/GreekAnalyzerTest.java modules/analysis/common/src/test/org/apache/lucene/analysis/el/GreekAnalyzerTest.java
index 88e924e..8db93bf 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/el/GreekAnalyzerTest.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/el/GreekAnalyzerTest.java
@@ -90,6 +90,6 @@ public class GreekAnalyzerTest extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new GreekAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new GreekAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestEnglishAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestEnglishAnalyzer.java
index baf3ba7..41df759 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestEnglishAnalyzer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestEnglishAnalyzer.java
@@ -55,6 +55,6 @@ public class TestEnglishAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new EnglishAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new EnglishAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestEnglishMinimalStemFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestEnglishMinimalStemFilter.java
index 43c2696..5627632 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestEnglishMinimalStemFilter.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestEnglishMinimalStemFilter.java
@@ -53,6 +53,6 @@ public class TestEnglishMinimalStemFilter extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), analyzer, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestKStemmer.java modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestKStemmer.java
index 3449f81..6c27a22 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestKStemmer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestKStemmer.java
@@ -40,7 +40,7 @@ public class TestKStemmer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, a, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), a, 10000*RANDOM_MULTIPLIER);
   }
 
   /**
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestPorterStemFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestPorterStemFilter.java
index 0aec8d6..acad007 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestPorterStemFilter.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestPorterStemFilter.java
@@ -62,6 +62,6 @@ public class TestPorterStemFilter extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, a, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), a, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/es/TestSpanishAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/es/TestSpanishAnalyzer.java
index e4fd9f6..4035aad 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/es/TestSpanishAnalyzer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/es/TestSpanishAnalyzer.java
@@ -51,6 +51,6 @@ public class TestSpanishAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new SpanishAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new SpanishAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/es/TestSpanishLightStemFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/es/TestSpanishLightStemFilter.java
index daaca46..e0d8187 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/es/TestSpanishLightStemFilter.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/es/TestSpanishLightStemFilter.java
@@ -47,6 +47,6 @@ public class TestSpanishLightStemFilter extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), analyzer, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/eu/TestBasqueAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/eu/TestBasqueAnalyzer.java
index 8461d54..798bc8a 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/eu/TestBasqueAnalyzer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/eu/TestBasqueAnalyzer.java
@@ -51,6 +51,6 @@ public class TestBasqueAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new BasqueAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new BasqueAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/fa/TestPersianAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/fa/TestPersianAnalyzer.java
index 9e4022a..a66e870 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/fa/TestPersianAnalyzer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/fa/TestPersianAnalyzer.java
@@ -224,6 +224,6 @@ public class TestPersianAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new PersianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new PersianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/fi/TestFinnishAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/fi/TestFinnishAnalyzer.java
index 0c24d8b..dc7859a 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/fi/TestFinnishAnalyzer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/fi/TestFinnishAnalyzer.java
@@ -51,6 +51,6 @@ public class TestFinnishAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new FinnishAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new FinnishAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/fi/TestFinnishLightStemFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/fi/TestFinnishLightStemFilter.java
index 5cd6455..f16fe66 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/fi/TestFinnishLightStemFilter.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/fi/TestFinnishLightStemFilter.java
@@ -47,6 +47,6 @@ public class TestFinnishLightStemFilter extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), analyzer, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchAnalyzer.java
index c395246..69e8aa3 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchAnalyzer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchAnalyzer.java
@@ -262,6 +262,6 @@ public class TestFrenchAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new FrenchAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new FrenchAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchLightStemFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchLightStemFilter.java
index 57eb8ad..bb1a82c 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchLightStemFilter.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchLightStemFilter.java
@@ -161,6 +161,6 @@ public class TestFrenchLightStemFilter extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), analyzer, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchMinimalStemFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchMinimalStemFilter.java
index e6fb11f..b33ced0 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchMinimalStemFilter.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchMinimalStemFilter.java
@@ -61,6 +61,6 @@ public class TestFrenchMinimalStemFilter extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), analyzer, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/gl/TestGalicianAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/gl/TestGalicianAnalyzer.java
index 8f9ea8e..889e8ea 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/gl/TestGalicianAnalyzer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/gl/TestGalicianAnalyzer.java
@@ -51,6 +51,6 @@ public class TestGalicianAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new GalicianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new GalicianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/gl/TestGalicianMinimalStemFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/gl/TestGalicianMinimalStemFilter.java
index f8be16e..8e85738 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/gl/TestGalicianMinimalStemFilter.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/gl/TestGalicianMinimalStemFilter.java
@@ -50,6 +50,6 @@ public class TestGalicianMinimalStemFilter extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, a, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), a, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/hi/TestHindiAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/hi/TestHindiAnalyzer.java
index 393226d..5fb5d8f 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/hi/TestHindiAnalyzer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/hi/TestHindiAnalyzer.java
@@ -47,6 +47,6 @@ public class TestHindiAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new HindiAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new HindiAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/hu/TestHungarianAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/hu/TestHungarianAnalyzer.java
index 5f7e223..da52aed 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/hu/TestHungarianAnalyzer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/hu/TestHungarianAnalyzer.java
@@ -51,6 +51,6 @@ public class TestHungarianAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new HungarianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new HungarianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/hunspell/HunspellStemFilterTest.java modules/analysis/common/src/test/org/apache/lucene/analysis/hunspell/HunspellStemFilterTest.java
index a5cb1ef..8d5f508 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/hunspell/HunspellStemFilterTest.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/hunspell/HunspellStemFilterTest.java
@@ -71,6 +71,6 @@ public class HunspellStemFilterTest extends BaseTokenStreamTestCase {
         return new TokenStreamComponents(tokenizer, new HunspellStemFilter(tokenizer, DICTIONARY));
       }
     };
-    checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), analyzer, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/hy/TestArmenianAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/hy/TestArmenianAnalyzer.java
index 7bb72c7..f9434dc 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/hy/TestArmenianAnalyzer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/hy/TestArmenianAnalyzer.java
@@ -51,6 +51,6 @@ public class TestArmenianAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new ArmenianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new ArmenianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/id/TestIndonesianAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/id/TestIndonesianAnalyzer.java
index 0967ed6..57bfcfc 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/id/TestIndonesianAnalyzer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/id/TestIndonesianAnalyzer.java
@@ -51,6 +51,6 @@ public class TestIndonesianAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new IndonesianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new IndonesianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/it/TestItalianAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/it/TestItalianAnalyzer.java
index 079ce8f..c100507 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/it/TestItalianAnalyzer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/it/TestItalianAnalyzer.java
@@ -54,7 +54,7 @@ public class TestItalianAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new ItalianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new ItalianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 
   /** test that the elisionfilter is working */
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/it/TestItalianLightStemFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/it/TestItalianLightStemFilter.java
index 90f9616..a581f68 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/it/TestItalianLightStemFilter.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/it/TestItalianLightStemFilter.java
@@ -47,6 +47,6 @@ public class TestItalianLightStemFilter extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), analyzer, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/lv/TestLatvianAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/lv/TestLatvianAnalyzer.java
index 2f7ff13..69c0ad4 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/lv/TestLatvianAnalyzer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/lv/TestLatvianAnalyzer.java
@@ -51,6 +51,6 @@ public class TestLatvianAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new LatvianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new LatvianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/PatternAnalyzerTest.java modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/PatternAnalyzerTest.java
index 3b1b229..11bfa0b 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/PatternAnalyzerTest.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/PatternAnalyzerTest.java
@@ -152,7 +152,7 @@ public class PatternAnalyzerTest extends BaseTokenStreamTestCase {
     try {
       Thread.getDefaultUncaughtExceptionHandler();
-      checkRandomData(random, a, 10000*RANDOM_MULTIPLIER);
+      checkRandomData(getRandom(), a, 10000*RANDOM_MULTIPLIER);
     } catch (ArrayIndexOutOfBoundsException ex) {
       assumeTrue("not failing due to jre bug ", !isJREBug7104012(ex));
       throw ex; // otherwise rethrow
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestASCIIFoldingFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestASCIIFoldingFilter.java
index 27e8d05..c41a1fd 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestASCIIFoldingFilter.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestASCIIFoldingFilter.java
@@ -1921,6 +1921,6 @@ public class TestASCIIFoldingFilter extends BaseTokenStreamTestCase {
         return new TokenStreamComponents(tokenizer, new ASCIIFoldingFilter(tokenizer));
       }
     };
-    checkRandomData(random, a, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), a, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestCapitalizationFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestCapitalizationFilter.java
index 95f3e6a..d779917 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestCapitalizationFilter.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestCapitalizationFilter.java
@@ -131,6 +131,6 @@ public class TestCapitalizationFilter extends BaseTokenStreamTestCase {
       }
     };
 
-    checkRandomData(random, a, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), a, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestHyphenatedWordsFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestHyphenatedWordsFilter.java
index 54931a8..c246688 100755
--- modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestHyphenatedWordsFilter.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestHyphenatedWordsFilter.java
@@ -72,6 +72,6 @@ public class TestHyphenatedWordsFilter extends BaseTokenStreamTestCase {
       }
     };
 
-    checkRandomData(random, a, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), a, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeepWordFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeepWordFilter.java
index 667617a..749e583 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeepWordFilter.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeepWordFilter.java
@@ -77,6 +77,6 @@ public class TestKeepWordFilter extends BaseTokenStreamTestCase {
       }
     };
 
-    checkRandomData(random, a, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), a, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java
index 61f6b7a..bbdaa43 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java
@@ -50,7 +50,7 @@ public class TestLimitTokenCountAnalyzer extends BaseTokenStreamTestCase {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
-      TEST_VERSION_CURRENT, new LimitTokenCountAnalyzer(new MockAnalyzer(random), 100000)));
+      TEST_VERSION_CURRENT, new LimitTokenCountAnalyzer(new MockAnalyzer(getRandom()), 100000)));
 
     Document doc = new Document();
     StringBuilder b = new StringBuilder();
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestRemoveDuplicatesTokenFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestRemoveDuplicatesTokenFilter.java
index 2effc37..6342d4b 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestRemoveDuplicatesTokenFilter.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestRemoveDuplicatesTokenFilter.java
@@ -127,7 +127,7 @@ public class TestRemoveDuplicatesTokenFilter extends BaseTokenStreamTestCase {
   // some helper methods for the below test with synonyms
   private String randomNonEmptyString() {
     while(true) {
-      final String s = _TestUtil.randomUnicodeString(random).trim();
+      final String s = _TestUtil.randomUnicodeString(getRandom()).trim();
       if (s.length() != 0 && s.indexOf('\u0000') == -1) {
         return s;
       }
@@ -144,13 +144,13 @@ public class TestRemoveDuplicatesTokenFilter extends BaseTokenStreamTestCase {
   public void testRandomStrings() throws Exception {
     final int numIters = atLeast(10);
     for (int i = 0; i < numIters; i++) {
-      SynonymMap.Builder b = new SynonymMap.Builder(random.nextBoolean());
+      SynonymMap.Builder b = new SynonymMap.Builder(getRandom().nextBoolean());
       final int numEntries = atLeast(10);
       for (int j = 0; j < numEntries; j++) {
-        add(b, randomNonEmptyString(), randomNonEmptyString(), random.nextBoolean());
+        add(b, randomNonEmptyString(), randomNonEmptyString(), getRandom().nextBoolean());
       }
       final SynonymMap map = b.build();
-      final boolean ignoreCase = random.nextBoolean();
+      final boolean ignoreCase = getRandom().nextBoolean();
       final Analyzer analyzer = new Analyzer() {
         @Override
@@ -161,7 +161,7 @@ public class TestRemoveDuplicatesTokenFilter extends BaseTokenStreamTestCase {
         }
       };
 
-      checkRandomData(random, analyzer, 1000*RANDOM_MULTIPLIER);
+      checkRandomData(getRandom(), analyzer, 1000*RANDOM_MULTIPLIER);
     }
   }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilter.java
index 5c53da0..157d421 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilter.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilter.java
@@ -118,7 +118,7 @@ public class TestTrimFilter extends BaseTokenStreamTestCase {
         return new TokenStreamComponents(tokenizer, new TrimFilter(tokenizer, false));
       }
     };
-    checkRandomData(random, a, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), a, 10000*RANDOM_MULTIPLIER);
 
     Analyzer b = new Analyzer() {
@@ -128,6 +128,6 @@ public class TestTrimFilter extends BaseTokenStreamTestCase {
         return new TokenStreamComponents(tokenizer, new TrimFilter(tokenizer, true));
       }
     };
-    checkRandomData(random, b, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), b, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java
index 4f64510..ddb05cd 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java
@@ -303,9 +303,9 @@ public class TestWordDelimiterFilter extends BaseTokenStreamTestCase {
   public void testRandomStrings() throws Exception {
     int numIterations = atLeast(5);
     for (int i = 0; i < numIterations; i++) {
-      final int flags = random.nextInt(512);
+      final int flags = getRandom().nextInt(512);
       final CharArraySet protectedWords;
-      if (random.nextBoolean()) {
+      if (getRandom().nextBoolean()) {
        protectedWords = new CharArraySet(TEST_VERSION_CURRENT, new HashSet(Arrays.asList("a", "b", "cd")), false);
      } else {
        protectedWords = null;
@@ -319,7 +319,7 @@ public class TestWordDelimiterFilter extends BaseTokenStreamTestCase {
          return new TokenStreamComponents(tokenizer, new WordDelimiterFilter(tokenizer, flags, protectedWords));
        }
      };
-      checkRandomData(random, a, 10000*RANDOM_MULTIPLIER);
+      checkRandomData(getRandom(), a, 10000*RANDOM_MULTIPLIER);
     }
   }
 }
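One detail in the TestWordDelimiterFilter hunk above deserves a note: flags = getRandom().nextInt(512) works because WordDelimiterFilter packs nine boolean options into an int bitmask, so the 512 values enumerate every combination. For comparison, a fixed configuration would look like the sketch below (flag constants from WordDelimiterFilter's public API; the wrapper class itself is invented for illustration and is not part of this patch).

    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter;

    public class WdfFlagsExample {
      // One fixed point in the 2^9 configuration space the test fuzzes at random.
      public static TokenStream wrap(Tokenizer tokenizer) {
        int flags = WordDelimiterFilter.GENERATE_WORD_PARTS
                  | WordDelimiterFilter.SPLIT_ON_CASE_CHANGE
                  | WordDelimiterFilter.PRESERVE_ORIGINAL;
        return new WordDelimiterFilter(tokenizer, flags, null); // null = no protected words
      }
    }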
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java
index e46fd52..7931f2a 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java
@@ -140,7 +140,7 @@ public class EdgeNGramTokenFilterTest extends BaseTokenStreamTestCase {
           new EdgeNGramTokenFilter(tokenizer, EdgeNGramTokenFilter.Side.FRONT, 2, 15));
       }
     };
-    checkRandomData(random, a, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), a, 10000*RANDOM_MULTIPLIER);
 
     Analyzer b = new Analyzer() {
       @Override
@@ -150,6 +150,6 @@ public class EdgeNGramTokenFilterTest extends BaseTokenStreamTestCase {
           new EdgeNGramTokenFilter(tokenizer, EdgeNGramTokenFilter.Side.BACK, 2, 15));
       }
     };
-    checkRandomData(random, b, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), b, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java
index 5d4976f..c48c783 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java
@@ -109,7 +109,7 @@ public class EdgeNGramTokenizerTest extends BaseTokenStreamTestCase {
         return new TokenStreamComponents(tokenizer, tokenizer);
       }
     };
-    checkRandomData(random, a, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), a, 10000*RANDOM_MULTIPLIER);
 
     Analyzer b = new Analyzer() {
       @Override
@@ -118,6 +118,6 @@ public class EdgeNGramTokenizerTest extends BaseTokenStreamTestCase {
         return new TokenStreamComponents(tokenizer, tokenizer);
       }
     };
-    checkRandomData(random, b, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), b, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java
index ec93076..065e89d 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java
@@ -130,6 +130,6 @@ public class NGramTokenFilterTest extends BaseTokenStreamTestCase {
           new NGramTokenFilter(tokenizer, 2, 15));
       }
     };
-    checkRandomData(random, a, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), a, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java
index 49e00a8..2ad9573 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java
@@ -98,6 +98,6 @@ public class NGramTokenizerTest extends BaseTokenStreamTestCase {
         return new TokenStreamComponents(tokenizer, tokenizer);
       }
     };
-    checkRandomData(random, a, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), a, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/nl/TestDutchStemmer.java modules/analysis/common/src/test/org/apache/lucene/analysis/nl/TestDutchStemmer.java
index cd91cbc..f099640 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/nl/TestDutchStemmer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/nl/TestDutchStemmer.java
@@ -194,7 +194,7 @@ public class TestDutchStemmer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new DutchAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new DutchAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 }
\ No newline at end of file
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianAnalyzer.java
index acf6e0b9..0d51136 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianAnalyzer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianAnalyzer.java
@@ -51,6 +51,6 @@ public class TestNorwegianAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new NorwegianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new NorwegianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/path/TestPathHierarchyTokenizer.java modules/analysis/common/src/test/org/apache/lucene/analysis/path/TestPathHierarchyTokenizer.java
index 41b78c5..a2c4344 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/path/TestPathHierarchyTokenizer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/path/TestPathHierarchyTokenizer.java
@@ -206,6 +206,6 @@ public class TestPathHierarchyTokenizer extends BaseTokenStreamTestCase {
         return new TokenStreamComponents(tokenizer, tokenizer);
       }
     };
-    checkRandomData(random, a, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), a, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/path/TestReversePathHierarchyTokenizer.java modules/analysis/common/src/test/org/apache/lucene/analysis/path/TestReversePathHierarchyTokenizer.java
index ada772a..9b12ca1 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/path/TestReversePathHierarchyTokenizer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/path/TestReversePathHierarchyTokenizer.java
@@ -168,6 +168,6 @@ public class TestReversePathHierarchyTokenizer extends BaseTokenStreamTestCase {
         return new TokenStreamComponents(tokenizer, tokenizer);
       }
     };
-    checkRandomData(random, a, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), a, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceCharFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceCharFilter.java
index f05c5aa..e4db501 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceCharFilter.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceCharFilter.java
@@ -190,6 +190,6 @@ public class TestPatternReplaceCharFilter extends BaseTokenStreamTestCase {
         return new PatternReplaceCharFilter(Pattern.compile("a"), "b", CharReader.get(reader));
       }
     };
-    checkRandomData(random, a, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), a, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceFilter.java
index 3efb23f..0a03a54 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceFilter.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceFilter.java
@@ -91,7 +91,7 @@ public class TestPatternReplaceFilter extends BaseTokenStreamTestCase {
         return new TokenStreamComponents(tokenizer, filter);
       }
     };
-    checkRandomData(random, a, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), a, 10000*RANDOM_MULTIPLIER);
 
     Analyzer b = new Analyzer() {
       @Override
@@ -101,7 +101,7 @@ public class TestPatternReplaceFilter extends BaseTokenStreamTestCase {
         return new TokenStreamComponents(tokenizer, filter);
      }
     };
-    checkRandomData(random, b, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), b, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternTokenizer.java modules/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternTokenizer.java
index 66c0881..52de629 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternTokenizer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternTokenizer.java
@@ -137,7 +137,7 @@ public class TestPatternTokenizer extends BaseTokenStreamTestCase
         return new TokenStreamComponents(tokenizer, tokenizer);
       }
     };
-    checkRandomData(random, a, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), a, 10000*RANDOM_MULTIPLIER);
 
     Analyzer b = new Analyzer() {
       @Override
@@ -151,6 +151,6 @@ public class TestPatternTokenizer extends BaseTokenStreamTestCase
         return new TokenStreamComponents(tokenizer, tokenizer);
       }
     };
-    checkRandomData(random, b, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), b, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseAnalyzer.java
index 8c96b2b..eabff55 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseAnalyzer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseAnalyzer.java
@@ -51,6 +51,6 @@ public class TestPortugueseAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new PortugueseAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new PortugueseAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseLightStemFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseLightStemFilter.java
index a5b6ec2..ceac462 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseLightStemFilter.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseLightStemFilter.java
@@ -94,6 +94,6 @@ public class TestPortugueseLightStemFilter extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), analyzer, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseMinimalStemFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseMinimalStemFilter.java
index 1e6afe8..b2b50a9 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseMinimalStemFilter.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseMinimalStemFilter.java
@@ -68,6 +68,6 @@ public class TestPortugueseMinimalStemFilter extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), analyzer, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseStemFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseStemFilter.java
index c71c8d6..22b1e4a 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseStemFilter.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseStemFilter.java
@@ -68,6 +68,6 @@ public class TestPortugueseStemFilter extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
  public void testRandomStrings() throws Exception {
-    checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), analyzer, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java modules/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java
index 47b0f11..c4dab62 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java
@@ -41,7 +41,7 @@ public class QueryAutoStopWordAnalyzerTest extends BaseTokenStreamTestCase {
   public void setUp() throws Exception {
     super.setUp();
     dir = new RAMDirectory();
-    appAnalyzer = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
+    appAnalyzer = new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false);
     IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, appAnalyzer));
     int numDocs = 200;
     for (int i = 0; i < numDocs; i++) {
@@ -132,7 +132,7 @@ public class QueryAutoStopWordAnalyzerTest extends BaseTokenStreamTestCase {
   public void testTokenStream() throws Exception {
     QueryAutoStopWordAnalyzer a = new QueryAutoStopWordAnalyzer(
       TEST_VERSION_CURRENT,
-      new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), reader, 10);
+      new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false), reader, 10);
     TokenStream ts = a.tokenStream("repetitiveField", new StringReader("this boring"));
     assertTokenStreamContents(ts, new String[] { "this" });
   }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilter.java
index 15cf33f..38552bd 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilter.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilter.java
@@ -109,6 +109,6 @@ public class TestReverseStringFilter extends BaseTokenStreamTestCase {
         return new TokenStreamComponents(tokenizer, new ReverseStringFilter(TEST_VERSION_CURRENT, tokenizer));
       }
     };
-    checkRandomData(random, a, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), a, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/ro/TestRomanianAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/ro/TestRomanianAnalyzer.java
index 10bfa43..562b8a1 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/ro/TestRomanianAnalyzer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/ro/TestRomanianAnalyzer.java
@@ -51,6 +51,6 @@ public class TestRomanianAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new RomanianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new RomanianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java
index b0534e8..0e5b069 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java
@@ -67,6 +67,6 @@ public class TestRussianAnalyzer extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, new RussianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), new RussianAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianLightStemFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianLightStemFilter.java
index 015a772..eec8692 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianLightStemFilter.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianLightStemFilter.java
@@ -47,6 +47,6 @@ public class TestRussianLightStemFilter extends BaseTokenStreamTestCase {
 
   /** blast some random strings through the analyzer */
   public void testRandomStrings() throws Exception {
-    checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER);
+    checkRandomData(getRandom(), analyzer, 10000*RANDOM_MULTIPLIER);
   }
 }
diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java
index a3c4878..521f548 100644
--- modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java
@@ -55,7 +55,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase {
   @Override
   public void setUp() throws Exception {
     super.setUp();
-    analyzer = new ShingleAnalyzerWrapper(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), 2);
+    analyzer = new ShingleAnalyzerWrapper(new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false), 2);
     directory = newDirectory();
     IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
@@ -142,7 +142,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase {
   }
 
   public void testReusableTokenStream() throws Exception {
-    Analyzer a = new ShingleAnalyzerWrapper(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), 2);
+    Analyzer a
= new ShingleAnalyzerWrapper(new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false), 2); assertAnalyzesToReuse(a, "please divide into shingles", new String[] { "please", "please divide", "divide", "divide into", "into", "into shingles", "shingles" }, new int[] { 0, 0, 7, 7, 14, 14, 19 }, @@ -157,7 +157,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase { public void testNonDefaultMinShingleSize() throws Exception { ShingleAnalyzerWrapper analyzer - = new ShingleAnalyzerWrapper(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), 3, 4); + = new ShingleAnalyzerWrapper(new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false), 3, 4); assertAnalyzesToReuse(analyzer, "please divide this sentence into shingles", new String[] { "please", "please divide this", "please divide this sentence", "divide", "divide this sentence", "divide this sentence into", @@ -170,7 +170,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase { new int[] { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1 }); analyzer = new ShingleAnalyzerWrapper( - new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), 3, 4, ShingleFilter.TOKEN_SEPARATOR, false, false); + new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false), 3, 4, ShingleFilter.TOKEN_SEPARATOR, false, false); assertAnalyzesToReuse(analyzer, "please divide this sentence into shingles", new String[] { "please divide this", "please divide this sentence", "divide this sentence", "divide this sentence into", @@ -183,7 +183,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase { public void testNonDefaultMinAndSameMaxShingleSize() throws Exception { ShingleAnalyzerWrapper analyzer - = new ShingleAnalyzerWrapper(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), 3, 3); + = new ShingleAnalyzerWrapper(new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false), 3, 3); assertAnalyzesToReuse(analyzer, "please divide this sentence into shingles", new String[] { "please", "please divide this", "divide", "divide this sentence", @@ -196,7 +196,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase { new int[] { 1, 0, 1, 0, 1, 0, 1, 0, 1, 1 }); analyzer = new ShingleAnalyzerWrapper( - new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), 3, 3, ShingleFilter.TOKEN_SEPARATOR, false, false); + new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false), 3, 3, ShingleFilter.TOKEN_SEPARATOR, false, false); assertAnalyzesToReuse(analyzer, "please divide this sentence into shingles", new String[] { "please divide this", "divide this sentence", @@ -209,7 +209,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase { public void testNoTokenSeparator() throws Exception { ShingleAnalyzerWrapper analyzer = new ShingleAnalyzerWrapper( - new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), + new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false), ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE, ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE, "", true, false); @@ -223,7 +223,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase { new int[] { 1, 0, 1, 0, 1, 0, 1 }); analyzer = new ShingleAnalyzerWrapper( - new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), + new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false), ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE, ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE, "", false, false); @@ -238,7 +238,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase { public 
void testNullTokenSeparator() throws Exception { ShingleAnalyzerWrapper analyzer = new ShingleAnalyzerWrapper( - new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), + new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false), ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE, ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE, null, true, false); @@ -252,7 +252,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase { new int[] { 1, 0, 1, 0, 1, 0, 1 }); analyzer = new ShingleAnalyzerWrapper( - new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), + new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false), ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE, ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE, "", false, false); @@ -266,7 +266,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase { } public void testAltTokenSeparator() throws Exception { ShingleAnalyzerWrapper analyzer = new ShingleAnalyzerWrapper( - new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), + new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false), ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE, ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE, "<SEP>", true, false); @@ -280,7 +280,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase { new int[] { 1, 0, 1, 0, 1, 0, 1 }); analyzer = new ShingleAnalyzerWrapper( - new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), + new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false), ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE, ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE, "<SEP>", false, false); @@ -295,7 +295,7 @@ public class ShingleAnalyzerWrapperTest extends BaseTokenStreamTestCase { public void testOutputUnigramsIfNoShinglesSingleToken() throws Exception { ShingleAnalyzerWrapper analyzer = new ShingleAnalyzerWrapper( - new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), + new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false), ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE, ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE, "", false, true); diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java index a288864..e4d4579 100644 --- modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java +++ modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java @@ -1142,6 +1142,6 @@ public class ShingleFilterTest extends BaseTokenStreamTestCase { return new TokenStreamComponents(tokenizer, new ShingleFilter(tokenizer)); } }; - checkRandomData(random, a, 10000*RANDOM_MULTIPLIER); + checkRandomData(getRandom(), a, 10000*RANDOM_MULTIPLIER); } } diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java index 01a7430..69b8665 100644 --- modules/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java +++ modules/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java @@ -86,7 +86,7 @@ public class TestTeeSinkTokenFilter extends BaseTokenStreamTestCase { // with BaseTokenStreamTestCase now... 
public void testEndOffsetPositionWithTeeSinkTokenFilter() throws Exception { Directory dir = newDirectory(); - Analyzer analyzer = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false); + Analyzer analyzer = new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)); Document doc = new Document(); TokenStream tokenStream = analyzer.tokenStream("field", new StringReader("abcd ")); diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishAnalyzer.java index 15a1841..631934f 100644 --- modules/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishAnalyzer.java +++ modules/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishAnalyzer.java @@ -51,6 +51,6 @@ public class TestSwedishAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new SwedishAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(getRandom(), new SwedishAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishLightStemFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishLightStemFilter.java index ae30b9b..584aeca 100644 --- modules/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishLightStemFilter.java +++ modules/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishLightStemFilter.java @@ -47,6 +47,6 @@ public class TestSwedishLightStemFilter extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER); + checkRandomData(getRandom(), analyzer, 10000*RANDOM_MULTIPLIER); } } diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSolrSynonymParser.java modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSolrSynonymParser.java index f958f01..0f8a93f 100644 --- modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSolrSynonymParser.java +++ modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSolrSynonymParser.java @@ -43,7 +43,7 @@ public class TestSolrSynonymParser extends BaseTokenStreamTestCase { "foo => baz\n" + "this test, that testing"; - SolrSynonymParser parser = new SolrSynonymParser(true, true, new MockAnalyzer(random)); + SolrSynonymParser parser = new SolrSynonymParser(true, true, new MockAnalyzer(getRandom())); parser.add(new StringReader(testFile)); final SynonymMap map = parser.build(); @@ -76,7 +76,7 @@ public class TestSolrSynonymParser extends BaseTokenStreamTestCase { @Test(expected=ParseException.class) public void testInvalidDoubleMap() throws Exception { String testFile = "a => b => c"; - SolrSynonymParser parser = new SolrSynonymParser(true, true, new MockAnalyzer(random)); + SolrSynonymParser parser = new SolrSynonymParser(true, true, new MockAnalyzer(getRandom())); parser.add(new StringReader(testFile)); } @@ -84,7 +84,7 @@ public class TestSolrSynonymParser extends BaseTokenStreamTestCase { @Test(expected=ParseException.class) public void testInvalidAnalyzesToNothingOutput() throws Exception { String testFile = "a => 1"; - SolrSynonymParser parser = new 
SolrSynonymParser(true, true, new MockAnalyzer(random, MockTokenizer.SIMPLE, false)); + SolrSynonymParser parser = new SolrSynonymParser(true, true, new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, false)); parser.add(new StringReader(testFile)); } @@ -92,7 +92,7 @@ public class TestSolrSynonymParser extends BaseTokenStreamTestCase { @Test(expected=ParseException.class) public void testInvalidAnalyzesToNothingInput() throws Exception { String testFile = "1 => a"; - SolrSynonymParser parser = new SolrSynonymParser(true, true, new MockAnalyzer(random, MockTokenizer.SIMPLE, false)); + SolrSynonymParser parser = new SolrSynonymParser(true, true, new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, false)); parser.add(new StringReader(testFile)); } @@ -117,7 +117,7 @@ public class TestSolrSynonymParser extends BaseTokenStreamTestCase { String testFile = "a\\=>a => b\\=>b\n" + "a\\,a => b\\,b"; - SolrSynonymParser parser = new SolrSynonymParser(true, true, new MockAnalyzer(random, MockTokenizer.KEYWORD, false)); + SolrSynonymParser parser = new SolrSynonymParser(true, true, new MockAnalyzer(getRandom(), MockTokenizer.KEYWORD, false)); parser.add(new StringReader(testFile)); final SynonymMap map = parser.build(); Analyzer analyzer = new Analyzer() { diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymMapFilter.java modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymMapFilter.java index 5c07a81..dd71741 100644 --- modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymMapFilter.java +++ modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymMapFilter.java @@ -186,7 +186,7 @@ public class TestSynonymMapFilter extends BaseTokenStreamTestCase { assert alphabetSize <= 26; char[] s = new char[2*length]; for(int charIDX=0;charIDX synMap = new HashMap(); final List syns = new ArrayList(); - final boolean dedup = random.nextBoolean(); + final boolean dedup = getRandom().nextBoolean(); if (VERBOSE) { System.out.println(" dedup=" + dedup); } b = new SynonymMap.Builder(dedup); for(int synIDX=0;synIDX(); synMap.put(synIn, s); - s.keepOrig = random.nextBoolean(); + s.keepOrig = getRandom().nextBoolean(); } - final String synOut = getRandomString('0', 10, _TestUtil.nextInt(random, 1, 5)).trim(); + final String synOut = getRandomString('0', 10, _TestUtil.nextInt(getRandom(), 1, 5)).trim(); s.out.add(synOut); add(synIn, synOut, s.keepOrig); if (VERBOSE) { @@ -395,7 +395,7 @@ public class TestSynonymMapFilter extends BaseTokenStreamTestCase { private String randomNonEmptyString() { while(true) { - final String s = _TestUtil.randomUnicodeString(random).trim(); + final String s = _TestUtil.randomUnicodeString(getRandom()).trim(); if (s.length() != 0 && s.indexOf('\u0000') == -1) { return s; } @@ -408,13 +408,13 @@ public class TestSynonymMapFilter extends BaseTokenStreamTestCase { public void testRandom2() throws Exception { final int numIters = atLeast(10); for (int i = 0; i < numIters; i++) { - b = new SynonymMap.Builder(random.nextBoolean()); + b = new SynonymMap.Builder(getRandom().nextBoolean()); final int numEntries = atLeast(10); for (int j = 0; j < numEntries; j++) { - add(randomNonEmptyString(), randomNonEmptyString(), random.nextBoolean()); + add(randomNonEmptyString(), randomNonEmptyString(), getRandom().nextBoolean()); } final SynonymMap map = b.build(); - final boolean ignoreCase = random.nextBoolean(); + final boolean ignoreCase = getRandom().nextBoolean(); final Analyzer analyzer = new 
Analyzer() { @Override @@ -424,7 +424,7 @@ public class TestSynonymMapFilter extends BaseTokenStreamTestCase { } }; - checkRandomData(random, analyzer, 1000*RANDOM_MULTIPLIER); + checkRandomData(getRandom(), analyzer, 1000*RANDOM_MULTIPLIER); } } @@ -434,7 +434,7 @@ public class TestSynonymMapFilter extends BaseTokenStreamTestCase { "aaa => aaaa1 aaaa2 aaaa3\n" + "bbb => bbbb1 bbbb2\n"; - SolrSynonymParser parser = new SolrSynonymParser(true, true, new MockAnalyzer(random)); + SolrSynonymParser parser = new SolrSynonymParser(true, true, new MockAnalyzer(getRandom())); parser.add(new StringReader(testFile)); final SynonymMap map = parser.build(); diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestWordnetSynonymParser.java modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestWordnetSynonymParser.java index ed3472e..497df95 100644 --- modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestWordnetSynonymParser.java +++ modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestWordnetSynonymParser.java @@ -41,7 +41,7 @@ public class TestWordnetSynonymParser extends BaseTokenStreamTestCase { "s(100000004,2,'king''s meany',n,1,1).\n"; public void testSynonyms() throws Exception { - WordnetSynonymParser parser = new WordnetSynonymParser(true, true, new MockAnalyzer(random)); + WordnetSynonymParser parser = new WordnetSynonymParser(true, true, new MockAnalyzer(getRandom())); parser.add(new StringReader(synonymsFile)); final SynonymMap map = parser.build(); diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java index 7ab69c1..5ea81f7 100644 --- modules/analysis/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java +++ modules/analysis/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java @@ -164,7 +164,7 @@ public class TestThaiAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new ThaiAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(getRandom(), new ThaiAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } // LUCENE-3044 diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/tr/TestTurkishAnalyzer.java modules/analysis/common/src/test/org/apache/lucene/analysis/tr/TestTurkishAnalyzer.java index fce2e6f..89a41bc 100644 --- modules/analysis/common/src/test/org/apache/lucene/analysis/tr/TestTurkishAnalyzer.java +++ modules/analysis/common/src/test/org/apache/lucene/analysis/tr/TestTurkishAnalyzer.java @@ -51,6 +51,6 @@ public class TestTurkishAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new TurkishAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(getRandom(), new TurkishAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayIterator.java modules/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayIterator.java index bfcab81..104c680 100644 --- modules/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayIterator.java +++ modules/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayIterator.java @@ -35,7 
+35,7 @@ public class TestCharArrayIterator extends LuceneTestCase { BreakIterator bi = BreakIterator.getWordInstance(); CharArrayIterator ci = CharArrayIterator.newWordInstance(); for (int i = 0; i < 10000; i++) { - char text[] = _TestUtil.randomUnicodeString(random).toCharArray(); + char text[] = _TestUtil.randomUnicodeString(getRandom()).toCharArray(); ci.setText(text, 0, text.length); consume(bi, ci); } @@ -63,7 +63,7 @@ public class TestCharArrayIterator extends LuceneTestCase { BreakIterator bi = BreakIterator.getSentenceInstance(); CharArrayIterator ci = CharArrayIterator.newSentenceInstance(); for (int i = 0; i < 10000; i++) { - char text[] = _TestUtil.randomUnicodeString(random).toCharArray(); + char text[] = _TestUtil.randomUnicodeString(getRandom()).toCharArray(); ci.setText(text, 0, text.length); consume(bi, ci); } diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayMap.java modules/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayMap.java index 9ff33aa..8156cfa 100644 --- modules/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayMap.java +++ modules/analysis/common/src/test/org/apache/lucene/analysis/util/TestCharArrayMap.java @@ -30,15 +30,15 @@ public class TestCharArrayMap extends LuceneTestCase { char[] key; for (int i=0; iSMP and check that offsets are correct @@ -159,7 +159,7 @@ public class TestCharTokenizers extends BaseTokenStreamTestCase { }; int num = 10000 * RANDOM_MULTIPLIER; for (int i = 0; i < num; i++) { - String s = _TestUtil.randomUnicodeString(random); + String s = _TestUtil.randomUnicodeString(getRandom()); TokenStream ts = analyzer.tokenStream("foo", new StringReader(s)); ts.reset(); OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class); @@ -174,6 +174,6 @@ public class TestCharTokenizers extends BaseTokenStreamTestCase { ts.close(); } // just for fun - checkRandomData(random, analyzer, num); + checkRandomData(getRandom(), analyzer, num); } } diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/util/TestSegmentingTokenizerBase.java modules/analysis/common/src/test/org/apache/lucene/analysis/util/TestSegmentingTokenizerBase.java index f3a28a3..c6a9270 100644 --- modules/analysis/common/src/test/org/apache/lucene/analysis/util/TestSegmentingTokenizerBase.java +++ modules/analysis/common/src/test/org/apache/lucene/analysis/util/TestSegmentingTokenizerBase.java @@ -129,8 +129,8 @@ public class TestSegmentingTokenizerBase extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, sentence, 10000*RANDOM_MULTIPLIER); - checkRandomData(random, sentenceAndWord, 10000*RANDOM_MULTIPLIER); + checkRandomData(getRandom(), sentence, 10000*RANDOM_MULTIPLIER); + checkRandomData(getRandom(), sentenceAndWord, 10000*RANDOM_MULTIPLIER); } // some tokenizers for testing diff --git modules/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerTest.java modules/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerTest.java index c2b0d6c..f000aad 100644 --- modules/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerTest.java +++ modules/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerTest.java @@ -183,6 +183,6 @@ public class WikipediaTokenizerTest extends BaseTokenStreamTestCase { return new TokenStreamComponents(tokenizer, tokenizer); } }; - 
checkRandomData(random, a, 10000*RANDOM_MULTIPLIER); + checkRandomData(getRandom(), a, 10000*RANDOM_MULTIPLIER); } } diff --git modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUFoldingFilter.java modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUFoldingFilter.java index dc0f264..d865e1d 100644 --- modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUFoldingFilter.java +++ modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUFoldingFilter.java @@ -76,6 +76,6 @@ public class TestICUFoldingFilter extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, a, 10000*RANDOM_MULTIPLIER); + checkRandomData(getRandom(), a, 10000*RANDOM_MULTIPLIER); } } diff --git modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2Filter.java modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2Filter.java index e81c02d..7c6d3039 100644 --- modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2Filter.java +++ modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2Filter.java @@ -76,6 +76,6 @@ public class TestICUNormalizer2Filter extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, a, 10000*RANDOM_MULTIPLIER); + checkRandomData(getRandom(), a, 10000*RANDOM_MULTIPLIER); } } diff --git modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUTransformFilter.java modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUTransformFilter.java index 4d76615..b09dc50 100644 --- modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUTransformFilter.java +++ modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUTransformFilter.java @@ -98,6 +98,6 @@ public class TestICUTransformFilter extends BaseTokenStreamTestCase { return new TokenStreamComponents(tokenizer, new ICUTransformFilter(tokenizer, transform)); } }; - checkRandomData(random, a, 1000*RANDOM_MULTIPLIER); + checkRandomData(getRandom(), a, 1000*RANDOM_MULTIPLIER); } } diff --git modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java index c768e0f..56a1cd6 100644 --- modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java +++ modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java @@ -234,6 +234,6 @@ public class TestICUTokenizer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, a, 10000*RANDOM_MULTIPLIER); + checkRandomData(getRandom(), a, 10000*RANDOM_MULTIPLIER); } } diff --git modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/kuromoji/TestExtendedMode.java modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/kuromoji/TestExtendedMode.java index ed9e2c1..9e2158c 100644 --- modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/kuromoji/TestExtendedMode.java +++ modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/kuromoji/TestExtendedMode.java @@ -51,7 +51,7 @@ public class TestExtendedMode extends BaseTokenStreamTestCase { public void testSurrogates2() throws IOException { 
int numIterations = atLeast(10000); for (int i = 0; i < numIterations; i++) { - String s = _TestUtil.randomUnicodeString(random, 100); + String s = _TestUtil.randomUnicodeString(getRandom(), 100); TokenStream ts = analyzer.tokenStream("foo", new StringReader(s)); CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class); ts.reset(); diff --git modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/kuromoji/TestKuromojiAnalyzer.java modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/kuromoji/TestKuromojiAnalyzer.java index 4e6928d..c0fc97b 100644 --- modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/kuromoji/TestKuromojiAnalyzer.java +++ modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/kuromoji/TestKuromojiAnalyzer.java @@ -55,6 +55,6 @@ public class TestKuromojiAnalyzer extends BaseTokenStreamTestCase { * blast random strings against the analyzer */ public void testRandom() throws IOException { - checkRandomData(random, new KuromojiAnalyzer(TEST_VERSION_CURRENT), atLeast(10000)); + checkRandomData(getRandom(), new KuromojiAnalyzer(TEST_VERSION_CURRENT), atLeast(10000)); } } diff --git modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/kuromoji/TestKuromojiBaseFormFilter.java modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/kuromoji/TestKuromojiBaseFormFilter.java index 4d38ccf..31ff072 100644 --- modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/kuromoji/TestKuromojiBaseFormFilter.java +++ modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/kuromoji/TestKuromojiBaseFormFilter.java @@ -45,6 +45,6 @@ public class TestKuromojiBaseFormFilter extends BaseTokenStreamTestCase { } public void testRandomStrings() throws IOException { - checkRandomData(random, analyzer, atLeast(10000)); + checkRandomData(getRandom(), analyzer, atLeast(10000)); } } diff --git modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/kuromoji/TestKuromojiTokenizer.java modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/kuromoji/TestKuromojiTokenizer.java index f7514e9..d961851 100644 --- modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/kuromoji/TestKuromojiTokenizer.java +++ modules/analysis/kuromoji/src/test/org/apache/lucene/analysis/kuromoji/TestKuromojiTokenizer.java @@ -102,12 +102,12 @@ public class TestKuromojiTokenizer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, analyzer, 10000*RANDOM_MULTIPLIER); + checkRandomData(getRandom(), analyzer, 10000*RANDOM_MULTIPLIER); } public void testLargeDocReliability() throws Exception { for (int i = 0; i < 100; i++) { - String s = _TestUtil.randomUnicodeString(random, 10000); + String s = _TestUtil.randomUnicodeString(getRandom(), 10000); TokenStream ts = analyzer.tokenStream("foo", new StringReader(s)); ts.reset(); while (ts.incrementToken()) { @@ -125,7 +125,7 @@ public class TestKuromojiTokenizer extends BaseTokenStreamTestCase { public void testSurrogates2() throws IOException { int numIterations = atLeast(10000); for (int i = 0; i < numIterations; i++) { - String s = _TestUtil.randomUnicodeString(random, 100); + String s = _TestUtil.randomUnicodeString(getRandom(), 100); TokenStream ts = analyzer.tokenStream("foo", new StringReader(s)); CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class); ts.reset(); diff --git 
modules/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikAnalyzer.java modules/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikAnalyzer.java index c272b7d..03dc321 100644 --- modules/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikAnalyzer.java +++ modules/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikAnalyzer.java @@ -105,6 +105,6 @@ public class TestMorfologikAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandom() throws Exception { - checkRandomData(random, getTestAnalyzer(), 10000 * RANDOM_MULTIPLIER); + checkRandomData(getRandom(), getTestAnalyzer(), 10000 * RANDOM_MULTIPLIER); } } diff --git modules/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/DoubleMetaphoneFilterTest.java modules/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/DoubleMetaphoneFilterTest.java index 181e4f5..9ac7cb3 100644 --- modules/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/DoubleMetaphoneFilterTest.java +++ modules/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/DoubleMetaphoneFilterTest.java @@ -72,7 +72,7 @@ public class DoubleMetaphoneFilterTest extends BaseTokenStreamTestCase { } public void testRandom() throws Exception { - final int codeLen = _TestUtil.nextInt(random, 1, 8); + final int codeLen = _TestUtil.nextInt(getRandom(), 1, 8); Analyzer a = new Analyzer() { @Override @@ -82,7 +82,7 @@ public class DoubleMetaphoneFilterTest extends BaseTokenStreamTestCase { } }; - checkRandomData(random, a, 1000 * RANDOM_MULTIPLIER); + checkRandomData(getRandom(), a, 1000 * RANDOM_MULTIPLIER); Analyzer b = new Analyzer() { @@ -93,6 +93,6 @@ public class DoubleMetaphoneFilterTest extends BaseTokenStreamTestCase { } }; - checkRandomData(random, b, 1000 * RANDOM_MULTIPLIER); + checkRandomData(getRandom(), b, 1000 * RANDOM_MULTIPLIER); } } diff --git modules/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestBeiderMorseFilter.java modules/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestBeiderMorseFilter.java index 9f9ef9d..6572727 100644 --- modules/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestBeiderMorseFilter.java +++ modules/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestBeiderMorseFilter.java @@ -89,6 +89,6 @@ public class TestBeiderMorseFilter extends BaseTokenStreamTestCase { @Ignore("broken: causes OOM on some strings (https://issues.apache.org/jira/browse/CODEC-132)") public void testRandom() throws Exception { - checkRandomData(random, analyzer, 1000 * RANDOM_MULTIPLIER); + checkRandomData(getRandom(), analyzer, 1000 * RANDOM_MULTIPLIER); } } diff --git modules/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestPhoneticFilter.java modules/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestPhoneticFilter.java index 674699f..5b18ff7 100644 --- modules/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestPhoneticFilter.java +++ modules/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestPhoneticFilter.java @@ -90,7 +90,7 @@ public class TestPhoneticFilter extends BaseTokenStreamTestCase { } }; - checkRandomData(random, a, 1000*RANDOM_MULTIPLIER); + checkRandomData(getRandom(), a, 1000*RANDOM_MULTIPLIER); Analyzer b = new Analyzer() { @Override @@ -100,7 +100,7 @@ public class TestPhoneticFilter extends 
BaseTokenStreamTestCase { } }; - checkRandomData(random, b, 1000*RANDOM_MULTIPLIER); + checkRandomData(getRandom(), b, 1000*RANDOM_MULTIPLIER); } } } diff --git modules/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java modules/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java index 78fe87f..8d7fdcd 100644 --- modules/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java +++ modules/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java @@ -221,6 +221,6 @@ public class TestSmartChineseAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new SmartChineseAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(getRandom(), new SmartChineseAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } diff --git modules/analysis/stempel/src/test/org/apache/lucene/analysis/pl/TestPolishAnalyzer.java modules/analysis/stempel/src/test/org/apache/lucene/analysis/pl/TestPolishAnalyzer.java index 21c5523..74cc45e 100644 --- modules/analysis/stempel/src/test/org/apache/lucene/analysis/pl/TestPolishAnalyzer.java +++ modules/analysis/stempel/src/test/org/apache/lucene/analysis/pl/TestPolishAnalyzer.java @@ -51,6 +51,6 @@ public class TestPolishAnalyzer extends BaseTokenStreamTestCase { /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { - checkRandomData(random, new PolishAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); + checkRandomData(getRandom(), new PolishAnalyzer(TEST_VERSION_CURRENT), 10000*RANDOM_MULTIPLIER); } } diff --git modules/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMABaseAnalyzerTest.java modules/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMABaseAnalyzerTest.java index 0fee64a..166d142 100644 --- modules/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMABaseAnalyzerTest.java +++ modules/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMABaseAnalyzerTest.java @@ -118,7 +118,7 @@ public class UIMABaseAnalyzerTest extends BaseTokenStreamTestCase { @Test public void testRandomStrings() throws Exception { - checkRandomData(random, new UIMABaseAnalyzer("/uima/TestAggregateSentenceAE.xml", "org.apache.lucene.uima.ts.TokenAnnotation"), + checkRandomData(getRandom(), new UIMABaseAnalyzer("/uima/TestAggregateSentenceAE.xml", "org.apache.lucene.uima.ts.TokenAnnotation"), 1000 * RANDOM_MULTIPLIER); } diff --git modules/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMATypeAwareAnalyzerTest.java modules/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMATypeAwareAnalyzerTest.java index 85035cc..00c8531 100644 --- modules/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMATypeAwareAnalyzerTest.java +++ modules/analysis/uima/src/test/org/apache/lucene/analysis/uima/UIMATypeAwareAnalyzerTest.java @@ -60,7 +60,7 @@ public class UIMATypeAwareAnalyzerTest extends BaseTokenStreamTestCase { @Test public void testRandomStrings() throws Exception { - checkRandomData(random, new UIMATypeAwareAnalyzer("/uima/TestAggregateSentenceAE.xml", + checkRandomData(getRandom(), new UIMATypeAwareAnalyzer("/uima/TestAggregateSentenceAE.xml", "org.apache.lucene.uima.ts.TokenAnnotation", "pos"), 1000 * RANDOM_MULTIPLIER); } diff --git 
modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java index 7c978c6..b416aa3 100755 --- modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java +++ modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java @@ -101,7 +101,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase { assertTrue("Index does not exist?...!", DirectoryReader.indexExists(benchmark.getRunData().getDirectory())); // now we should be able to open the index for write. IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(), - new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)) + new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())) .setOpenMode(OpenMode.APPEND)); iw.close(); IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory()); @@ -188,7 +188,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase { assertTrue("Index does not exist?...!", DirectoryReader.indexExists(benchmark.getRunData().getDirectory())); // now we should be able to open the index for write. - IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(), new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); + IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(), new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setOpenMode(OpenMode.APPEND)); iw.close(); IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory()); assertEquals("100 docs were added to the index, this is what we expect to find!",100,ir.numDocs()); @@ -227,7 +227,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase { assertTrue("Index does not exist?...!", DirectoryReader.indexExists(benchmark.getRunData().getDirectory())); // now we should be able to open the index for write. - IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(), new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); + IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(), new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setOpenMode(OpenMode.APPEND)); iw.close(); IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory()); assertEquals("1000 docs were added to the index, this is what we expect to find!",1000,ir.numDocs()); @@ -300,7 +300,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase { assertEquals("TestSearchTask was supposed to be called!",139,CountingSearchTestTask.numSearches); assertTrue("Index does not exist?...!", DirectoryReader.indexExists(benchmark.getRunData().getDirectory())); // now we should be able to open the index for write. - IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(), new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND)); + IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(), new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setOpenMode(OpenMode.APPEND)); iw.close(); IndexReader ir = IndexReader.open(benchmark.getRunData().getDirectory()); assertEquals("1 docs were added to the index, this is what we expect to find!",1,ir.numDocs()); @@ -430,7 +430,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase { // now we should be able to open the index for write. 
IndexWriter iw = new IndexWriter(benchmark.getRunData().getDirectory(), - new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)) + new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())) .setOpenMode(OpenMode.APPEND)); iw.close(); @@ -496,7 +496,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase { TermsEnum termsEnum = terms.iterator(null); DocsEnum docs = null; while(termsEnum.next() != null) { - docs = _TestUtil.docs(random, termsEnum, MultiFields.getLiveDocs(reader), docs, true); + docs = _TestUtil.docs(getRandom(), termsEnum, MultiFields.getLiveDocs(reader), docs, true); while(docs.nextDoc() != docs.NO_MORE_DOCS) { totalTokenCount2 += docs.freq(); } } diff --git modules/facet/src/test/org/apache/lucene/facet/FacetTestBase.java modules/facet/src/test/org/apache/lucene/facet/FacetTestBase.java index b5d5b54..bb3e790 100644 --- modules/facet/src/test/org/apache/lucene/facet/FacetTestBase.java +++ modules/facet/src/test/org/apache/lucene/facet/FacetTestBase.java @@ -157,7 +157,7 @@ public abstract class FacetTestBase extends LuceneTestCase { pair.taxoDir = newDirectory(); } - RandomIndexWriter iw = new RandomIndexWriter(random, pair.searchDir, getIndexWriterConfig(getAnalyzer())); + RandomIndexWriter iw = new RandomIndexWriter(getRandom(), pair.searchDir, getIndexWriterConfig(getAnalyzer())); TaxonomyWriter taxo = new DirectoryTaxonomyWriter(pair.taxoDir, OpenMode.CREATE); populateIndex(iw, taxo, getFacetIndexingParams(partitionSize)); @@ -242,7 +242,7 @@ public abstract class FacetTestBase extends LuceneTestCase { * Sub classes should override in order to test with different analyzer. */ protected Analyzer getAnalyzer() { - return new MockAnalyzer(random, MockTokenizer.WHITESPACE, false); + return new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false); } /** convenience method: convert sub results to an array */ @@ -285,7 +285,7 @@ public abstract class FacetTestBase extends LuceneTestCase { TermsEnum te = terms.iterator(null); DocsEnum de = null; while (te.next() != null) { - de = _TestUtil.docs(random, te, liveDocs, de, false); + de = _TestUtil.docs(getRandom(), te, liveDocs, de, false); int cnt = 0; while (de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { cnt++; diff --git modules/facet/src/test/org/apache/lucene/facet/enhancements/TwoEnhancementsTest.java modules/facet/src/test/org/apache/lucene/facet/enhancements/TwoEnhancementsTest.java index 866bb11..977d3db 100644 --- modules/facet/src/test/org/apache/lucene/facet/enhancements/TwoEnhancementsTest.java +++ modules/facet/src/test/org/apache/lucene/facet/enhancements/TwoEnhancementsTest.java @@ -56,8 +56,8 @@ public class TwoEnhancementsTest extends LuceneTestCase { List<CategoryPath> categoryPaths = new ArrayList<CategoryPath>(); categoryPaths.add(new CategoryPath("a", "b")); - RandomIndexWriter indexWriter = new RandomIndexWriter(random, indexDir, newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))); + RandomIndexWriter indexWriter = new RandomIndexWriter(getRandom(), indexDir, newIndexWriterConfig( + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false))); TaxonomyWriter taxo = new DirectoryTaxonomyWriter(taxoDir); // a category document builder will add the categories to a document @@ -101,8 +101,8 @@ public class TwoEnhancementsTest extends LuceneTestCase { List<CategoryPath> categoryPaths = new ArrayList<CategoryPath>(); categoryPaths.add(new CategoryPath("a", "b")); - RandomIndexWriter indexWriter = new RandomIndexWriter(random, indexDir, 
newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))); + RandomIndexWriter indexWriter = new RandomIndexWriter(getRandom(), indexDir, newIndexWriterConfig( + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false))); TaxonomyWriter taxo = new DirectoryTaxonomyWriter(taxoDir); // a category document builder will add the categories to a document diff --git modules/facet/src/test/org/apache/lucene/facet/enhancements/association/CustomAssociationPropertyTest.java modules/facet/src/test/org/apache/lucene/facet/enhancements/association/CustomAssociationPropertyTest.java index 09bcab4..70ae5a6 100644 --- modules/facet/src/test/org/apache/lucene/facet/enhancements/association/CustomAssociationPropertyTest.java +++ modules/facet/src/test/org/apache/lucene/facet/enhancements/association/CustomAssociationPropertyTest.java @@ -57,8 +57,8 @@ public class CustomAssociationPropertyTest extends LuceneTestCase { Directory iDir = newDirectory(); Directory tDir = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, iDir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false))); + RandomIndexWriter w = new RandomIndexWriter(getRandom(), iDir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.KEYWORD, false))); DirectoryTaxonomyWriter taxoW = new DirectoryTaxonomyWriter(tDir); CategoryContainer cc = new CategoryContainer(); diff --git modules/facet/src/test/org/apache/lucene/facet/index/FacetsPayloadProcessorProviderTest.java modules/facet/src/test/org/apache/lucene/facet/index/FacetsPayloadProcessorProviderTest.java index ba654ad..05a441e 100644 --- modules/facet/src/test/org/apache/lucene/facet/index/FacetsPayloadProcessorProviderTest.java +++ modules/facet/src/test/org/apache/lucene/facet/index/FacetsPayloadProcessorProviderTest.java @@ -91,8 +91,8 @@ public class FacetsPayloadProcessorProviderTest extends LuceneTestCase { private void buildIndexWithFacets(Directory dir, Directory taxDir, boolean asc) throws IOException { IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, config); + new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir, config); DirectoryTaxonomyWriter taxonomyWriter = new DirectoryTaxonomyWriter(taxDir); for (int i = 1; i <= NUM_DOCS; i++) { diff --git modules/facet/src/test/org/apache/lucene/facet/index/categorypolicy/OrdinalPolicyTest.java modules/facet/src/test/org/apache/lucene/facet/index/categorypolicy/OrdinalPolicyTest.java index 7514143..7e697be 100644 --- modules/facet/src/test/org/apache/lucene/facet/index/categorypolicy/OrdinalPolicyTest.java +++ modules/facet/src/test/org/apache/lucene/facet/index/categorypolicy/OrdinalPolicyTest.java @@ -38,7 +38,7 @@ public class OrdinalPolicyTest extends LuceneTestCase { assertFalse("default ordinal policy should not match root", ordinalPolicy .shouldAdd(TaxonomyReader.ROOT_ORDINAL)); for (int i = 0; i < 300; i++) { - int ordinal = 1 + random.nextInt(Integer.MAX_VALUE - 1); + int ordinal = 1 + getRandom().nextInt(Integer.MAX_VALUE - 1); assertTrue("default ordinal policy should match " + ordinal, ordinalPolicy.shouldAdd(ordinal)); } @@ -53,17 +53,17 @@ public class OrdinalPolicyTest extends LuceneTestCase { int[] topLevelOrdinals = new 
int[10]; String[] topLevelStrings = new String[10]; for (int i = 0; i < 10; i++) { - topLevelStrings[i] = Integer.valueOf(random.nextInt(30)).toString(); + topLevelStrings[i] = Integer.valueOf(getRandom().nextInt(30)).toString(); topLevelOrdinals[i] = taxonomy.addCategory(new CategoryPath( topLevelStrings[i])); } int[] nonTopLevelOrdinals = new int[300]; for (int i = 0; i < 300; i++) { - int nComponents = 2 + random.nextInt(10); + int nComponents = 2 + getRandom().nextInt(10); String[] components = new String[nComponents]; components[0] = topLevelStrings[i % 10]; for (int j = 1; j < components.length; j++) { - components[j] = (Integer.valueOf(random.nextInt(30))).toString(); + components[j] = (Integer.valueOf(getRandom().nextInt(30))).toString(); } nonTopLevelOrdinals[i] = taxonomy.addCategory(new CategoryPath( components)); diff --git modules/facet/src/test/org/apache/lucene/facet/index/categorypolicy/PathPolicyTest.java modules/facet/src/test/org/apache/lucene/facet/index/categorypolicy/PathPolicyTest.java index df7f9f3..c115823 100644 --- modules/facet/src/test/org/apache/lucene/facet/index/categorypolicy/PathPolicyTest.java +++ modules/facet/src/test/org/apache/lucene/facet/index/categorypolicy/PathPolicyTest.java @@ -38,10 +38,10 @@ public class PathPolicyTest extends LuceneTestCase { assertFalse("default path policy should not accept root", pathPolicy.shouldAdd(cp)); for (int i = 0; i < 300; i++) { - int nComponents = 1 + random.nextInt(10); + int nComponents = 1 + getRandom().nextInt(10); String[] components = new String[nComponents]; for (int j = 0; j < components.length; j++) { - components[j] = (Integer.valueOf(random.nextInt(30))).toString(); + components[j] = (Integer.valueOf(getRandom().nextInt(30))).toString(); } cp = new CategoryPath(components); assertTrue("default path policy should accept " @@ -59,18 +59,18 @@ public class PathPolicyTest extends LuceneTestCase { CategoryPath[] topLevelPaths = new CategoryPath[10]; String[] topLevelStrings = new String[10]; for (int i = 0; i < 10; i++) { - topLevelStrings[i] = Integer.valueOf(random.nextInt(30)).toString(); + topLevelStrings[i] = Integer.valueOf(getRandom().nextInt(30)).toString(); topLevelPaths[i] = new CategoryPath(topLevelStrings[i]); taxonomy.addCategory(topLevelPaths[i]); } CategoryPath[] nonTopLevelPaths = new CategoryPath[300]; for (int i = 0; i < 300; i++) { - int nComponents = 2 + random.nextInt(10); + int nComponents = 2 + getRandom().nextInt(10); String[] components = new String[nComponents]; components[0] = topLevelStrings[i % 10]; for (int j = 1; j < components.length; j++) { - components[j] = (Integer.valueOf(random.nextInt(30))).toString(); + components[j] = (Integer.valueOf(getRandom().nextInt(30))).toString(); } nonTopLevelPaths[i] = new CategoryPath(components); taxonomy.addCategory(nonTopLevelPaths[i]); diff --git modules/facet/src/test/org/apache/lucene/facet/index/params/DefaultFacetIndexingParamsTest.java modules/facet/src/test/org/apache/lucene/facet/index/params/DefaultFacetIndexingParamsTest.java index 86d4e2a..c4e2948 100644 --- modules/facet/src/test/org/apache/lucene/facet/index/params/DefaultFacetIndexingParamsTest.java +++ modules/facet/src/test/org/apache/lucene/facet/index/params/DefaultFacetIndexingParamsTest.java @@ -92,10 +92,10 @@ public class DefaultFacetIndexingParamsTest extends LuceneTestCase { + seed + ")", pathPolicy.shouldAdd(cp), dfip.getPathPolicy() .shouldAdd(cp)); for (int i = 0; i < 30; i++) { - int nComponents = random.nextInt(10); + int nComponents = 
getRandom().nextInt(10); String[] components = new String[nComponents]; for (int j = 0; j < components.length; j++) { - components[j] = (Integer.valueOf(random.nextInt(30))).toString(); + components[j] = (Integer.valueOf(getRandom().nextInt(30))).toString(); } cp = new CategoryPath(components); assertEquals("path policy does not match default for " @@ -110,7 +110,7 @@ public class DefaultFacetIndexingParamsTest extends LuceneTestCase { .shouldAdd(TaxonomyReader.ROOT_ORDINAL), dfip .getOrdinalPolicy().shouldAdd(TaxonomyReader.ROOT_ORDINAL)); for (int i = 0; i < 30; i++) { - int ordinal = random.nextInt(); + int ordinal = getRandom().nextInt(); assertEquals("ordinal policy does not match default for " + ordinal + "(seed " + seed + ")", ordinalPolicy.shouldAdd(ordinal), dfip.getOrdinalPolicy().shouldAdd(ordinal)); diff --git modules/facet/src/test/org/apache/lucene/facet/search/BaseTestTopK.java modules/facet/src/test/org/apache/lucene/facet/search/BaseTestTopK.java index 5129da8..40afe26 100644 --- modules/facet/src/test/org/apache/lucene/facet/search/BaseTestTopK.java +++ modules/facet/src/test/org/apache/lucene/facet/search/BaseTestTopK.java @@ -62,14 +62,14 @@ public abstract class BaseTestTopK extends FacetTestBase { return; } currDoc = doc; - nextInt = random.nextInt(categoriesPow2); + nextInt = getRandom().nextInt(categoriesPow2); nextInt = (int)Math.sqrt(nextInt); } @Override protected String getContent(int doc) { nextInt(doc); - if (random.nextDouble() > 0.1) { + if (getRandom().nextDouble() > 0.1) { return ALPHA + ' ' + BETA; } return ALPHA; @@ -109,6 +109,6 @@ public abstract class BaseTestTopK extends FacetTestBase { @Override protected IndexWriterConfig getIndexWriterConfig(Analyzer analyzer) { - return super.getIndexWriterConfig(analyzer).setMaxBufferedDocs(_TestUtil.nextInt(random, 500, 10000)); + return super.getIndexWriterConfig(analyzer).setMaxBufferedDocs(_TestUtil.nextInt(getRandom(), 500, 10000)); } } diff --git modules/facet/src/test/org/apache/lucene/facet/search/CategoryListIteratorTest.java modules/facet/src/test/org/apache/lucene/facet/search/CategoryListIteratorTest.java index 9e94f97..1c2c7ce 100644 --- modules/facet/src/test/org/apache/lucene/facet/search/CategoryListIteratorTest.java +++ modules/facet/src/test/org/apache/lucene/facet/search/CategoryListIteratorTest.java @@ -96,8 +96,8 @@ public class CategoryListIteratorTest extends LuceneTestCase { Directory dir = newDirectory(); DataTokenStream dts = new DataTokenStream("1",new SortingIntEncoder( new UniqueValuesIntEncoder(new DGapIntEncoder(new VInt8IntEncoder())))); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random, MockTokenizer.KEYWORD, false)).setMergePolicy(newLogMergePolicy())); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, + new MockAnalyzer(getRandom(), MockTokenizer.KEYWORD, false)).setMergePolicy(newLogMergePolicy())); for (int i = 0; i < data.length; i++) { dts.setIdx(i); Document doc = new Document(); @@ -144,7 +144,7 @@ public class CategoryListIteratorTest extends LuceneTestCase { } }; // NOTE: test is wired to LogMP... 
because test relies on certain docids having payloads - RandomIndexWriter writer = new RandomIndexWriter(random, dir, + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, noPayloadsAnalyzer).setMergePolicy(newLogMergePolicy())); for (int i = 0; i < data.length; i++) { Document doc = new Document(); diff --git modules/facet/src/test/org/apache/lucene/facet/search/DrillDownTest.java modules/facet/src/test/org/apache/lucene/facet/search/DrillDownTest.java index beba5e0..863a495 100644 --- modules/facet/src/test/org/apache/lucene/facet/search/DrillDownTest.java +++ modules/facet/src/test/org/apache/lucene/facet/search/DrillDownTest.java @@ -67,11 +67,12 @@ public class DrillDownTest extends LuceneTestCase { nonDefaultParams = new FacetSearchParams(iParams); } + @BeforeClass public static void createIndexes() throws CorruptIndexException, LockObtainFailedException, IOException { dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false))); + RandomIndexWriter writer = new RandomIndexWriter(getStaticRandom(), dir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getStaticRandom(), MockTokenizer.KEYWORD, false))); taxoDir = newDirectory(); TaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir); diff --git modules/facet/src/test/org/apache/lucene/facet/search/TestMultipleCategoryLists.java modules/facet/src/test/org/apache/lucene/facet/search/TestMultipleCategoryLists.java index b392783..7f978f5 100644 --- modules/facet/src/test/org/apache/lucene/facet/search/TestMultipleCategoryLists.java +++ modules/facet/src/test/org/apache/lucene/facet/search/TestMultipleCategoryLists.java @@ -62,8 +62,8 @@ public class TestMultipleCategoryLists extends LuceneTestCase { public void testDefault() throws Exception { Directory[][] dirs = getDirs(); // create and open an index writer - RandomIndexWriter iw = new RandomIndexWriter(random, dirs[0][0], newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))); + RandomIndexWriter iw = new RandomIndexWriter(getRandom(), dirs[0][0], newIndexWriterConfig( + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false))); // create and open a taxonomy writer TaxonomyWriter tw = new DirectoryTaxonomyWriter(dirs[0][1], OpenMode.CREATE); @@ -89,7 +89,7 @@ public class TestMultipleCategoryLists extends LuceneTestCase { // Obtain facets results and hand-test them assertCorrectResults(facetsCollector); - DocsEnum td = _TestUtil.docs(random, ir, "$facets", new BytesRef("$fulltree$"), MultiFields.getLiveDocs(ir), null, false); + DocsEnum td = _TestUtil.docs(getRandom(), ir, "$facets", new BytesRef("$fulltree$"), MultiFields.getLiveDocs(ir), null, false); assertTrue(td.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); tr.close(); @@ -103,8 +103,8 @@ public class TestMultipleCategoryLists extends LuceneTestCase { public void testCustom() throws Exception { Directory[][] dirs = getDirs(); // create and open an index writer - RandomIndexWriter iw = new RandomIndexWriter(random, dirs[0][0], newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))); + RandomIndexWriter iw = new RandomIndexWriter(getRandom(), dirs[0][0], newIndexWriterConfig( + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false))); // create and open a taxonomy writer 
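[Editor's note] The hunk above adds @BeforeClass to DrillDownTest.createIndexes() and switches it from the shared random field to getStaticRandom(). JUnit runs @BeforeClass hooks in a static context, so an instance-scoped accessor such as getRandom() cannot be called there. The patch does not include the LuceneTestCase side of this change, so the following is only a minimal sketch of how a paired static/instance accessor might look; the field names and seeding scheme are assumptions for illustration, not the actual test-framework code.

import java.util.Random;

public abstract class LuceneTestCaseSketch {
  // Hypothetical: one seed per test class, fixed before any @BeforeClass hook runs.
  private static final long CLASS_SEED = System.nanoTime();
  private static final Random STATIC_RANDOM = new Random(CLASS_SEED);

  // Hypothetical: per-test Random, reseeded in setUp() so each method is reproducible.
  private Random testRandom = new Random(CLASS_SEED);

  // Callable from static contexts such as @BeforeClass index setup.
  protected static Random getStaticRandom() {
    return STATIC_RANDOM;
  }

  // Callable only from instance contexts, i.e. inside test methods.
  protected Random getRandom() {
    return testRandom;
  }
}

Whatever the real implementation does, the contract the patch relies on is just this split: static setup code draws from the class-level source, test methods draw from the per-test source.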
TaxonomyWriter tw = new DirectoryTaxonomyWriter(dirs[0][1], OpenMode.CREATE); @@ -143,8 +143,8 @@ public class TestMultipleCategoryLists extends LuceneTestCase { public void testTwoCustomsSameField() throws Exception { Directory[][] dirs = getDirs(); // create and open an index writer - RandomIndexWriter iw = new RandomIndexWriter(random, dirs[0][0], newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))); + RandomIndexWriter iw = new RandomIndexWriter(getRandom(), dirs[0][0], newIndexWriterConfig( + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false))); // create and open a taxonomy writer TaxonomyWriter tw = new DirectoryTaxonomyWriter(dirs[0][1], OpenMode.CREATE); @@ -183,7 +183,7 @@ public class TestMultipleCategoryLists extends LuceneTestCase { } private void assertPostingListExists(String field, String text, IndexReader ir) throws IOException { - DocsEnum de = _TestUtil.docs(random, ir, field, new BytesRef(text), null, null, false); + DocsEnum de = _TestUtil.docs(getRandom(), ir, field, new BytesRef(text), null, null, false); assertTrue(de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS); } @@ -191,8 +191,8 @@ public class TestMultipleCategoryLists extends LuceneTestCase { public void testDifferentFieldsAndText() throws Exception { Directory[][] dirs = getDirs(); // create and open an index writer - RandomIndexWriter iw = new RandomIndexWriter(random, dirs[0][0], newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))); + RandomIndexWriter iw = new RandomIndexWriter(getRandom(), dirs[0][0], newIndexWriterConfig( + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false))); // create and open a taxonomy writer TaxonomyWriter tw = new DirectoryTaxonomyWriter(dirs[0][1], OpenMode.CREATE); @@ -231,8 +231,8 @@ public class TestMultipleCategoryLists extends LuceneTestCase { public void testSomeSameSomeDifferent() throws Exception { Directory[][] dirs = getDirs(); // create and open an index writer - RandomIndexWriter iw = new RandomIndexWriter(random, dirs[0][0], newIndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))); + RandomIndexWriter iw = new RandomIndexWriter(getRandom(), dirs[0][0], newIndexWriterConfig( + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false))); // create and open a taxonomy writer TaxonomyWriter tw = new DirectoryTaxonomyWriter(dirs[0][1], OpenMode.CREATE); diff --git modules/facet/src/test/org/apache/lucene/facet/search/TestTopKInEachNodeResultHandler.java modules/facet/src/test/org/apache/lucene/facet/search/TestTopKInEachNodeResultHandler.java index 5fbb06a..5bed7a1 100644 --- modules/facet/src/test/org/apache/lucene/facet/search/TestTopKInEachNodeResultHandler.java +++ modules/facet/src/test/org/apache/lucene/facet/search/TestTopKInEachNodeResultHandler.java @@ -77,9 +77,9 @@ public class TestTopKInEachNodeResultHandler extends LuceneTestCase { } }; - RandomIndexWriter iw = new RandomIndexWriter(random, iDir, + RandomIndexWriter iw = new RandomIndexWriter(getRandom(), iDir, newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE)); + new MockAnalyzer(getRandom())).setOpenMode(OpenMode.CREATE)); TaxonomyWriter tw = new DirectoryTaxonomyWriter(tDir); prvt_add(iParams, iw, tw, "a", "b"); prvt_add(iParams, iw, tw, "a", "b", "1"); diff --git 
modules/facet/src/test/org/apache/lucene/facet/search/TestTotalFacetCountsCache.java modules/facet/src/test/org/apache/lucene/facet/search/TestTotalFacetCountsCache.java index a3299e2..f15f8ff 100644 --- modules/facet/src/test/org/apache/lucene/facet/search/TestTotalFacetCountsCache.java +++ modules/facet/src/test/org/apache/lucene/facet/search/TestTotalFacetCountsCache.java @@ -114,9 +114,9 @@ public class TestTotalFacetCountsCache extends LuceneTestCase { public void testGeneralSynchronization() throws Exception { int numIters = atLeast(2); for (int i = 0; i < numIters; i++) { - doTestGeneralSynchronization(_TestUtil.nextInt(random, 2, 4), - random.nextBoolean() ? -1 : _TestUtil.nextInt(random, 1, 10), - _TestUtil.nextInt(random, 0, 3)); + doTestGeneralSynchronization(_TestUtil.nextInt(getRandom(), 2, 4), + getRandom().nextBoolean() ? -1 : _TestUtil.nextInt(getRandom(), 1, 10), + _TestUtil.nextInt(getRandom(), 0, 3)); } } @@ -143,10 +143,10 @@ public class TestTotalFacetCountsCache extends LuceneTestCase { int cacheSize) throws Exception, CorruptIndexException, IOException, InterruptedException { TFC.setCacheSize(cacheSize); - SlowRAMDirectory slowIndexDir = new SlowRAMDirectory(-1, random); - MockDirectoryWrapper indexDir = new MockDirectoryWrapper(random, slowIndexDir); - SlowRAMDirectory slowTaxoDir = new SlowRAMDirectory(-1, random); - MockDirectoryWrapper taxoDir = new MockDirectoryWrapper(random, slowTaxoDir); + SlowRAMDirectory slowIndexDir = new SlowRAMDirectory(-1, getRandom()); + MockDirectoryWrapper indexDir = new MockDirectoryWrapper(getRandom(), slowIndexDir); + SlowRAMDirectory slowTaxoDir = new SlowRAMDirectory(-1, getRandom()); + MockDirectoryWrapper taxoDir = new MockDirectoryWrapper(getRandom(), slowTaxoDir); // Index documents without the "slowness" @@ -408,7 +408,7 @@ public class TestTotalFacetCountsCache extends LuceneTestCase { // Write index using 'normal' directories IndexWriter w = new IndexWriter(indexDir, new IndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))); + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false))); DirectoryTaxonomyWriter tw = new DirectoryTaxonomyWriter(taxoDir); DefaultFacetIndexingParams iParams = new DefaultFacetIndexingParams(); // Add documents and facets diff --git modules/facet/src/test/org/apache/lucene/facet/search/association/AssociationsFacetRequestTest.java modules/facet/src/test/org/apache/lucene/facet/search/association/AssociationsFacetRequestTest.java index 08d63d2..dd99a5e 100644 --- modules/facet/src/test/org/apache/lucene/facet/search/association/AssociationsFacetRequestTest.java +++ modules/facet/src/test/org/apache/lucene/facet/search/association/AssociationsFacetRequestTest.java @@ -66,8 +66,8 @@ public class AssociationsFacetRequestTest extends LuceneTestCase { dir = newDirectory(); taxoDir = newDirectory(); // preparations - index, taxonomy, content - RandomIndexWriter writer = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random, MockTokenizer.KEYWORD, false))); + RandomIndexWriter writer = new RandomIndexWriter(getStaticRandom(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, + new MockAnalyzer(getStaticRandom(), MockTokenizer.KEYWORD, false))); TaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir); diff --git modules/facet/src/test/org/apache/lucene/facet/search/params/MultiIteratorsPerCLParamsTest.java 
modules/facet/src/test/org/apache/lucene/facet/search/params/MultiIteratorsPerCLParamsTest.java index 3689d04..9703cf2 100644 --- modules/facet/src/test/org/apache/lucene/facet/search/params/MultiIteratorsPerCLParamsTest.java +++ modules/facet/src/test/org/apache/lucene/facet/search/params/MultiIteratorsPerCLParamsTest.java @@ -166,8 +166,8 @@ public class MultiIteratorsPerCLParamsTest extends LuceneTestCase { private void populateIndex(FacetIndexingParams iParams, Directory indexDir, Directory taxoDir) throws Exception { - RandomIndexWriter writer = new RandomIndexWriter(random, indexDir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false))); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), indexDir, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.KEYWORD, false))); TaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir); for (CategoryPath[] categories : perDocCategories) { diff --git modules/facet/src/test/org/apache/lucene/facet/search/sampling/BaseSampleTestTopK.java modules/facet/src/test/org/apache/lucene/facet/search/sampling/BaseSampleTestTopK.java index 3554dd9..3e653f1 100644 --- modules/facet/src/test/org/apache/lucene/facet/search/sampling/BaseSampleTestTopK.java +++ modules/facet/src/test/org/apache/lucene/facet/search/sampling/BaseSampleTestTopK.java @@ -54,7 +54,7 @@ public abstract class BaseSampleTestTopK extends BaseTestTopK { * is performed. The results are compared to non-sampled ones. */ public void testCountUsingSamping() throws Exception, IOException { - boolean useRandomSampler = random.nextBoolean(); + boolean useRandomSampler = getRandom().nextBoolean(); for (int partitionSize : partitionSizes) { try { initIndex(partitionSize); @@ -132,7 +132,7 @@ public abstract class BaseSampleTestTopK extends BaseTestTopK { samplingParams.setSampingThreshold(11000); //force sampling Sampler sampler = useRandomSampler ? 
- new RandomSampler(samplingParams, new Random(random.nextLong())) : + new RandomSampler(samplingParams, new Random(getRandom().nextLong())) : new RepeatableSampler(samplingParams); assertTrue("must enable sampling for this test!",sampler.shouldSample(scoredDocIDs)); return sampler; diff --git modules/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestAddTaxonomies.java modules/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestAddTaxonomies.java index a25cd4c..e79975a 100644 --- modules/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestAddTaxonomies.java +++ modules/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestAddTaxonomies.java @@ -105,10 +105,10 @@ public class TestAddTaxonomies extends LuceneTestCase { public void testmedium() throws Exception { int numTests = atLeast(3); for (int i = 0; i < numTests; i++) { - dotest(_TestUtil.nextInt(random, 1, 10), - _TestUtil.nextInt(random, 1, 100), - _TestUtil.nextInt(random, 100, 1000), - random.nextBoolean()); + dotest(_TestUtil.nextInt(getRandom(), 1, 10), + _TestUtil.nextInt(getRandom(), 1, 100), + _TestUtil.nextInt(getRandom(), 100, 1000), + getRandom().nextBoolean()); } } @@ -135,7 +135,7 @@ public class TestAddTaxonomies extends LuceneTestCase { DirectoryTaxonomyWriter tw = new DirectoryTaxonomyWriter(dirs[i]); DirectoryTaxonomyWriter copytw = new DirectoryTaxonomyWriter(copydirs[i]); for (int j=0; j set2 = new HashSet(); for (int i = 0; i < ArrayHashMapTest.RANDOM_TEST_NUM_ITERATIONS; ++i) { - int value = random.nextInt() % 500; - boolean shouldAdd = random.nextBoolean(); + int value = getRandom().nextInt() % 500; + boolean shouldAdd = getRandom().nextBoolean(); if (shouldAdd) { set1.add(value); set2.add(value); @@ -133,8 +133,8 @@ public class IntHashSetTest extends LuceneTestCase { HashSet set = new HashSet(); for (int j = 0; j < 100; ++j) { for (int i = 0; i < ArrayHashMapTest.RANDOM_TEST_NUM_ITERATIONS; ++i) { - int value = random.nextInt() % 5000; - boolean shouldAdd = random.nextBoolean(); + int value = getRandom().nextInt() % 5000; + boolean shouldAdd = getRandom().nextBoolean(); if (shouldAdd) { set.add(value); } else { @@ -150,8 +150,8 @@ public class IntHashSetTest extends LuceneTestCase { IntHashSet set = new IntHashSet(); for (int j = 0; j < 100; ++j) { for (int i = 0; i < ArrayHashMapTest.RANDOM_TEST_NUM_ITERATIONS; ++i) { - int value = random.nextInt() % 5000; - boolean shouldAdd = random.nextBoolean(); + int value = getRandom().nextInt() % 5000; + boolean shouldAdd = getRandom().nextBoolean(); if (shouldAdd) { set.add(value); } else { @@ -167,8 +167,8 @@ public class IntHashSetTest extends LuceneTestCase { IntHashSet set = new IntHashSet(); for (int j = 0; j < 100; ++j) { for (int i = 0; i < ArrayHashMapTest.RANDOM_TEST_NUM_ITERATIONS; ++i) { - int value = random.nextInt() % 5000; - boolean shouldAdd = random.nextBoolean(); + int value = getRandom().nextInt() % 5000; + boolean shouldAdd = getRandom().nextBoolean(); if (shouldAdd) { set.add(value); } else { @@ -195,8 +195,8 @@ public class IntHashSetTest extends LuceneTestCase { HashSet set = new HashSet(); for (int j = 0; j < 100; ++j) { for (int i = 0; i < ArrayHashMapTest.RANDOM_TEST_NUM_ITERATIONS; ++i) { - int value = random.nextInt() % 5000; - boolean shouldAdd = random.nextBoolean(); + int value = getRandom().nextInt() % 5000; + boolean shouldAdd = getRandom().nextBoolean(); if (shouldAdd) { set.add(value); } else { @@ -212,8 +212,8 @@ public class IntHashSetTest extends LuceneTestCase { IntHashSet set = new 
IntHashSet(); for (int j = 0; j < 100; ++j) { for (int i = 0; i < ArrayHashMapTest.RANDOM_TEST_NUM_ITERATIONS; ++i) { - int value = random.nextInt() % 5000; - boolean shouldAdd = random.nextBoolean(); + int value = getRandom().nextInt() % 5000; + boolean shouldAdd = getRandom().nextBoolean(); if (shouldAdd) { set.add(value); } else { diff --git modules/facet/src/test/org/apache/lucene/util/collections/IntToDoubleMapTest.java modules/facet/src/test/org/apache/lucene/util/collections/IntToDoubleMapTest.java index b2e28fa..7475491 100644 --- modules/facet/src/test/org/apache/lucene/util/collections/IntToDoubleMapTest.java +++ modules/facet/src/test/org/apache/lucene/util/collections/IntToDoubleMapTest.java @@ -208,7 +208,7 @@ public class IntToDoubleMapTest extends LuceneTestCase { IntToDoubleMap map = new IntToDoubleMap(); int length = ArrayHashMapTest.RANDOM_TEST_NUM_ITERATIONS; // for a repeatable random sequence - long seed = random.nextLong(); + long seed = getRandom().nextLong(); Random random = new Random(seed); for (int i = 0; i < length; ++i) { diff --git modules/facet/src/test/org/apache/lucene/util/collections/IntToIntMapTest.java modules/facet/src/test/org/apache/lucene/util/collections/IntToIntMapTest.java index a61e64c..e671236 100644 --- modules/facet/src/test/org/apache/lucene/util/collections/IntToIntMapTest.java +++ modules/facet/src/test/org/apache/lucene/util/collections/IntToIntMapTest.java @@ -208,7 +208,7 @@ public class IntToIntMapTest extends LuceneTestCase { int length = ArrayHashMapTest.RANDOM_TEST_NUM_ITERATIONS; // for a repeatable random sequence - long seed = random.nextLong(); + long seed = getRandom().nextLong(); Random random = new Random(seed); for (int i = 0; i < length; ++i) { diff --git modules/facet/src/test/org/apache/lucene/util/collections/IntToObjectMapTest.java modules/facet/src/test/org/apache/lucene/util/collections/IntToObjectMapTest.java index a1cb69b..6a4560d 100644 --- modules/facet/src/test/org/apache/lucene/util/collections/IntToObjectMapTest.java +++ modules/facet/src/test/org/apache/lucene/util/collections/IntToObjectMapTest.java @@ -205,7 +205,7 @@ public class IntToObjectMapTest extends LuceneTestCase { int length = ArrayHashMapTest.RANDOM_TEST_NUM_ITERATIONS; // for a repeatable random sequence - long seed = random.nextLong(); + long seed = getRandom().nextLong(); Random random = new Random(seed); for (int i = 0; i < length; ++i) { diff --git modules/facet/src/test/org/apache/lucene/util/collections/ObjectToFloatMapTest.java modules/facet/src/test/org/apache/lucene/util/collections/ObjectToFloatMapTest.java index faeb8ef..02656ff 100644 --- modules/facet/src/test/org/apache/lucene/util/collections/ObjectToFloatMapTest.java +++ modules/facet/src/test/org/apache/lucene/util/collections/ObjectToFloatMapTest.java @@ -215,7 +215,7 @@ public class ObjectToFloatMapTest extends LuceneTestCase { int length = ArrayHashMapTest.RANDOM_TEST_NUM_ITERATIONS; // for a repeatable random sequence - long seed = random.nextLong(); + long seed = getRandom().nextLong(); Random random = new Random(seed); for (int i = 0; i < length; ++i) { diff --git modules/facet/src/test/org/apache/lucene/util/collections/ObjectToIntMapTest.java modules/facet/src/test/org/apache/lucene/util/collections/ObjectToIntMapTest.java index d2a7ff2..fbfbdde 100644 --- modules/facet/src/test/org/apache/lucene/util/collections/ObjectToIntMapTest.java +++ modules/facet/src/test/org/apache/lucene/util/collections/ObjectToIntMapTest.java @@ -215,7 +215,7 @@ public class ObjectToIntMapTest 
extends LuceneTestCase { int length = ArrayHashMapTest.RANDOM_TEST_NUM_ITERATIONS; // for a repeatable random sequence - long seed = random.nextLong(); + long seed = getRandom().nextLong(); Random random = new Random(seed); for (int i = 0; i < length; ++i) { diff --git modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java index 4e1f822..6ad42e7 100644 --- modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java +++ modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupHeadsCollectorTest.java @@ -50,12 +50,12 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase { final String groupField = "author"; Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter( - random, + getRandom(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + new MockAnalyzer(getRandom())).setMergePolicy(newLogMergePolicy())); boolean canUseIDV = !"Lucene3x".equals(w.w.getConfig().getCodec().getName()); - Type valueType = vts[random.nextInt(vts.length)]; + Type valueType = vts[getRandom().nextInt(vts.length)]; // 0 Document doc = new Document(); @@ -156,14 +156,14 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase { } public void testRandom() throws Exception { - int numberOfRuns = _TestUtil.nextInt(random, 3, 6); + int numberOfRuns = _TestUtil.nextInt(getRandom(), 3, 6); for (int iter = 0; iter < numberOfRuns; iter++) { if (VERBOSE) { System.out.println(String.format("TEST: iter=%d total=%d", iter, numberOfRuns)); } - final int numDocs = _TestUtil.nextInt(random, 100, 1000) * RANDOM_MULTIPLIER; - final int numGroups = _TestUtil.nextInt(random, 1, numDocs); + final int numDocs = _TestUtil.nextInt(getRandom(), 100, 1000) * RANDOM_MULTIPLIER; + final int numGroups = _TestUtil.nextInt(getRandom(), 1, numDocs); if (VERBOSE) { System.out.println("TEST: numDocs=" + numDocs + " numGroups=" + numGroups); @@ -175,18 +175,18 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase { do { // B/c of DV based impl we can't see the difference between an empty string and a null value. // For that reason we don't generate empty string groups. 
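[Editor's note] The AllGroupHeadsCollectorTest hunk below draws group names with _TestUtil.randomRealisticUnicodeString inside a do/while loop that rejects empty strings, because the DocValues-based implementation cannot distinguish an empty string from a missing group value. This is plain rejection sampling; here is a self-contained sketch of the pattern with a simplified string generator. Both helper names are hypothetical and not part of the patch.

import java.util.Random;

final class RandomStrings {
  // Redraw until the string is non-empty: a DV-backed group field cannot
  // tell an empty string apart from a null value, so empty groups are skipped.
  static String nonEmptyRandomString(Random random) {
    String value;
    do {
      value = randomTestString(random); // simplified stand-in for randomRealisticUnicodeString
    } while (value.isEmpty());
    return value;
  }

  private static String randomTestString(Random random) {
    int len = random.nextInt(8); // may be 0, which forces another draw above
    StringBuilder sb = new StringBuilder(len);
    for (int i = 0; i < len; i++) {
      sb.append((char) ('a' + random.nextInt(26)));
    }
    return sb.toString();
  }
}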
- randomValue = _TestUtil.randomRealisticUnicodeString(random); + randomValue = _TestUtil.randomRealisticUnicodeString(getRandom()); } while ("".equals(randomValue)); groups.add(new BytesRef(randomValue)); } - final String[] contentStrings = new String[_TestUtil.nextInt(random, 2, 20)]; + final String[] contentStrings = new String[_TestUtil.nextInt(getRandom(), 2, 20)]; if (VERBOSE) { System.out.println("TEST: create fake content"); } for (int contentIDX = 0; contentIDX < contentStrings.length; contentIDX++) { final StringBuilder sb = new StringBuilder(); - sb.append("real").append(random.nextInt(3)).append(' '); - final int fakeCount = random.nextInt(10); + sb.append("real").append(getRandom().nextInt(3)).append(' '); + final int fakeCount = getRandom().nextInt(10); for (int fakeIDX = 0; fakeIDX < fakeCount; fakeIDX++) { sb.append("fake "); } @@ -198,13 +198,13 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase { Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter( - random, + getRandom(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random))); + new MockAnalyzer(getRandom()))); boolean preFlex = "Lucene3x".equals(w.w.getConfig().getCodec().getName()); boolean canUseIDV = !preFlex; - Type valueType = vts[random.nextInt(vts.length)]; + Type valueType = vts[getRandom().nextInt(vts.length)]; Document doc = new Document(); Document docNoGroup = new Document(); @@ -233,21 +233,21 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase { final GroupDoc[] groupDocs = new GroupDoc[numDocs]; for (int i = 0; i < numDocs; i++) { final BytesRef groupValue; - if (random.nextInt(24) == 17) { + if (getRandom().nextInt(24) == 17) { // So we test the "doc doesn't have the group'd // field" case: groupValue = null; } else { - groupValue = groups.get(random.nextInt(groups.size())); + groupValue = groups.get(getRandom().nextInt(groups.size())); } final GroupDoc groupDoc = new GroupDoc( i, groupValue, - groups.get(random.nextInt(groups.size())), - groups.get(random.nextInt(groups.size())), + groups.get(getRandom().nextInt(groups.size())), + groups.get(getRandom().nextInt(groups.size())), new BytesRef(String.format("%05d", i)), - contentStrings[random.nextInt(contentStrings.length)] + contentStrings[getRandom().nextInt(contentStrings.length)] ); if (VERBOSE) { @@ -313,8 +313,8 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase { System.out.println("TEST: searchIter=" + searchIter); } - final String searchTerm = "real" + random.nextInt(3); - boolean sortByScoreOnly = random.nextBoolean(); + final String searchTerm = "real" + getRandom().nextInt(3); + boolean sortByScoreOnly = getRandom().nextBoolean(); Sort sortWithinGroup = getRandomSort(sortByScoreOnly); AbstractAllGroupHeadsCollector allGroupHeadsCollector = createRandomCollector("group", sortWithinGroup, canUseIDV, valueType); s.search(new TermQuery(new Term("content", searchTerm)), allGroupHeadsCollector); @@ -447,22 +447,22 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase { private Sort getRandomSort(boolean scoreOnly) { final List sortFields = new ArrayList(); - if (random.nextInt(7) == 2 || scoreOnly) { + if (getRandom().nextInt(7) == 2 || scoreOnly) { sortFields.add(SortField.FIELD_SCORE); } else { - if (random.nextBoolean()) { - if (random.nextBoolean()) { - sortFields.add(new SortField("sort1", SortField.Type.STRING, random.nextBoolean())); + if (getRandom().nextBoolean()) { + if (getRandom().nextBoolean()) { + sortFields.add(new SortField("sort1", 
SortField.Type.STRING, getRandom().nextBoolean())); } else { - sortFields.add(new SortField("sort2", SortField.Type.STRING, random.nextBoolean())); + sortFields.add(new SortField("sort2", SortField.Type.STRING, getRandom().nextBoolean())); } - } else if (random.nextBoolean()) { - sortFields.add(new SortField("sort1", SortField.Type.STRING, random.nextBoolean())); - sortFields.add(new SortField("sort2", SortField.Type.STRING, random.nextBoolean())); + } else if (getRandom().nextBoolean()) { + sortFields.add(new SortField("sort1", SortField.Type.STRING, getRandom().nextBoolean())); + sortFields.add(new SortField("sort2", SortField.Type.STRING, getRandom().nextBoolean())); } } // Break ties: - if (random.nextBoolean() && !scoreOnly) { + if (getRandom().nextBoolean() && !scoreOnly) { sortFields.add(new SortField("sort3", SortField.Type.STRING)); } else if (!scoreOnly) { sortFields.add(new SortField("id", SortField.Type.INT)); @@ -508,11 +508,11 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase { private AbstractAllGroupHeadsCollector createRandomCollector(String groupField, Sort sortWithinGroup, boolean canUseIDV, Type valueType) throws IOException { AbstractAllGroupHeadsCollector collector; - if (random.nextBoolean()) { + if (getRandom().nextBoolean()) { ValueSource vs = new BytesRefFieldSource(groupField); collector = new FunctionAllGroupHeadsCollector(vs, new HashMap(), sortWithinGroup); - } else if (canUseIDV && random.nextBoolean()) { - boolean diskResident = random.nextBoolean(); + } else if (canUseIDV && getRandom().nextBoolean()) { + boolean diskResident = getRandom().nextBoolean(); collector = DVAllGroupHeadsCollector.create(groupField, sortWithinGroup, valueType, diskResident); } else { collector = TermAllGroupHeadsCollector.create(groupField, sortWithinGroup); diff --git modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java index 719fd66..648add5 100644 --- modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java +++ modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java @@ -46,10 +46,10 @@ public class AllGroupsCollectorTest extends LuceneTestCase { Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter( - random, + getRandom(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + new MockAnalyzer(getRandom())).setMergePolicy(newLogMergePolicy())); boolean canUseIDV = !"Lucene3x".equals(w.w.getConfig().getCodec().getName()); // 0 @@ -129,10 +129,10 @@ public class AllGroupsCollectorTest extends LuceneTestCase { private AbstractAllGroupsCollector createRandomCollector(String groupField, boolean canUseIDV) throws IOException { AbstractAllGroupsCollector selected; - if (random.nextBoolean() && canUseIDV) { - boolean diskResident = random.nextBoolean(); + if (getRandom().nextBoolean() && canUseIDV) { + boolean diskResident = getRandom().nextBoolean(); selected = DVAllGroupsCollector.create(groupField, Type.BYTES_VAR_SORTED, diskResident); - } else if (random.nextBoolean()) { + } else if (getRandom().nextBoolean()) { selected = new TermAllGroupsCollector(groupField); } else { ValueSource vs = new BytesRefFieldSource(groupField); diff --git modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java index 
d5a77ee..65ed1a3 100644 --- modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java +++ modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java @@ -69,10 +69,10 @@ public class TestGrouping extends LuceneTestCase { Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter( - random, + getRandom(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + new MockAnalyzer(getRandom())).setMergePolicy(newLogMergePolicy())); boolean canUseIDV = !"Lucene3x".equals(w.w.getConfig().getCodec().getName()); // 0 Document doc = new Document(); @@ -181,10 +181,10 @@ public class TestGrouping extends LuceneTestCase { private AbstractFirstPassGroupingCollector createRandomFirstPassCollector(String groupField, Sort groupSort, int topDocs, boolean canUseIDV) throws IOException { AbstractFirstPassGroupingCollector selected; - if (canUseIDV && random.nextBoolean()) { - boolean diskResident = random.nextBoolean(); + if (canUseIDV && getRandom().nextBoolean()) { + boolean diskResident = getRandom().nextBoolean(); selected = DVFirstPassGroupingCollector.create(groupSort, topDocs, groupField, Type.BYTES_VAR_SORTED, diskResident); - } else if (random.nextBoolean()) { + } else if (getRandom().nextBoolean()) { ValueSource vs = new BytesRefFieldSource(groupField); selected = new FunctionFirstPassGroupingCollector(vs, new HashMap(), groupSort, topDocs); } else { @@ -198,7 +198,7 @@ public class TestGrouping extends LuceneTestCase { private AbstractFirstPassGroupingCollector createFirstPassCollector(String groupField, Sort groupSort, int topDocs, AbstractFirstPassGroupingCollector firstPassGroupingCollector) throws IOException { if (DVFirstPassGroupingCollector.class.isAssignableFrom(firstPassGroupingCollector.getClass())) { - boolean diskResident = random.nextBoolean(); + boolean diskResident = getRandom().nextBoolean(); return DVFirstPassGroupingCollector.create(groupSort, topDocs, groupField, Type.BYTES_VAR_SORTED, diskResident); } else if (TermFirstPassGroupingCollector.class.isAssignableFrom(firstPassGroupingCollector.getClass())) { ValueSource vs = new BytesRefFieldSource(groupField); @@ -220,7 +220,7 @@ public class TestGrouping extends LuceneTestCase { boolean fillSortFields) throws IOException { if (DVFirstPassGroupingCollector.class.isAssignableFrom(firstPassGroupingCollector.getClass())) { - boolean diskResident = random.nextBoolean(); + boolean diskResident = getRandom().nextBoolean(); Collection searchGroups = firstPassGroupingCollector.getTopGroups(groupOffset, fillSortFields); return DVSecondPassGroupingCollector.create(groupField, diskResident, Type.BYTES_VAR_SORTED, searchGroups, groupSort, sortWithinGroup, maxDocsPerGroup, getScores, getMaxScores, fillSortFields); } else if (TermFirstPassGroupingCollector.class.isAssignableFrom(firstPassGroupingCollector.getClass())) { @@ -245,7 +245,7 @@ public class TestGrouping extends LuceneTestCase { boolean getMaxScores, boolean fillSortFields) throws IOException { if (DVFirstPassGroupingCollector.class.isAssignableFrom(firstPassGroupingCollector.getClass())) { - boolean diskResident = random.nextBoolean(); + boolean diskResident = getRandom().nextBoolean(); return DVSecondPassGroupingCollector.create(groupField, diskResident, Type.BYTES_VAR_SORTED, (Collection) searchGroups, groupSort, sortWithinGroup, maxDocsPerGroup, getScores, getMaxScores, fillSortFields); } else if 
(firstPassGroupingCollector.getClass().isAssignableFrom(TermFirstPassGroupingCollector.class)) { return new TermSecondPassGroupingCollector(groupField, searchGroups, groupSort, sortWithinGroup, maxDocsPerGroup , getScores, getMaxScores, fillSortFields); @@ -275,7 +275,7 @@ public class TestGrouping extends LuceneTestCase { if (firstPassGroupingCollector.getClass().isAssignableFrom(TermFirstPassGroupingCollector.class)) { return new TermAllGroupsCollector(groupField); } else if (firstPassGroupingCollector.getClass().isAssignableFrom(DVFirstPassGroupingCollector.class)) { - boolean diskResident = random.nextBoolean(); + boolean diskResident = getRandom().nextBoolean(); return DVAllGroupsCollector.create(groupField, Type.BYTES_VAR_SORTED, diskResident); } else { ValueSource vs = new BytesRefFieldSource(groupField); @@ -372,18 +372,18 @@ public class TestGrouping extends LuceneTestCase { private Sort getRandomSort() { final List sortFields = new ArrayList(); - if (random.nextInt(7) == 2) { + if (getRandom().nextInt(7) == 2) { sortFields.add(SortField.FIELD_SCORE); } else { - if (random.nextBoolean()) { - if (random.nextBoolean()) { - sortFields.add(new SortField("sort1", SortField.Type.STRING, random.nextBoolean())); + if (getRandom().nextBoolean()) { + if (getRandom().nextBoolean()) { + sortFields.add(new SortField("sort1", SortField.Type.STRING, getRandom().nextBoolean())); } else { - sortFields.add(new SortField("sort2", SortField.Type.STRING, random.nextBoolean())); + sortFields.add(new SortField("sort2", SortField.Type.STRING, getRandom().nextBoolean())); } - } else if (random.nextBoolean()) { - sortFields.add(new SortField("sort1", SortField.Type.STRING, random.nextBoolean())); - sortFields.add(new SortField("sort2", SortField.Type.STRING, random.nextBoolean())); + } else if (getRandom().nextBoolean()) { + sortFields.add(new SortField("sort1", SortField.Type.STRING, getRandom().nextBoolean())); + sortFields.add(new SortField("sort2", SortField.Type.STRING, getRandom().nextBoolean())); } } // Break ties: @@ -559,7 +559,7 @@ public class TestGrouping extends LuceneTestCase { private DirectoryReader getDocBlockReader(Directory dir, GroupDoc[] groupDocs) throws IOException { // Coalesce by group, but in random order: - Collections.shuffle(Arrays.asList(groupDocs), random); + Collections.shuffle(Arrays.asList(groupDocs), getRandom()); final Map> groupMap = new HashMap>(); final List groupValues = new ArrayList(); @@ -572,10 +572,10 @@ public class TestGrouping extends LuceneTestCase { } RandomIndexWriter w = new RandomIndexWriter( - random, + getRandom(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, - new MockAnalyzer(random))); + new MockAnalyzer(getRandom()))); final List> updateDocs = new ArrayList>(); @@ -604,7 +604,7 @@ public class TestGrouping extends LuceneTestCase { docs.get(docs.size()-1).add(groupEnd); // Add as a doc block: w.addDocuments(docs); - if (group != null && random.nextInt(7) == 4) { + if (group != null && getRandom().nextInt(7) == 4) { updateDocs.add(docs); } } @@ -651,16 +651,16 @@ public class TestGrouping extends LuceneTestCase { } public void testRandom() throws Exception { - int numberOfRuns = _TestUtil.nextInt(random, 3, 6); + int numberOfRuns = _TestUtil.nextInt(getRandom(), 3, 6); for (int iter=0; iter docs = new ArrayList(); @@ -230,7 +230,7 @@ public class TestBlockJoin extends LuceneTestCase { public void testBoostBug() throws Exception { final Directory dir = newDirectory(); - final RandomIndexWriter w = new RandomIndexWriter(random, dir); + final 
RandomIndexWriter w = new RandomIndexWriter(getRandom(), dir); IndexReader r = w.getReader(); w.close(); IndexSearcher s = newSearcher(r); @@ -247,18 +247,18 @@ public class TestBlockJoin extends LuceneTestCase { private String[][] getRandomFields(int maxUniqueValues) { - final String[][] fields = new String[_TestUtil.nextInt(random, 2, 4)][]; + final String[][] fields = new String[_TestUtil.nextInt(getRandom(), 2, 4)][]; for(int fieldID=0;fieldID toDelete = new ArrayList(); // TODO: parallel star join, nested join cases too! - final RandomIndexWriter w = new RandomIndexWriter(random, dir); - final RandomIndexWriter joinW = new RandomIndexWriter(random, joinDir); + final RandomIndexWriter w = new RandomIndexWriter(getRandom(), dir); + final RandomIndexWriter joinW = new RandomIndexWriter(getRandom(), joinDir); for(int parentDocID=0;parentDocID joinResults = c.getTopGroups(childJoinQuery, childSort, 0, hitsPerGroup, 0, true); @@ -617,27 +617,27 @@ public class TestBlockJoin extends LuceneTestCase { // Get random query against parent documents: final Query parentQuery2; - if (random.nextInt(3) == 2) { - final int fieldID = random.nextInt(parentFields.length); + if (getRandom().nextInt(3) == 2) { + final int fieldID = getRandom().nextInt(parentFields.length); parentQuery2 = new TermQuery(new Term("parent" + fieldID, - parentFields[fieldID][random.nextInt(parentFields[fieldID].length)])); - } else if (random.nextInt(3) == 2) { + parentFields[fieldID][getRandom().nextInt(parentFields[fieldID].length)])); + } else if (getRandom().nextInt(3) == 2) { BooleanQuery bq = new BooleanQuery(); parentQuery2 = bq; - final int numClauses = _TestUtil.nextInt(random, 2, 4); + final int numClauses = _TestUtil.nextInt(getRandom(), 2, 4); boolean didMust = false; for(int clauseIDX=0;clauseIDX docs = new ArrayList(); @@ -891,7 +891,7 @@ public class TestBlockJoin extends LuceneTestCase { public void testAdvanceSingleParentSingleChild() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, dir); + RandomIndexWriter w = new RandomIndexWriter(getRandom(), dir); Document childDoc = new Document(); childDoc.add(newField("child", "1", StringField.TYPE_UNSTORED)); Document parentDoc = new Document(); @@ -915,7 +915,7 @@ public class TestBlockJoin extends LuceneTestCase { public void testAdvanceSingleParentNoChild() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(new LogDocMergePolicy())); + RandomIndexWriter w = new RandomIndexWriter(getRandom(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom())).setMergePolicy(new LogDocMergePolicy())); Document parentDoc = new Document(); parentDoc.add(newField("parent", "1", StringField.TYPE_UNSTORED)); parentDoc.add(newField("isparent", "yes", StringField.TYPE_UNSTORED)); diff --git modules/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java modules/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java index 1b1a040..2462695 100644 --- modules/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java +++ modules/join/src/test/org/apache/lucene/search/join/TestJoinUtil.java @@ -42,10 +42,10 @@ public class TestJoinUtil extends LuceneTestCase { Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter( - random, + getRandom(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, - new 
MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + new MockAnalyzer(getRandom())).setMergePolicy(newLogMergePolicy())); // 0 Document doc = new Document(); @@ -120,16 +120,16 @@ public class TestJoinUtil extends LuceneTestCase { @Test public void testSingleValueRandomJoin() throws Exception { - int maxIndexIter = _TestUtil.nextInt(random, 6, 12); - int maxSearchIter = _TestUtil.nextInt(random, 13, 26); + int maxIndexIter = _TestUtil.nextInt(getRandom(), 6, 12); + int maxSearchIter = _TestUtil.nextInt(getRandom(), 13, 26); executeRandomJoin(false, maxIndexIter, maxSearchIter); } @Test // This test really takes more time, that is why the number of iterations are smaller. public void testMultiValueRandomJoin() throws Exception { - int maxIndexIter = _TestUtil.nextInt(random, 3, 6); - int maxSearchIter = _TestUtil.nextInt(random, 6, 12); + int maxIndexIter = _TestUtil.nextInt(getRandom(), 3, 6); + int maxSearchIter = _TestUtil.nextInt(getRandom(), 6, 12); executeRandomJoin(true, maxIndexIter, maxSearchIter); } @@ -140,11 +140,11 @@ public class TestJoinUtil extends LuceneTestCase { } Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter( - random, + getRandom(), dir, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false)).setMergePolicy(newLogMergePolicy()) + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.KEYWORD, false)).setMergePolicy(newLogMergePolicy()) ); - int numberOfDocumentsToIndex = _TestUtil.nextInt(random, 87, 764); + int numberOfDocumentsToIndex = _TestUtil.nextInt(getRandom(), 87, 764); IndexIterationContext context = createContext(numberOfDocumentsToIndex, w, multipleValuesPerDocument); IndexReader topLevelReader = w.getReader(); @@ -155,7 +155,7 @@ public class TestJoinUtil extends LuceneTestCase { } IndexSearcher indexSearcher = newSearcher(topLevelReader); - int r = random.nextInt(context.randomUniqueValues.length); + int r = getRandom().nextInt(context.randomUniqueValues.length); boolean from = context.randomFrom[r]; String randomValue = context.randomUniqueValues[r]; FixedBitSet expectedResult = createExpectedResult(randomValue, from, indexSearcher.getIndexReader(), context); @@ -229,28 +229,28 @@ public class TestJoinUtil extends LuceneTestCase { for (int i = 0; i < numRandomValues; i++) { String uniqueRandomValue; do { - uniqueRandomValue = _TestUtil.randomRealisticUnicodeString(random); + uniqueRandomValue = _TestUtil.randomRealisticUnicodeString(getRandom()); // uniqueRandomValue = _TestUtil.randomSimpleString(random); } while ("".equals(uniqueRandomValue) || trackSet.contains(uniqueRandomValue)); // Generate unique values and empty strings aren't allowed. 
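[Editor's note] Lines in this hunk such as _TestUtil.nextInt(getRandom(), 87, 764) draw a bounded int with both endpoints inclusive, passing the Random in explicitly so every value in the test derives from one reproducible seed. The real helper lives in Lucene's test framework (_TestUtil); the version below is an illustrative sketch only, assuming the range fits in an int.

import java.util.Random;

final class RandomInts {
  // Returns a uniformly distributed int in [start, end], both bounds inclusive.
  static int nextInt(Random random, int start, int end) {
    if (start > end) {
      throw new IllegalArgumentException("start must be <= end");
    }
    return start + random.nextInt(end - start + 1);
  }
}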
trackSet.add(uniqueRandomValue); - context.randomFrom[i] = random.nextBoolean(); + context.randomFrom[i] = getRandom().nextBoolean(); context.randomUniqueValues[i] = uniqueRandomValue; } for (int i = 0; i < nDocs; i++) { String id = Integer.toString(i); - int randomI = random.nextInt(context.randomUniqueValues.length); + int randomI = getRandom().nextInt(context.randomUniqueValues.length); String value = context.randomUniqueValues[randomI]; Document document = new Document(); - document.add(newField(random, "id", id, TextField.TYPE_STORED)); - document.add(newField(random, "value", value, TextField.TYPE_STORED)); + document.add(newField(getRandom(), "id", id, TextField.TYPE_STORED)); + document.add(newField(getRandom(), "value", value, TextField.TYPE_STORED)); boolean from = context.randomFrom[randomI]; - int numberOfLinkValues = multipleValuesPerDocument ? 2 + random.nextInt(10) : 1; + int numberOfLinkValues = multipleValuesPerDocument ? 2 + getRandom().nextInt(10) : 1; RandomDoc doc = new RandomDoc(id, numberOfLinkValues, value); for (int j = 0; j < numberOfLinkValues; j++) { - String linkValue = context.randomUniqueValues[random.nextInt(context.randomUniqueValues.length)]; + String linkValue = context.randomUniqueValues[getRandom().nextInt(context.randomUniqueValues.length)]; doc.linkValues.add(linkValue); if (from) { if (!context.fromDocuments.containsKey(linkValue)) { @@ -262,7 +262,7 @@ public class TestJoinUtil extends LuceneTestCase { context.fromDocuments.get(linkValue).add(doc); context.randomValueFromDocs.get(value).add(doc); - document.add(newField(random, "from", linkValue, TextField.TYPE_STORED)); + document.add(newField(getRandom(), "from", linkValue, TextField.TYPE_STORED)); } else { if (!context.toDocuments.containsKey(linkValue)) { context.toDocuments.put(linkValue, new ArrayList()); @@ -273,7 +273,7 @@ public class TestJoinUtil extends LuceneTestCase { context.toDocuments.get(linkValue).add(doc); context.randomValueToDocs.get(value).add(doc); - document.add(newField(random, "to", linkValue, TextField.TYPE_STORED)); + document.add(newField(getRandom(), "to", linkValue, TextField.TYPE_STORED)); } } @@ -285,7 +285,7 @@ public class TestJoinUtil extends LuceneTestCase { } w.addDocument(document); - if (random.nextInt(10) == 4) { + if (getRandom().nextInt(10) == 4) { w.commit(); } if (VERBOSE) { diff --git modules/queries/src/test/org/apache/lucene/queries/BooleanFilterTest.java modules/queries/src/test/org/apache/lucene/queries/BooleanFilterTest.java index 37ca1ae..a607815 100644 --- modules/queries/src/test/org/apache/lucene/queries/BooleanFilterTest.java +++ modules/queries/src/test/org/apache/lucene/queries/BooleanFilterTest.java @@ -48,7 +48,7 @@ public class BooleanFilterTest extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), directory, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)); //Add series of docs with filterable fields : acces rights, prices, dates and "in-stock" flags addDoc(writer, "admin guest", "010", "20040101", "Y"); diff --git modules/queries/src/test/org/apache/lucene/queries/ChainedFilterTest.java modules/queries/src/test/org/apache/lucene/queries/ChainedFilterTest.java index 3614daf..0e7eb30 100644 --- modules/queries/src/test/org/apache/lucene/queries/ChainedFilterTest.java +++ 
modules/queries/src/test/org/apache/lucene/queries/ChainedFilterTest.java @@ -55,7 +55,7 @@ public class ChainedFilterTest extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), directory); Calendar cal = new GregorianCalendar(); cal.clear(); cal.setTimeInMillis(1041397200000L); // 2003 January 01 @@ -189,7 +189,7 @@ public class ChainedFilterTest extends LuceneTestCase { public void testWithCachingFilter() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir); IndexReader reader = writer.getReader(); writer.close(); diff --git modules/queries/src/test/org/apache/lucene/queries/TermsFilterTest.java modules/queries/src/test/org/apache/lucene/queries/TermsFilterTest.java index 8814b7e..890dd56 100644 --- modules/queries/src/test/org/apache/lucene/queries/TermsFilterTest.java +++ modules/queries/src/test/org/apache/lucene/queries/TermsFilterTest.java @@ -54,7 +54,7 @@ public class TermsFilterTest extends LuceneTestCase { public void testMissingTerms() throws Exception { String fieldName = "field1"; Directory rd = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random, rd); + RandomIndexWriter w = new RandomIndexWriter(getRandom(), rd); for (int i = 0; i < 100; i++) { Document doc = new Document(); int term = i * 10; //terms are units of 10; @@ -90,7 +90,7 @@ public class TermsFilterTest extends LuceneTestCase { public void testMissingField() throws Exception { String fieldName = "field1"; Directory rd1 = newDirectory(); - RandomIndexWriter w1 = new RandomIndexWriter(random, rd1); + RandomIndexWriter w1 = new RandomIndexWriter(getRandom(), rd1); Document doc = new Document(); doc.add(newField(fieldName, "content1", StringField.TYPE_STORED)); w1.addDocument(doc); @@ -99,7 +99,7 @@ public class TermsFilterTest extends LuceneTestCase { fieldName = "field2"; Directory rd2 = newDirectory(); - RandomIndexWriter w2 = new RandomIndexWriter(random, rd2); + RandomIndexWriter w2 = new RandomIndexWriter(getRandom(), rd2); doc = new Document(); doc.add(newField(fieldName, "content2", StringField.TYPE_STORED)); w2.addDocument(doc); diff --git modules/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java modules/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java index 32c8877..889502a 100755 --- modules/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java +++ modules/queries/src/test/org/apache/lucene/queries/TestCustomScoreQuery.java @@ -303,11 +303,11 @@ public class TestCustomScoreQuery extends FunctionTestSetup { assertEquals("queries should have same #hits",h1.size(),h4CustomAdd.size()); assertEquals("queries should have same #hits",h1.size(),h5CustomMulAdd.size()); - QueryUtils.check(random, q1, s, rarely()); - QueryUtils.check(random, q2, s, rarely()); - QueryUtils.check(random, q3, s, rarely()); - QueryUtils.check(random, q4, s, rarely()); - QueryUtils.check(random, q5, s, rarely()); + QueryUtils.check(getRandom(), q1, s, rarely()); + QueryUtils.check(getRandom(), q2, s, rarely()); + QueryUtils.check(getRandom(), q3, s, rarely()); + QueryUtils.check(getRandom(), q4, s, rarely()); + QueryUtils.check(getRandom(), q5, s, rarely()); // verify scores ratios for (final Integer doc : h1.keySet()) { diff --git 
modules/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java modules/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java index ebfad3d..62fcb6d 100644 --- modules/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java +++ modules/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java @@ -92,12 +92,12 @@ public abstract class FunctionTestSetup extends LuceneTestCase { } // prepare a small index with just a few documents. dir = newDirectory(); - anlzr = new MockAnalyzer(random); + anlzr = new MockAnalyzer(getStaticRandom()); IndexWriterConfig iwc = newIndexWriterConfig( TEST_VERSION_CURRENT, anlzr).setMergePolicy(newLogMergePolicy()); if (doMultiSegment) { - iwc.setMaxBufferedDocs(_TestUtil.nextInt(random, 2, 7)); + iwc.setMaxBufferedDocs(_TestUtil.nextInt(getStaticRandom(), 2, 7)); } - RandomIndexWriter iw = new RandomIndexWriter(random, dir, iwc); + RandomIndexWriter iw = new RandomIndexWriter(getStaticRandom(), dir, iwc); // add docs not exactly in natural ID order, to verify we do check the order of docs by scores int remaining = N_DOCS; boolean done[] = new boolean[N_DOCS]; diff --git modules/queries/src/test/org/apache/lucene/queries/function/TestFieldScoreQuery.java modules/queries/src/test/org/apache/lucene/queries/function/TestFieldScoreQuery.java index 3be1b4b..b6358e0 100755 --- modules/queries/src/test/org/apache/lucene/queries/function/TestFieldScoreQuery.java +++ modules/queries/src/test/org/apache/lucene/queries/function/TestFieldScoreQuery.java @@ -84,7 +84,7 @@ public class TestFieldScoreQuery extends FunctionTestSetup { IndexReader r = IndexReader.open(dir); IndexSearcher s = new IndexSearcher(r); log("test: "+ functionQuery); - QueryUtils.check(random, functionQuery,s); + QueryUtils.check(getRandom(), functionQuery,s); ScoreDoc[] h = s.search(functionQuery, null, 1000).scoreDocs; assertEquals("All docs should be matched!",N_DOCS,h.length); String prevID = "ID"+(N_DOCS+1); // greater than all ids of docs in this test diff --git modules/queries/src/test/org/apache/lucene/queries/function/TestOrdValues.java modules/queries/src/test/org/apache/lucene/queries/function/TestOrdValues.java index 0d33d1a..f153c29 100644 --- modules/queries/src/test/org/apache/lucene/queries/function/TestOrdValues.java +++ modules/queries/src/test/org/apache/lucene/queries/function/TestOrdValues.java @@ -72,7 +72,7 @@ public class TestOrdValues extends FunctionTestSetup { Query q = new FunctionQuery(vs); log("test: " + q); - QueryUtils.check(random, q, s); + QueryUtils.check(getRandom(), q, s); ScoreDoc[] h = s.search(q, null, 1000).scoreDocs; assertEquals("All docs should be matched!", N_DOCS, h.length); String prevID = inOrder diff --git modules/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java modules/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java index fe71dff..be1bb90 100644 --- modules/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java +++ modules/queries/src/test/org/apache/lucene/queries/mlt/TestMoreLikeThis.java @@ -45,7 +45,7 @@ public class TestMoreLikeThis extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), directory); // Add series of docs with specific information for MoreLikeThis addDoc(writer, "lucene"); @@ -73,7 +73,7 @@ public class TestMoreLikeThis 
extends LuceneTestCase { Map originalValues = getOriginalValues(); MoreLikeThis mlt = new MoreLikeThis(reader); - mlt.setAnalyzer(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + mlt.setAnalyzer(new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)); mlt.setMinDocFreq(1); mlt.setMinTermFreq(1); mlt.setMinWordLen(1); @@ -107,7 +107,7 @@ public class TestMoreLikeThis extends LuceneTestCase { private Map getOriginalValues() throws IOException { Map originalValues = new HashMap(); MoreLikeThis mlt = new MoreLikeThis(reader); - mlt.setAnalyzer(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + mlt.setAnalyzer(new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)); mlt.setMinDocFreq(1); mlt.setMinTermFreq(1); mlt.setMinWordLen(1); @@ -127,7 +127,7 @@ public class TestMoreLikeThis extends LuceneTestCase { // LUCENE-3326 public void testMultiFields() throws Exception { MoreLikeThis mlt = new MoreLikeThis(reader); - mlt.setAnalyzer(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + mlt.setAnalyzer(new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)); mlt.setMinDocFreq(1); mlt.setMinTermFreq(1); mlt.setMinWordLen(1); diff --git modules/queryparser/src/test/org/apache/lucene/queryparser/analyzing/TestAnalyzingQueryParser.java modules/queryparser/src/test/org/apache/lucene/queryparser/analyzing/TestAnalyzingQueryParser.java index 80cfff3..4643790 100644 --- modules/queryparser/src/test/org/apache/lucene/queryparser/analyzing/TestAnalyzingQueryParser.java +++ modules/queryparser/src/test/org/apache/lucene/queryparser/analyzing/TestAnalyzingQueryParser.java @@ -98,47 +98,45 @@ public class TestAnalyzingQueryParser extends LuceneTestCase { org.apache.lucene.search.Query q = qp.parse(s); return q.toString("field"); } + + final static class FoldingFilter extends TokenFilter { + final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); -} - -final class TestFoldingFilter extends TokenFilter { - final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); - - public TestFoldingFilter(TokenStream input) { - super(input); - } + public FoldingFilter(TokenStream input) { + super(input); + } - @Override - public boolean incrementToken() throws IOException { - if (input.incrementToken()) { - char term[] = termAtt.buffer(); - for (int i = 0; i < term.length; i++) - switch(term[i]) { - case 'ü': - term[i] = 'u'; - break; - case 'ö': - term[i] = 'o'; - break; - case 'é': - term[i] = 'e'; - break; - case 'ï': - term[i] = 'i'; - break; - } - return true; - } else { - return false; + @Override + public boolean incrementToken() throws IOException { + if (input.incrementToken()) { + char term[] = termAtt.buffer(); + for (int i = 0; i < term.length; i++) + switch(term[i]) { + case 'ü': + term[i] = 'u'; + break; + case 'ö': + term[i] = 'o'; + break; + case 'é': + term[i] = 'e'; + break; + case 'ï': + term[i] = 'i'; + break; + } + return true; + } else { + return false; + } } } -} -final class ASCIIAnalyzer extends Analyzer { - - @Override - public TokenStreamComponents createComponents(String fieldName, Reader reader) { - Tokenizer result = new MockTokenizer(reader, MockTokenizer.SIMPLE, true); - return new TokenStreamComponents(result, new TestFoldingFilter(result)); - } -} + final static class ASCIIAnalyzer extends Analyzer { + @Override + public TokenStreamComponents createComponents(String fieldName, Reader reader) { + Tokenizer result = new MockTokenizer(reader, MockTokenizer.SIMPLE, true); + return new 
TokenStreamComponents(result, new FoldingFilter(result)); + } + } +} \ No newline at end of file diff --git modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java index c5e3163..447e0cb 100644 --- modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java +++ modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java @@ -67,7 +67,7 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { public void testSimple() throws Exception { String[] fields = {"b", "t"}; - MultiFieldQueryParser mfqp = new MultiFieldQueryParser(TEST_VERSION_CURRENT, fields, new MockAnalyzer(random)); + MultiFieldQueryParser mfqp = new MultiFieldQueryParser(TEST_VERSION_CURRENT, fields, new MockAnalyzer(getRandom())); Query q = mfqp.parse("one"); assertEquals("b:one t:one", q.toString()); @@ -130,7 +130,7 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { boosts.put("b", Float.valueOf(5)); boosts.put("t", Float.valueOf(10)); String[] fields = {"b", "t"}; - MultiFieldQueryParser mfqp = new MultiFieldQueryParser(TEST_VERSION_CURRENT, fields, new MockAnalyzer(random), boosts); + MultiFieldQueryParser mfqp = new MultiFieldQueryParser(TEST_VERSION_CURRENT, fields, new MockAnalyzer(getRandom()), boosts); //Check for simple @@ -156,24 +156,24 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { public void testStaticMethod1() throws ParseException { String[] fields = {"b", "t"}; String[] queries = {"one", "two"}; - Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, new MockAnalyzer(random)); + Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, new MockAnalyzer(getRandom())); assertEquals("b:one t:two", q.toString()); String[] queries2 = {"+one", "+two"}; - q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries2, fields, new MockAnalyzer(random)); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries2, fields, new MockAnalyzer(getRandom())); assertEquals("(+b:one) (+t:two)", q.toString()); String[] queries3 = {"one", "+two"}; - q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries3, fields, new MockAnalyzer(random)); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries3, fields, new MockAnalyzer(getRandom())); assertEquals("b:one (+t:two)", q.toString()); String[] queries4 = {"one +more", "+two"}; - q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries4, fields, new MockAnalyzer(random)); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries4, fields, new MockAnalyzer(getRandom())); assertEquals("(b:one +b:more) (+t:two)", q.toString()); String[] queries5 = {"blah"}; try { - q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries5, fields, new MockAnalyzer(random)); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries5, fields, new MockAnalyzer(getRandom())); fail(); } catch(IllegalArgumentException e) { // expected exception, array length differs @@ -195,15 +195,15 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { public void testStaticMethod2() throws ParseException { String[] fields = {"b", "t"}; BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT}; - Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one", fields, flags, new MockAnalyzer(random)); + Query q = 
MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one", fields, flags, new MockAnalyzer(getRandom())); assertEquals("+b:one -t:one", q.toString()); - q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one two", fields, flags, new MockAnalyzer(random)); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one two", fields, flags, new MockAnalyzer(getRandom())); assertEquals("+(b:one b:two) -(t:one t:two)", q.toString()); try { BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST}; - q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "blah", fields, flags2, new MockAnalyzer(random)); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "blah", fields, flags2, new MockAnalyzer(getRandom())); fail(); } catch(IllegalArgumentException e) { // expected exception, array length differs @@ -215,15 +215,15 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { //int[] flags = {MultiFieldQueryParser.REQUIRED_FIELD, MultiFieldQueryParser.PROHIBITED_FIELD}; BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT}; - Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one", fields, flags, new MockAnalyzer(random));//, fields, flags, new MockAnalyzer(random)); + Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one", fields, flags, new MockAnalyzer(getRandom()));//, fields, flags, new MockAnalyzer(getRandom())); assertEquals("+b:one -t:one", q.toString()); - q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one two", fields, flags, new MockAnalyzer(random)); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "one two", fields, flags, new MockAnalyzer(getRandom())); assertEquals("+(b:one b:two) -(t:one t:two)", q.toString()); try { BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST}; - q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "blah", fields, flags2, new MockAnalyzer(random)); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, "blah", fields, flags2, new MockAnalyzer(getRandom())); fail(); } catch(IllegalArgumentException e) { // expected exception, array length differs @@ -235,12 +235,12 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { String[] fields = {"f1", "f2", "f3"}; BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT, BooleanClause.Occur.SHOULD}; - Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags, new MockAnalyzer(random)); + Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags, new MockAnalyzer(getRandom())); assertEquals("+f1:one -f2:two f3:three", q.toString()); try { BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST}; - q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags2, new MockAnalyzer(random)); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags2, new MockAnalyzer(getRandom())); fail(); } catch(IllegalArgumentException e) { // expected exception, array length differs @@ -251,12 +251,12 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { String[] queries = {"one", "two"}; String[] fields = {"b", "t"}; BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT}; - Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags, new MockAnalyzer(random)); + Query q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags, new MockAnalyzer(getRandom())); assertEquals("+b:one -t:two", q.toString()); try { BooleanClause.Occur[] flags2 = 
{BooleanClause.Occur.MUST}; - q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags2, new MockAnalyzer(random)); + q = MultiFieldQueryParser.parse(TEST_VERSION_CURRENT, queries, fields, flags2, new MockAnalyzer(getRandom())); fail(); } catch(IllegalArgumentException e) { // expected exception, array length differs @@ -278,7 +278,7 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { } public void testStopWordSearching() throws Exception { - Analyzer analyzer = new MockAnalyzer(random); + Analyzer analyzer = new MockAnalyzer(getRandom()); Directory ramDir = newDirectory(); IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)); Document doc = new Document(); @@ -302,7 +302,7 @@ public class TestMultiFieldQueryParser extends LuceneTestCase { * Return empty tokens for field "f1". */ private static class AnalyzerReturningNull extends Analyzer { - MockAnalyzer stdAnalyzer = new MockAnalyzer(random); + MockAnalyzer stdAnalyzer = new MockAnalyzer(getStaticRandom()); public AnalyzerReturningNull() { super(new PerFieldReuseStrategy()); diff --git modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestQueryParser.java modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestQueryParser.java index 9c8ce42..c44aded 100644 --- modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestQueryParser.java +++ modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestQueryParser.java @@ -30,7 +30,7 @@ public class TestQueryParser extends QueryParserTestBase { @Override public QueryParser getParser(Analyzer a) throws Exception { if (a == null) - a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true); + a = new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true); QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", a); qp.setDefaultOperator(QueryParser.OR_OPERATOR); return qp; diff --git modules/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java modules/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java index 37109b0..a379406 100644 --- modules/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java +++ modules/queryparser/src/test/org/apache/lucene/queryparser/complexPhrase/TestComplexPhraseQuery.java @@ -35,8 +35,8 @@ import org.apache.lucene.util.LuceneTestCase; public class TestComplexPhraseQuery extends LuceneTestCase { Directory rd; - Analyzer analyzer = new MockAnalyzer(random); - + Analyzer analyzer; + DocData docsContent[] = { new DocData("john smith", "1"), new DocData("johathon smith", "2"), new DocData("john percival smith", "3"), @@ -113,6 +113,8 @@ public class TestComplexPhraseQuery extends LuceneTestCase { @Override public void setUp() throws Exception { super.setUp(); + + analyzer = new MockAnalyzer(getRandom()); rd = newDirectory(); IndexWriter w = new IndexWriter(rd, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)); for (int i = 0; i < docsContent.length; i++) { diff --git modules/queryparser/src/test/org/apache/lucene/queryparser/ext/TestExtendableQueryParser.java modules/queryparser/src/test/org/apache/lucene/queryparser/ext/TestExtendableQueryParser.java index a5bb7ac..51df91c 100644 --- modules/queryparser/src/test/org/apache/lucene/queryparser/ext/TestExtendableQueryParser.java +++ modules/queryparser/src/test/org/apache/lucene/queryparser/ext/TestExtendableQueryParser.java @@ -43,7 +43,7 @@ public class 
TestExtendableQueryParser extends QueryParserTestBase { public QueryParser getParser(Analyzer a, Extensions extensions) throws Exception { if (a == null) - a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true); + a = new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true); QueryParser qp = extensions == null ? new ExtendableQueryParser( TEST_VERSION_CURRENT, "field", a) : new ExtendableQueryParser( TEST_VERSION_CURRENT, "field", a, extensions); diff --git modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/precedence/TestPrecedenceQueryParser.java modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/precedence/TestPrecedenceQueryParser.java index c7755da..12489d7 100644 --- modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/precedence/TestPrecedenceQueryParser.java +++ modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/precedence/TestPrecedenceQueryParser.java @@ -132,7 +132,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { public PrecedenceQueryParser getParser(Analyzer a) throws Exception { if (a == null) - a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true); + a = new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true); PrecedenceQueryParser qp = new PrecedenceQueryParser(); qp.setAnalyzer(a); qp.setDefaultOperator(StandardQueryConfigHandler.Operator.OR); @@ -178,7 +178,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { public Query getQueryDOA(String query, Analyzer a) throws Exception { if (a == null) - a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true); + a = new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true); PrecedenceQueryParser qp = new PrecedenceQueryParser(); qp.setAnalyzer(a); qp.setDefaultOperator(StandardQueryConfigHandler.Operator.AND); @@ -239,7 +239,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { "+(title:dog title:cat) -author:\"bob dole\""); PrecedenceQueryParser qp = new PrecedenceQueryParser(); - qp.setAnalyzer(new MockAnalyzer(random)); + qp.setAnalyzer(new MockAnalyzer(getRandom())); // make sure OR is the default: assertEquals(StandardQueryConfigHandler.Operator.OR, qp.getDefaultOperator()); qp.setDefaultOperator(StandardQueryConfigHandler.Operator.AND); @@ -253,7 +253,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { } public void testPunct() throws Exception { - Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false); + Analyzer a = new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false); assertQueryEquals("a&b", a, "a&b"); assertQueryEquals("a&&b", a, "a&&b"); assertQueryEquals(".NET", a, ".NET"); @@ -273,7 +273,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { assertQueryEquals("term 1.0 1 2", null, "term"); assertQueryEquals("term term1 term2", null, "term term term"); - Analyzer a = new MockAnalyzer(random); + Analyzer a = new MockAnalyzer(getRandom()); assertQueryEquals("3", a, "3"); assertQueryEquals("term 1.0 1 2", a, "term 1.0 1 2"); assertQueryEquals("term term1 term2", a, "term term1 term2"); @@ -412,7 +412,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { final String defaultField = "default"; final String monthField = "month"; final String hourField = "hour"; - PrecedenceQueryParser qp = new PrecedenceQueryParser(new MockAnalyzer(random)); + PrecedenceQueryParser qp = new PrecedenceQueryParser(new MockAnalyzer(getRandom())); Map fieldMap = new HashMap(); // set a field specific date resolution @@ -474,7 +474,7 @@ public class 
TestPrecedenceQueryParser extends LuceneTestCase { } public void testEscaped() throws Exception { - Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false); + Analyzer a = new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false); assertQueryEquals("a\\-b:c", a, "a-b:c"); assertQueryEquals("a\\+b:c", a, "a+b:c"); @@ -540,7 +540,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { public void testBoost() throws Exception { CharacterRunAutomaton stopSet = new CharacterRunAutomaton(BasicAutomata.makeString("on")); - Analyzer oneStopAnalyzer = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopSet, true); + Analyzer oneStopAnalyzer = new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true, stopSet, true); PrecedenceQueryParser qp = new PrecedenceQueryParser(); qp.setAnalyzer(oneStopAnalyzer); @@ -555,7 +555,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { q = qp.parse("\"on\"^1.0", "field"); assertNotNull(q); - q = getParser(new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)).parse("the^3", + q = getParser(new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)).parse("the^3", "field"); assertNotNull(q); } @@ -571,7 +571,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { public void testBooleanQuery() throws Exception { BooleanQuery.setMaxClauseCount(2); try { - getParser(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("one two three", "field"); + getParser(new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)).parse("one two three", "field"); fail("ParseException expected due to too many boolean clauses"); } catch (QueryNodeException expected) { // too many boolean clauses, so ParseException is expected @@ -580,7 +580,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { // LUCENE-792 public void testNOT() throws Exception { - Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false); + Analyzer a = new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false); assertQueryEquals("NOT foo AND bar", a, "-foo +bar"); } @@ -589,7 +589,7 @@ public class TestPrecedenceQueryParser extends LuceneTestCase { * issue has been corrected. 
*/ public void testPrecedence() throws Exception { - PrecedenceQueryParser parser = getParser(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + PrecedenceQueryParser parser = getParser(new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)); Query query1 = parser.parse("A AND B OR C AND D", "field"); Query query2 = parser.parse("(A AND B) OR (C AND D)", "field"); assertEquals(query1, query2); diff --git modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java index 2e1aeb4..869976c 100644 --- modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java +++ modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java @@ -81,7 +81,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase { String[] fields = { "b", "t" }; StandardQueryParser mfqp = new StandardQueryParser(); mfqp.setMultiFields(fields); - mfqp.setAnalyzer(new MockAnalyzer(random)); + mfqp.setAnalyzer(new MockAnalyzer(getRandom())); Query q = mfqp.parse("one", null); assertEquals("b:one t:one", q.toString()); @@ -151,7 +151,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase { StandardQueryParser mfqp = new StandardQueryParser(); mfqp.setMultiFields(fields); mfqp.setFieldsBoost(boosts); - mfqp.setAnalyzer(new MockAnalyzer(random)); + mfqp.setAnalyzer(new MockAnalyzer(getRandom())); // Check for simple Query q = mfqp.parse("one", null); @@ -179,24 +179,24 @@ public class TestMultiFieldQPHelper extends LuceneTestCase { public void testStaticMethod1() throws QueryNodeException { String[] fields = { "b", "t" }; String[] queries = { "one", "two" }; - Query q = QueryParserUtil.parse(queries, fields, new MockAnalyzer(random)); + Query q = QueryParserUtil.parse(queries, fields, new MockAnalyzer(getRandom())); assertEquals("b:one t:two", q.toString()); String[] queries2 = { "+one", "+two" }; - q = QueryParserUtil.parse(queries2, fields, new MockAnalyzer(random)); + q = QueryParserUtil.parse(queries2, fields, new MockAnalyzer(getRandom())); assertEquals("(+b:one) (+t:two)", q.toString()); String[] queries3 = { "one", "+two" }; - q = QueryParserUtil.parse(queries3, fields, new MockAnalyzer(random)); + q = QueryParserUtil.parse(queries3, fields, new MockAnalyzer(getRandom())); assertEquals("b:one (+t:two)", q.toString()); String[] queries4 = { "one +more", "+two" }; - q = QueryParserUtil.parse(queries4, fields, new MockAnalyzer(random)); + q = QueryParserUtil.parse(queries4, fields, new MockAnalyzer(getRandom())); assertEquals("(b:one +b:more) (+t:two)", q.toString()); String[] queries5 = { "blah" }; try { - q = QueryParserUtil.parse(queries5, fields, new MockAnalyzer(random)); + q = QueryParserUtil.parse(queries5, fields, new MockAnalyzer(getRandom())); fail(); } catch (IllegalArgumentException e) { // expected exception, array length differs @@ -220,15 +220,15 @@ public class TestMultiFieldQPHelper extends LuceneTestCase { BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT }; Query q = QueryParserUtil.parse("one", fields, flags, - new MockAnalyzer(random)); + new MockAnalyzer(getRandom())); assertEquals("+b:one -t:one", q.toString()); - q = QueryParserUtil.parse("one two", fields, flags, new MockAnalyzer(random)); + q = QueryParserUtil.parse("one two", fields, flags, new MockAnalyzer(getRandom())); assertEquals("+(b:one b:two) 
-(t:one t:two)", q.toString()); try { BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST }; - q = QueryParserUtil.parse("blah", fields, flags2, new MockAnalyzer(random)); + q = QueryParserUtil.parse("blah", fields, flags2, new MockAnalyzer(getRandom())); fail(); } catch (IllegalArgumentException e) { // expected exception, array length differs @@ -241,19 +241,19 @@ public class TestMultiFieldQPHelper extends LuceneTestCase { BooleanClause.Occur.MUST_NOT }; StandardQueryParser parser = new StandardQueryParser(); parser.setMultiFields(fields); - parser.setAnalyzer(new MockAnalyzer(random)); + parser.setAnalyzer(new MockAnalyzer(getRandom())); Query q = QueryParserUtil.parse("one", fields, flags, - new MockAnalyzer(random));// , fields, flags, new + new MockAnalyzer(getRandom()));// , fields, flags, new // MockAnalyzer()); assertEquals("+b:one -t:one", q.toString()); - q = QueryParserUtil.parse("one two", fields, flags, new MockAnalyzer(random)); + q = QueryParserUtil.parse("one two", fields, flags, new MockAnalyzer(getRandom())); assertEquals("+(b:one b:two) -(t:one t:two)", q.toString()); try { BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST }; - q = QueryParserUtil.parse("blah", fields, flags2, new MockAnalyzer(random)); + q = QueryParserUtil.parse("blah", fields, flags2, new MockAnalyzer(getRandom())); fail(); } catch (IllegalArgumentException e) { // expected exception, array length differs @@ -266,13 +266,13 @@ public class TestMultiFieldQPHelper extends LuceneTestCase { BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT, BooleanClause.Occur.SHOULD }; Query q = QueryParserUtil.parse(queries, fields, flags, - new MockAnalyzer(random)); + new MockAnalyzer(getRandom())); assertEquals("+f1:one -f2:two f3:three", q.toString()); try { BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST }; q = QueryParserUtil - .parse(queries, fields, flags2, new MockAnalyzer(random)); + .parse(queries, fields, flags2, new MockAnalyzer(getRandom())); fail(); } catch (IllegalArgumentException e) { // expected exception, array length differs @@ -285,13 +285,13 @@ public class TestMultiFieldQPHelper extends LuceneTestCase { BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT }; Query q = QueryParserUtil.parse(queries, fields, flags, - new MockAnalyzer(random)); + new MockAnalyzer(getRandom())); assertEquals("+b:one -t:two", q.toString()); try { BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST }; q = QueryParserUtil - .parse(queries, fields, flags2, new MockAnalyzer(random)); + .parse(queries, fields, flags2, new MockAnalyzer(getRandom())); fail(); } catch (IllegalArgumentException e) { // expected exception, array length differs @@ -317,7 +317,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase { } public void testStopWordSearching() throws Exception { - Analyzer analyzer = new MockAnalyzer(random); + Analyzer analyzer = new MockAnalyzer(getRandom()); Directory ramDir = newDirectory(); IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)); Document doc = new Document(); @@ -343,7 +343,7 @@ public class TestMultiFieldQPHelper extends LuceneTestCase { * Return empty tokens for field "f1". 
*/ private static final class AnalyzerReturningNull extends Analyzer { - MockAnalyzer stdAnalyzer = new MockAnalyzer(random); + MockAnalyzer stdAnalyzer = new MockAnalyzer(getStaticRandom()); public AnalyzerReturningNull() { super(new PerFieldReuseStrategy()); diff --git modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestNumericQueryParser.java modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestNumericQueryParser.java index bf558ae..4431ed9 100644 --- modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestNumericQueryParser.java +++ modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestNumericQueryParser.java @@ -96,7 +96,7 @@ public class TestNumericQueryParser extends LuceneTestCase { @BeforeClass public static void beforeClass() throws Exception { - ANALYZER = new MockAnalyzer(random); + ANALYZER = new MockAnalyzer(getStaticRandom()); qp = new StandardQueryParser(ANALYZER); @@ -112,10 +112,10 @@ public class TestNumericQueryParser extends LuceneTestCase { } dateFormatSanityCheckPass = true; - LOCALE = randomLocale(random); - TIMEZONE = randomTimeZone(random); - DATE_STYLE = randomDateStyle(random); - TIME_STYLE = randomDateStyle(random); + LOCALE = randomLocale(getStaticRandom()); + TIMEZONE = randomTimeZone(getStaticRandom()); + DATE_STYLE = randomDateStyle(getStaticRandom()); + TIME_STYLE = randomDateStyle(getStaticRandom()); // assumes localized date pattern will have at least year, month, day, // hour, minute @@ -130,7 +130,7 @@ public class TestNumericQueryParser extends LuceneTestCase { DATE_FORMAT = new NumberDateFormat(dateFormat); do { - randomDate = random.nextLong(); + randomDate = getStaticRandom().nextLong(); // prune date value so it doesn't pass in insane values to some // calendars. 
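[Editorial aside — not part of the patch. Every hunk in this series applies the same mechanical migration: test code stops reading LuceneTestCase's static `random` field directly and instead calls getRandom() from instance contexts and getStaticRandom() from static contexts (@BeforeClass methods, static initializers). Fixtures whose construction needs randomness also move off the field declaration and into setUp(), because the per-test random source only exists once the test instance is running. A minimal sketch of the pattern, using only the accessors this patch itself introduces; the class and field names below are hypothetical:

    import org.apache.lucene.analysis.MockAnalyzer;
    import org.apache.lucene.util.LuceneTestCase;
    import org.junit.BeforeClass;

    public class ExampleRandomizedTest extends LuceneTestCase {
      static MockAnalyzer classAnalyzer; // hypothetical class-level fixture
      MockAnalyzer testAnalyzer;         // hypothetical per-test fixture

      @BeforeClass
      public static void beforeClass() throws Exception {
        // static context: only the class-level random source is available here
        classAnalyzer = new MockAnalyzer(getStaticRandom());
      }

      @Override
      public void setUp() throws Exception {
        super.setUp();
        // instance context: initialize here rather than on the field declaration,
        // so the analyzer is seeded from the per-test random source
        testAnalyzer = new MockAnalyzer(getRandom());
      }
    }

The same reasoning explains the new setUp() overrides in TestComplexPhraseQuery, Test02Boolean, Test03Distance, and TestQueryTemplateManager elsewhere in this patch: their fixtures were previously built in field initializers, which run before a per-test random exists.]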
@@ -154,26 +154,26 @@ public class TestNumericQueryParser extends LuceneTestCase { } while (!dateFormatSanityCheckPass); NUMBER_FORMAT = NumberFormat.getNumberInstance(LOCALE); - NUMBER_FORMAT.setMaximumFractionDigits((random.nextInt() & 20) + 1); - NUMBER_FORMAT.setMinimumFractionDigits((random.nextInt() & 20) + 1); - NUMBER_FORMAT.setMaximumIntegerDigits((random.nextInt() & 20) + 1); - NUMBER_FORMAT.setMinimumIntegerDigits((random.nextInt() & 20) + 1); + NUMBER_FORMAT.setMaximumFractionDigits((getStaticRandom().nextInt() & 20) + 1); + NUMBER_FORMAT.setMinimumFractionDigits((getStaticRandom().nextInt() & 20) + 1); + NUMBER_FORMAT.setMaximumIntegerDigits((getStaticRandom().nextInt() & 20) + 1); + NUMBER_FORMAT.setMinimumIntegerDigits((getStaticRandom().nextInt() & 20) + 1); double randomDouble; long randomLong; int randomInt; float randomFloat; - while ((randomLong = normalizeNumber(Math.abs(random.nextLong())) + while ((randomLong = normalizeNumber(Math.abs(getStaticRandom().nextLong())) .longValue()) == 0L) ; - while ((randomDouble = normalizeNumber(Math.abs(random.nextDouble())) + while ((randomDouble = normalizeNumber(Math.abs(getStaticRandom().nextDouble())) .doubleValue()) == 0.0) ; - while ((randomFloat = normalizeNumber(Math.abs(random.nextFloat())) + while ((randomFloat = normalizeNumber(Math.abs(getStaticRandom().nextFloat())) .floatValue()) == 0.0f) ; - while ((randomInt = normalizeNumber(Math.abs(random.nextInt())).intValue()) == 0) + while ((randomInt = normalizeNumber(Math.abs(getStaticRandom().nextInt())).intValue()) == 0) ; randomNumberMap.put(NumericType.LONG.name(), randomLong); @@ -185,9 +185,9 @@ public class TestNumericQueryParser extends LuceneTestCase { RANDOM_NUMBER_MAP = Collections.unmodifiableMap(randomNumberMap); directory = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, directory, - newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)) - .setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000)) + RandomIndexWriter writer = new RandomIndexWriter(getStaticRandom(), directory, + newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getStaticRandom())) + .setMaxBufferedDocs(_TestUtil.nextInt(getStaticRandom(), 50, 1000)) .setMergePolicy(newLogMergePolicy())); Document doc = new Document(); diff --git modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java index 20fbc23..11c7327 100644 --- modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java +++ modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java @@ -195,7 +195,7 @@ public class TestQPHelper extends LuceneTestCase { public StandardQueryParser getParser(Analyzer a) throws Exception { if (a == null) - a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true); + a = new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true); StandardQueryParser qp = new StandardQueryParser(); qp.setAnalyzer(a); @@ -285,7 +285,7 @@ public class TestQPHelper extends LuceneTestCase { public Query getQueryDOA(String query, Analyzer a) throws Exception { if (a == null) - a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true); + a = new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true); StandardQueryParser qp = new StandardQueryParser(); qp.setAnalyzer(a); qp.setDefaultOperator(StandardQueryConfigHandler.Operator.AND); @@ -305,7 +305,7 @@ public class TestQPHelper extends 
LuceneTestCase { } public void testConstantScoreAutoRewrite() throws Exception { - StandardQueryParser qp = new StandardQueryParser(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + StandardQueryParser qp = new StandardQueryParser(new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)); Query q = qp.parse("foo*bar", "field"); assertTrue(q instanceof WildcardQuery); assertEquals(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT, ((MultiTermQuery) q).getRewriteMethod()); @@ -423,9 +423,9 @@ public class TestQPHelper extends LuceneTestCase { assertQueryEquals("field=a", null, "a"); assertQueryEquals("\"term germ\"~2", null, "\"term germ\"~2"); assertQueryEquals("term term term", null, "term term term"); - assertQueryEquals("türm term term", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), + assertQueryEquals("türm term term", new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false), "türm term term"); - assertQueryEquals("ümlaut", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), "ümlaut"); + assertQueryEquals("ümlaut", new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false), "ümlaut"); // FIXME: change MockAnalyzer to not extend CharTokenizer for this test //assertQueryEquals("\"\"", new KeywordAnalyzer(), ""); @@ -483,7 +483,7 @@ public class TestQPHelper extends LuceneTestCase { } public void testPunct() throws Exception { - Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false); + Analyzer a = new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false); assertQueryEquals("a&b", a, "a&b"); assertQueryEquals("a&&b", a, "a&&b"); assertQueryEquals(".NET", a, ".NET"); @@ -504,7 +504,7 @@ public class TestQPHelper extends LuceneTestCase { assertQueryEquals("term 1.0 1 2", null, "term"); assertQueryEquals("term term1 term2", null, "term term term"); - Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false); + Analyzer a = new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false); assertQueryEquals("3", a, "3"); assertQueryEquals("term 1.0 1 2", a, "term 1.0 1 2"); assertQueryEquals("term term1 term2", a, "term term1 term2"); @@ -755,7 +755,7 @@ public class TestQPHelper extends LuceneTestCase { } public void testEscaped() throws Exception { - Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false); + Analyzer a = new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false); /* * assertQueryEquals("\\[brackets", a, "\\[brackets"); @@ -854,7 +854,7 @@ public class TestQPHelper extends LuceneTestCase { } public void testQueryStringEscaping() throws Exception { - Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false); + Analyzer a = new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false); assertEscapedQueryEquals("a-b:c", a, "a\\-b\\:c"); assertEscapedQueryEquals("a+b:c", a, "a\\+b\\:c"); @@ -895,7 +895,7 @@ public class TestQPHelper extends LuceneTestCase { @Ignore("contrib queryparser shouldn't escape wildcard terms") public void testEscapedWildcard() throws Exception { StandardQueryParser qp = new StandardQueryParser(); - qp.setAnalyzer(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + qp.setAnalyzer(new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)); WildcardQuery q = new WildcardQuery(new Term("field", "foo\\?ba?r")); assertEquals(q, qp.parse("foo\\?ba?r", "field")); @@ -933,7 +933,7 @@ public class TestQPHelper extends LuceneTestCase { public void testBoost() throws Exception { CharacterRunAutomaton stopSet = new 
CharacterRunAutomaton(BasicAutomata.makeString("on")); - Analyzer oneStopAnalyzer = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopSet, true); + Analyzer oneStopAnalyzer = new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true, stopSet, true); StandardQueryParser qp = new StandardQueryParser(); qp.setAnalyzer(oneStopAnalyzer); @@ -949,7 +949,7 @@ public class TestQPHelper extends LuceneTestCase { assertNotNull(q); StandardQueryParser qp2 = new StandardQueryParser(); - qp2.setAnalyzer(new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)); + qp2.setAnalyzer(new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)); q = qp2.parse("the^3", "field"); // "the" is a stop word so the result is an empty query: @@ -979,7 +979,7 @@ public class TestQPHelper extends LuceneTestCase { public void testCustomQueryParserWildcard() { try { - new QPTestParser(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("a?t", "contents"); + new QPTestParser(new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)).parse("a?t", "contents"); fail("Wildcard queries should not be allowed"); } catch (QueryNodeException expected) { // expected exception @@ -988,7 +988,7 @@ public class TestQPHelper extends LuceneTestCase { public void testCustomQueryParserFuzzy() throws Exception { try { - new QPTestParser(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("xunit~", "contents"); + new QPTestParser(new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)).parse("xunit~", "contents"); fail("Fuzzy queries should not be allowed"); } catch (QueryNodeException expected) { // expected exception @@ -999,7 +999,7 @@ public class TestQPHelper extends LuceneTestCase { BooleanQuery.setMaxClauseCount(2); try { StandardQueryParser qp = new StandardQueryParser(); - qp.setAnalyzer(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + qp.setAnalyzer(new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)); qp.parse("one two three", "field"); fail("ParseException expected due to too many boolean clauses"); @@ -1013,7 +1013,7 @@ public class TestQPHelper extends LuceneTestCase { */ public void testPrecedence() throws Exception { StandardQueryParser qp = new StandardQueryParser(); - qp.setAnalyzer(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + qp.setAnalyzer(new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)); Query query1 = qp.parse("A AND B OR C AND D", "field"); Query query2 = qp.parse("+A +B +C +D", "field"); @@ -1145,7 +1145,7 @@ public class TestQPHelper extends LuceneTestCase { public void testStopwords() throws Exception { StandardQueryParser qp = new StandardQueryParser(); CharacterRunAutomaton stopSet = new CharacterRunAutomaton(new RegExp("the|foo").toAutomaton()); - qp.setAnalyzer(new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopSet, true)); + qp.setAnalyzer(new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true, stopSet, true)); Query result = qp.parse("a:the OR a:foo", "a"); assertNotNull("result is null and it shouldn't be", result); @@ -1169,7 +1169,7 @@ public class TestQPHelper extends LuceneTestCase { public void testPositionIncrement() throws Exception { StandardQueryParser qp = new StandardQueryParser(); qp.setAnalyzer( - new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)); + new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)); 
qp.setEnablePositionIncrements(true); @@ -1190,7 +1190,7 @@ public class TestQPHelper extends LuceneTestCase { public void testMatchAllDocs() throws Exception { StandardQueryParser qp = new StandardQueryParser(); - qp.setAnalyzer(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + qp.setAnalyzer(new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)); assertEquals(new MatchAllDocsQuery(), qp.parse("*:*", "field")); assertEquals(new MatchAllDocsQuery(), qp.parse("(*:*)", "field")); @@ -1202,7 +1202,7 @@ public class TestQPHelper extends LuceneTestCase { private void assertHits(int expected, String query, IndexSearcher is) throws IOException, QueryNodeException { StandardQueryParser qp = new StandardQueryParser(); - qp.setAnalyzer(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + qp.setAnalyzer(new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)); qp.setLocale(Locale.ENGLISH); Query q = qp.parse(query, "date"); diff --git modules/queryparser/src/test/org/apache/lucene/queryparser/surround/query/SrndQueryTest.java modules/queryparser/src/test/org/apache/lucene/queryparser/surround/query/SrndQueryTest.java index d0bb1dd..910c7e0 100644 --- modules/queryparser/src/test/org/apache/lucene/queryparser/surround/query/SrndQueryTest.java +++ modules/queryparser/src/test/org/apache/lucene/queryparser/surround/query/SrndQueryTest.java @@ -17,8 +17,6 @@ package org.apache.lucene.queryparser.surround.query; * limitations under the License. */ -import junit.framework.Assert; - import org.apache.lucene.queryparser.surround.parser.QueryParser; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryUtils; diff --git modules/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test02Boolean.java modules/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test02Boolean.java index e02e25e..10ed109 100644 --- modules/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test02Boolean.java +++ modules/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test02Boolean.java @@ -39,7 +39,12 @@ public class Test02Boolean extends LuceneTestCase { "a c e a b c" }; - SingleFieldTestDb db1 = new SingleFieldTestDb(random, docs1, fieldName); + public void setUp() throws Exception { + super.setUp(); + db1 = new SingleFieldTestDb(getRandom(), docs1, fieldName); + } + + SingleFieldTestDb db1; public void normalTest1(String query, int[] expdnrs) throws Exception { BooleanQueryTst bqt = new BooleanQueryTst( query, expdnrs, db1, fieldName, this, diff --git modules/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test03Distance.java modules/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test03Distance.java index 336acf9..58536a1 100644 --- modules/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test03Distance.java +++ modules/queryparser/src/test/org/apache/lucene/queryparser/surround/query/Test03Distance.java @@ -26,6 +26,7 @@ public class Test03Distance extends LuceneTestCase { public static void main(String args[]) { TestRunner.run(new TestSuite(Test03Distance.class)); } + boolean verbose = false; int maxBasicQueries = 16; @@ -58,8 +59,16 @@ public class Test03Distance extends LuceneTestCase { "a c e a b c" }; - SingleFieldTestDb db1 = new SingleFieldTestDb(random, docs1, fieldName); + SingleFieldTestDb db1; + @Override + public void setUp() throws Exception { + super.setUp(); + db1 = new SingleFieldTestDb(getRandom(), docs1, fieldName); + db2 = new 
SingleFieldTestDb(getRandom(), docs2, fieldName); + db3 = new SingleFieldTestDb(getRandom(), docs3, fieldName); + } + private void distanceTst(String query, int[] expdnrs, SingleFieldTestDb db) throws Exception { BooleanQueryTst bqt = new BooleanQueryTst( query, expdnrs, db, fieldName, this, new BasicQueryFactory(maxBasicQueries)); @@ -179,7 +188,7 @@ public class Test03Distance extends LuceneTestCase { "" }; - SingleFieldTestDb db2 = new SingleFieldTestDb(random, docs2, fieldName); + SingleFieldTestDb db2; public void distanceTest2(String query, int[] expdnrs) throws Exception { distanceTst(query, expdnrs, db2); @@ -227,7 +236,7 @@ public class Test03Distance extends LuceneTestCase { "" }; - SingleFieldTestDb db3 = new SingleFieldTestDb(random, docs3, fieldName); + SingleFieldTestDb db3; public void distanceTest3(String query, int[] expdnrs) throws Exception { distanceTst(query, expdnrs, db3); diff --git modules/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java modules/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java index 9e88d7f..ffb2c07 100644 --- modules/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java +++ modules/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java @@ -198,7 +198,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { public Query getQueryDOA(String query, Analyzer a) throws Exception { if (a == null) - a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true); + a = new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true); QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", a); qp.setDefaultOperator(QueryParser.AND_OPERATOR); return qp.parse(query); @@ -319,8 +319,8 @@ public abstract class QueryParserTestBase extends LuceneTestCase { public void testSimple() throws Exception { assertQueryEquals("term term term", null, "term term term"); - assertQueryEquals("türm term term", new MockAnalyzer(random), "türm term term"); - assertQueryEquals("ümlaut", new MockAnalyzer(random), "ümlaut"); + assertQueryEquals("türm term term", new MockAnalyzer(getRandom()), "türm term term"); + assertQueryEquals("ümlaut", new MockAnalyzer(getRandom()), "ümlaut"); // FIXME: enhance MockAnalyzer to be able to support this // it must no longer extend CharTokenizer @@ -380,7 +380,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { assertQueryEquals("+title:(dog OR cat) -author:\"bob dole\"", null, "+(title:dog title:cat) -author:\"bob dole\""); - QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random)); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(getRandom())); // make sure OR is the default: assertEquals(QueryParser.OR_OPERATOR, qp.getDefaultOperator()); qp.setDefaultOperator(QueryParser.AND_OPERATOR); @@ -390,7 +390,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { } public void testPunct() throws Exception { - Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false); + Analyzer a = new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false); assertQueryEquals("a&b", a, "a&b"); assertQueryEquals("a&&b", a, "a&&b"); assertQueryEquals(".NET", a, ".NET"); @@ -410,7 +410,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { assertQueryEquals("term 1.0 1 2", null, "term"); assertQueryEquals("term term1 term2", null, "term term term"); - Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, 
true); + Analyzer a = new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, true); assertQueryEquals("3", a, "3"); assertQueryEquals("term 1.0 1 2", a, "term 1.0 1 2"); assertQueryEquals("term term1 term2", a, "term term1 term2"); @@ -538,7 +538,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { assertEquals(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT, ((TermRangeQuery)getQuery("[ a TO z]", null)).getRewriteMethod()); - QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random, MockTokenizer.SIMPLE, true)); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true)); qp.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE); assertEquals(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE,((TermRangeQuery)qp.parse("[ a TO z]")).getRewriteMethod()); @@ -609,7 +609,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { final String defaultField = "default"; final String monthField = "month"; final String hourField = "hour"; - QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random, MockTokenizer.SIMPLE, true)); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true)); // set a field specific date resolution qp.setDateResolution(monthField, DateTools.Resolution.MONTH); @@ -642,7 +642,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { } public void testEscaped() throws Exception { - Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false); + Analyzer a = new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false); /*assertQueryEquals("\\[brackets", a, "\\[brackets"); assertQueryEquals("\\[brackets", null, "brackets"); @@ -736,7 +736,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { } public void testQueryStringEscaping() throws Exception { - Analyzer a = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false); + Analyzer a = new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false); assertEscapedQueryEquals("a-b:c", a, "a\\-b\\:c"); assertEscapedQueryEquals("a+b:c", a, "a\\+b\\:c"); @@ -822,7 +822,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { public void testBoost() throws Exception { CharacterRunAutomaton stopWords = new CharacterRunAutomaton(BasicAutomata.makeString("on")); - Analyzer oneStopAnalyzer = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopWords, true); + Analyzer oneStopAnalyzer = new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true, stopWords, true); QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", oneStopAnalyzer); Query q = qp.parse("on^1.0"); assertNotNull(q); @@ -835,7 +835,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { q = qp.parse("\"on\"^1.0"); assertNotNull(q); - QueryParser qp2 = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)); + QueryParser qp2 = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)); q = qp2.parse("the^3"); // "the" is a stop word so the result is an empty query: assertNotNull(q); @@ -864,7 +864,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { public void testCustomQueryParserWildcard() { try { - new QPTestParser("contents", new MockAnalyzer(random, 
MockTokenizer.WHITESPACE, false)).parse("a?t"); + new QPTestParser("contents", new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)).parse("a?t"); fail("Wildcard queries should not be allowed"); } catch (ParseException expected) { // expected exception @@ -873,7 +873,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { public void testCustomQueryParserFuzzy() throws Exception { try { - new QPTestParser("contents", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).parse("xunit~"); + new QPTestParser("contents", new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)).parse("xunit~"); fail("Fuzzy queries should not be allowed"); } catch (ParseException expected) { // expected exception @@ -883,7 +883,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { public void testBooleanQuery() throws Exception { BooleanQuery.setMaxClauseCount(2); try { - QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)); qp.parse("one two three"); fail("ParseException expected due to too many boolean clauses"); } catch (ParseException expected) { @@ -895,7 +895,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { * This test differs from TestPrecedenceQueryParser */ public void testPrecedence() throws Exception { - QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)); Query query1 = qp.parse("A AND B OR C AND D"); Query query2 = qp.parse("+A +B +C +D"); assertEquals(query1, query2); @@ -931,7 +931,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { public void testStarParsing() throws Exception { final int[] type = new int[1]; - QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)) { + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)) { @Override protected Query getWildcardQuery(String field, String termStr) throws ParseException { // override error checking of superclass @@ -990,13 +990,13 @@ public abstract class QueryParserTestBase extends LuceneTestCase { } public void testEscapedWildcard() throws Exception { - QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)); WildcardQuery q = new WildcardQuery(new Term("field", "foo\\?ba?r")); assertEquals(q, qp.parse("foo\\?ba?r")); } public void testRegexps() throws Exception { - QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)); RegexpQuery q = new RegexpQuery(new Term("field", "[a-z][123]")); assertEquals(q, qp.parse("/[a-z][123]/")); qp.setLowercaseExpandedTerms(true); @@ -1024,7 +1024,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { public void testStopwords() throws Exception { CharacterRunAutomaton stopSet 
= new CharacterRunAutomaton(new RegExp("the|foo").toAutomaton()); - QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "a", new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopSet, true)); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "a", new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true, stopSet, true)); Query result = qp.parse("a:the OR a:foo"); assertNotNull("result is null and it shouldn't be", result); assertTrue("result is not a BooleanQuery", result instanceof BooleanQuery); @@ -1040,7 +1040,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { } public void testPositionIncrement() throws Exception { - QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "a", new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "a", new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)); qp.setEnablePositionIncrements(true); String qtxt = "\"the words in poisitions pos02578 are stopped in this phrasequery\""; // 0 2 5 7 8 @@ -1057,7 +1057,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { } public void testMatchAllDocs() throws Exception { - QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)); assertEquals(new MatchAllDocsQuery(), qp.parse("*:*")); assertEquals(new MatchAllDocsQuery(), qp.parse("(*:*)")); BooleanQuery bq = (BooleanQuery)qp.parse("+*:* -*:*"); @@ -1066,7 +1066,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { } private void assertHits(int expected, String query, IndexSearcher is) throws ParseException, IOException { - QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "date", new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "date", new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false)); qp.setLocale(Locale.ENGLISH); Query q = qp.parse(query); ScoreDoc[] hits = is.search(q, null, 1000).scoreDocs; @@ -1084,7 +1084,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { // "match" public void testPositionIncrements() throws Exception { Directory dir = newDirectory(); - Analyzer a = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true); + Analyzer a = new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, a)); Document doc = new Document(); doc.add(newField("f", "the wizard of ozzy", TextField.TYPE_UNSTORED)); @@ -1247,13 +1247,13 @@ public abstract class QueryParserTestBase extends LuceneTestCase { } public void testDistanceAsEditsParsing() throws Exception { - QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(random)); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", new MockAnalyzer(getRandom())); FuzzyQuery q = (FuzzyQuery) qp.parse("foobar~2"); assertEquals(2f, q.getMinSimilarity(), 0.0001f); } public void testPhraseQueryToString() throws ParseException { - Analyzer analyzer = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true); + Analyzer analyzer = new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true, 
MockTokenFilter.ENGLISH_STOPSET, true); QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", analyzer); qp.setEnablePositionIncrements(true); PhraseQuery q = (PhraseQuery)qp.parse("\"this hi this is a test is\""); @@ -1262,7 +1262,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { public void testParseWildcardAndPhraseQueries() throws ParseException { String field = "content"; - QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, field, new MockAnalyzer(random)); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, field, new MockAnalyzer(getRandom())); qp.setAllowLeadingWildcard(true); String prefixQueries[][] = { @@ -1301,7 +1301,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { new CharacterRunAutomaton(new RegExp("[sS][tT][oO][pP]").toAutomaton()); QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "field", - new MockAnalyzer(random, MockTokenizer.WHITESPACE, false, stopStopList, false)); + new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false, stopStopList, false)); PhraseQuery phraseQuery = new PhraseQuery(); phraseQuery.add(new Term("field", "1")); @@ -1317,7 +1317,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { assertEquals(phraseQuery, qp.parse("\"1 stop 2\"")); qp = new QueryParser(TEST_VERSION_CURRENT, "field", - new MockAnalyzer(random, MockTokenizer.WHITESPACE, false, stopStopList, true)); + new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false, stopStopList, true)); qp.setEnablePositionIncrements(true); phraseQuery = new PhraseQuery(); @@ -1328,7 +1328,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase { public void testMatchAllQueryParsing() throws Exception { // test simple parsing of MatchAllDocsQuery - QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "key", new MockAnalyzer(random)); + QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "key", new MockAnalyzer(getRandom())); assertEquals(new MatchAllDocsQuery(), qp.parse(new MatchAllDocsQuery().toString())); // test parsing with non-default boost diff --git modules/queryparser/src/test/org/apache/lucene/queryparser/xml/TestParser.java modules/queryparser/src/test/org/apache/lucene/queryparser/xml/TestParser.java index 1d8f9eb..25da8e4 100644 --- modules/queryparser/src/test/org/apache/lucene/queryparser/xml/TestParser.java +++ modules/queryparser/src/test/org/apache/lucene/queryparser/xml/TestParser.java @@ -54,7 +54,7 @@ public class TestParser extends LuceneTestCase { @BeforeClass public static void beforeClass() throws Exception { // TODO: rewrite test (this needs to set QueryParser.enablePositionIncrements, too, for work with CURRENT): - Analyzer analyzer = new MockAnalyzer(random, MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET, false); + Analyzer analyzer = new MockAnalyzer(getStaticRandom(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET, false); //initialize the parser builder = new CorePlusExtensionsParser("contents", analyzer); diff --git modules/queryparser/src/test/org/apache/lucene/queryparser/xml/TestQueryTemplateManager.java modules/queryparser/src/test/org/apache/lucene/queryparser/xml/TestQueryTemplateManager.java index cd9c12c..50859b1 100644 --- modules/queryparser/src/test/org/apache/lucene/queryparser/xml/TestQueryTemplateManager.java +++ modules/queryparser/src/test/org/apache/lucene/queryparser/xml/TestQueryTemplateManager.java @@ -45,7 +45,7 @@ import java.util.StringTokenizer; public class TestQueryTemplateManager extends 
LuceneTestCase { private CoreParser builder; - private final Analyzer analyzer = new MockAnalyzer(random); + private Analyzer analyzer; private IndexSearcher searcher; private IndexReader reader; private Directory dir; @@ -141,6 +141,7 @@ public class TestQueryTemplateManager extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); + analyzer = new MockAnalyzer(getRandom()); //Create an index dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)); diff --git modules/suggest/src/test/org/apache/lucene/search/spell/TestDirectSpellChecker.java modules/suggest/src/test/org/apache/lucene/search/spell/TestDirectSpellChecker.java index 964d478..878c715 100644 --- modules/suggest/src/test/org/apache/lucene/search/spell/TestDirectSpellChecker.java +++ modules/suggest/src/test/org/apache/lucene/search/spell/TestDirectSpellChecker.java @@ -33,8 +33,8 @@ public class TestDirectSpellChecker extends LuceneTestCase { public void testInternalLevenshteinDistance() throws Exception { DirectSpellChecker spellchecker = new DirectSpellChecker(); Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, - new MockAnalyzer(random, MockTokenizer.KEYWORD, true)); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir, + new MockAnalyzer(getRandom(), MockTokenizer.KEYWORD, true)); String[] termsToAdd = { "metanoia", "metanoian", "metanoiai", "metanoias", "metanoi𐑍" }; for (int i = 0; i < termsToAdd.length; i++) { @@ -63,8 +63,8 @@ public class TestDirectSpellChecker extends LuceneTestCase { DirectSpellChecker spellChecker = new DirectSpellChecker(); spellChecker.setMinQueryLength(0); Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, - new MockAnalyzer(random, MockTokenizer.SIMPLE, true)); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir, + new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true)); for (int i = 0; i < 20; i++) { Document doc = new Document(); @@ -128,8 +128,8 @@ public class TestDirectSpellChecker extends LuceneTestCase { public void testOptions() throws Exception { Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, - new MockAnalyzer(random, MockTokenizer.SIMPLE, true)); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir, + new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true)); Document doc = new Document(); doc.add(newField("text", "foobar", TextField.TYPE_UNSTORED)); @@ -195,8 +195,8 @@ public class TestDirectSpellChecker extends LuceneTestCase { public void testBogusField() throws Exception { DirectSpellChecker spellChecker = new DirectSpellChecker(); Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, - new MockAnalyzer(random, MockTokenizer.SIMPLE, true)); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir, + new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true)); for (int i = 0; i < 20; i++) { Document doc = new Document(); @@ -219,8 +219,8 @@ public class TestDirectSpellChecker extends LuceneTestCase { public void testTransposition() throws Exception { DirectSpellChecker spellChecker = new DirectSpellChecker(); Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, - new MockAnalyzer(random, MockTokenizer.SIMPLE, true)); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir, + new MockAnalyzer(getRandom(), 
MockTokenizer.SIMPLE, true)); for (int i = 0; i < 20; i++) { Document doc = new Document(); @@ -244,8 +244,8 @@ public class TestDirectSpellChecker extends LuceneTestCase { public void testTransposition2() throws Exception { DirectSpellChecker spellChecker = new DirectSpellChecker(); Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir, - new MockAnalyzer(random, MockTokenizer.SIMPLE, true)); + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir, + new MockAnalyzer(getRandom(), MockTokenizer.SIMPLE, true)); for (int i = 0; i < 20; i++) { Document doc = new Document(); diff --git modules/suggest/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java modules/suggest/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java index c18b2c5..fc3d617 100644 --- modules/suggest/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java +++ modules/suggest/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java @@ -46,7 +46,7 @@ public class TestLuceneDictionary extends LuceneTestCase { public void setUp() throws Exception { super.setUp(); store = newDirectory(); - IndexWriter writer = new IndexWriter(store, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))); + IndexWriter writer = new IndexWriter(store, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom(), MockTokenizer.WHITESPACE, false))); Document doc; diff --git modules/suggest/src/test/org/apache/lucene/search/spell/TestSpellChecker.java modules/suggest/src/test/org/apache/lucene/search/spell/TestSpellChecker.java index 0799bb0..a254511 100755 --- modules/suggest/src/test/org/apache/lucene/search/spell/TestSpellChecker.java +++ modules/suggest/src/test/org/apache/lucene/search/spell/TestSpellChecker.java @@ -54,7 +54,7 @@ public class TestSpellChecker extends LuceneTestCase { //create a user index userindex = newDirectory(); IndexWriter writer = new IndexWriter(userindex, new IndexWriterConfig( - TEST_VERSION_CURRENT, new MockAnalyzer(random))); + TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); for (int i = 0; i < 1000; i++) { Document doc = new Document(); @@ -412,7 +412,7 @@ public class TestSpellChecker extends LuceneTestCase { assertEquals(4, searchers.size()); int num_field2 = this.numdoc(); assertEquals(num_field2, num_field1 + 1); - int numThreads = 5 + this.random.nextInt(5); + int numThreads = 5 + this.getRandom().nextInt(5); ExecutorService executor = Executors.newFixedThreadPool(numThreads); SpellCheckWorker[] workers = new SpellCheckWorker[numThreads]; for (int i = 0; i < numThreads; i++) { @@ -421,7 +421,7 @@ public class TestSpellChecker extends LuceneTestCase { workers[i] = spellCheckWorker; } - int iterations = 5 + random.nextInt(5); + int iterations = 5 + getRandom().nextInt(5); for (int i = 0; i < iterations; i++) { Thread.sleep(100); // concurrently reset the spell index diff --git modules/suggest/src/test/org/apache/lucene/search/suggest/fst/BytesRefSortersTest.java modules/suggest/src/test/org/apache/lucene/search/suggest/fst/BytesRefSortersTest.java index cb62b2a..0371919 100644 --- modules/suggest/src/test/org/apache/lucene/search/suggest/fst/BytesRefSortersTest.java +++ modules/suggest/src/test/org/apache/lucene/search/suggest/fst/BytesRefSortersTest.java @@ -36,8 +36,8 @@ public class BytesRefSortersTest extends LuceneTestCase { private void check(BytesRefSorter sorter) throws Exception { for (int i = 0; i < 100; i++) { - byte [] current = new byte 
[random.nextInt(256)]; - random.nextBytes(current); + byte [] current = new byte [getRandom().nextInt(256)]; + getRandom().nextBytes(current); sorter.add(new BytesRef(current)); } diff --git modules/suggest/src/test/org/apache/lucene/search/suggest/fst/FSTCompletionTest.java modules/suggest/src/test/org/apache/lucene/search/suggest/fst/FSTCompletionTest.java index 8904e00..07ede85 100644 --- modules/suggest/src/test/org/apache/lucene/search/suggest/fst/FSTCompletionTest.java +++ modules/suggest/src/test/org/apache/lucene/search/suggest/fst/FSTCompletionTest.java @@ -155,7 +155,7 @@ public class FSTCompletionTest extends LuceneTestCase { public void testLargeInputConstantWeights() throws Exception { FSTCompletionLookup lookup = new FSTCompletionLookup(10, true); - Random r = random; + Random r = getRandom(); List keys = new ArrayList(); for (int i = 0; i < 5000; i++) { keys.add(new TermFreq(_TestUtil.randomSimpleString(r), -1.0f)); @@ -201,7 +201,7 @@ public class FSTCompletionTest extends LuceneTestCase { @Nightly public void testRandom() throws Exception { List freqs = new ArrayList(); - Random rnd = random; + Random rnd = getRandom(); for (int i = 0; i < 2500 + rnd.nextInt(2500); i++) { float weight = rnd.nextFloat() * 100; freqs.add(new TermFreq("" + rnd.nextLong(), weight)); diff --git modules/suggest/src/test/org/apache/lucene/search/suggest/fst/TestSort.java modules/suggest/src/test/org/apache/lucene/search/suggest/fst/TestSort.java index f4f9853..e123311 100644 --- modules/suggest/src/test/org/apache/lucene/search/suggest/fst/TestSort.java +++ modules/suggest/src/test/org/apache/lucene/search/suggest/fst/TestSort.java @@ -84,8 +84,8 @@ public class TestSort extends LuceneTestCase { private byte[][] generateRandom(int howMuchData) { ArrayList data = new ArrayList(); while (howMuchData > 0) { - byte [] current = new byte [random.nextInt(256)]; - random.nextBytes(current); + byte [] current = new byte [getRandom().nextInt(256)]; + getRandom().nextBytes(current); data.add(current); howMuchData -= current.length; } diff --git modules/suggest/src/test/org/apache/lucene/search/suggest/fst/WFSTCompletionTest.java modules/suggest/src/test/org/apache/lucene/search/suggest/fst/WFSTCompletionTest.java index d0537ee..61a85ec 100644 --- modules/suggest/src/test/org/apache/lucene/search/suggest/fst/WFSTCompletionTest.java +++ modules/suggest/src/test/org/apache/lucene/search/suggest/fst/WFSTCompletionTest.java @@ -88,7 +88,7 @@ public class WFSTCompletionTest extends LuceneTestCase { while (true) { // TODO: would be nice to fix this slowCompletor/comparator to // use full range, but we might lose some coverage too... - s = _TestUtil.randomSimpleString(random); + s = _TestUtil.randomSimpleString(getRandom()); if (!slowCompletor.containsKey(s)) { break; } @@ -98,7 +98,7 @@ public class WFSTCompletionTest extends LuceneTestCase { allPrefixes.add(s.substring(0, j)); } // we can probably do Integer.MAX_VALUE here, but why worry. - int weight = random.nextInt(1<<24); + int weight = getRandom().nextInt(1<<24); slowCompletor.put(s, (long)weight); keys[i] = new TermFreq(s, (float) weight); } @@ -108,7 +108,7 @@ public class WFSTCompletionTest extends LuceneTestCase { for (String prefix : allPrefixes) { - final int topN = _TestUtil.nextInt(random, 1, 10); + final int topN = _TestUtil.nextInt(getRandom(), 1, 10); List r = suggester.lookup(prefix, false, topN); // 2. 
go through the whole treemap (slowCompletor) and check it's actually the best suggestion diff --git solr/contrib/contrib-build.xml solr/contrib/contrib-build.xml index 86c2d18..fc5888b 100644 --- solr/contrib/contrib-build.xml +++ solr/contrib/contrib-build.xml @@ -25,9 +25,6 @@ - - - diff --git solr/contrib/dataimporthandler-extras/src/test-files/dihextras/solr/conf/dataimport.properties solr/contrib/dataimporthandler-extras/src/test-files/dihextras/solr/conf/dataimport.properties new file mode 100644 index 0000000..3f630b8 --- /dev/null +++ solr/contrib/dataimporthandler-extras/src/test-files/dihextras/solr/conf/dataimport.properties @@ -0,0 +1,8 @@ +#Fri Feb 17 12:30:14 EST 2012 +18623746288561.last_index_time=2012-02-08 14\:04\:05 +47668268113791.last_index_time=2012-02-17 12\:04\:18 +54624099102413.last_index_time=2012-02-17 12\:30\:14 +18624825699733.last_index_time=2012-02-08 14\:04\:06 +last_index_time=2012-02-17 12\:30\:14 +54623000297494.last_index_time=2012-02-17 12\:30\:13 +47669350425278.last_index_time=2012-02-17 12\:04\:19 diff --git solr/contrib/dataimporthandler/src/test-files/dih/solr/conf/dataimport.properties solr/contrib/dataimporthandler/src/test-files/dih/solr/conf/dataimport.properties new file mode 100644 index 0000000..ea9e766 --- /dev/null +++ solr/contrib/dataimporthandler/src/test-files/dih/solr/conf/dataimport.properties @@ -0,0 +1,5 @@ +#Sat Feb 18 05:30:30 GILT 2012 +x.last_index_time=2012-02-18 05\:30\:30 +PARENT.last_index_time=2012-02-18 05\:30\:22 +last_index_time=2012-02-18 05\:30\:30 +job.last_index_time=2012-02-18 05\:30\:30 diff --git solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestEphemeralCache.java solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestEphemeralCache.java index 8ecd82b..b2288d5 100644 --- solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestEphemeralCache.java +++ solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestEphemeralCache.java @@ -54,7 +54,7 @@ public class TestEphemeralCache extends AbstractDataImportHandlerTestCase { @Test public void testMultiThreaded() throws Exception { // Try between 2 and 6 threads - int numThreads = random.nextInt(4) + 2; + int numThreads = getRandom().nextInt(4) + 2; System.out.println("TRYING " + numThreads); assertFullImport(getDataConfigDotXml(numThreads)); } diff --git solr/core/src/test/org/apache/solr/TestDistributedSearch.java solr/core/src/test/org/apache/solr/TestDistributedSearch.java index 479b8aa..d90c11e 100755 --- solr/core/src/test/org/apache/solr/TestDistributedSearch.java +++ solr/core/src/test/org/apache/solr/TestDistributedSearch.java @@ -192,20 +192,20 @@ public class TestDistributedSearch extends BaseDistributedSearchTestCase { stress=0; // turn off stress... we want to test max combos in min time for (int i=0; i<25*RANDOM_MULTIPLIER; i++) { - String f = fieldNames[random.nextInt(fieldNames.length)]; - if (random.nextBoolean()) f = t1; // the text field is a really interesting one to facet on (and it's multi-valued too) + String f = fieldNames[getRandom().nextInt(fieldNames.length)]; + if (getRandom().nextBoolean()) f = t1; // the text field is a really interesting one to facet on (and it's multi-valued too) // we want a random query and not just *:* so we'll get zero counts in facets also // TODO: do a better random query - String q = random.nextBoolean() ? "*:*" : "id:(1 3 5 7 9 11 13) OR id:[100 TO " + random.nextInt(50) + "]"; + String q = getRandom().nextBoolean() ?
"*:*" : "id:(1 3 5 7 9 11 13) OR id:[100 TO " + getRandom().nextInt(50) + "]"; - int nolimit = random.nextBoolean() ? -1 : 10000; // these should be equivalent + int nolimit = getRandom().nextBoolean() ? -1 : 10000; // these should be equivalent // if limit==-1, we should always get exact matches - query("q",q, "rows",0, "facet","true", "facet.field",f, "facet.limit",nolimit, "facet.sort","count", "facet.mincount",random.nextInt(5), "facet.offset",random.nextInt(10)); - query("q",q, "rows",0, "facet","true", "facet.field",f, "facet.limit",nolimit, "facet.sort","index", "facet.mincount",random.nextInt(5), "facet.offset",random.nextInt(10)); + query("q",q, "rows",0, "facet","true", "facet.field",f, "facet.limit",nolimit, "facet.sort","count", "facet.mincount",getRandom().nextInt(5), "facet.offset",getRandom().nextInt(10)); + query("q",q, "rows",0, "facet","true", "facet.field",f, "facet.limit",nolimit, "facet.sort","index", "facet.mincount",getRandom().nextInt(5), "facet.offset",getRandom().nextInt(10)); // for index sort, we should get exact results for mincount <= 1 - query("q",q, "rows",0, "facet","true", "facet.field",f, "facet.sort","index", "facet.mincount",random.nextInt(2), "facet.offset",random.nextInt(10), "facet.limit",random.nextInt(11)-1); + query("q",q, "rows",0, "facet","true", "facet.field",f, "facet.sort","index", "facet.mincount",getRandom().nextInt(2), "facet.offset",getRandom().nextInt(10), "facet.limit",getRandom().nextInt(11)-1); } stress = backupStress; // restore stress diff --git solr/core/src/test/org/apache/solr/TestGroupingSearch.java solr/core/src/test/org/apache/solr/TestGroupingSearch.java index a787e6f..f7dfe1d 100644 --- solr/core/src/test/org/apache/solr/TestGroupingSearch.java +++ solr/core/src/test/org/apache/solr/TestGroupingSearch.java @@ -542,7 +542,7 @@ public class TestGroupingSearch extends SolrTestCaseJ4 { while (--indexIter >= 0) { - int indexSize = random.nextInt(25 * RANDOM_MULTIPLIER); + int indexSize = getRandom().nextInt(25 * RANDOM_MULTIPLIER); //indexSize=2; List types = new ArrayList(); types.add(new FldType("id",ONE_ONE, new SVal('A','Z',4,4))); @@ -598,17 +598,17 @@ public class TestGroupingSearch extends SolrTestCaseJ4 { for (int qiter=0; qiter sortComparator = createSort(h.getCore().getSchema(), types, stringSortA); String sortStr = stringSortA[0]; - Comparator groupComparator = random.nextBoolean() ? sortComparator : createSort(h.getCore().getSchema(), types, stringSortA); + Comparator groupComparator = getRandom().nextBoolean() ? sortComparator : createSort(h.getCore().getSchema(), types, stringSortA); String groupSortStr = stringSortA[0]; // since groupSortStr defaults to sortStr, we need to normalize null to "score desc" if @@ -644,10 +644,10 @@ public class TestGroupingSearch extends SolrTestCaseJ4 { List sortedGroups = new ArrayList(groups.values()); Collections.sort(sortedGroups, groupComparator==sortComparator ? 
createFirstDocComparator(sortComparator) : createMaxDocComparator(sortComparator)); - boolean includeNGroups = random.nextBoolean(); + boolean includeNGroups = getRandom().nextBoolean(); Object modelResponse = buildGroupedResult(h.getCore().getSchema(), sortedGroups, start, rows, group_offset, group_limit, includeNGroups); - boolean truncateGroups = random.nextBoolean(); + boolean truncateGroups = getRandom().nextBoolean(); Map facetCounts = new TreeMap(); if (truncateGroups) { for (Grp grp : sortedGroups) { @@ -681,7 +681,7 @@ public class TestGroupingSearch extends SolrTestCaseJ4 { expectedFacetResponse.add(stringIntegerEntry.getValue()); } - int randomPercentage = random.nextInt(101); + int randomPercentage = getRandom().nextInt(101); // TODO: create a random filter too SolrQueryRequest req = req("group","true","wt","json","indent","true", "echoParams","all", "q","{!func}score_f", "group.field",groupField ,sortStr==null ? "nosort":"sort", sortStr ==null ? "": sortStr diff --git solr/core/src/test/org/apache/solr/TestJoin.java solr/core/src/test/org/apache/solr/TestJoin.java index ebb6560..0f2b3aa 100644 --- solr/core/src/test/org/apache/solr/TestJoin.java +++ solr/core/src/test/org/apache/solr/TestJoin.java @@ -151,7 +151,7 @@ public class TestJoin extends SolrTestCaseJ4 { while (--indexIter >= 0) { - int indexSize = random.nextInt(20 * RANDOM_MULTIPLIER); + int indexSize = getRandom().nextInt(20 * RANDOM_MULTIPLIER); List types = new ArrayList(); types.add(new FldType("id",ONE_ONE, new SVal('A','Z',4,4))); @@ -172,16 +172,16 @@ public class TestJoin extends SolrTestCaseJ4 { for (int qiter=0; qiter> pivot = pivots.get(fromField+"/"+toField); @@ -210,7 +210,7 @@ public class TestJoin extends SolrTestCaseJ4 { SolrQueryRequest req = req("wt","json","indent","true", "echoParams","all", "q","{!join from="+fromField+" to="+toField - + (random.nextInt(4)==0 ? " fromIndex=collection1" : "") + + (getRandom().nextInt(4)==0 ? 
" fromIndex=collection1" : "") +"}*:*" ); diff --git solr/core/src/test/org/apache/solr/analysis/LegacyHTMLStripCharFilterTest.java solr/core/src/test/org/apache/solr/analysis/LegacyHTMLStripCharFilterTest.java index 9cca9d9..af47116 100644 --- solr/core/src/test/org/apache/solr/analysis/LegacyHTMLStripCharFilterTest.java +++ solr/core/src/test/org/apache/solr/analysis/LegacyHTMLStripCharFilterTest.java @@ -273,12 +273,12 @@ public class LegacyHTMLStripCharFilterTest extends BaseTokenStreamTestCase { }; int numRounds = RANDOM_MULTIPLIER * 10000; - checkRandomData(random, analyzer, numRounds); + checkRandomData(getRandom(), analyzer, numRounds); } public void testRandomBrokenHTML() throws Exception { int maxNumElements = 10000; - String text = _TestUtil.randomHtmlishString(random, maxNumElements); + String text = _TestUtil.randomHtmlishString(getRandom(), maxNumElements); Reader reader = new LegacyHTMLStripCharFilter(CharReader.get(new StringReader(text))); while (reader.read() != -1); @@ -290,11 +290,11 @@ public class LegacyHTMLStripCharFilterTest extends BaseTokenStreamTestCase { int maxNumWords = 10000; int minWordLength = 3; int maxWordLength = 20; - int numWords = _TestUtil.nextInt(random, minNumWords, maxNumWords); - switch (_TestUtil.nextInt(random, 0, 4)) { + int numWords = _TestUtil.nextInt(getRandom(), minNumWords, maxNumWords); + switch (_TestUtil.nextInt(getRandom(), 0, 4)) { case 0: { for (int wordNum = 0 ; wordNum < numWords ; ++wordNum) { - text.append(_TestUtil.randomUnicodeString(random, maxWordLength)); + text.append(_TestUtil.randomUnicodeString(getRandom(), maxWordLength)); text.append(' '); } break; @@ -302,14 +302,14 @@ public class LegacyHTMLStripCharFilterTest extends BaseTokenStreamTestCase { case 1: { for (int wordNum = 0 ; wordNum < numWords ; ++wordNum) { text.append(_TestUtil.randomRealisticUnicodeString - (random, minWordLength, maxWordLength)); + (getRandom(), minWordLength, maxWordLength)); text.append(' '); } break; } default: { // ASCII 50% of the time for (int wordNum = 0 ; wordNum < numWords ; ++wordNum) { - text.append(_TestUtil.randomSimpleString(random)); + text.append(_TestUtil.randomSimpleString(getRandom())); text.append(' '); } } diff --git solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java index efbde20..2bfa99f 100644 --- solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java +++ solr/core/src/test/org/apache/solr/cloud/ChaosMonkeyNothingIsSafeTest.java @@ -42,15 +42,12 @@ import org.junit.Ignore; @Ignore("ignore while investigating jenkins fails") public class ChaosMonkeyNothingIsSafeTest extends FullSolrCloudTest { - @BeforeClass public static void beforeSuperClass() throws Exception { - } @AfterClass public static void afterSuperClass() throws Exception { - } @Before @@ -236,7 +233,7 @@ public class ChaosMonkeyNothingIsSafeTest extends FullSolrCloudTest { while (true && !stop) { ++i; - if (doDeletes && random.nextBoolean() && deletes.size() > 0) { + if (doDeletes && getRandom().nextBoolean() && deletes.size() > 0) { Integer delete = deletes.remove(0); try { numDeletes++; @@ -270,7 +267,7 @@ public class ChaosMonkeyNothingIsSafeTest extends FullSolrCloudTest { fails.incrementAndGet(); } - if (doDeletes && random.nextBoolean()) { + if (doDeletes && getRandom().nextBoolean()) { deletes.add(i); } diff --git solr/core/src/test/org/apache/solr/cloud/FullSolrCloudDistribCmdsTest.java 
solr/core/src/test/org/apache/solr/cloud/FullSolrCloudDistribCmdsTest.java index 1c77138..1bf0cfe 100644 --- solr/core/src/test/org/apache/solr/cloud/FullSolrCloudDistribCmdsTest.java +++ solr/core/src/test/org/apache/solr/cloud/FullSolrCloudDistribCmdsTest.java @@ -50,7 +50,6 @@ public class FullSolrCloudDistribCmdsTest extends FullSolrCloudTest { @BeforeClass public static void beforeSuperClass() throws Exception { - } public FullSolrCloudDistribCmdsTest() { diff --git solr/core/src/test/org/apache/solr/cloud/FullSolrCloudTest.java solr/core/src/test/org/apache/solr/cloud/FullSolrCloudTest.java index f51b6df..ede2e24 100644 --- solr/core/src/test/org/apache/solr/cloud/FullSolrCloudTest.java +++ solr/core/src/test/org/apache/solr/cloud/FullSolrCloudTest.java @@ -181,7 +181,7 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase { chaosMonkey = new ChaosMonkey(zkServer, zkStateReader, DEFAULT_COLLECTION, shardToJetty, shardToClient, shardToLeaderClient, - shardToLeaderJetty, random); + shardToLeaderJetty, getRandom()); } // wait until shards have started registering... @@ -1206,7 +1206,7 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase { while (true && !stop) { ++i; - if (doDeletes && random.nextBoolean() && deletes.size() > 0) { + if (doDeletes && getRandom().nextBoolean() && deletes.size() > 0) { Integer delete = deletes.remove(0); try { numDeletes++; @@ -1229,7 +1229,7 @@ public class FullSolrCloudTest extends AbstractDistributedZkTestCase { fails.incrementAndGet(); } - if (doDeletes && random.nextBoolean()) { + if (doDeletes && getRandom().nextBoolean()) { deletes.add(i); } diff --git solr/core/src/test/org/apache/solr/cloud/LeaderElectionTest.java solr/core/src/test/org/apache/solr/cloud/LeaderElectionTest.java index 616b8bc..12f4cd8 100644 --- solr/core/src/test/org/apache/solr/cloud/LeaderElectionTest.java +++ solr/core/src/test/org/apache/solr/cloud/LeaderElectionTest.java @@ -295,7 +295,7 @@ public class LeaderElectionTest extends SolrTestCaseJ4 { public void run() { int count = atLeast(5); for (int i = 1; i < count; i++) { - int launchIn = random.nextInt(500); + int launchIn = getRandom().nextInt(500); ClientThread thread = null; try { thread = new ClientThread(i); @@ -319,7 +319,7 @@ public class LeaderElectionTest extends SolrTestCaseJ4 { int j; try { // always 1 we won't kill... 
- j = random.nextInt(threads.size() - 2); + j = getRandom().nextInt(threads.size() - 2); } catch(IllegalArgumentException e) { continue; } @@ -349,7 +349,7 @@ public class LeaderElectionTest extends SolrTestCaseJ4 { try { Thread.sleep(50); int j; - j = random.nextInt(threads.size()); + j = getRandom().nextInt(threads.size()); try { threads.get(j).zkClient.getSolrZooKeeper().pauseCnxn( ZkTestServer.TICK_TIME * 2); diff --git solr/core/src/test/org/apache/solr/cloud/OverseerTest.java solr/core/src/test/org/apache/solr/cloud/OverseerTest.java index 8fb4cdc..d993ae1 100644 --- solr/core/src/test/org/apache/solr/cloud/OverseerTest.java +++ solr/core/src/test/org/apache/solr/cloud/OverseerTest.java @@ -192,9 +192,9 @@ public class OverseerTest extends SolrTestCaseJ4 { String zkDir = dataDir.getAbsolutePath() + File.separator + "zookeeper/server1/data"; - final int nodeCount = random.nextInt(50)+50; //how many simulated nodes (num of threads) - final int coreCount = random.nextInt(100)+100; //how many cores to register - final int sliceCount = random.nextInt(20)+1; //how many slices + final int nodeCount = getRandom().nextInt(50)+50; //how many simulated nodes (num of threads) + final int coreCount = getRandom().nextInt(100)+100; //how many cores to register + final int sliceCount = getRandom().nextInt(20)+1; //how many slices ZkTestServer server = new ZkTestServer(zkDir); diff --git solr/core/src/test/org/apache/solr/handler/component/DistributedSpellCheckComponentTest.java solr/core/src/test/org/apache/solr/handler/component/DistributedSpellCheckComponentTest.java index fa53f91..856270d 100644 --- solr/core/src/test/org/apache/solr/handler/component/DistributedSpellCheckComponentTest.java +++ solr/core/src/test/org/apache/solr/handler/component/DistributedSpellCheckComponentTest.java @@ -44,12 +44,13 @@ public class DistributedSpellCheckComponentTest extends BaseDistributedSearchTes } private String saveProp; + @Override public void setUp() throws Exception { // this test requires FSDir saveProp = System.getProperty("solr.directoryFactory"); System.setProperty("solr.directoryFactory", "solr.StandardDirectoryFactory"); - requestHandlerName = random.nextBoolean() ? "spellCheckCompRH" : "spellCheckCompRH_Direct"; + requestHandlerName = getRandom().nextBoolean() ? 
"spellCheckCompRH" : "spellCheckCompRH_Direct"; super.setUp(); } diff --git solr/core/src/test/org/apache/solr/handler/component/TermVectorComponentTest.java solr/core/src/test/org/apache/solr/handler/component/TermVectorComponentTest.java index efbc17c..7500080 100644 --- solr/core/src/test/org/apache/solr/handler/component/TermVectorComponentTest.java +++ solr/core/src/test/org/apache/solr/handler/component/TermVectorComponentTest.java @@ -153,7 +153,7 @@ public class TermVectorComponentTest extends SolrTestCaseJ4 { StringBuilder expected = new StringBuilder("/termVectors/doc-0/test_posofftv/anoth=={"); boolean first = true; for (int i = 0; i < options.length; i++) { - final boolean use = random.nextBoolean(); + final boolean use = getRandom().nextBoolean(); if (use) { if (!first) { expected.append(", "); diff --git solr/core/src/test/org/apache/solr/request/SimpleFacetsTest.java solr/core/src/test/org/apache/solr/request/SimpleFacetsTest.java index 47f31f6..3e11ac0 100644 --- solr/core/src/test/org/apache/solr/request/SimpleFacetsTest.java +++ solr/core/src/test/org/apache/solr/request/SimpleFacetsTest.java @@ -36,7 +36,7 @@ public class SimpleFacetsTest extends SolrTestCaseJ4 { static int random_dupe_percent = 25; // some duplicates in the index to create deleted docs static void randomCommit(int percent_chance) { - if (random.nextInt(100) <= percent_chance) + if (getStaticRandom().nextInt(100) <= percent_chance) assertU(commit()); } @@ -46,7 +46,7 @@ public class SimpleFacetsTest extends SolrTestCaseJ4 { static void add_doc(String... fieldsAndValues) { do { pendingDocs.add(fieldsAndValues); - } while (random.nextInt(100) <= random_dupe_percent); + } while (getStaticRandom().nextInt(100) <= random_dupe_percent); // assertU(adoc(fieldsAndValues)); // randomCommit(random_commit_percent); @@ -60,7 +60,7 @@ public class SimpleFacetsTest extends SolrTestCaseJ4 { indexFacetPrefixMultiValued(); indexFacetPrefixSingleValued(); - Collections.shuffle(pendingDocs, random); + Collections.shuffle(pendingDocs, getStaticRandom()); for (String[] doc : pendingDocs) { assertU(adoc(doc)); randomCommit(random_commit_percent); diff --git solr/core/src/test/org/apache/solr/search/SpatialFilterTest.java solr/core/src/test/org/apache/solr/search/SpatialFilterTest.java index 20b65e2..60fe946 100644 --- solr/core/src/test/org/apache/solr/search/SpatialFilterTest.java +++ solr/core/src/test/org/apache/solr/search/SpatialFilterTest.java @@ -152,13 +152,13 @@ public class SpatialFilterTest extends SolrTestCaseJ4 { int postFilterCount = DelegatingCollector.setLastDelegateCount; // throw in a random into the main query to prevent most cache hits - assertQ(req("fl", "id", "q","*:* OR foo_i:" + random.nextInt(100), "rows", "1000", "fq", "{!"+method+" sfield=" +fieldName +"}", + assertQ(req("fl", "id", "q","*:* OR foo_i:" + getRandom().nextInt(100), "rows", "1000", "fq", "{!"+method+" sfield=" +fieldName +"}", "pt", pt, "d", String.valueOf(distance)), tests); assertEquals(postFilterCount, DelegatingCollector.setLastDelegateCount); // post filtering shouldn't be used // try uncached - assertQ(req("fl", "id", "q","*:* OR foo_i:" + random.nextInt(100), "rows", "1000", "fq", "{!"+method+" sfield=" +fieldName + " cache=false" + "}", + assertQ(req("fl", "id", "q","*:* OR foo_i:" + getRandom().nextInt(100), "rows", "1000", "fq", "{!"+method+" sfield=" +fieldName + " cache=false" + "}", "pt", pt, "d", String.valueOf(distance)), tests); assertEquals(postFilterCount, DelegatingCollector.setLastDelegateCount); // post filtering 
shouldn't be used @@ -166,7 +166,7 @@ public class SpatialFilterTest extends SolrTestCaseJ4 { // try post filtered for fields that support it if (fieldName.endsWith("ll")) { - assertQ(req("fl", "id", "q","*:* OR foo_i:" + random.nextInt(100)+100, "rows", "1000", "fq", "{!"+method+" sfield=" +fieldName + " cache=false cost=150" + "}", + assertQ(req("fl", "id", "q","*:* OR foo_i:" + getRandom().nextInt(100)+100, "rows", "1000", "fq", "{!"+method+" sfield=" +fieldName + " cache=false cost=150" + "}", "pt", pt, "d", String.valueOf(distance)), tests); assertEquals(postFilterCount + 1, DelegatingCollector.setLastDelegateCount); // post filtering shouldn't be used diff --git solr/core/src/test/org/apache/solr/search/TestDocSet.java solr/core/src/test/org/apache/solr/search/TestDocSet.java index 6449c09..26c2d11 100644 --- solr/core/src/test/org/apache/solr/search/TestDocSet.java +++ solr/core/src/test/org/apache/solr/search/TestDocSet.java @@ -38,9 +38,15 @@ import org.apache.lucene.util.OpenBitSetIterator; * */ public class TestDocSet extends LuceneTestCase { - Random rand = random; + Random rand; float loadfactor; + @Override + public void setUp() throws Exception { + super.setUp(); + rand = getRandom(); + } + public OpenBitSet getRandomSet(int sz, int bitsToSet) { OpenBitSet bs = new OpenBitSet(sz); if (sz==0) return bs; diff --git solr/core/src/test/org/apache/solr/search/TestFastLRUCache.java solr/core/src/test/org/apache/solr/search/TestFastLRUCache.java index 3dec389..f146785 100644 --- solr/core/src/test/org/apache/solr/search/TestFastLRUCache.java +++ solr/core/src/test/org/apache/solr/search/TestFastLRUCache.java @@ -256,13 +256,13 @@ public class TestFastLRUCache extends LuceneTestCase { // enough randomness to exercise all of the different cache purging phases public void testRandom() { - int sz = random.nextInt(100)+5; - int lowWaterMark = random.nextInt(sz-3)+1; - int keyrange = random.nextInt(sz*3)+1; + int sz = getRandom().nextInt(100)+5; + int lowWaterMark = getRandom().nextInt(sz-3)+1; + int keyrange = getRandom().nextInt(sz*3)+1; ConcurrentLRUCache cache = new ConcurrentLRUCache(sz, lowWaterMark); for (int i=0; i<10000; i++) { - cache.put(random.nextInt(keyrange), ""); - cache.get(random.nextInt(keyrange)); + cache.put(getRandom().nextInt(keyrange), ""); + cache.get(getRandom().nextInt(keyrange)); } } @@ -272,7 +272,7 @@ public class TestFastLRUCache extends LuceneTestCase { int lowerWaterMark = cacheSize; int upperWaterMark = (int)(lowerWaterMark * 1.1); - Random r = random; + Random r = getRandom(); ConcurrentLRUCache cache = new ConcurrentLRUCache(upperWaterMark, lowerWaterMark, (upperWaterMark+lowerWaterMark)/2, upperWaterMark, false, false, null); boolean getSize=false; int minSize=0,maxSize=0; @@ -323,7 +323,7 @@ public class TestFastLRUCache extends LuceneTestCase { void fillCache(SolrCache sc, int cacheSize, int maxKey) { for (int i=0; i n) { n = uu-ll+1; u = uu; @@ -187,8 +187,8 @@ public class TestFiltering extends SolrTestCaseJ4 { } } else { // negative frange.. 
make it relatively small - l = random.nextInt(model.indexSize); - u = Math.max(model.indexSize-1, l+random.nextInt(Math.max(model.indexSize / 10, 2))); + l = getRandom().nextInt(model.indexSize); + u = Math.max(model.indexSize-1, l+getRandom().nextInt(Math.max(model.indexSize / 10, 2))); for (OpenBitSet set : sets) { set.clear(l,u+1); @@ -200,7 +200,7 @@ public class TestFiltering extends SolrTestCaseJ4 { // term or boolean query OpenBitSet pset = new OpenBitSet(model.indexSize); for (int i=0; i params = new ArrayList(); params.add("q"); params.add(makeRandomQuery(model, true, false)); - int nFilters = random.nextInt(5); + int nFilters = getRandom().nextInt(5); for (int i=0; i fl = Arrays.asList ("id","[docid]","[explain]","score","val_*","subj*"); - final int iters = atLeast(random, 10); + final int iters = atLeast(getRandom(), 10); for (int i = 0; i< iters; i++) { - Collections.shuffle(fl, random); + Collections.shuffle(fl, getRandom()); final String singleFl = StringUtils.join(fl.toArray(),','); assertQ("fl=" + singleFl, diff --git solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java index 81f89dd..ae8d079 100644 --- solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java +++ solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java @@ -299,19 +299,19 @@ public class TestRealTimeGet extends SolrTestCaseJ4 { // req().getCore().getUpdateHandler().getIndexWriterProvider().getIndexWriter(req().getCore()).setInfoStream(System.out); - final int commitPercent = 5 + random.nextInt(20); - final int softCommitPercent = 30+random.nextInt(75); // what percent of the commits are soft - final int deletePercent = 4+random.nextInt(25); - final int deleteByQueryPercent = 1+random.nextInt(5); - final int ndocs = 5 + (random.nextBoolean() ? random.nextInt(25) : random.nextInt(200)); - int nWriteThreads = 5 + random.nextInt(25); + final int commitPercent = 5 + getRandom().nextInt(20); + final int softCommitPercent = 30+getRandom().nextInt(75); // what percent of the commits are soft + final int deletePercent = 4+getRandom().nextInt(25); + final int deleteByQueryPercent = 1+getRandom().nextInt(5); + final int ndocs = 5 + (getRandom().nextBoolean() ? getRandom().nextInt(25) : getRandom().nextInt(200)); + int nWriteThreads = 5 + getRandom().nextInt(25); final int maxConcurrentCommits = nWriteThreads; // number of committers at a time... 
it should be <= maxWarmingSearchers // query variables final int percentRealtimeQuery = 60; final AtomicLong operations = new AtomicLong(50000); // number of query operations to perform in total - int nReadThreads = 5 + random.nextInt(25); + int nReadThreads = 5 + getRandom().nextInt(25); verbose("commitPercent=", commitPercent); @@ -334,7 +334,7 @@ public class TestRealTimeGet extends SolrTestCaseJ4 { for (int i=0; i recoveryInfoF = uLog.applyBufferedUpdates(); if (recoveryInfoF != null) { @@ -1306,7 +1306,7 @@ public class TestRealTimeGet extends SolrTestCaseJ4 { while (recInfo == null) { try { // wait a short period of time for recovery to complete (and to give a chance for more writers to concurrently add docs) - recInfo = recoveryInfoF.get(random.nextInt(100/nWriteThreads), TimeUnit.MILLISECONDS); + recInfo = recoveryInfoF.get(getRandom().nextInt(100/nWriteThreads), TimeUnit.MILLISECONDS); } catch (TimeoutException e) { // idle one more write thread verbose("Operation",operations.get(),"Draining permits for write thread",writeThreadNumber); @@ -1314,7 +1314,7 @@ public class TestRealTimeGet extends SolrTestCaseJ4 { if (writeThreadNumber >= nWriteThreads) { // if we hit the end, back up and give a few write permits writeThreadNumber--; - writePermissions[writeThreadNumber].release(random.nextInt(2) + 1); + writePermissions[writeThreadNumber].release(getRandom().nextInt(2) + 1); } // throttle readers so they don't steal too much CPU from the recovery thread @@ -1369,19 +1369,19 @@ public class TestRealTimeGet extends SolrTestCaseJ4 { DirectoryReader reader; @Test public void testStressLuceneNRT() throws Exception { - final int commitPercent = 5 + random.nextInt(20); - final int softCommitPercent = 30+random.nextInt(75); // what percent of the commits are soft - final int deletePercent = 4+random.nextInt(25); - final int deleteByQueryPercent = 1+random.nextInt(5); - final int ndocs = 5 + (random.nextBoolean() ? random.nextInt(25) : random.nextInt(200)); - int nWriteThreads = 5 + random.nextInt(25); + final int commitPercent = 5 + getRandom().nextInt(20); + final int softCommitPercent = 30+getRandom().nextInt(75); // what percent of the commits are soft + final int deletePercent = 4+getRandom().nextInt(25); + final int deleteByQueryPercent = 1+getRandom().nextInt(5); + final int ndocs = 5 + (getRandom().nextBoolean() ? getRandom().nextInt(25) : getRandom().nextInt(200)); + int nWriteThreads = 5 + getRandom().nextInt(25); final int maxConcurrentCommits = nWriteThreads; // number of committers at a time... 
it should be <= maxWarmingSearchers final AtomicLong operations = new AtomicLong(1000); // number of query operations to perform in total - crank up if - int nReadThreads = 5 + random.nextInt(25); - final boolean tombstones = random.nextBoolean(); - final boolean syncCommits = random.nextBoolean(); + int nReadThreads = 5 + getRandom().nextInt(25); + final boolean tombstones = getRandom().nextBoolean(); + final boolean syncCommits = getRandom().nextBoolean(); verbose("commitPercent=", commitPercent); verbose("softCommitPercent=",softCommitPercent); @@ -1425,7 +1425,7 @@ public class TestRealTimeGet extends SolrTestCaseJ4 { Directory dir = newDirectory(); - final RandomIndexWriter writer = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + final RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(getRandom()))); writer.setDoRandomForceMergeAssert(false); // writer.commit(); @@ -1436,7 +1436,7 @@ public class TestRealTimeGet extends SolrTestCaseJ4 { for (int i=0; i= max) return min; - return min + random.nextFloat() * (max - min); + return min + getStaticRandom().nextFloat() * (max - min); } @Override @@ -1081,19 +1081,19 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase { model.put(doc.id, doc); // commit 10% of the time - if (random.nextInt(commitOneOutOf)==0) { + if (getRandom().nextInt(commitOneOutOf)==0) { assertU(commit()); } // duplicate 10% of the docs - if (random.nextInt(10)==0) { + if (getRandom().nextInt(10)==0) { updateJ(toJSON(doc), null); model.put(doc.id, doc); } } // optimize 10% of the time - if (random.nextInt(10)==0) { + if (getRandom().nextInt(10)==0) { assertU(optimize()); } else { assertU(commit()); @@ -1137,13 +1137,13 @@ public abstract class SolrTestCaseJ4 extends LuceneTestCase { public static Comparator createSort(IndexSchema schema, List fieldTypes, String[] out) { StringBuilder sortSpec = new StringBuilder(); - int nSorts = random.nextInt(4); + int nSorts = getStaticRandom().nextInt(4); List> comparators = new ArrayList>(); for (int i=0; i0) sortSpec.append(','); - int which = random.nextInt(fieldTypes.size()+2); - boolean asc = random.nextBoolean(); + int which = getStaticRandom().nextInt(fieldTypes.size()+2); + boolean asc = getStaticRandom().nextBoolean(); if (which == fieldTypes.size()) { // sort by score sortSpec.append("score").append(asc ? " asc" : " desc");