Index: solr/core/src/test/org/apache/solr/analysis/TestGermanStemFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestGermanStemFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestGermanStemFilterFactory.java (working copy) @@ -20,6 +20,7 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; @@ -27,7 +28,7 @@ /** * Simple tests to ensure the German stem filter factory is working. */ -public class TestGermanStemFilterFactory extends BaseTokenTestCase { +public class TestGermanStemFilterFactory extends BaseTokenStreamTestCase { /** * Ensure the filter actually stems text. */ Index: solr/core/src/test/org/apache/solr/analysis/TestPorterStemFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestPorterStemFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestPorterStemFilterFactory.java (working copy) @@ -20,6 +20,7 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; @@ -27,7 +28,7 @@ /** * Simple tests to ensure the Porter stem filter factory is working. */ -public class TestPorterStemFilterFactory extends BaseTokenTestCase { +public class TestPorterStemFilterFactory extends BaseTokenStreamTestCase { /** * Ensure the filter actually stems text. 
*/ Index: solr/core/src/test/org/apache/solr/analysis/TestPatternReplaceCharFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestPatternReplaceCharFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestPatternReplaceCharFilterFactory.java (working copy) @@ -22,15 +22,12 @@ import java.util.HashMap; import java.util.Map; -import org.apache.lucene.analysis.CharReader; -import org.apache.lucene.analysis.CharStream; -import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.*; /** * Simple tests to ensure this factory is working */ -public class TestPatternReplaceCharFilterFactory extends BaseTokenTestCase { +public class TestPatternReplaceCharFilterFactory extends BaseTokenStreamTestCase { // 1111 // 01234567890123 Index: solr/core/src/test/org/apache/solr/analysis/TestMultiWordSynonyms.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestMultiWordSynonyms.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestMultiWordSynonyms.java (working copy) @@ -17,6 +17,7 @@ package org.apache.solr.analysis; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.util.ResourceLoader; @@ -33,7 +34,7 @@ /** * @since solr 1.4 */ -public class TestMultiWordSynonyms extends BaseTokenTestCase { +public class TestMultiWordSynonyms extends BaseTokenStreamTestCase { /** * @deprecated Remove this test in 5.0 @@ -54,7 +55,7 @@ SynonymFilterFactory factory = new SynonymFilterFactory(); Map args = new HashMap(); args.put("synonyms", "synonyms.txt"); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(args); 
factory.inform(new StringMockSolrResourceLoader("a b c,d")); TokenStream ts = factory.create(new MockTokenizer(new StringReader("a e"), MockTokenizer.WHITESPACE, false)); Index: solr/core/src/test/org/apache/solr/analysis/TestCapitalizationFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestCapitalizationFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestCapitalizationFilterFactory.java (working copy) @@ -21,6 +21,7 @@ import java.util.HashMap; import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; @@ -29,7 +30,7 @@ /** * */ -public class TestCapitalizationFilterFactory extends BaseTokenTestCase { +public class TestCapitalizationFilterFactory extends BaseTokenStreamTestCase { public void testCapitalization() throws Exception { @@ -38,7 +39,7 @@ args.put( CapitalizationFilterFactory.ONLY_FIRST_WORD, "true" ); CapitalizationFilterFactory factory = new CapitalizationFilterFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init( args ); assertTokenStreamContents(factory.create( new MockTokenizer(new StringReader("kiTTEN"), MockTokenizer.WHITESPACE, false)), @@ -95,7 +96,7 @@ // Now try some prefixes factory = new CapitalizationFilterFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); args.put( "okPrefix", "McK" ); // all words factory.init( args ); assertTokenStreamContents(factory.create( @@ -122,7 +123,7 @@ args.put( CapitalizationFilterFactory.ONLY_FIRST_WORD, "true" ); CapitalizationFilterFactory factory = new CapitalizationFilterFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + 
factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init( args ); factory.forceFirstLetter = true; assertTokenStreamContents(factory.create( @@ -150,7 +151,7 @@ args.put(CapitalizationFilterFactory.ONLY_FIRST_WORD, "true"); args.put(CapitalizationFilterFactory.MIN_WORD_LENGTH, "5"); CapitalizationFilterFactory factory = new CapitalizationFilterFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(args); Tokenizer tokenizer = new MockTokenizer(new StringReader( "helo testing"), MockTokenizer.WHITESPACE, false); @@ -166,7 +167,7 @@ Map args = new HashMap(); args.put(CapitalizationFilterFactory.MAX_WORD_COUNT, "2"); CapitalizationFilterFactory factory = new CapitalizationFilterFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(args); Tokenizer tokenizer = new MockTokenizer(new StringReader( "one two three four"), MockTokenizer.WHITESPACE, false); @@ -181,7 +182,7 @@ Map args = new HashMap(); args.put(CapitalizationFilterFactory.MAX_WORD_COUNT, "2"); CapitalizationFilterFactory factory = new CapitalizationFilterFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(args); Tokenizer tokenizer = new MockTokenizer(new StringReader( "one two three four"), MockTokenizer.KEYWORD, false); @@ -198,7 +199,7 @@ Map args = new HashMap(); args.put(CapitalizationFilterFactory.MAX_TOKEN_LENGTH, "2"); CapitalizationFilterFactory factory = new CapitalizationFilterFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(args); Tokenizer tokenizer = new MockTokenizer(new StringReader( "this is a test"), MockTokenizer.WHITESPACE, false); @@ -214,7 +215,7 @@ args.put(CapitalizationFilterFactory.KEEP, "kitten"); args.put(CapitalizationFilterFactory.FORCE_FIRST_LETTER, "true"); 
CapitalizationFilterFactory factory = new CapitalizationFilterFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(args); Tokenizer tokenizer = new MockTokenizer(new StringReader("kitten"), MockTokenizer.WHITESPACE, false); TokenStream ts = factory.create(tokenizer); Index: solr/core/src/test/org/apache/solr/analysis/TestRemoveDuplicatesTokenFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestRemoveDuplicatesTokenFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestRemoveDuplicatesTokenFilterFactory.java (working copy) @@ -17,6 +17,7 @@ package org.apache.solr.analysis; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.Token; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; @@ -27,7 +28,7 @@ import java.util.Arrays; /** Simple tests to ensure this factory is working */ -public class TestRemoveDuplicatesTokenFilterFactory extends BaseTokenTestCase { +public class TestRemoveDuplicatesTokenFilterFactory extends BaseTokenStreamTestCase { public static Token tok(int pos, String t, int start, int end) { Token tok = new Token(t,start,end); Index: solr/core/src/test/org/apache/solr/analysis/TestIrishLowerCaseFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestIrishLowerCaseFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestIrishLowerCaseFilterFactory.java (working copy) @@ -20,13 +20,14 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; /** * Simple tests to ensure the Irish lowercase 
filter factory is working. */ -public class TestIrishLowerCaseFilterFactory extends BaseTokenTestCase { +public class TestIrishLowerCaseFilterFactory extends BaseTokenStreamTestCase { public void testCasing() throws Exception { Reader reader = new StringReader("nAthair tUISCE hARD"); IrishLowerCaseFilterFactory factory = new IrishLowerCaseFilterFactory(); Index: solr/core/src/test/org/apache/solr/analysis/TestKeywordMarkerFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestKeywordMarkerFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestKeywordMarkerFilterFactory.java (working copy) @@ -23,6 +23,7 @@ import java.util.HashMap; import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.en.PorterStemFilter; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; @@ -33,7 +34,7 @@ /** * Simple tests to ensure the keyword marker filter factory is working. 
*/ -public class TestKeywordMarkerFilterFactory extends BaseTokenTestCase { +public class TestKeywordMarkerFilterFactory extends BaseTokenStreamTestCase { public void testKeywords() throws IOException { Reader reader = new StringReader("dogs cats"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); @@ -41,7 +42,7 @@ Map args = new HashMap(); ResourceLoader loader = new SolrResourceLoader(null, null); args.put("protected", "protwords.txt"); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(args); factory.inform(loader); @@ -57,7 +58,7 @@ ResourceLoader loader = new SolrResourceLoader(null, null); args.put("protected", "protwords.txt"); args.put("ignoreCase", "true"); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(args); factory.inform(loader); Index: solr/core/src/test/org/apache/solr/analysis/TestShingleFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestShingleFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestShingleFilterFactory.java (working copy) @@ -22,13 +22,14 @@ import java.util.HashMap; import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; /** * Simple tests to ensure the Shingle filter factory works. 
*/ -public class TestShingleFilterFactory extends BaseTokenTestCase { +public class TestShingleFilterFactory extends BaseTokenStreamTestCase { /** * Test the defaults */ Index: solr/core/src/test/org/apache/solr/analysis/TestHungarianLightStemFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestHungarianLightStemFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestHungarianLightStemFilterFactory.java (working copy) @@ -20,13 +20,14 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; /** * Simple tests to ensure the Hungarian light stem factory is working. */ -public class TestHungarianLightStemFilterFactory extends BaseTokenTestCase { +public class TestHungarianLightStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("házakat"); HungarianLightStemFilterFactory factory = new HungarianLightStemFilterFactory(); Index: solr/core/src/test/org/apache/solr/analysis/TestTurkishLowerCaseFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestTurkishLowerCaseFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestTurkishLowerCaseFilterFactory.java (working copy) @@ -20,6 +20,7 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; @@ -27,7 +28,7 @@ /** * Simple tests to ensure the Turkish lowercase filter factory is working. 
*/ -public class TestTurkishLowerCaseFilterFactory extends BaseTokenTestCase { +public class TestTurkishLowerCaseFilterFactory extends BaseTokenStreamTestCase { /** * Ensure the filter actually lowercases text. */ Index: solr/core/src/test/org/apache/solr/analysis/TestJapanesePartOfSpeechStopFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestJapanesePartOfSpeechStopFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestJapanesePartOfSpeechStopFilterFactory.java (working copy) @@ -2,9 +2,11 @@ import java.io.IOException; import java.io.StringReader; +import java.util.Collections; import java.util.HashMap; import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.TokenStream; import org.apache.solr.core.SolrResourceLoader; @@ -28,15 +30,16 @@ /** * Simple tests for {@link JapanesePartOfSpeechStopFilterFactory} */ -public class TestJapanesePartOfSpeechStopFilterFactory extends BaseTokenTestCase { +public class TestJapanesePartOfSpeechStopFilterFactory extends BaseTokenStreamTestCase { public void testBasics() throws IOException { String tags = "# verb-main:\n" + "動詞-自立\n"; JapaneseTokenizerFactory tokenizerFactory = new JapaneseTokenizerFactory(); - tokenizerFactory.setLuceneMatchVersion(DEFAULT_VERSION); - tokenizerFactory.init(EMPTY_PARAMS); + tokenizerFactory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map tokenizerArgs = Collections.emptyMap(); + tokenizerFactory.init(tokenizerArgs); tokenizerFactory.inform(new SolrResourceLoader(null, null)); TokenStream ts = tokenizerFactory.create(new StringReader("私は制限スピードを超える。")); JapanesePartOfSpeechStopFilterFactory factory = new JapanesePartOfSpeechStopFilterFactory(); Index: solr/core/src/test/org/apache/solr/analysis/TestGalicianStemFilterFactory.java =================================================================== --- 
solr/core/src/test/org/apache/solr/analysis/TestGalicianStemFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestGalicianStemFilterFactory.java (working copy) @@ -20,13 +20,14 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; /** * Simple tests to ensure the Galician stem factory is working. */ -public class TestGalicianStemFilterFactory extends BaseTokenTestCase { +public class TestGalicianStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("cariñosa"); GalicianStemFilterFactory factory = new GalicianStemFilterFactory(); Index: solr/core/src/test/org/apache/solr/analysis/TestElisionFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestElisionFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestElisionFilterFactory.java (working copy) @@ -19,9 +19,11 @@ import java.io.Reader; import java.io.StringReader; +import java.util.Collections; import java.util.HashMap; import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; @@ -31,7 +33,7 @@ /** * Simple tests to ensure the French elision filter factory is working. */ -public class TestElisionFilterFactory extends BaseTokenTestCase { +public class TestElisionFilterFactory extends BaseTokenStreamTestCase { /** * Ensure the filter actually normalizes text. 
*/ @@ -39,8 +41,7 @@ Reader reader = new StringReader("l'avion"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); ElisionFilterFactory factory = new ElisionFilterFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); ResourceLoader loader = new SolrResourceLoader(null, null); Map args = new HashMap(); args.put("articles", "frenchArticles.txt"); @@ -57,10 +58,10 @@ Reader reader = new StringReader("l'avion"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); ElisionFilterFactory factory = new ElisionFilterFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); ResourceLoader loader = new SolrResourceLoader(null, null); - factory.init(new HashMap()); factory.inform(loader); TokenStream stream = factory.create(tokenizer); assertTokenStreamContents(stream, new String[] { "avion" }); @@ -73,8 +74,7 @@ Reader reader = new StringReader("L'avion"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); ElisionFilterFactory factory = new ElisionFilterFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); ResourceLoader loader = new SolrResourceLoader(null, null); Map args = new HashMap(); args.put("articles", "frenchArticles.txt"); Index: solr/core/src/test/org/apache/solr/analysis/TestReverseStringFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestReverseStringFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestReverseStringFilterFactory.java (working copy) @@ -19,7 +19,10 @@ import java.io.Reader; import java.io.StringReader; 
+import java.util.Collections; +import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; @@ -27,7 +30,7 @@ /** * Simple tests to ensure the Reverse string filter factory is working. */ -public class TestReverseStringFilterFactory extends BaseTokenTestCase { +public class TestReverseStringFilterFactory extends BaseTokenStreamTestCase { /** * Ensure the filter actually reverses text. */ @@ -35,8 +38,9 @@ Reader reader = new StringReader("simple test"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); ReverseStringFilterFactory factory = new ReverseStringFilterFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); TokenStream stream = factory.create(tokenizer); assertTokenStreamContents(stream, new String[] { "elpmis", "tset" }); } Index: solr/core/src/test/org/apache/solr/analysis/TestRussianLightStemFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestRussianLightStemFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestRussianLightStemFilterFactory.java (working copy) @@ -20,13 +20,14 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; /** * Simple tests to ensure the Russian light stem factory is working. 
*/ -public class TestRussianLightStemFilterFactory extends BaseTokenTestCase { +public class TestRussianLightStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("журналы"); RussianLightStemFilterFactory factory = new RussianLightStemFilterFactory(); Index: solr/core/src/test/org/apache/solr/analysis/TestDictionaryCompoundWordTokenFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestDictionaryCompoundWordTokenFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestDictionaryCompoundWordTokenFilterFactory.java (working copy) @@ -22,6 +22,7 @@ import java.util.HashMap; import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; @@ -31,7 +32,7 @@ /** * Simple tests to ensure the Dictionary compound filter factory is working. */ -public class TestDictionaryCompoundWordTokenFilterFactory extends BaseTokenTestCase { +public class TestDictionaryCompoundWordTokenFilterFactory extends BaseTokenStreamTestCase { /** * Ensure the filter actually decompounds text. 
*/ @@ -42,7 +43,7 @@ ResourceLoader loader = new SolrResourceLoader(null, null); Map args = new HashMap(); args.put("dictionary", "compoundDictionary.txt"); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(args); factory.inform(loader); TokenStream stream = factory.create(tokenizer); Index: solr/core/src/test/org/apache/solr/analysis/CommonGramsQueryFilterFactoryTest.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/CommonGramsQueryFilterFactoryTest.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/CommonGramsQueryFilterFactoryTest.java (working copy) @@ -16,6 +16,7 @@ */ package org.apache.solr.analysis; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; @@ -24,6 +25,7 @@ import org.apache.solr.core.SolrResourceLoader; import java.io.StringReader; +import java.util.Collections; import java.util.Map; import java.util.HashMap; @@ -32,7 +34,7 @@ * used by the StopFilterFactoryTest TODO: consider creating separate test files * so this won't break if stop filter test files change **/ -public class CommonGramsQueryFilterFactoryTest extends BaseTokenTestCase { +public class CommonGramsQueryFilterFactoryTest extends BaseTokenStreamTestCase { public void testInform() throws Exception { ResourceLoader loader = new SolrResourceLoader(null, null); @@ -41,7 +43,7 @@ Map args = new HashMap(); args.put("words", "stop-1.txt"); args.put("ignoreCase", "true"); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(args); factory.inform(loader); CharArraySet words = factory.getCommonWords(); @@ -53,7 +55,7 @@ factory = new CommonGramsQueryFilterFactory(); args.put("words", "stop-1.txt, stop-2.txt"); - 
factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(args); factory.inform(loader); words = factory.getCommonWords(); @@ -64,7 +66,7 @@ .isIgnoreCase() == true); factory = new CommonGramsQueryFilterFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); args.put("words", "stop-snowball.txt"); args.put("format", "snowball"); factory.init(args); @@ -88,8 +90,9 @@ ResourceLoader loader = new SolrResourceLoader(null, null); assertTrue("loader is null and it shouldn't be", loader != null); CommonGramsQueryFilterFactory factory = new CommonGramsQueryFilterFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); factory.inform(loader); CharArraySet words = factory.getCommonWords(); assertTrue("words is null and it shouldn't be", words != null); Index: solr/core/src/test/org/apache/solr/analysis/TestPhoneticFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestPhoneticFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestPhoneticFilterFactory.java (working copy) @@ -23,6 +23,7 @@ import org.apache.commons.codec.language.Metaphone; import org.apache.commons.codec.language.Caverphone2; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; @@ -31,7 +32,7 @@ /** * */ -public class TestPhoneticFilterFactory extends BaseTokenTestCase { +public class TestPhoneticFilterFactory extends BaseTokenStreamTestCase { private static final int REPEATS = 100000; Index: solr/core/src/test/org/apache/solr/analysis/TestNorwegianLightStemFilterFactory.java 
=================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestNorwegianLightStemFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestNorwegianLightStemFilterFactory.java (working copy) @@ -20,13 +20,14 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; /** * Simple tests to ensure the Norwegian Light stem factory is working. */ -public class TestNorwegianLightStemFilterFactory extends BaseTokenTestCase { +public class TestNorwegianLightStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("epler eple"); NorwegianLightStemFilterFactory factory = new NorwegianLightStemFilterFactory(); Index: solr/core/src/test/org/apache/solr/analysis/TestDelimitedPayloadTokenFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestDelimitedPayloadTokenFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestDelimitedPayloadTokenFilterFactory.java (working copy) @@ -21,6 +21,7 @@ import java.util.HashMap; import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilter; @@ -30,7 +31,7 @@ import org.apache.lucene.analysis.util.ResourceLoader; import org.apache.solr.core.SolrResourceLoader; -public class TestDelimitedPayloadTokenFilterFactory extends BaseTokenTestCase { +public class TestDelimitedPayloadTokenFilterFactory extends BaseTokenStreamTestCase { public void testEncoder() throws Exception { Map args = new HashMap(); Index: 
solr/core/src/test/org/apache/solr/analysis/TestStemmerOverrideFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestStemmerOverrideFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestStemmerOverrideFilterFactory.java (working copy) @@ -23,6 +23,7 @@ import java.util.HashMap; import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.en.PorterStemFilter; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; @@ -33,7 +34,7 @@ /** * Simple tests to ensure the stemmer override filter factory is working. */ -public class TestStemmerOverrideFilterFactory extends BaseTokenTestCase { +public class TestStemmerOverrideFilterFactory extends BaseTokenStreamTestCase { public void testKeywords() throws IOException { // our stemdict stems dogs to 'cat' Reader reader = new StringReader("testing dogs"); @@ -42,7 +43,7 @@ Map args = new HashMap(); ResourceLoader loader = new SolrResourceLoader(null, null); args.put("dictionary", "stemdict.txt"); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(args); factory.inform(loader); @@ -58,7 +59,7 @@ ResourceLoader loader = new SolrResourceLoader(null, null); args.put("dictionary", "stemdict.txt"); args.put("ignoreCase", "true"); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(args); factory.inform(loader); Index: solr/core/src/test/org/apache/solr/analysis/SnowballPorterFilterFactoryTest.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/SnowballPorterFilterFactoryTest.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/SnowballPorterFilterFactoryTest.java (working copy) @@ -16,6 +16,7 @@ * 
limitations under the License. */ +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; @@ -34,7 +35,7 @@ import java.util.Map; import java.util.ArrayList; -public class SnowballPorterFilterFactoryTest extends BaseTokenTestCase { +public class SnowballPorterFilterFactoryTest extends BaseTokenStreamTestCase { public void test() throws IOException { EnglishStemmer stemmer = new EnglishStemmer(); @@ -50,7 +51,7 @@ Map args = new HashMap(); args.put("language", "English"); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(args); factory.inform(new LinesMockSolrResourceLoader(new ArrayList())); Tokenizer tokenizer = new MockTokenizer( @@ -88,7 +89,7 @@ Map args = new HashMap(); args.put("protected", "protwords.txt"); args.put("language", "English"); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(args); factory.inform(loader); Reader reader = new StringReader("ridding of some stemming"); Index: solr/core/src/test/org/apache/solr/analysis/TestChineseFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestChineseFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestChineseFilterFactory.java (working copy) @@ -20,6 +20,7 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; @@ -27,7 +28,7 @@ /** * Simple tests to ensure the Chinese filter factory is working. 
*/ -public class TestChineseFilterFactory extends BaseTokenTestCase { +public class TestChineseFilterFactory extends BaseTokenStreamTestCase { /** * Ensure the filter actually normalizes text (numerics, stopwords) */ Index: solr/core/src/test/org/apache/solr/analysis/TestFinnishLightStemFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestFinnishLightStemFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestFinnishLightStemFilterFactory.java (working copy) @@ -20,13 +20,14 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; /** * Simple tests to ensure the Finnish light stem factory is working. */ -public class TestFinnishLightStemFilterFactory extends BaseTokenTestCase { +public class TestFinnishLightStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("aseistettujen"); FinnishLightStemFilterFactory factory = new FinnishLightStemFilterFactory(); Index: solr/core/src/test/org/apache/solr/analysis/TestGermanNormalizationFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestGermanNormalizationFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestGermanNormalizationFilterFactory.java (working copy) @@ -20,13 +20,14 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; /** * Simple tests to ensure the German normalization factory is working. 
*/ -public class TestGermanNormalizationFilterFactory extends BaseTokenTestCase { +public class TestGermanNormalizationFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("weißbier"); GermanNormalizationFilterFactory factory = new GermanNormalizationFilterFactory(); Index: solr/core/src/test/org/apache/solr/analysis/TestKStemFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestKStemFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestKStemFilterFactory.java (working copy) @@ -3,6 +3,7 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; @@ -26,7 +27,7 @@ /** * Simple tests to ensure the kstem filter factory is working. */ -public class TestKStemFilterFactory extends BaseTokenTestCase { +public class TestKStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("bricks"); KStemFilterFactory factory = new KStemFilterFactory(); Index: solr/core/src/test/org/apache/solr/analysis/TestHTMLStripCharFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestHTMLStripCharFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestHTMLStripCharFilterFactory.java (working copy) @@ -22,15 +22,12 @@ import java.util.HashMap; import java.util.Map; -import org.apache.lucene.analysis.CharReader; -import org.apache.lucene.analysis.CharStream; -import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.*; /** * Simple tests to ensure this factory is working */ -public class 
TestHTMLStripCharFilterFactory extends BaseTokenTestCase { +public class TestHTMLStripCharFilterFactory extends BaseTokenStreamTestCase { public void testNothingChanged() throws IOException { Index: solr/core/src/test/org/apache/solr/analysis/TestCzechStemFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestCzechStemFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestCzechStemFilterFactory.java (working copy) @@ -20,6 +20,7 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; @@ -27,7 +28,7 @@ /** * Simple tests to ensure the Czech stem filter factory is working. */ -public class TestCzechStemFilterFactory extends BaseTokenTestCase { +public class TestCzechStemFilterFactory extends BaseTokenStreamTestCase { /** * Ensure the filter actually stems text. 
*/ Index: solr/core/src/test/org/apache/solr/analysis/TestStopFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestStopFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestStopFilterFactory.java (working copy) @@ -17,6 +17,7 @@ */ +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.util.CharArraySet; import org.apache.lucene.analysis.util.ResourceLoader; import org.apache.solr.core.SolrResourceLoader; @@ -28,7 +29,7 @@ * * **/ -public class TestStopFilterFactory extends BaseTokenTestCase { +public class TestStopFilterFactory extends BaseTokenStreamTestCase { public void testInform() throws Exception { ResourceLoader loader = new SolrResourceLoader(null, null); @@ -37,7 +38,7 @@ Map args = new HashMap(); args.put("words", "stop-1.txt"); args.put("ignoreCase", "true"); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(args); factory.inform(loader); CharArraySet words = factory.getStopWords(); @@ -47,7 +48,7 @@ factory = new StopFilterFactory(); args.put("words", "stop-1.txt, stop-2.txt"); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(args); factory.inform(loader); words = factory.getStopWords(); @@ -56,7 +57,7 @@ assertTrue(factory.isIgnoreCase() + " does not equal: " + true, factory.isIgnoreCase() == true); factory = new StopFilterFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); args.put("words", "stop-snowball.txt"); args.put("format", "snowball"); factory.init(args); Index: solr/core/src/test/org/apache/solr/analysis/TestHindiFilters.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestHindiFilters.java (revision 1335876) +++ 
solr/core/src/test/org/apache/solr/analysis/TestHindiFilters.java (working copy) @@ -19,25 +19,29 @@ import java.io.Reader; import java.io.StringReader; +import java.util.Collections; +import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; /** * Simple tests to ensure the Hindi filter Factories are working. */ -public class TestHindiFilters extends BaseTokenTestCase { +public class TestHindiFilters extends BaseTokenStreamTestCase { /** * Test IndicNormalizationFilterFactory */ public void testIndicNormalizer() throws Exception { Reader reader = new StringReader("ত্‍ अाैर"); StandardTokenizerFactory factory = new StandardTokenizerFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); IndicNormalizationFilterFactory filterFactory = new IndicNormalizationFilterFactory(); - filterFactory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); - filterFactory.init(EMPTY_PARAMS); + filterFactory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); + filterFactory.init(args); Tokenizer tokenizer = factory.create(reader); TokenStream stream = filterFactory.create(tokenizer); assertTokenStreamContents(stream, new String[] { "ৎ", "और" }); @@ -49,12 +53,13 @@ public void testHindiNormalizer() throws Exception { Reader reader = new StringReader("क़िताब"); StandardTokenizerFactory factory = new StandardTokenizerFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); IndicNormalizationFilterFactory indicFilterFactory = new IndicNormalizationFilterFactory(); HindiNormalizationFilterFactory hindiFilterFactory = new HindiNormalizationFilterFactory(); - hindiFilterFactory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); - hindiFilterFactory.init(EMPTY_PARAMS); + 
hindiFilterFactory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); + hindiFilterFactory.init(args); Tokenizer tokenizer = factory.create(reader); TokenStream stream = indicFilterFactory.create(tokenizer); stream = hindiFilterFactory.create(stream); @@ -67,13 +72,14 @@ public void testStemmer() throws Exception { Reader reader = new StringReader("किताबें"); StandardTokenizerFactory factory = new StandardTokenizerFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); IndicNormalizationFilterFactory indicFilterFactory = new IndicNormalizationFilterFactory(); HindiNormalizationFilterFactory hindiFilterFactory = new HindiNormalizationFilterFactory(); HindiStemFilterFactory stemFactory = new HindiStemFilterFactory(); - stemFactory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); - stemFactory.init(EMPTY_PARAMS); + stemFactory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); + stemFactory.init(args); Tokenizer tokenizer = factory.create(reader); TokenStream stream = indicFilterFactory.create(tokenizer); stream = hindiFilterFactory.create(stream); Index: solr/core/src/test/org/apache/solr/analysis/TestGreekStemFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestGreekStemFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestGreekStemFilterFactory.java (working copy) @@ -3,6 +3,7 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; @@ -28,11 +29,11 @@ /** * Simple tests to ensure the Greek stem filter factory is working. 
*/ -public class TestGreekStemFilterFactory extends BaseTokenTestCase { +public class TestGreekStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("άνθρωπος"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); - TokenStream normalized = new GreekLowerCaseFilter(DEFAULT_VERSION, tokenizer); + TokenStream normalized = new GreekLowerCaseFilter(TEST_VERSION_CURRENT, tokenizer); GreekStemFilterFactory factory = new GreekStemFilterFactory(); TokenStream stream = factory.create(normalized); assertTokenStreamContents(stream, new String[] { "ανθρωπ" }); Index: solr/core/src/test/org/apache/solr/analysis/TestHyphenationCompoundWordTokenFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestHyphenationCompoundWordTokenFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestHyphenationCompoundWordTokenFilterFactory.java (working copy) @@ -22,6 +22,7 @@ import java.util.HashMap; import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; @@ -31,7 +32,7 @@ /** * Simple tests to ensure the Hyphenation compound filter factory is working. */ -public class TestHyphenationCompoundWordTokenFilterFactory extends BaseTokenTestCase { +public class TestHyphenationCompoundWordTokenFilterFactory extends BaseTokenStreamTestCase { /** * Ensure the factory works with hyphenation grammar+dictionary: using default options. 
*/ @@ -43,7 +44,7 @@ Map args = new HashMap(); args.put("hyphenator", "da_UTF8.xml"); args.put("dictionary", "da_compoundDictionary.txt"); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(args); factory.inform(loader); TokenStream stream = factory.create(tokenizer); @@ -68,7 +69,7 @@ args.put("hyphenator", "da_UTF8.xml"); args.put("minSubwordSize", "2"); args.put("maxSubwordSize", "4"); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(args); factory.inform(loader); TokenStream stream = factory.create(tokenizer); Index: solr/core/src/test/org/apache/solr/analysis/TestNorwegianMinimalStemFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestNorwegianMinimalStemFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestNorwegianMinimalStemFilterFactory.java (working copy) @@ -20,13 +20,14 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; /** * Simple tests to ensure the Norwegian Minimal stem factory is working. 
*/ -public class TestNorwegianMinimalStemFilterFactory extends BaseTokenTestCase { +public class TestNorwegianMinimalStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("eple eplet epler eplene eplets eplenes"); NorwegianMinimalStemFilterFactory factory = new NorwegianMinimalStemFilterFactory(); Index: solr/core/src/test/org/apache/solr/analysis/TestCJKBigramFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestCJKBigramFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestCJKBigramFilterFactory.java (working copy) @@ -19,9 +19,11 @@ import java.io.Reader; import java.io.StringReader; +import java.util.Collections; import java.util.HashMap; import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.standard.StandardTokenizer; @@ -29,12 +31,13 @@ * Simple tests to ensure the CJK bigram factory is working. 
* @deprecated */ -public class TestCJKBigramFilterFactory extends BaseTokenTestCase { +public class TestCJKBigramFilterFactory extends BaseTokenStreamTestCase { public void testDefaults() throws Exception { Reader reader = new StringReader("多くの学生が試験に落ちた。"); CJKBigramFilterFactory factory = new CJKBigramFilterFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); TokenStream stream = factory.create(new StandardTokenizer(TEST_VERSION_CURRENT, reader)); assertTokenStreamContents(stream, new String[] { "多く", "くの", "の学", "学生", "生が", "が試", "試験", "験に", "に落", "落ち", "ちた" }); Index: solr/core/src/test/org/apache/solr/analysis/TestWikipediaTokenizerFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestWikipediaTokenizerFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestWikipediaTokenizerFactory.java (working copy) @@ -4,6 +4,7 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.wikipedia.WikipediaTokenizer; @@ -27,7 +28,7 @@ /** * Simple tests to ensure the wikipedia tokenizer is working. 
*/ -public class TestWikipediaTokenizerFactory extends BaseTokenTestCase { +public class TestWikipediaTokenizerFactory extends BaseTokenStreamTestCase { public void testTokenizer() throws IOException { Reader reader = new StringReader("This is a [[Category:foo]]"); WikipediaTokenizerFactory factory = new WikipediaTokenizerFactory(); Index: solr/core/src/test/org/apache/solr/analysis/TestBeiderMorseFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestBeiderMorseFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestBeiderMorseFilterFactory.java (working copy) @@ -18,18 +18,21 @@ */ import java.io.StringReader; +import java.util.Collections; import java.util.HashMap; import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; /** Simple tests for {@link BeiderMorseFilterFactory} */ -public class TestBeiderMorseFilterFactory extends BaseTokenTestCase { +public class TestBeiderMorseFilterFactory extends BaseTokenStreamTestCase { public void testBasics() throws Exception { BeiderMorseFilterFactory factory = new BeiderMorseFilterFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); TokenStream ts = factory.create(new MockTokenizer(new StringReader("Weinberg"), MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(ts, new String[] { "vDnbirk", "vanbirk", "vinbirk", "wDnbirk", "wanbirk", "winbirk" }, Index: solr/core/src/test/org/apache/solr/analysis/TestPortugueseMinimalStemFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestPortugueseMinimalStemFilterFactory.java (revision 1335876) +++ 
solr/core/src/test/org/apache/solr/analysis/TestPortugueseMinimalStemFilterFactory.java (working copy) @@ -20,13 +20,14 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; /** * Simple tests to ensure the Portuguese Minimal stem factory is working. */ -public class TestPortugueseMinimalStemFilterFactory extends BaseTokenTestCase { +public class TestPortugueseMinimalStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("questões"); PortugueseMinimalStemFilterFactory factory = new PortugueseMinimalStemFilterFactory(); Index: solr/core/src/test/org/apache/solr/analysis/TestPatternTokenizerFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestPatternTokenizerFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestPatternTokenizerFactory.java (working copy) @@ -21,11 +21,11 @@ import java.util.HashMap; import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.TokenStream; /** Simple Tests to ensure this factory is working */ -public class TestPatternTokenizerFactory extends BaseTokenTestCase -{ +public class TestPatternTokenizerFactory extends BaseTokenStreamTestCase { public void testFactory() throws Exception { final String INPUT = "Günther Günther is here"; Index: solr/core/src/test/org/apache/solr/analysis/TestChineseTokenizerFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestChineseTokenizerFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestChineseTokenizerFactory.java (working copy) @@ -20,12 +20,13 @@ import java.io.Reader; import java.io.StringReader; 
+import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.TokenStream; /** * Simple tests to ensure the Chinese tokenizer factory is working. */ -public class TestChineseTokenizerFactory extends BaseTokenTestCase { +public class TestChineseTokenizerFactory extends BaseTokenStreamTestCase { /** * Ensure the tokenizer actually tokenizes chinese text correctly */ Index: solr/core/src/test/org/apache/solr/analysis/TestJapaneseBaseFormFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestJapaneseBaseFormFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestJapaneseBaseFormFilterFactory.java (working copy) @@ -19,18 +19,22 @@ import java.io.IOException; import java.io.StringReader; +import java.util.Collections; +import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.TokenStream; import org.apache.solr.core.SolrResourceLoader; /** * Simple tests for {@link JapaneseBaseFormFilterFactory} */ -public class TestJapaneseBaseFormFilterFactory extends BaseTokenTestCase { +public class TestJapaneseBaseFormFilterFactory extends BaseTokenStreamTestCase { public void testBasics() throws IOException { JapaneseTokenizerFactory tokenizerFactory = new JapaneseTokenizerFactory(); - tokenizerFactory.setLuceneMatchVersion(DEFAULT_VERSION); - tokenizerFactory.init(EMPTY_PARAMS); + tokenizerFactory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + tokenizerFactory.init(args); tokenizerFactory.inform(new SolrResourceLoader(null, null)); TokenStream ts = tokenizerFactory.create(new StringReader("それはまだ実験段階にあります")); JapaneseBaseFormFilterFactory factory = new JapaneseBaseFormFilterFactory(); Index: solr/core/src/test/org/apache/solr/analysis/TestCJKWidthFilterFactory.java =================================================================== --- 
solr/core/src/test/org/apache/solr/analysis/TestCJKWidthFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestCJKWidthFilterFactory.java (working copy) @@ -20,13 +20,14 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; /** * Simple tests to ensure the CJKWidthFilterFactory is working */ -public class TestCJKWidthFilterFactory extends BaseTokenTestCase { +public class TestCJKWidthFilterFactory extends BaseTokenStreamTestCase { public void test() throws Exception { Reader reader = new StringReader("Test 1234"); CJKWidthFilterFactory factory = new CJKWidthFilterFactory(); Index: solr/core/src/test/org/apache/solr/analysis/TestGalicianMinimalStemFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestGalicianMinimalStemFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestGalicianMinimalStemFilterFactory.java (working copy) @@ -20,13 +20,14 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; /** * Simple tests to ensure the Galician plural stem factory is working. 
*/ -public class TestGalicianMinimalStemFilterFactory extends BaseTokenTestCase { +public class TestGalicianMinimalStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("elefantes"); GalicianMinimalStemFilterFactory factory = new GalicianMinimalStemFilterFactory(); Index: solr/core/src/test/org/apache/solr/analysis/TestPersianNormalizationFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestPersianNormalizationFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestPersianNormalizationFilterFactory.java (working copy) @@ -20,6 +20,7 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; @@ -27,7 +28,7 @@ /** * Simple tests to ensure the Persian normalization factory is working. */ -public class TestPersianNormalizationFilterFactory extends BaseTokenTestCase { +public class TestPersianNormalizationFilterFactory extends BaseTokenStreamTestCase { /** * Ensure the filter actually normalizes persian text. 
*/ Index: solr/core/src/test/org/apache/solr/analysis/TestPatternReplaceFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestPatternReplaceFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestPatternReplaceFilterFactory.java (working copy) @@ -17,6 +17,7 @@ package org.apache.solr.analysis; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; @@ -27,7 +28,7 @@ /** * Simple tests to ensure this factory is working */ -public class TestPatternReplaceFilterFactory extends BaseTokenTestCase { +public class TestPatternReplaceFilterFactory extends BaseTokenStreamTestCase { public void testReplaceAll() throws Exception { String input = "aabfooaabfooabfoob ab caaaaaaaaab"; Index: solr/core/src/test/org/apache/solr/analysis/DoubleMetaphoneFilterFactoryTest.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/DoubleMetaphoneFilterFactoryTest.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/DoubleMetaphoneFilterFactoryTest.java (working copy) @@ -20,12 +20,13 @@ import java.util.HashMap; import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.phonetic.DoubleMetaphoneFilter; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -public class DoubleMetaphoneFilterFactoryTest extends BaseTokenTestCase { +public class DoubleMetaphoneFilterFactoryTest extends BaseTokenStreamTestCase { public void testDefaults() throws Exception { DoubleMetaphoneFilterFactory factory = new DoubleMetaphoneFilterFactory(); Index: solr/core/src/test/org/apache/solr/analysis/TestTrimFilterFactory.java 
=================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestTrimFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestTrimFilterFactory.java (working copy) @@ -21,13 +21,14 @@ import java.util.HashMap; import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; /** * Simple tests to ensure this factory is working */ -public class TestTrimFilterFactory extends BaseTokenTestCase { +public class TestTrimFilterFactory extends BaseTokenStreamTestCase { public void testTrimming() throws Exception { TrimFilterFactory factory = new TrimFilterFactory(); Map args = new HashMap(); Index: solr/core/src/test/org/apache/solr/analysis/TestArabicFilters.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestArabicFilters.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestArabicFilters.java (working copy) @@ -19,7 +19,10 @@ import java.io.Reader; import java.io.StringReader; +import java.util.Collections; +import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.CharReader; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; @@ -27,7 +30,7 @@ /** * Simple tests to ensure the Arabic filter Factories are working. 
*/ -public class TestArabicFilters extends BaseTokenTestCase { +public class TestArabicFilters extends BaseTokenStreamTestCase { /** * Test ArabicLetterTokenizerFactory * @deprecated (3.1) Remove in Lucene 5.0 @@ -36,8 +39,9 @@ public void testTokenizer() throws Exception { Reader reader = new StringReader("الذين مَلكت أيمانكم"); ArabicLetterTokenizerFactory factory = new ArabicLetterTokenizerFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] {"الذين", "مَلكت", "أيمانكم"}); } @@ -48,11 +52,12 @@ public void testNormalizer() throws Exception { Reader reader = new StringReader("الذين مَلكت أيمانكم"); StandardTokenizerFactory factory = new StandardTokenizerFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); ArabicNormalizationFilterFactory filterFactory = new ArabicNormalizationFilterFactory(); - filterFactory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); - filterFactory.init(EMPTY_PARAMS); + filterFactory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); + filterFactory.init(args); Tokenizer tokenizer = factory.create(reader); TokenStream stream = filterFactory.create(tokenizer); assertTokenStreamContents(stream, new String[] {"الذين", "ملكت", "ايمانكم"}); @@ -64,12 +69,13 @@ public void testStemmer() throws Exception { Reader reader = new StringReader("الذين مَلكت أيمانكم"); StandardTokenizerFactory factory = new StandardTokenizerFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); ArabicNormalizationFilterFactory normFactory = new ArabicNormalizationFilterFactory(); - normFactory.setLuceneMatchVersion(DEFAULT_VERSION); + 
normFactory.setLuceneMatchVersion(TEST_VERSION_CURRENT); ArabicStemFilterFactory stemFactory = new ArabicStemFilterFactory(); - factory.init(EMPTY_PARAMS); - normFactory.init(EMPTY_PARAMS); + Map args = Collections.emptyMap(); + factory.init(args); + normFactory.init(args); Tokenizer tokenizer = factory.create(reader); TokenStream stream = normFactory.create(tokenizer); stream = stemFactory.create(stream); @@ -83,8 +89,9 @@ Reader reader = new StringReader("می‌خورد"); PersianCharFilterFactory charfilterFactory = new PersianCharFilterFactory(); StandardTokenizerFactory tokenizerFactory = new StandardTokenizerFactory(); - tokenizerFactory.setLuceneMatchVersion(DEFAULT_VERSION); - tokenizerFactory.init(EMPTY_PARAMS); + tokenizerFactory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + tokenizerFactory.init(args); TokenStream stream = tokenizerFactory.create(charfilterFactory.create(CharReader.get(reader))); assertTokenStreamContents(stream, new String[] { "می", "خورد" }); } Index: solr/core/src/test/org/apache/solr/analysis/TestIndonesianStemFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestIndonesianStemFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestIndonesianStemFilterFactory.java (working copy) @@ -22,6 +22,7 @@ import java.util.HashMap; import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; @@ -29,7 +30,7 @@ /** * Simple tests to ensure the Indonesian stem filter factory is working. */ -public class TestIndonesianStemFilterFactory extends BaseTokenTestCase { +public class TestIndonesianStemFilterFactory extends BaseTokenStreamTestCase { /** * Ensure the filter actually stems text. 
*/ Index: solr/core/src/test/org/apache/solr/analysis/CommonGramsFilterFactoryTest.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/CommonGramsFilterFactoryTest.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/CommonGramsFilterFactoryTest.java (working copy) @@ -17,6 +17,7 @@ * limitations under the License. */ +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; @@ -25,6 +26,7 @@ import org.apache.solr.core.SolrResourceLoader; import java.io.StringReader; +import java.util.Collections; import java.util.Map; import java.util.HashMap; @@ -33,7 +35,7 @@ * used by the StopFilterFactoryTest TODO: consider creating separate test files * so this won't break if stop filter test files change **/ -public class CommonGramsFilterFactoryTest extends BaseTokenTestCase { +public class CommonGramsFilterFactoryTest extends BaseTokenStreamTestCase { public void testInform() throws Exception { ResourceLoader loader = new SolrResourceLoader(null, null); @@ -42,7 +44,7 @@ Map args = new HashMap(); args.put("words", "stop-1.txt"); args.put("ignoreCase", "true"); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(args); factory.inform(loader); CharArraySet words = factory.getCommonWords(); @@ -54,7 +56,7 @@ factory = new CommonGramsFilterFactory(); args.put("words", "stop-1.txt, stop-2.txt"); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(args); factory.inform(loader); words = factory.getCommonWords(); @@ -67,7 +69,7 @@ factory = new CommonGramsFilterFactory(); args.put("words", "stop-snowball.txt"); args.put("format", "snowball"); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + 
factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(args); factory.inform(loader); words = factory.getCommonWords(); @@ -89,8 +91,9 @@ ResourceLoader loader = new SolrResourceLoader(null, null); assertTrue("loader is null and it shouldn't be", loader != null); CommonGramsFilterFactory factory = new CommonGramsFilterFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); factory.inform(loader); CharArraySet words = factory.getCommonWords(); assertTrue("words is null and it shouldn't be", words != null); Index: solr/core/src/test/org/apache/solr/analysis/TestKeepFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestKeepFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestKeepFilterFactory.java (working copy) @@ -16,6 +16,7 @@ * limitations under the License. 
*/ +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.util.CharArraySet; import org.apache.lucene.analysis.util.ResourceLoader; import org.apache.solr.core.SolrResourceLoader; @@ -27,7 +28,7 @@ * * **/ -public class TestKeepFilterFactory extends BaseTokenTestCase{ +public class TestKeepFilterFactory extends BaseTokenStreamTestCase { public void testInform() throws Exception { ResourceLoader loader = new SolrResourceLoader(null, null); @@ -36,7 +37,7 @@ Map args = new HashMap(); args.put("words", "keep-1.txt"); args.put("ignoreCase", "true"); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(args); factory.inform(loader); CharArraySet words = factory.getWords(); @@ -46,7 +47,7 @@ factory = new KeepWordFilterFactory(); args.put("words", "keep-1.txt, keep-2.txt"); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(args); factory.inform(loader); words = factory.getWords(); Index: solr/core/src/test/org/apache/solr/analysis/TestFrenchLightStemFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestFrenchLightStemFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestFrenchLightStemFilterFactory.java (working copy) @@ -20,13 +20,14 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; /** * Simple tests to ensure the French light stem factory is working. 
*/ -public class TestFrenchLightStemFilterFactory extends BaseTokenTestCase { +public class TestFrenchLightStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("administrativement"); FrenchLightStemFilterFactory factory = new FrenchLightStemFilterFactory(); Index: solr/core/src/test/org/apache/solr/analysis/TestReversedWildcardFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestReversedWildcardFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestReversedWildcardFilterFactory.java (working copy) @@ -40,7 +40,7 @@ import org.junit.BeforeClass; import org.junit.Test; -import static org.apache.solr.analysis.BaseTokenTestCase.*; +import static org.apache.lucene.analysis.BaseTokenStreamTestCase.*; public class TestReversedWildcardFilterFactory extends SolrTestCaseJ4 { Map args = new HashMap(); Index: solr/core/src/test/org/apache/solr/analysis/TestGermanLightStemFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestGermanLightStemFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestGermanLightStemFilterFactory.java (working copy) @@ -20,13 +20,14 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; /** * Simple tests to ensure the German light stem factory is working. 
*/ -public class TestGermanLightStemFilterFactory extends BaseTokenTestCase { +public class TestGermanLightStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("häuser"); GermanLightStemFilterFactory factory = new GermanLightStemFilterFactory(); Index: solr/core/src/test/org/apache/solr/analysis/TestCJKTokenizerFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestCJKTokenizerFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestCJKTokenizerFactory.java (working copy) @@ -20,6 +20,7 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.TokenStream; /** @@ -27,7 +28,7 @@ * @deprecated */ @Deprecated -public class TestCJKTokenizerFactory extends BaseTokenTestCase { +public class TestCJKTokenizerFactory extends BaseTokenStreamTestCase { /** * Ensure the tokenizer actually tokenizes CJK text correctly */ Index: solr/core/src/test/org/apache/solr/analysis/TestBulgarianStemFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestBulgarianStemFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestBulgarianStemFilterFactory.java (working copy) @@ -20,6 +20,7 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; @@ -27,7 +28,7 @@ /** * Simple tests to ensure the Bulgarian stem filter factory is working. 
*/ -public class TestBulgarianStemFilterFactory extends BaseTokenTestCase { +public class TestBulgarianStemFilterFactory extends BaseTokenStreamTestCase { /** * Ensure the filter actually stems text. */ Index: solr/core/src/test/org/apache/solr/analysis/TestCollationKeyFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestCollationKeyFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestCollationKeyFilterFactory.java (working copy) @@ -28,12 +28,13 @@ import java.util.Locale; import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.util.ResourceLoader; -public class TestCollationKeyFilterFactory extends BaseTokenTestCase { +public class TestCollationKeyFilterFactory extends BaseTokenStreamTestCase { /* * Turkish has some funny casing. Index: solr/core/src/test/org/apache/solr/analysis/TestItalianLightStemFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestItalianLightStemFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestItalianLightStemFilterFactory.java (working copy) @@ -20,13 +20,14 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; /** * Simple tests to ensure the Italian light stem factory is working. 
*/ -public class TestItalianLightStemFilterFactory extends BaseTokenTestCase { +public class TestItalianLightStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("ragazzo ragazzi"); ItalianLightStemFilterFactory factory = new ItalianLightStemFilterFactory(); Index: solr/core/src/test/org/apache/solr/analysis/TestGreekLowerCaseFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestGreekLowerCaseFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestGreekLowerCaseFilterFactory.java (working copy) @@ -19,7 +19,10 @@ import java.io.Reader; import java.io.StringReader; +import java.util.Collections; +import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; @@ -27,7 +30,7 @@ /** * Simple tests to ensure the Greek lowercase filter factory is working. */ -public class TestGreekLowerCaseFilterFactory extends BaseTokenTestCase { +public class TestGreekLowerCaseFilterFactory extends BaseTokenStreamTestCase { /** * Ensure the filter actually lowercases (and a bit more) greek text. 
*/ @@ -35,8 +38,9 @@ Reader reader = new StringReader("Μάϊος ΜΆΪΟΣ"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); GreekLowerCaseFilterFactory factory = new GreekLowerCaseFilterFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); TokenStream stream = factory.create(tokenizer); assertTokenStreamContents(stream, new String[] { "μαιοσ", "μαιοσ" }); } Index: solr/core/src/test/org/apache/solr/analysis/TestStandardFactories.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestStandardFactories.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestStandardFactories.java (working copy) @@ -19,9 +19,11 @@ import java.io.Reader; import java.io.StringReader; +import java.util.Collections; import java.util.HashMap; import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; @@ -29,15 +31,16 @@ /** * Simple tests to ensure the standard lucene factories are working. 
*/ -public class TestStandardFactories extends BaseTokenTestCase { +public class TestStandardFactories extends BaseTokenStreamTestCase { /** * Test StandardTokenizerFactory */ public void testStandardTokenizer() throws Exception { Reader reader = new StringReader("Wha\u0301t's this thing do?"); StandardTokenizerFactory factory = new StandardTokenizerFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] {"Wha\u0301t's", "this", "thing", "do" }); @@ -54,7 +57,7 @@ Map args = new HashMap(); args.put("maxTokenLength", "1000"); StandardTokenizerFactory factory = new StandardTokenizerFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(args); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, @@ -67,8 +70,9 @@ public void testClassicTokenizer() throws Exception { Reader reader = new StringReader("What's this thing do?"); ClassicTokenizerFactory factory = new ClassicTokenizerFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] {"What's", "this", "thing", "do" }); @@ -85,7 +89,7 @@ Map args = new HashMap(); args.put("maxTokenLength", "1000"); ClassicTokenizerFactory factory = new ClassicTokenizerFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(args); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, @@ -98,11 +102,12 @@ public void testStandardFilter() throws Exception { Reader reader = new 
StringReader("What's this thing do?"); ClassicTokenizerFactory factory = new ClassicTokenizerFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); ClassicFilterFactory filterFactory = new ClassicFilterFactory(); - filterFactory.setLuceneMatchVersion(DEFAULT_VERSION); - filterFactory.init(EMPTY_PARAMS); + filterFactory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + filterFactory.init(args); Tokenizer tokenizer = factory.create(reader); TokenStream stream = filterFactory.create(tokenizer); assertTokenStreamContents(stream, @@ -115,8 +120,9 @@ public void testKeywordTokenizer() throws Exception { Reader reader = new StringReader("What's this thing do?"); KeywordTokenizerFactory factory = new KeywordTokenizerFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] {"What's this thing do?"}); @@ -128,8 +134,9 @@ public void testWhitespaceTokenizer() throws Exception { Reader reader = new StringReader("What's this thing do?"); WhitespaceTokenizerFactory factory = new WhitespaceTokenizerFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] {"What's", "this", "thing", "do?"}); @@ -141,8 +148,9 @@ public void testLetterTokenizer() throws Exception { Reader reader = new StringReader("What's this thing do?"); LetterTokenizerFactory factory = new LetterTokenizerFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); + 
factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] {"What", "s", "this", "thing", "do"}); @@ -154,8 +162,9 @@ public void testLowerCaseTokenizer() throws Exception { Reader reader = new StringReader("What's this thing do?"); LowerCaseTokenizerFactory factory = new LowerCaseTokenizerFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] {"what", "s", "this", "thing", "do"}); @@ -168,8 +177,9 @@ Reader reader = new StringReader("Česká"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); ASCIIFoldingFilterFactory factory = new ASCIIFoldingFilterFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); TokenStream stream = factory.create(tokenizer); assertTokenStreamContents(stream, new String[] { "Ceska" }); } Index: solr/core/src/test/org/apache/solr/analysis/TestBrazilianStemFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestBrazilianStemFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestBrazilianStemFilterFactory.java (working copy) @@ -20,6 +20,7 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; @@ -27,7 +28,7 @@ /** * Simple tests to ensure the Brazilian stem filter factory is 
working. */ -public class TestBrazilianStemFilterFactory extends BaseTokenTestCase { +public class TestBrazilianStemFilterFactory extends BaseTokenStreamTestCase { /** * Ensure the filter actually stems and normalizes text. */ Index: solr/core/src/test/org/apache/solr/analysis/TestFrenchMinimalStemFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestFrenchMinimalStemFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestFrenchMinimalStemFilterFactory.java (working copy) @@ -20,13 +20,14 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; /** * Simple tests to ensure the French minimal stem factory is working. */ -public class TestFrenchMinimalStemFilterFactory extends BaseTokenTestCase { +public class TestFrenchMinimalStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("chevaux"); FrenchMinimalStemFilterFactory factory = new FrenchMinimalStemFilterFactory(); Index: solr/core/src/test/org/apache/solr/analysis/TestGermanMinimalStemFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestGermanMinimalStemFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestGermanMinimalStemFilterFactory.java (working copy) @@ -20,13 +20,14 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; /** * Simple tests to ensure the German minimal stem factory is working. 
*/ -public class TestGermanMinimalStemFilterFactory extends BaseTokenTestCase { +public class TestGermanMinimalStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("bilder"); GermanMinimalStemFilterFactory factory = new GermanMinimalStemFilterFactory(); Index: solr/core/src/test/org/apache/solr/analysis/TestJapaneseTokenizerFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestJapaneseTokenizerFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestJapaneseTokenizerFactory.java (working copy) @@ -19,20 +19,23 @@ import java.io.IOException; import java.io.StringReader; +import java.util.Collections; import java.util.HashMap; import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.TokenStream; import org.apache.solr.core.SolrResourceLoader; /** * Simple tests for {@link JapaneseTokenizerFactory} */ -public class TestJapaneseTokenizerFactory extends BaseTokenTestCase { +public class TestJapaneseTokenizerFactory extends BaseTokenStreamTestCase { public void testSimple() throws IOException { JapaneseTokenizerFactory factory = new JapaneseTokenizerFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); factory.inform(new SolrResourceLoader(null, null)); TokenStream ts = factory.create(new StringReader("これは本ではない")); assertTokenStreamContents(ts, @@ -47,8 +50,9 @@ */ public void testDefaults() throws IOException { JapaneseTokenizerFactory factory = new JapaneseTokenizerFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); 
factory.inform(new SolrResourceLoader(null, null)); TokenStream ts = factory.create(new StringReader("シニアソフトウェアエンジニア")); assertTokenStreamContents(ts, Index: solr/core/src/test/org/apache/solr/analysis/TestSynonymFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestSynonymFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestSynonymFilterFactory.java (working copy) @@ -26,6 +26,7 @@ import java.util.List; import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.synonym.SynonymFilter; @@ -33,13 +34,13 @@ import org.apache.lucene.analysis.util.ResourceLoader; import org.apache.solr.core.SolrResourceLoader; -public class TestSynonymFilterFactory extends BaseTokenTestCase { +public class TestSynonymFilterFactory extends BaseTokenStreamTestCase { /** test that we can parse and use the solr syn file */ public void testSynonyms() throws Exception { SynonymFilterFactory factory = new SynonymFilterFactory(); Map args = new HashMap(); args.put("synonyms", "synonyms.txt"); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(args); factory.inform(new SolrResourceLoader(null, null)); TokenStream ts = factory.create(new MockTokenizer(new StringReader("GB"), MockTokenizer.WHITESPACE, false)); @@ -90,7 +91,7 @@ SynonymFilterFactory factory = new SynonymFilterFactory(); Map args = new HashMap(); args.put("synonyms", "synonyms.txt"); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(args); factory.inform(new StringMockSolrResourceLoader("")); // empty file! 
TokenStream ts = factory.create(new MockTokenizer(new StringReader("GB"), MockTokenizer.WHITESPACE, false)); Index: solr/core/src/test/org/apache/solr/analysis/TestRussianFilters.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestRussianFilters.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestRussianFilters.java (working copy) @@ -19,21 +19,25 @@ import java.io.Reader; import java.io.StringReader; +import java.util.Collections; +import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.Tokenizer; /** * Simple tests to ensure the Russian filter factories are working. */ -public class TestRussianFilters extends BaseTokenTestCase { +public class TestRussianFilters extends BaseTokenStreamTestCase { /** * Test RussianLetterTokenizerFactory */ public void testTokenizer() throws Exception { Reader reader = new StringReader("Вместе с тем о силе электромагнитной 100"); RussianLetterTokenizerFactory factory = new RussianLetterTokenizerFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] {"Вместе", "с", "тем", "о", "силе", "электромагнитной", "100"}); Index: solr/core/src/test/org/apache/solr/analysis/TestThaiWordFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestThaiWordFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestThaiWordFilterFactory.java (working copy) @@ -19,7 +19,10 @@ import java.io.Reader; import java.io.StringReader; +import java.util.Collections; +import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import 
org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; @@ -28,7 +31,7 @@ /** * Simple tests to ensure the Thai word filter factory is working. */ -public class TestThaiWordFilterFactory extends BaseTokenTestCase { +public class TestThaiWordFilterFactory extends BaseTokenStreamTestCase { /** * Ensure the filter actually decomposes text. */ @@ -37,8 +40,9 @@ Reader reader = new StringReader("การที่ได้ต้องแสดงว่างานดี"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); ThaiWordFilterFactory factory = new ThaiWordFilterFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); TokenStream stream = factory.create(tokenizer); assertTokenStreamContents(stream, new String[] {"การ", "ที่", "ได้", "ต้อง", "แสดง", "ว่า", "งาน", "ดี"}); Index: solr/core/src/test/org/apache/solr/analysis/TestTypeTokenFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestTypeTokenFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestTypeTokenFilterFactory.java (working copy) @@ -17,6 +17,7 @@ */ +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.NumericTokenStream; import org.apache.lucene.analysis.util.InitializationException; import org.apache.lucene.analysis.util.ResourceLoader; @@ -30,7 +31,7 @@ /** * Testcase for {@link TypeTokenFilterFactory} */ -public class TestTypeTokenFilterFactory extends BaseTokenTestCase { +public class TestTypeTokenFilterFactory extends BaseTokenStreamTestCase { @Test public void testInform() throws Exception { @@ -39,7 +40,7 @@ Map args = new HashMap(); args.put("types", "stoptypes-1.txt"); args.put("enablePositionIncrements", "true"); - 
factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(args); factory.inform(loader); Set types = factory.getStopTypes(); @@ -65,7 +66,7 @@ Map args = new HashMap(); args.put("types", "stoptypes-1.txt, stoptypes-2.txt"); args.put("enablePositionIncrements", "false"); - typeTokenFilterFactory.setLuceneMatchVersion(DEFAULT_VERSION); + typeTokenFilterFactory.setLuceneMatchVersion(TEST_VERSION_CURRENT); typeTokenFilterFactory.init(args); NumericTokenStream input = new NumericTokenStream(); input.setIntValue(123); @@ -79,7 +80,7 @@ args.put("types", "stoptypes-1.txt, stoptypes-2.txt"); args.put("enablePositionIncrements", "false"); args.put("useWhitelist","true"); - typeTokenFilterFactory.setLuceneMatchVersion(DEFAULT_VERSION); + typeTokenFilterFactory.setLuceneMatchVersion(TEST_VERSION_CURRENT); typeTokenFilterFactory.init(args); NumericTokenStream input = new NumericTokenStream(); input.setIntValue(123); @@ -92,7 +93,7 @@ TypeTokenFilterFactory typeTokenFilterFactory = new TypeTokenFilterFactory(); Map args = new HashMap(); args.put("enablePositionIncrements", "false"); - typeTokenFilterFactory.setLuceneMatchVersion(DEFAULT_VERSION); + typeTokenFilterFactory.setLuceneMatchVersion(TEST_VERSION_CURRENT); typeTokenFilterFactory.init(args); typeTokenFilterFactory.inform(new SolrResourceLoader(null, null)); fail("not supplying 'types' parameter should cause an InitializationException"); Index: solr/core/src/test/org/apache/solr/analysis/TestPortugueseLightStemFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestPortugueseLightStemFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestPortugueseLightStemFilterFactory.java (working copy) @@ -20,13 +20,14 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import 
org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; /** * Simple tests to ensure the Portuguese Light stem factory is working. */ -public class TestPortugueseLightStemFilterFactory extends BaseTokenTestCase { +public class TestPortugueseLightStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("evidentemente"); PortugueseLightStemFilterFactory factory = new PortugueseLightStemFilterFactory(); Index: solr/core/src/test/org/apache/solr/analysis/TestHunspellStemFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestHunspellStemFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestHunspellStemFilterFactory.java (working copy) @@ -22,6 +22,7 @@ import java.util.HashMap; import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.solr.core.SolrResourceLoader; @@ -30,13 +31,13 @@ /** * Simple tests to ensure the Hunspell stemmer loads from factory */ -public class TestHunspellStemFilterFactory extends BaseTokenTestCase { +public class TestHunspellStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { HunspellStemFilterFactory factory = new HunspellStemFilterFactory(); Map args = new HashMap(); args.put("dictionary", "hunspell-test.dic"); args.put("affix", "hunspell-test.aff"); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(args); factory.inform(new SolrResourceLoader("solr")); Index: solr/core/src/test/org/apache/solr/analysis/TestSpanishLightStemFilterFactory.java =================================================================== --- 
solr/core/src/test/org/apache/solr/analysis/TestSpanishLightStemFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestSpanishLightStemFilterFactory.java (working copy) @@ -20,13 +20,14 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; /** * Simple tests to ensure the Spanish Light stem factory is working. */ -public class TestSpanishLightStemFilterFactory extends BaseTokenTestCase { +public class TestSpanishLightStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("sociedades"); SpanishLightStemFilterFactory factory = new SpanishLightStemFilterFactory(); Index: solr/core/src/test/org/apache/solr/analysis/TestLatvianStemFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestLatvianStemFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestLatvianStemFilterFactory.java (working copy) @@ -20,13 +20,14 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; /** * Simple tests to ensure the Latvian stem factory is working. 
 */ -public class TestLatvianStemFilterFactory extends BaseTokenTestCase { +public class TestLatvianStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("tirgiem tirgus"); LatvianStemFilterFactory factory = new LatvianStemFilterFactory(); Index: solr/core/src/test/org/apache/solr/analysis/TestSwedishLightStemFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestSwedishLightStemFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestSwedishLightStemFilterFactory.java (working copy) @@ -20,13 +20,14 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; /** * Simple tests to ensure the Swedish Light stem factory is working. */ -public class TestSwedishLightStemFilterFactory extends BaseTokenTestCase { +public class TestSwedishLightStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("äpplen äpple"); SwedishLightStemFilterFactory factory = new SwedishLightStemFilterFactory(); Index: solr/core/src/test/org/apache/solr/analysis/TestUAX29URLEmailTokenizerFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestUAX29URLEmailTokenizerFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestUAX29URLEmailTokenizerFactory.java (working copy) @@ -23,6 +23,8 @@ import java.util.HashMap; import java.util.Map; +import java.util.Collections; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.util.Version; @@ -30,13 +32,14 @@ * A few tests based on org.apache.lucene.analysis.TestUAX29URLEmailTokenizer */ -public 
class TestUAX29URLEmailTokenizerFactory extends BaseTokenTestCase { +public class TestUAX29URLEmailTokenizerFactory extends BaseTokenStreamTestCase { public void testUAX29URLEmailTokenizer() throws Exception { Reader reader = new StringReader("Wha\u0301t's this thing do?"); UAX29URLEmailTokenizerFactory factory = new UAX29URLEmailTokenizerFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] {"Wha\u0301t's", "this", "thing", "do" }); @@ -45,8 +47,9 @@ public void testArabic() throws Exception { Reader reader = new StringReader("الفيلم الوثائقي الأول عن ويكيبيديا يسمى \"الحقيقة بالأرقام: قصة ويكيبيديا\" (بالإنجليزية: Truth in Numbers: The Wikipedia Story)، سيتم إطلاقه في 2008."); UAX29URLEmailTokenizerFactory factory = new UAX29URLEmailTokenizerFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] {"الفيلم", "الوثائقي", "الأول", "عن", "ويكيبيديا", "يسمى", "الحقيقة", "بالأرقام", "قصة", "ويكيبيديا", @@ -56,8 +59,9 @@ public void testChinese() throws Exception { Reader reader = new StringReader("我是中国人。 1234 Tests "); UAX29URLEmailTokenizerFactory factory = new UAX29URLEmailTokenizerFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] {"我", "是", "中", "国", "人", "1234", "Tests"}); @@ -66,8 +70,9 @@ public void testKorean() throws Exception { Reader reader = new 
StringReader("안녕하세요 한글입니다"); UAX29URLEmailTokenizerFactory factory = new UAX29URLEmailTokenizerFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] {"안녕하세요", "한글입니다"}); @@ -76,8 +81,9 @@ public void testHyphen() throws Exception { Reader reader = new StringReader("some-dashed-phrase"); UAX29URLEmailTokenizerFactory factory = new UAX29URLEmailTokenizerFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] {"some", "dashed", "phrase"}); @@ -101,8 +107,9 @@ + "http://[a42:a7b6::]/qSmxSUU4z/%52qVl4\n"; Reader reader = new StringReader(textWithURLs); UAX29URLEmailTokenizerFactory factory = new UAX29URLEmailTokenizerFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] { @@ -142,8 +149,9 @@ + "lv'p@tqk.vj5s0tgl.0dlu7su3iyiaz.dqso.494.3hb76.XN--MGBAAM7A8H\n"; Reader reader = new StringReader(textWithEmails); UAX29URLEmailTokenizerFactory factory = new UAX29URLEmailTokenizerFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] { @@ -176,7 +184,7 @@ Map args = new HashMap(); args.put("maxTokenLength", "1000"); 
UAX29URLEmailTokenizerFactory factory = new UAX29URLEmailTokenizerFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(args); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, @@ -188,8 +196,9 @@ public void testMatchVersion() throws Exception { Reader reader = new StringReader("ざ"); UAX29URLEmailTokenizerFactory factory = new UAX29URLEmailTokenizerFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] {"ざ"}); @@ -197,7 +206,7 @@ reader = new StringReader("ざ"); factory = new UAX29URLEmailTokenizerFactory(); factory.setLuceneMatchVersion(Version.LUCENE_31); - factory.init(EMPTY_PARAMS); + factory.init(args); stream = factory.create(reader); assertTokenStreamContents(stream, new String[] {"さ"}); // old broken behavior Index: solr/core/src/test/org/apache/solr/analysis/LengthFilterTest.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/LengthFilterTest.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/LengthFilterTest.java (working copy) @@ -21,10 +21,11 @@ import java.util.HashMap; import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; -public class LengthFilterTest extends BaseTokenTestCase { +public class LengthFilterTest extends BaseTokenStreamTestCase { public void test() throws IOException { LengthFilterFactory factory = new LengthFilterFactory(); Index: solr/core/src/test/org/apache/solr/analysis/TestEnglishMinimalStemFilterFactory.java =================================================================== --- 
solr/core/src/test/org/apache/solr/analysis/TestEnglishMinimalStemFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestEnglishMinimalStemFilterFactory.java (working copy) @@ -20,13 +20,14 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; /** * Simple tests to ensure the English minimal stem factory is working. */ -public class TestEnglishMinimalStemFilterFactory extends BaseTokenTestCase { +public class TestEnglishMinimalStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("bricks"); EnglishMinimalStemFilterFactory factory = new EnglishMinimalStemFilterFactory(); Index: solr/core/src/test/org/apache/solr/analysis/TestPortugueseStemFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestPortugueseStemFilterFactory.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestPortugueseStemFilterFactory.java (working copy) @@ -20,13 +20,14 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; /** * Simple tests to ensure the Portuguese stem factory is working. 
*/ -public class TestPortugueseStemFilterFactory extends BaseTokenTestCase { +public class TestPortugueseStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("maluquice"); PortugueseStemFilterFactory factory = new PortugueseStemFilterFactory(); Index: solr/core/src/test/org/apache/solr/analysis/TestNGramFilters.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestNGramFilters.java (revision 1335876) +++ solr/core/src/test/org/apache/solr/analysis/TestNGramFilters.java (working copy) @@ -22,6 +22,7 @@ import java.util.HashMap; import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; @@ -29,7 +30,7 @@ /** * Simple tests to ensure the NGram filter factories are working. */ -public class TestNGramFilters extends BaseTokenTestCase { +public class TestNGramFilters extends BaseTokenStreamTestCase { /** * Test NGramTokenizerFactory */ Index: solr/contrib/analysis-extras/src/test/org/apache/solr/analysis/TestStempelPolishStemFilterFactory.java =================================================================== --- solr/contrib/analysis-extras/src/test/org/apache/solr/analysis/TestStempelPolishStemFilterFactory.java (revision 1335876) +++ solr/contrib/analysis-extras/src/test/org/apache/solr/analysis/TestStempelPolishStemFilterFactory.java (working copy) @@ -2,6 +2,7 @@ import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.core.WhitespaceTokenizer; import org.apache.solr.core.SolrResourceLoader; @@ -26,12 +27,12 @@ /** * Tests for {@link StempelPolishStemFilterFactory} */ -public class TestStempelPolishStemFilterFactory extends BaseTokenTestCase { 
+public class TestStempelPolishStemFilterFactory extends BaseTokenStreamTestCase { public void testBasics() throws Exception { StringReader document = new StringReader("studenta studenci"); StempelPolishStemFilterFactory factory = new StempelPolishStemFilterFactory(); factory.inform(new SolrResourceLoader(null, null)); - TokenStream ts = factory.create(new WhitespaceTokenizer(DEFAULT_VERSION, document)); + TokenStream ts = factory.create(new WhitespaceTokenizer(TEST_VERSION_CURRENT, document)); assertTokenStreamContents(ts, new String[] { "student", "student" }); } Index: solr/contrib/analysis-extras/src/test/org/apache/solr/analysis/TestICUFoldingFilterFactory.java =================================================================== --- solr/contrib/analysis-extras/src/test/org/apache/solr/analysis/TestICUFoldingFilterFactory.java (revision 1335876) +++ solr/contrib/analysis-extras/src/test/org/apache/solr/analysis/TestICUFoldingFilterFactory.java (working copy) @@ -20,19 +20,20 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.WhitespaceTokenizer; /** basic tests for {@link ICUFoldingFilterFactory} */ -public class TestICUFoldingFilterFactory extends BaseTokenTestCase { +public class TestICUFoldingFilterFactory extends BaseTokenStreamTestCase { /** basic tests to ensure the folding is working */ public void test() throws Exception { Reader reader = new StringReader("Résumé"); ICUFoldingFilterFactory factory = new ICUFoldingFilterFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - Tokenizer tokenizer = new WhitespaceTokenizer(DEFAULT_VERSION, reader); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader); TokenStream stream = factory.create(tokenizer); 
assertTokenStreamContents(stream, new String[] { "resume" }); } Index: solr/contrib/analysis-extras/src/test/org/apache/solr/analysis/TestICUNormalizer2FilterFactory.java =================================================================== --- solr/contrib/analysis-extras/src/test/org/apache/solr/analysis/TestICUNormalizer2FilterFactory.java (revision 1335876) +++ solr/contrib/analysis-extras/src/test/org/apache/solr/analysis/TestICUNormalizer2FilterFactory.java (working copy) @@ -19,21 +19,25 @@ import java.io.Reader; import java.io.StringReader; +import java.util.Collections; +import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.WhitespaceTokenizer; /** basic tests for {@link ICUNormalizer2FilterFactory} */ -public class TestICUNormalizer2FilterFactory extends BaseTokenTestCase { +public class TestICUNormalizer2FilterFactory extends BaseTokenStreamTestCase { /** Test nfkc_cf defaults */ public void testDefaults() throws Exception { Reader reader = new StringReader("This is a Test"); ICUNormalizer2FilterFactory factory = new ICUNormalizer2FilterFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); - factory.init(EMPTY_PARAMS); - Tokenizer tokenizer = new WhitespaceTokenizer(DEFAULT_VERSION, reader); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); + Map args = Collections.emptyMap(); + factory.init(args); + Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader); TokenStream stream = factory.create(tokenizer); assertTokenStreamContents(stream, new String[] { "this", "is", "a", "test" }); } Index: solr/contrib/analysis-extras/src/test/org/apache/solr/analysis/TestICUTokenizerFactory.java =================================================================== --- solr/contrib/analysis-extras/src/test/org/apache/solr/analysis/TestICUTokenizerFactory.java (revision 1335876) +++ 
solr/contrib/analysis-extras/src/test/org/apache/solr/analysis/TestICUTokenizerFactory.java (working copy) @@ -20,10 +20,11 @@ import java.io.Reader; import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.TokenStream; /** basic tests for {@link ICUTokenizerFactory} **/ -public class TestICUTokenizerFactory extends BaseTokenTestCase { +public class TestICUTokenizerFactory extends BaseTokenStreamTestCase { public void testMixedText() throws Exception { Reader reader = new StringReader("การที่ได้ต้องแสดงว่างานดี This is a test ກວ່າດອກ"); ICUTokenizerFactory factory = new ICUTokenizerFactory(); Index: solr/contrib/analysis-extras/src/test/org/apache/solr/analysis/TestSmartChineseFactories.java =================================================================== --- solr/contrib/analysis-extras/src/test/org/apache/solr/analysis/TestSmartChineseFactories.java (revision 1335876) +++ solr/contrib/analysis-extras/src/test/org/apache/solr/analysis/TestSmartChineseFactories.java (working copy) @@ -19,6 +19,7 @@ import java.io.StringReader; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.WhitespaceTokenizer; @@ -27,11 +28,11 @@ * Tests for {@link SmartChineseSentenceTokenizerFactory} and * {@link SmartChineseWordTokenFilterFactory} */ -public class TestSmartChineseFactories extends BaseTokenTestCase { +public class TestSmartChineseFactories extends BaseTokenStreamTestCase { /** Test showing the behavior with whitespace */ public void testSimple() throws Exception { String sentence = "我购买了道具和服装。"; - WhitespaceTokenizer ws = new WhitespaceTokenizer(DEFAULT_VERSION, new StringReader(sentence)); + WhitespaceTokenizer ws = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(sentence)); SmartChineseWordTokenFilterFactory factory = new 
SmartChineseWordTokenFilterFactory(); TokenStream ts = factory.create(ws); // TODO: fix smart chinese to not emit punctuation tokens Index: solr/contrib/analysis-extras/src/test/org/apache/solr/analysis/TestMorfologikFilterFactory.java =================================================================== --- solr/contrib/analysis-extras/src/test/org/apache/solr/analysis/TestMorfologikFilterFactory.java (revision 1335876) +++ solr/contrib/analysis-extras/src/test/org/apache/solr/analysis/TestMorfologikFilterFactory.java (working copy) @@ -4,6 +4,7 @@ import java.util.HashMap; import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.core.WhitespaceTokenizer; import org.apache.solr.schema.IndexSchema; @@ -28,16 +29,16 @@ /** * Test for {@link MorfologikFilterFactory}. */ -public class TestMorfologikFilterFactory extends BaseTokenTestCase { +public class TestMorfologikFilterFactory extends BaseTokenStreamTestCase { public void testCreateDictionary() throws Exception { StringReader reader = new StringReader("rowery bilety"); Map initParams = new HashMap(); initParams.put(MorfologikFilterFactory.DICTIONARY_SCHEMA_ATTRIBUTE, "morfologik"); MorfologikFilterFactory factory = new MorfologikFilterFactory(); - factory.setLuceneMatchVersion(DEFAULT_VERSION); + factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); factory.init(initParams); - TokenStream ts = factory.create(new WhitespaceTokenizer(DEFAULT_VERSION, + TokenStream ts = factory.create(new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader)); assertTokenStreamContents(ts, new String[] {"rower", "bilet"}); } Index: solr/contrib/analysis-extras/src/test/org/apache/solr/analysis/TestICUTransformFilterFactory.java =================================================================== --- solr/contrib/analysis-extras/src/test/org/apache/solr/analysis/TestICUTransformFilterFactory.java (revision 1335876) +++ 
solr/contrib/analysis-extras/src/test/org/apache/solr/analysis/TestICUTransformFilterFactory.java (working copy) @@ -22,12 +22,13 @@ import java.util.HashMap; import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.WhitespaceTokenizer; /** basic tests for {@link ICUTransformFilterFactory} */ -public class TestICUTransformFilterFactory extends BaseTokenTestCase { +public class TestICUTransformFilterFactory extends BaseTokenStreamTestCase { /** ensure the transform is working */ public void test() throws Exception { @@ -36,7 +37,7 @@ Map args = new HashMap(); args.put("id", "Traditional-Simplified"); factory.init(args); - Tokenizer tokenizer = new WhitespaceTokenizer(DEFAULT_VERSION, reader); + Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader); TokenStream stream = factory.create(tokenizer); assertTokenStreamContents(stream, new String[] { "简化字" }); } @@ -49,7 +50,7 @@ Map args = new HashMap(); args.put("id", "Cyrillic-Latin"); factory.init(args); - Tokenizer tokenizer = new WhitespaceTokenizer(DEFAULT_VERSION, reader); + Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader); TokenStream stream = factory.create(tokenizer); assertTokenStreamContents(stream, new String[] { "Rossijskaâ", "Federaciâ" }); @@ -57,7 +58,7 @@ reader = new StringReader("Rossijskaâ Federaciâ"); args.put("direction", "reverse"); factory.init(args); - tokenizer = new WhitespaceTokenizer(DEFAULT_VERSION, reader); + tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader); stream = factory.create(tokenizer); assertTokenStreamContents(stream, new String[] { "Российская", "Федерация" }); } Index: solr/contrib/analysis-extras/src/test/org/apache/solr/analysis/TestICUCollationKeyFilterFactory.java =================================================================== --- 
solr/contrib/analysis-extras/src/test/org/apache/solr/analysis/TestICUCollationKeyFilterFactory.java (revision 1335876) +++ solr/contrib/analysis-extras/src/test/org/apache/solr/analysis/TestICUCollationKeyFilterFactory.java (working copy) @@ -22,6 +22,7 @@ import java.util.HashMap; import java.util.Map; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.core.KeywordTokenizer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; @@ -31,7 +32,7 @@ import com.ibm.icu.util.ULocale; @Deprecated -public class TestICUCollationKeyFilterFactory extends BaseTokenTestCase { +public class TestICUCollationKeyFilterFactory extends BaseTokenStreamTestCase { /* * Turkish has some funny casing. Index: solr/test-framework/src/java/org/apache/solr/analysis/BaseTokenTestCase.java =================================================================== --- solr/test-framework/src/java/org/apache/solr/analysis/BaseTokenTestCase.java (revision 1335876) +++ solr/test-framework/src/java/org/apache/solr/analysis/BaseTokenTestCase.java (working copy) @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.solr.analysis; - -import java.util.Collections; -import java.util.Map; - -import org.apache.lucene.analysis.BaseTokenStreamTestCase; -import org.apache.lucene.util.Version; -import org.apache.solr.core.Config; - -/** - * General token testing helper functions - */ -public abstract class BaseTokenTestCase extends BaseTokenStreamTestCase{ - - protected static final Map EMPTY_PARAMS = Collections.emptyMap(); - - /** The default test version for easy testing */ - public static final Version DEFAULT_VERSION; - - static { - String rawVersion = System.getProperty("tests.luceneMatchVersion", "LUCENE_CURRENT"); - try { - DEFAULT_VERSION = Version.parseLeniently(rawVersion); - } catch (IllegalArgumentException iae) { - throw new RuntimeException("Test Lucene Match Version [" + rawVersion + "] is invalid", iae); - } - } -}