Index: solr/core/src/test/org/apache/solr/core/ResourceLoaderTest.java =================================================================== --- solr/core/src/test/org/apache/solr/core/ResourceLoaderTest.java (revision 1365868) +++ solr/core/src/test/org/apache/solr/core/ResourceLoaderTest.java (working copy) @@ -19,6 +19,7 @@ import junit.framework.Assert; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.analysis.core.KeywordTokenizerFactory; import org.apache.lucene.analysis.ngram.NGramFilterFactory; @@ -53,8 +54,8 @@ Class clazz = ResourceLoaderAware.class; // Check ResourceLoaderAware valid objects - loader.assertAwareCompatibility( clazz, new NGramFilterFactory() ); - loader.assertAwareCompatibility( clazz, new KeywordTokenizerFactory() ); + loader.assertAwareCompatibility( clazz, new NGramFilterFactory(BaseTokenStreamTestCase.TEST_VERSION_CURRENT_MAP()) ); + loader.assertAwareCompatibility( clazz, new KeywordTokenizerFactory(BaseTokenStreamTestCase.TEST_VERSION_CURRENT_MAP()) ); // Make sure it throws an error for invalid objects Object[] invalid = new Object[] { @@ -80,9 +81,9 @@ // Make sure it throws an error for invalid objects invalid = new Object[] { - new NGramFilterFactory(), + new NGramFilterFactory(BaseTokenStreamTestCase.TEST_VERSION_CURRENT_MAP()), "hello", new Float( 12.3f ), - new KeywordTokenizerFactory() + new KeywordTokenizerFactory(BaseTokenStreamTestCase.TEST_VERSION_CURRENT_MAP()) }; for( Object obj : invalid ) { try { Index: solr/core/src/test/org/apache/solr/analysis/TestMultiWordSynonyms.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestMultiWordSynonyms.java (revision 1365868) +++ solr/core/src/test/org/apache/solr/analysis/TestMultiWordSynonyms.java (working copy) @@ -37,11 +37,9 @@ public class TestMultiWordSynonyms extends BaseTokenStreamTestCase { public void 
testMultiWordSynonyms() throws IOException { - SynonymFilterFactory factory = new SynonymFilterFactory(); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("synonyms", "synonyms.txt"); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(args); + SynonymFilterFactory factory = new SynonymFilterFactory(args); factory.inform(new StringMockSolrResourceLoader("a b c,d")); TokenStream ts = factory.create(new MockTokenizer(new StringReader("a e"), MockTokenizer.WHITESPACE, false)); // This fails because ["e","e"] is the value of the token stream @@ -59,7 +57,7 @@ return null; } - public T newInstance(String cname, Class expectedType, String... subpackages) { + public T newInstance(String cname, Class expectedType, Class[] argTypes, Object[] args, String... subpackages) { return null; } Index: solr/core/src/test/org/apache/solr/analysis/TestReversedWildcardFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestReversedWildcardFilterFactory.java (revision 1365868) +++ solr/core/src/test/org/apache/solr/analysis/TestReversedWildcardFilterFactory.java (working copy) @@ -44,7 +44,6 @@ public class TestReversedWildcardFilterFactory extends SolrTestCaseJ4 { Map args = new HashMap(); - ReversedWildcardFilterFactory factory = new ReversedWildcardFilterFactory(); IndexSchema schema; @BeforeClass @@ -65,7 +64,7 @@ public void testReversedTokens() throws IOException { String text = "simple text"; args.put("withOriginal", "true"); - factory.init(args); + ReversedWildcardFilterFactory factory = new ReversedWildcardFilterFactory(args); TokenStream input = factory.create(new MockTokenizer(new StringReader(text), MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(input, new String[] { "\u0001elpmis", "simple", "\u0001txet", "text" }, @@ -73,7 +72,7 @@ // now without original tokens args.put("withOriginal", "false"); - factory.init(args); + factory = 
new ReversedWildcardFilterFactory(args); input = factory.create(new MockTokenizer(new StringReader(text), MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(input, new String[] { "\u0001elpmis", "\u0001txet" }, Index: solr/core/src/test/org/apache/solr/analysis/TestWordDelimiterFilterFactory.java =================================================================== --- solr/core/src/test/org/apache/solr/analysis/TestWordDelimiterFilterFactory.java (revision 1365868) +++ solr/core/src/test/org/apache/solr/analysis/TestWordDelimiterFilterFactory.java (working copy) @@ -198,7 +198,6 @@ @Test public void testCustomTypes() throws Exception { String testText = "I borrowed $5,400.00 at 25% interest-rate"; - WordDelimiterFilterFactory factoryDefault = new WordDelimiterFilterFactory(); ResourceLoader loader = new SolrResourceLoader("solr/collection1"); Map args = new HashMap(); args.put("generateWordParts", "1"); @@ -207,9 +206,9 @@ args.put("catenateNumbers", "1"); args.put("catenateAll", "0"); args.put("splitOnCaseChange", "1"); - + /* default behavior */ - factoryDefault.init(args); + WordDelimiterFilterFactory factoryDefault = new WordDelimiterFilterFactory(args); factoryDefault.inform(loader); TokenStream ts = factoryDefault.create( @@ -224,10 +223,9 @@ /* custom behavior */ - WordDelimiterFilterFactory factoryCustom = new WordDelimiterFilterFactory(); // use a custom type mapping args.put("types", "wdftypes.txt"); - factoryCustom.init(args); + WordDelimiterFilterFactory factoryCustom = new WordDelimiterFilterFactory(args); factoryCustom.inform(loader); ts = factoryCustom.create( Index: solr/core/src/java/org/apache/solr/update/processor/SignatureUpdateProcessorFactory.java =================================================================== --- solr/core/src/java/org/apache/solr/update/processor/SignatureUpdateProcessorFactory.java (revision 1365868) +++ solr/core/src/java/org/apache/solr/update/processor/SignatureUpdateProcessorFactory.java (working copy) @@ 
-142,7 +142,7 @@ currDocSigFields = sigFields; } - Signature sig = req.getCore().getResourceLoader().newInstance(signatureClass, Signature.class); + Signature sig = req.getCore().getResourceLoader().newInstance(signatureClass, Signature.class, new Class[0], new Object[0]); sig.init(params); for (String field : currDocSigFields) { Index: solr/core/src/java/org/apache/solr/update/SolrIndexConfig.java =================================================================== --- solr/core/src/java/org/apache/solr/update/SolrIndexConfig.java (revision 1365868) +++ solr/core/src/java/org/apache/solr/update/SolrIndexConfig.java (working copy) @@ -182,7 +182,7 @@ private MergePolicy buildMergePolicy(IndexSchema schema) { String mpClassName = mergePolicyInfo == null ? defaultMergePolicyClassName : mergePolicyInfo.className; - MergePolicy policy = schema.getResourceLoader().newInstance(mpClassName, MergePolicy.class); + MergePolicy policy = schema.getResourceLoader().newInstance(mpClassName, MergePolicy.class, new Class[0], new Object[0]); if (policy instanceof LogMergePolicy) { LogMergePolicy logMergePolicy = (LogMergePolicy) policy; @@ -215,7 +215,7 @@ private MergeScheduler buildMergeScheduler(IndexSchema schema) { String msClassName = mergeSchedulerInfo == null ? 
SolrIndexConfig.DEFAULT_MERGE_SCHEDULER_CLASSNAME : mergeSchedulerInfo.className; - MergeScheduler scheduler = schema.getResourceLoader().newInstance(msClassName, MergeScheduler.class); + MergeScheduler scheduler = schema.getResourceLoader().newInstance(msClassName, MergeScheduler.class, new Class[0], new Object[0]); if (mergeSchedulerInfo != null) SolrPluginUtils.invokeSetters(scheduler, mergeSchedulerInfo.initArgs); Index: solr/core/src/java/org/apache/solr/analysis/TrieTokenizerFactory.java =================================================================== --- solr/core/src/java/org/apache/solr/analysis/TrieTokenizerFactory.java (revision 1365868) +++ solr/core/src/java/org/apache/solr/analysis/TrieTokenizerFactory.java (working copy) @@ -26,6 +26,7 @@ import java.io.IOException; import java.io.Reader; +import java.util.Collections; /** * Tokenizer for trie fields. It uses NumericTokenStream to create multiple trie encoded string per number. @@ -44,6 +45,7 @@ protected final TrieTypes type; public TrieTokenizerFactory(TrieTypes type, int precisionStep) { + super(Collections.emptyMap()); this.type = type; this.precisionStep = precisionStep; } Index: solr/core/src/java/org/apache/solr/analysis/LegacyHTMLStripCharFilterFactory.java =================================================================== --- solr/core/src/java/org/apache/solr/analysis/LegacyHTMLStripCharFilterFactory.java (revision 1365868) +++ solr/core/src/java/org/apache/solr/analysis/LegacyHTMLStripCharFilterFactory.java (working copy) @@ -19,6 +19,7 @@ */ import java.io.Reader; +import java.util.Map; import org.apache.lucene.analysis.charfilter.HTMLStripCharFilterFactory; import org.apache.lucene.analysis.util.CharFilterFactory; @@ -54,6 +55,10 @@ @Deprecated public class LegacyHTMLStripCharFilterFactory extends CharFilterFactory { + public LegacyHTMLStripCharFilterFactory(Map args) { + super(args); + } + public LegacyHTMLStripCharFilter create(Reader input) { return new 
LegacyHTMLStripCharFilter(input); } Index: solr/core/src/java/org/apache/solr/analysis/ReversedWildcardFilterFactory.java =================================================================== --- solr/core/src/java/org/apache/solr/analysis/ReversedWildcardFilterFactory.java (revision 1365868) +++ solr/core/src/java/org/apache/solr/analysis/ReversedWildcardFilterFactory.java (working copy) @@ -71,9 +71,8 @@ private int minTrailing; private float maxFractionAsterisk; - @Override - public void init(Map args) { - super.init(args); + public ReversedWildcardFilterFactory(Map args) { + super(args); withOriginal = getBoolean("withOriginal", true); maxPosAsterisk = getInt("maxPosAsterisk", 2); maxPosQuestion = getInt("maxPosQuestion", 1); Index: solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java =================================================================== --- solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java (revision 1365868) +++ solr/core/src/java/org/apache/solr/core/SolrResourceLoader.java (working copy) @@ -462,7 +462,7 @@ } } - public T newInstance(String cname, Class expectedType, String ... subpackages) { + public T newInstance(String cname, Class expectedType, Class[] argTypes, Object[] args, String ... 
subpackages) { Class clazz = findClass(cname, expectedType, subpackages); if( clazz == null ) { throw new SolrException( SolrException.ErrorCode.SERVER_ERROR, @@ -471,8 +471,9 @@ T obj = null; try { - obj = clazz.newInstance(); - } + Constructor constructor = clazz.getConstructor(argTypes); + obj = constructor.newInstance(args); + } catch (Exception e) { throw new SolrException( SolrException.ErrorCode.SERVER_ERROR, "Error instantiating class: '" + clazz.getName()+"'", e); Index: solr/core/src/java/org/apache/solr/core/SolrCore.java =================================================================== --- solr/core/src/java/org/apache/solr/core/SolrCore.java (revision 1365868) +++ solr/core/src/java/org/apache/solr/core/SolrCore.java (working copy) @@ -384,7 +384,7 @@ DirectoryFactory dirFactory; PluginInfo info = solrConfig.getPluginInfo(DirectoryFactory.class.getName()); if (info != null) { - dirFactory = getResourceLoader().newInstance(info.className, DirectoryFactory.class); + dirFactory = getResourceLoader().newInstance(info.className, DirectoryFactory.class, new Class[0], new Object[0]); dirFactory.init(info.initArgs); } else { dirFactory = new NRTCachingDirectoryFactory(); @@ -397,7 +397,7 @@ IndexReaderFactory indexReaderFactory; PluginInfo info = solrConfig.getPluginInfo(IndexReaderFactory.class.getName()); if (info != null) { - indexReaderFactory = resourceLoader.newInstance(info.className, IndexReaderFactory.class); + indexReaderFactory = resourceLoader.newInstance(info.className, IndexReaderFactory.class, new Class[0], new Object[0]); indexReaderFactory.init(info.initArgs); } else { indexReaderFactory = new StandardIndexReaderFactory(); @@ -474,7 +474,7 @@ return (T)con.newInstance(this); } } - return getResourceLoader().newInstance(className, cast);//use the empty constructor + return getResourceLoader().newInstance(className, cast, new Class[0], new Object[0]);//use the empty constructor } catch (SolrException e) { throw e; } catch (Exception e) { @@ 
-711,7 +711,7 @@ final PluginInfo info = solrConfig.getPluginInfo(CodecFactory.class.getName()); final CodecFactory factory; if (info != null) { - factory = schema.getResourceLoader().newInstance(info.className, CodecFactory.class); + factory = schema.getResourceLoader().newInstance(info.className, CodecFactory.class, new Class[0], new Object[0]); factory.init(info.initArgs); } else { factory = new CodecFactory() { @@ -1011,7 +1011,7 @@ } private void addIfNotPresent(Map registry, String name, Class c){ if(!registry.containsKey(name)){ - T searchComp = resourceLoader.newInstance(c.getName(), c); + T searchComp = resourceLoader.newInstance(c.getName(), c, new Class[0], new Object[0]); if (searchComp instanceof NamedListInitializedPlugin){ ((NamedListInitializedPlugin)searchComp).init( new NamedList() ); } Index: solr/core/src/java/org/apache/solr/core/CoreContainer.java =================================================================== --- solr/core/src/java/org/apache/solr/core/CoreContainer.java (revision 1365868) +++ solr/core/src/java/org/apache/solr/core/CoreContainer.java (working copy) @@ -417,7 +417,7 @@ // } else { try { - logging = loader.newInstance(fname, LogWatcher.class); + logging = loader.newInstance(fname, LogWatcher.class, new Class[0], new Object[0]); } catch (Throwable e) { log.warn("Unable to load LogWatcher", e); Index: solr/core/src/java/org/apache/solr/schema/IndexSchema.java =================================================================== --- solr/core/src/java/org/apache/solr/schema/IndexSchema.java (revision 1365868) +++ solr/core/src/java/org/apache/solr/schema/IndexSchema.java (working copy) @@ -703,7 +703,7 @@ return null; } else { SimilarityFactory similarityFactory; - final Object obj = loader.newInstance(((Element) node).getAttribute("class"), Object.class, "search.similarities."); + final Object obj = loader.newInstance(((Element) node).getAttribute("class"), Object.class, new Class[0], new Object[0], "search.similarities."); if 
(obj instanceof SimilarityFactory) { // configure a factory, get a similarity back SolrParams params = SolrParams.toSolrParams(DOMUtil.childNodesToNamedList(node)); Index: solr/core/src/java/org/apache/solr/schema/FieldTypePluginLoader.java =================================================================== --- solr/core/src/java/org/apache/solr/schema/FieldTypePluginLoader.java (revision 1365868) +++ solr/core/src/java/org/apache/solr/schema/FieldTypePluginLoader.java (working copy) @@ -78,7 +78,7 @@ String className, Node node ) throws Exception { - FieldType ft = loader.newInstance(className, FieldType.class); + FieldType ft = loader.newInstance(className, FieldType.class, new Class[0], new Object[0]); ft.setTypeName(name); String expression = "./analyzer[@type='query']"; @@ -171,8 +171,7 @@ static final KeywordTokenizerFactory keyFactory; static { - keyFactory = new KeywordTokenizerFactory(); - keyFactory.init(new HashMap()); + keyFactory = new KeywordTokenizerFactory(new HashMap()); } ArrayList charFilters = null; @@ -269,17 +268,18 @@ @Override protected void init(CharFilterFactory plugin, Node node) throws Exception { if( plugin != null ) { - final Map params = DOMUtil.toMapExcept(node.getAttributes(),"class"); - - String configuredVersion = params.remove(LUCENE_MATCH_VERSION_PARAM); - plugin.setLuceneMatchVersion(parseConfiguredVersion(configuredVersion, plugin.getClass().getSimpleName())); - - plugin.init( params ); charFilters.add( plugin ); } } @Override + protected CharFilterFactory create(ResourceLoader loader, String name, String className, Node node) throws Exception { + Map params = DOMUtil.toMapExcept(node.getAttributes(),"class"); + validateConfiguredVersion(params, className); + return loader.newInstance(className, CharFilterFactory.class, new Class[] {Map.class}, new Object[] {params}); + } + + @Override protected CharFilterFactory register(String name, CharFilterFactory plugin) { return null; // used for map registration @@ -304,12 +304,6 @@ 
throw new SolrException( SolrException.ErrorCode.SERVER_ERROR, "The schema defines multiple tokenizers for: "+node ); } - final Map params = DOMUtil.toMapExcept(node.getAttributes(),"class"); - - String configuredVersion = params.remove(LUCENE_MATCH_VERSION_PARAM); - plugin.setLuceneMatchVersion(parseConfiguredVersion(configuredVersion, plugin.getClass().getSimpleName())); - - plugin.init( params ); tokenizers.add( plugin ); } @@ -317,6 +311,13 @@ protected TokenizerFactory register(String name, TokenizerFactory plugin) { return null; // used for map registration } + + @Override + protected TokenizerFactory create(ResourceLoader loader, String name, String className, Node node) throws Exception { + Map params = DOMUtil.toMapExcept(node.getAttributes(),"class"); + validateConfiguredVersion(params, className); + return loader.newInstance(className, TokenizerFactory.class, new Class[] {Map.class}, new Object[] {params}); + } }; tokenizerLoader.load( loader, (NodeList)xpath.evaluate("./tokenizer", node, XPathConstants.NODESET) ); @@ -338,12 +339,6 @@ @Override protected void init(TokenFilterFactory plugin, Node node) throws Exception { if( plugin != null ) { - final Map params = DOMUtil.toMapExcept(node.getAttributes(),"class"); - - String configuredVersion = params.remove(LUCENE_MATCH_VERSION_PARAM); - plugin.setLuceneMatchVersion(parseConfiguredVersion(configuredVersion, plugin.getClass().getSimpleName())); - - plugin.init( params ); filters.add( plugin ); } } @@ -352,6 +347,13 @@ protected TokenFilterFactory register(String name, TokenFilterFactory plugin) throws Exception { return null; // used for map registration } + + @Override + protected TokenFilterFactory create(ResourceLoader loader, String name, String className, Node node) throws Exception { + Map params = DOMUtil.toMapExcept(node.getAttributes(),"class"); + validateConfiguredVersion(params, className); + return loader.newInstance(className, TokenFilterFactory.class, new Class[] {Map.class}, new Object[] 
{params}); + } }; filterLoader.load( loader, (NodeList)xpath.evaluate("./filter", node, XPathConstants.NODESET) ); @@ -359,7 +361,8 @@ tokenizers.get(0), filters.toArray(new TokenFilterFactory[filters.size()])); } - private Version parseConfiguredVersion(String configuredVersion, String pluginClassName) { + private void validateConfiguredVersion(Map params, String pluginClassName) { + String configuredVersion = params.get(LUCENE_MATCH_VERSION_PARAM); Version version = (configuredVersion != null) ? Config.parseLuceneVersionString(configuredVersion) : schema.getDefaultLuceneMatchVersion(); @@ -368,7 +371,9 @@ " emulation. You should at some point declare and reindex to at least 4.0, because " + "3.x emulation is deprecated and will be removed in 5.0"); } - return version; + if (configuredVersion == null) { + params.put(LUCENE_MATCH_VERSION_PARAM, version.toString()); + } } } Index: solr/core/src/java/org/apache/solr/search/ValueSourceParser.java =================================================================== --- solr/core/src/java/org/apache/solr/search/ValueSourceParser.java (revision 1365868) +++ solr/core/src/java/org/apache/solr/search/ValueSourceParser.java (working copy) @@ -344,7 +344,7 @@ } dist = new NGramDistance(ngram); } else { - dist = fp.req.getCore().getResourceLoader().newInstance(distClass, StringDistance.class); + dist = fp.req.getCore().getResourceLoader().newInstance(distClass, StringDistance.class, new Class[0], new Object[0]); } return new StringDistanceFunction(str1, str2, dist); } Index: solr/core/src/java/org/apache/solr/search/CacheConfig.java =================================================================== --- solr/core/src/java/org/apache/solr/search/CacheConfig.java (revision 1365868) +++ solr/core/src/java/org/apache/solr/search/CacheConfig.java (working copy) @@ -97,7 +97,7 @@ config.regenImpl = config.args.get("regenerator"); config.clazz = loader.findClass(config.cacheImpl, SolrCache.class); if (config.regenImpl != null) { - 
config.regenerator = loader.newInstance(config.regenImpl, CacheRegenerator.class); + config.regenerator = loader.newInstance(config.regenImpl, CacheRegenerator.class, new Class[0], new Object[0]); } return config; Index: solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java =================================================================== --- solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java (revision 1365868) +++ solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java (working copy) @@ -591,7 +591,7 @@ if (className == null) className = IndexBasedSpellChecker.class.getName(); SolrResourceLoader loader = core.getResourceLoader(); - SolrSpellChecker checker = loader.newInstance(className, SolrSpellChecker.class); + SolrSpellChecker checker = loader.newInstance(className, SolrSpellChecker.class, new Class[0], new Object[0]); if (checker != null) { String dictionary = checker.init(spellchecker, core); if (dictionary != null) { Index: solr/core/src/java/org/apache/solr/spelling/DirectSolrSpellChecker.java =================================================================== --- solr/core/src/java/org/apache/solr/spelling/DirectSolrSpellChecker.java (revision 1365868) +++ solr/core/src/java/org/apache/solr/spelling/DirectSolrSpellChecker.java (working copy) @@ -110,13 +110,13 @@ else if (compClass.equalsIgnoreCase(FREQ_COMP)) comp = new SuggestWordFrequencyComparator(); else //must be a FQCN - comp = (Comparator) core.getResourceLoader().newInstance(compClass, Comparator.class); + comp = (Comparator) core.getResourceLoader().newInstance(compClass, Comparator.class, new Class[0], new Object[0]); } StringDistance sd = DirectSpellChecker.INTERNAL_LEVENSHTEIN; String distClass = (String) config.get(STRING_DISTANCE); if (distClass != null && !distClass.equalsIgnoreCase(INTERNAL_DISTANCE)) - sd = core.getResourceLoader().newInstance(distClass, StringDistance.class); + sd = 
core.getResourceLoader().newInstance(distClass, StringDistance.class, new Class[0], new Object[0]); float minAccuracy = DEFAULT_ACCURACY; Float accuracy = (Float) config.get(ACCURACY); Index: solr/core/src/java/org/apache/solr/spelling/suggest/Suggester.java =================================================================== --- solr/core/src/java/org/apache/solr/spelling/suggest/Suggester.java (revision 1365868) +++ solr/core/src/java/org/apache/solr/spelling/suggest/Suggester.java (working copy) @@ -97,7 +97,7 @@ lookupImpl = FSTLookupFactory.class.getName(); } - factory = core.getResourceLoader().newInstance(lookupImpl, LookupFactory.class); + factory = core.getResourceLoader().newInstance(lookupImpl, LookupFactory.class, new Class[0], new Object[0]); lookup = factory.create(config, core); String store = (String)config.get(STORE_DIR); Index: solr/core/src/java/org/apache/solr/spelling/AbstractLuceneSpellChecker.java =================================================================== --- solr/core/src/java/org/apache/solr/spelling/AbstractLuceneSpellChecker.java (revision 1365868) +++ solr/core/src/java/org/apache/solr/spelling/AbstractLuceneSpellChecker.java (working copy) @@ -110,14 +110,14 @@ } else if (compClass.equalsIgnoreCase(FREQ_COMP)){ comp = new SuggestWordFrequencyComparator(); } else{//must be a FQCN - comp = (Comparator) core.getResourceLoader().newInstance(compClass, Comparator.class); + comp = (Comparator) core.getResourceLoader().newInstance(compClass, Comparator.class, new Class[0], new Object[0]); } } else { comp = SuggestWordQueue.DEFAULT_COMPARATOR; } String strDistanceName = (String)config.get(STRING_DISTANCE); if (strDistanceName != null) { - sd = core.getResourceLoader().newInstance(strDistanceName, StringDistance.class); + sd = core.getResourceLoader().newInstance(strDistanceName, StringDistance.class, new Class[0], new Object[0]); //TODO: Figure out how to configure options. Where's Spring when you need it? Or at least BeanUtils... 
} else { sd = new LevensteinDistance(); Index: solr/core/src/java/org/apache/solr/util/plugin/AbstractPluginLoader.java =================================================================== --- solr/core/src/java/org/apache/solr/util/plugin/AbstractPluginLoader.java (revision 1365868) +++ solr/core/src/java/org/apache/solr/util/plugin/AbstractPluginLoader.java (working copy) @@ -83,7 +83,7 @@ @SuppressWarnings("unchecked") protected T create( ResourceLoader loader, String name, String className, Node node ) throws Exception { - return loader.newInstance(className, pluginClassType, getDefaultPackages()); + return loader.newInstance(className, pluginClassType, new Class[0], new Object[0], getDefaultPackages()); } /** Index: solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/ClusteringComponent.java =================================================================== --- solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/ClusteringComponent.java (revision 1365868) +++ solr/contrib/clustering/src/java/org/apache/solr/handler/clustering/ClusteringComponent.java (working copy) @@ -200,7 +200,7 @@ className = CarrotClusteringEngine.class.getName(); } SolrResourceLoader loader = core.getResourceLoader(); - ClusteringEngine clusterer = loader.newInstance(className, ClusteringEngine.class); + ClusteringEngine clusterer = loader.newInstance(className, ClusteringEngine.class, new Class[0], new Object[0]); if (clusterer != null) { String name = clusterer.init(engineNL, core); if (name != null) { Index: solr/contrib/dataimporthandler-extras/src/java/org/apache/solr/handler/dataimport/TikaEntityProcessor.java =================================================================== --- solr/contrib/dataimporthandler-extras/src/java/org/apache/solr/handler/dataimport/TikaEntityProcessor.java (revision 1365868) +++ solr/contrib/dataimporthandler-extras/src/java/org/apache/solr/handler/dataimport/TikaEntityProcessor.java (working copy) @@ -121,7 +121,7 @@ 
if(parser.equals(AUTO_PARSER)){ tikaParser = new AutoDetectParser(tikaConfig); } else { - tikaParser = context.getSolrCore().getResourceLoader().newInstance(parser, Parser.class); + tikaParser = context.getSolrCore().getResourceLoader().newInstance(parser, Parser.class, new Class[0], new Object[0]); } try { tikaParser.parse(is, contentHandler, metadata , new ParseContext()); Index: solr/test-framework/src/java/org/apache/solr/analysis/StringMockSolrResourceLoader.java =================================================================== --- solr/test-framework/src/java/org/apache/solr/analysis/StringMockSolrResourceLoader.java (revision 1365868) +++ solr/test-framework/src/java/org/apache/solr/analysis/StringMockSolrResourceLoader.java (working copy) @@ -36,7 +36,7 @@ return Arrays.asList(text.split("\n")); } - public T newInstance(String cname, Class expectedType, String... subpackages) { + public T newInstance(String cname, Class expectedType, Class[] argTypes, Object[] args, String... subpackages) { return null; } Index: solr/test-framework/src/java/org/apache/solr/analysis/MockCharFilterFactory.java =================================================================== --- solr/test-framework/src/java/org/apache/solr/analysis/MockCharFilterFactory.java (revision 1365868) +++ solr/test-framework/src/java/org/apache/solr/analysis/MockCharFilterFactory.java (working copy) @@ -30,9 +30,8 @@ public class MockCharFilterFactory extends CharFilterFactory { int remainder; - @Override - public void init(Map args) { - super.init(args); + public MockCharFilterFactory(Map args) { + super(args); String sval = args.get("remainder"); if (sval == null) { throw new IllegalArgumentException("remainder is mandatory"); Index: solr/test-framework/src/java/org/apache/solr/analysis/MockTokenizerFactory.java =================================================================== --- solr/test-framework/src/java/org/apache/solr/analysis/MockTokenizerFactory.java (revision 1365868) +++ 
solr/test-framework/src/java/org/apache/solr/analysis/MockTokenizerFactory.java (working copy) @@ -32,9 +32,8 @@ CharacterRunAutomaton pattern; boolean enableChecks; - @Override - public void init(Map args) { - super.init(args); + public MockTokenizerFactory(Map args) { + super(args); String patternArg = args.get("pattern"); if (patternArg == null) { patternArg = "whitespace"; Index: lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseReadingFormFilterFactory.java =================================================================== --- lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseReadingFormFilterFactory.java (revision 1365868) +++ lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseReadingFormFilterFactory.java (working copy) @@ -30,12 +30,10 @@ */ public class TestJapaneseReadingFormFilterFactory extends BaseTokenStreamTestCase { public void testReadings() throws IOException { - JapaneseTokenizerFactory tokenizerFactory = new JapaneseTokenizerFactory(); - Map args = Collections.emptyMap(); - tokenizerFactory.init(args); + JapaneseTokenizerFactory tokenizerFactory = new JapaneseTokenizerFactory(TEST_VERSION_CURRENT_MAP()); tokenizerFactory.inform(new StringMockResourceLoader("")); TokenStream tokenStream = tokenizerFactory.create(new StringReader("先ほどベルリンから来ました。")); - JapaneseReadingFormFilterFactory filterFactory = new JapaneseReadingFormFilterFactory(); + JapaneseReadingFormFilterFactory filterFactory = new JapaneseReadingFormFilterFactory(TEST_VERSION_CURRENT_MAP()); assertTokenStreamContents(filterFactory.create(tokenStream), new String[] { "サキ", "ホド", "ベルリン", "カラ", "キ", "マシ", "タ" } ); Index: lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseBaseFormFilterFactory.java =================================================================== --- lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseBaseFormFilterFactory.java (revision 1365868) 
+++ lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseBaseFormFilterFactory.java (working copy) @@ -30,13 +30,10 @@ */ public class TestJapaneseBaseFormFilterFactory extends BaseTokenStreamTestCase { public void testBasics() throws IOException { - JapaneseTokenizerFactory tokenizerFactory = new JapaneseTokenizerFactory(); - tokenizerFactory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map args = Collections.emptyMap(); - tokenizerFactory.init(args); + JapaneseTokenizerFactory tokenizerFactory = new JapaneseTokenizerFactory(TEST_VERSION_CURRENT_MAP()); tokenizerFactory.inform(new StringMockResourceLoader("")); TokenStream ts = tokenizerFactory.create(new StringReader("それはまだ実験段階にあります")); - JapaneseBaseFormFilterFactory factory = new JapaneseBaseFormFilterFactory(); + JapaneseBaseFormFilterFactory factory = new JapaneseBaseFormFilterFactory(TEST_VERSION_CURRENT_MAP()); ts = factory.create(ts); assertTokenStreamContents(ts, new String[] { "それ", "は", "まだ", "実験", "段階", "に", "ある", "ます" } Index: lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseTokenizerFactory.java =================================================================== --- lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseTokenizerFactory.java (revision 1365868) +++ lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseTokenizerFactory.java (working copy) @@ -31,10 +31,7 @@ */ public class TestJapaneseTokenizerFactory extends BaseTokenStreamTestCase { public void testSimple() throws IOException { - JapaneseTokenizerFactory factory = new JapaneseTokenizerFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map args = Collections.emptyMap(); - factory.init(args); + JapaneseTokenizerFactory factory = new JapaneseTokenizerFactory(TEST_VERSION_CURRENT_MAP()); factory.inform(new StringMockResourceLoader("")); TokenStream ts = factory.create(new StringReader("これは本ではない")); assertTokenStreamContents(ts, 
@@ -48,10 +45,7 @@ * Test that search mode is enabled and working by default */ public void testDefaults() throws IOException { - JapaneseTokenizerFactory factory = new JapaneseTokenizerFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map args = Collections.emptyMap(); - factory.init(args); + JapaneseTokenizerFactory factory = new JapaneseTokenizerFactory(TEST_VERSION_CURRENT_MAP()); factory.inform(new StringMockResourceLoader("")); TokenStream ts = factory.create(new StringReader("シニアソフトウェアエンジニア")); assertTokenStreamContents(ts, @@ -63,10 +57,9 @@ * Test mode parameter: specifying normal mode */ public void testMode() throws IOException { - JapaneseTokenizerFactory factory = new JapaneseTokenizerFactory(); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("mode", "normal"); - factory.init(args); + JapaneseTokenizerFactory factory = new JapaneseTokenizerFactory(args); factory.inform(new StringMockResourceLoader("")); TokenStream ts = factory.create(new StringReader("シニアソフトウェアエンジニア")); assertTokenStreamContents(ts, @@ -84,10 +77,9 @@ "関西国際空港,関西 国際 空港,カンサイ コクサイ クウコウ,テスト名詞\n" + "# Custom reading for sumo wrestler\n" + "朝青龍,朝青龍,アサショウリュウ,カスタム人名\n"; - JapaneseTokenizerFactory factory = new JapaneseTokenizerFactory(); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("userDictionary", "userdict.txt"); - factory.init(args); + JapaneseTokenizerFactory factory = new JapaneseTokenizerFactory(args); factory.inform(new StringMockResourceLoader(userDict)); TokenStream ts = factory.create(new StringReader("関西国際空港に行った")); assertTokenStreamContents(ts, @@ -99,10 +91,9 @@ * Test preserving punctuation */ public void testPreservePunctuation() throws IOException { - JapaneseTokenizerFactory factory = new JapaneseTokenizerFactory(); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("discardPunctuation", "false"); - factory.init(args); + JapaneseTokenizerFactory factory = new 
JapaneseTokenizerFactory(args); factory.inform(new StringMockResourceLoader("")); TokenStream ts = factory.create( new StringReader("今ノルウェーにいますが、来週の頭日本に戻ります。楽しみにしています!お寿司が食べたいな。。。") Index: lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseKatakanaStemFilterFactory.java =================================================================== --- lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseKatakanaStemFilterFactory.java (revision 1365868) +++ lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseKatakanaStemFilterFactory.java (working copy) @@ -30,16 +30,12 @@ */ public class TestJapaneseKatakanaStemFilterFactory extends BaseTokenStreamTestCase { public void testKatakanaStemming() throws IOException { - JapaneseTokenizerFactory tokenizerFactory = new JapaneseTokenizerFactory(); - Map tokenizerArgs = Collections.emptyMap(); - tokenizerFactory.init(tokenizerArgs); + JapaneseTokenizerFactory tokenizerFactory = new JapaneseTokenizerFactory(TEST_VERSION_CURRENT_MAP()); tokenizerFactory.inform(new StringMockResourceLoader("")); TokenStream tokenStream = tokenizerFactory.create( new StringReader("明後日パーティーに行く予定がある。図書館で資料をコピーしました。") ); - JapaneseKatakanaStemFilterFactory filterFactory = new JapaneseKatakanaStemFilterFactory(); - Map filterArgs = Collections.emptyMap(); - filterFactory.init(filterArgs); + JapaneseKatakanaStemFilterFactory filterFactory = new JapaneseKatakanaStemFilterFactory(TEST_VERSION_CURRENT_MAP()); assertTokenStreamContents(filterFactory.create(tokenStream), new String[]{ "明後日", "パーティ", "に", "行く", "予定", "が", "ある", // パーティー should be stemmed "図書館", "で", "資料", "を", "コピー", "し", "まし", "た"} // コピー should not be stemmed Index: lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseIterationMarkCharFilterFactory.java =================================================================== --- 
lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseIterationMarkCharFilterFactory.java (revision 1365868) +++ lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapaneseIterationMarkCharFilterFactory.java (working copy) @@ -35,21 +35,17 @@ public void testIterationMarksWithKeywordTokenizer() throws IOException { final String text = "時々馬鹿々々しいところゞゝゝミスヾ"; - JapaneseIterationMarkCharFilterFactory filterFactory = new JapaneseIterationMarkCharFilterFactory(); + JapaneseIterationMarkCharFilterFactory filterFactory = new JapaneseIterationMarkCharFilterFactory(TEST_VERSION_CURRENT_MAP()); CharFilter filter = filterFactory.create(new StringReader(text)); TokenStream tokenStream = new MockTokenizer(filter, MockTokenizer.KEYWORD, false); assertTokenStreamContents(tokenStream, new String[]{"時時馬鹿馬鹿しいところどころミスズ"}); } public void testIterationMarksWithJapaneseTokenizer() throws IOException { - JapaneseTokenizerFactory tokenizerFactory = new JapaneseTokenizerFactory(); - Map tokenizerArgs = Collections.emptyMap(); - tokenizerFactory.init(tokenizerArgs); + JapaneseTokenizerFactory tokenizerFactory = new JapaneseTokenizerFactory(TEST_VERSION_CURRENT_MAP()); tokenizerFactory.inform(new StringMockResourceLoader("")); - JapaneseIterationMarkCharFilterFactory filterFactory = new JapaneseIterationMarkCharFilterFactory(); - Map filterArgs = Collections.emptyMap(); - filterFactory.init(filterArgs); + JapaneseIterationMarkCharFilterFactory filterFactory = new JapaneseIterationMarkCharFilterFactory(TEST_VERSION_CURRENT_MAP()); CharFilter filter = filterFactory.create( new StringReader("時々馬鹿々々しいところゞゝゝミスヾ") @@ -59,17 +55,14 @@ } public void testKanjiOnlyIterationMarksWithJapaneseTokenizer() throws IOException { - JapaneseTokenizerFactory tokenizerFactory = new JapaneseTokenizerFactory(); - Map tokenizerArgs = Collections.emptyMap(); - tokenizerFactory.init(tokenizerArgs); + JapaneseTokenizerFactory tokenizerFactory = new 
JapaneseTokenizerFactory(TEST_VERSION_CURRENT_MAP()); tokenizerFactory.inform(new StringMockResourceLoader("")); - JapaneseIterationMarkCharFilterFactory filterFactory = new JapaneseIterationMarkCharFilterFactory(); - Map filterArgs = new HashMap(); + Map filterArgs = TEST_VERSION_CURRENT_MAP(); filterArgs.put("normalizeKanji", "true"); filterArgs.put("normalizeKana", "false"); - filterFactory.init(filterArgs); - + JapaneseIterationMarkCharFilterFactory filterFactory = new JapaneseIterationMarkCharFilterFactory(filterArgs); + CharFilter filter = filterFactory.create( new StringReader("時々馬鹿々々しいところゞゝゝミスヾ") ); @@ -78,16 +71,13 @@ } public void testKanaOnlyIterationMarksWithJapaneseTokenizer() throws IOException { - JapaneseTokenizerFactory tokenizerFactory = new JapaneseTokenizerFactory(); - Map tokenizerArgs = Collections.emptyMap(); - tokenizerFactory.init(tokenizerArgs); + JapaneseTokenizerFactory tokenizerFactory = new JapaneseTokenizerFactory(TEST_VERSION_CURRENT_MAP()); tokenizerFactory.inform(new StringMockResourceLoader("")); - JapaneseIterationMarkCharFilterFactory filterFactory = new JapaneseIterationMarkCharFilterFactory(); - Map filterArgs = new HashMap(); + Map filterArgs = TEST_VERSION_CURRENT_MAP(); filterArgs.put("normalizeKanji", "false"); filterArgs.put("normalizeKana", "true"); - filterFactory.init(filterArgs); + JapaneseIterationMarkCharFilterFactory filterFactory = new JapaneseIterationMarkCharFilterFactory(filterArgs); CharFilter filter = filterFactory.create( new StringReader("時々馬鹿々々しいところゞゝゝミスヾ") Index: lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/StringMockResourceLoader.java =================================================================== --- lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/StringMockResourceLoader.java (revision 1365868) +++ lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/StringMockResourceLoader.java (working copy) @@ -20,6 +20,7 @@ import java.io.ByteArrayInputStream; 
import java.io.IOException; import java.io.InputStream; +import java.lang.reflect.Constructor; import java.util.Arrays; import java.util.List; @@ -38,10 +39,11 @@ } // TODO: do this subpackages thing... wtf is that? - public T newInstance(String cname, Class expectedType, String... subpackages) { + public T newInstance(String cname, Class expectedType, Class[] argTypes, Object[] args, String... subpackages) { try { Class clazz = Class.forName(cname).asSubclass(expectedType); - return clazz.newInstance(); + Constructor constructor = clazz.getConstructor(argTypes); + return constructor.newInstance(args); } catch (Exception e) { throw new RuntimeException(e); } Index: lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapanesePartOfSpeechStopFilterFactory.java =================================================================== --- lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapanesePartOfSpeechStopFilterFactory.java (revision 1365868) +++ lucene/analysis/kuromoji/src/test/org/apache/lucene/analysis/ja/TestJapanesePartOfSpeechStopFilterFactory.java (working copy) @@ -35,17 +35,12 @@ "# verb-main:\n" + "動詞-自立\n"; - JapaneseTokenizerFactory tokenizerFactory = new JapaneseTokenizerFactory(); - tokenizerFactory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map tokenizerArgs = Collections.emptyMap(); - tokenizerFactory.init(tokenizerArgs); + JapaneseTokenizerFactory tokenizerFactory = new JapaneseTokenizerFactory(TEST_VERSION_CURRENT_MAP()); tokenizerFactory.inform(new StringMockResourceLoader("")); TokenStream ts = tokenizerFactory.create(new StringReader("私は制限スピードを超える。")); - JapanesePartOfSpeechStopFilterFactory factory = new JapanesePartOfSpeechStopFilterFactory(); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("tags", "stoptags.txt"); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(args); + JapanesePartOfSpeechStopFilterFactory factory = new 
JapanesePartOfSpeechStopFilterFactory(args); factory.inform(new StringMockResourceLoader(tags)); ts = factory.create(ts); assertTokenStreamContents(ts, Index: lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseReadingFormFilterFactory.java =================================================================== --- lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseReadingFormFilterFactory.java (revision 1365868) +++ lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseReadingFormFilterFactory.java (working copy) @@ -39,9 +39,8 @@ private static final String ROMAJI_PARAM = "useRomaji"; private boolean useRomaji; - @Override - public void init(Map args) { - super.init(args); + public JapaneseReadingFormFilterFactory(Map args) { + super(args); useRomaji = getBoolean(ROMAJI_PARAM, false); } Index: lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseBaseFormFilterFactory.java =================================================================== --- lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseBaseFormFilterFactory.java (revision 1365868) +++ lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseBaseFormFilterFactory.java (working copy) @@ -21,6 +21,8 @@ import org.apache.lucene.analysis.ja.JapaneseBaseFormFilter; import org.apache.lucene.analysis.util.TokenFilterFactory; +import java.util.Map; + /** * Factory for {@link org.apache.lucene.analysis.ja.JapaneseBaseFormFilter}. *
@@ -34,6 +36,10 @@
  */
 public class JapaneseBaseFormFilterFactory extends TokenFilterFactory {
 
+  public JapaneseBaseFormFilterFactory(Map args) {
+    super(args);
+  }
+
   @Override
   public TokenStream create(TokenStream input) {
     return new JapaneseBaseFormFilter(input);
Index: lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizerFactory.java
===================================================================
--- lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizerFactory.java	(revision 1365868)
+++ lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseTokenizerFactory.java	(working copy)
@@ -67,6 +67,10 @@
 
   private boolean discardPunctuation;
 
+  public JapaneseTokenizerFactory(Map args) {
+    super(args);
+  }
+
   @Override
   public void inform(ResourceLoader loader) throws IOException {
     mode = getMode(args);
Index: lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseKatakanaStemFilterFactory.java
===================================================================
--- lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseKatakanaStemFilterFactory.java	(revision 1365868)
+++ lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseKatakanaStemFilterFactory.java	(working copy)
@@ -39,9 +39,8 @@
   private static final String MINIMUM_LENGTH_PARAM = "minimumLength";
   private int minimumLength;
   
-  @Override
-  public void init(Map args) {
-    super.init(args);
+  public JapaneseKatakanaStemFilterFactory(Map args) {
+    super(args);
     minimumLength = getInt(MINIMUM_LENGTH_PARAM, JapaneseKatakanaStemFilter.DEFAULT_MINIMUM_LENGTH);
     if (minimumLength < 2) {
       throw new IllegalArgumentException("Illegal " + MINIMUM_LENGTH_PARAM + " " + minimumLength + " (must be 2 or greater)");
Index: lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseIterationMarkCharFilterFactory.java
===================================================================
--- lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseIterationMarkCharFilterFactory.java	(revision 1365868)
+++ lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapaneseIterationMarkCharFilterFactory.java	(working copy)
@@ -51,9 +51,8 @@
     return new JapaneseIterationMarkCharFilter(input, normalizeKanji, normalizeKana);
   }
 
-  @Override
-  public void init(Map args) {
-    super.init(args);
+  public JapaneseIterationMarkCharFilterFactory(Map args) {
+    super(args);
     normalizeKanji = getBoolean(NORMALIZE_KANJI_PARAM, JapaneseIterationMarkCharFilter.NORMALIZE_KANJI_DEFAULT);
     normalizeKana = getBoolean(NORMALIZE_KANA_PARAM, JapaneseIterationMarkCharFilter.NORMALIZE_KANA_DEFAULT);
   }
Index: lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapanesePartOfSpeechStopFilterFactory.java
===================================================================
--- lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapanesePartOfSpeechStopFilterFactory.java	(revision 1365868)
+++ lucene/analysis/kuromoji/src/java/org/apache/lucene/analysis/ja/JapanesePartOfSpeechStopFilterFactory.java	(working copy)
@@ -19,6 +19,7 @@
 
 import java.io.IOException;
 import java.util.HashSet;
+import java.util.Map;
 import java.util.Set;
 
 import org.apache.lucene.analysis.TokenStream;
@@ -42,6 +43,10 @@
   private boolean enablePositionIncrements;
   private Set stopTags;
 
+  public JapanesePartOfSpeechStopFilterFactory(Map args) {
+    super(args);
+  }
+
   public void inform(ResourceLoader loader) throws IOException {
     String stopTagFiles = args.get("tags");
     enablePositionIncrements = getBoolean("enablePositionIncrements", false);
Index: lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUFoldingFilterFactory.java
===================================================================
--- lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUFoldingFilterFactory.java	(revision 1365868)
+++ lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUFoldingFilterFactory.java	(working copy)
@@ -31,8 +31,7 @@
   /** basic tests to ensure the folding is working */
   public void test() throws Exception {
     Reader reader = new StringReader("Résumé");
-    ICUFoldingFilterFactory factory = new ICUFoldingFilterFactory();
-    factory.setLuceneMatchVersion(TEST_VERSION_CURRENT);
+    ICUFoldingFilterFactory factory = new ICUFoldingFilterFactory(TEST_VERSION_CURRENT_MAP());
     Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader);
     TokenStream stream = factory.create(tokenizer);
     assertTokenStreamContents(stream, new String[] { "resume" });
Index: lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2FilterFactory.java
===================================================================
--- lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2FilterFactory.java	(revision 1365868)
+++ lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2FilterFactory.java	(working copy)
@@ -33,10 +33,7 @@
   /** Test nfkc_cf defaults */
   public void testDefaults() throws Exception {
     Reader reader = new StringReader("This is a Test");
-    ICUNormalizer2FilterFactory factory = new ICUNormalizer2FilterFactory();
-    factory.setLuceneMatchVersion(TEST_VERSION_CURRENT);
-    Map args = Collections.emptyMap();
-    factory.init(args);
+    ICUNormalizer2FilterFactory factory = new ICUNormalizer2FilterFactory(TEST_VERSION_CURRENT_MAP());
     Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader);
     TokenStream stream = factory.create(tokenizer);
     assertTokenStreamContents(stream, new String[] { "this", "is", "a", "test" });
Index: lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUTransformFilterFactory.java
===================================================================
--- lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUTransformFilterFactory.java	(revision 1365868)
+++ lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUTransformFilterFactory.java	(working copy)
@@ -33,10 +33,9 @@
   /** ensure the transform is working */
   public void test() throws Exception {
     Reader reader = new StringReader("簡化字");
-    ICUTransformFilterFactory factory = new ICUTransformFilterFactory();
-    Map args = new HashMap();
+    Map args = TEST_VERSION_CURRENT_MAP();
     args.put("id", "Traditional-Simplified");
-    factory.init(args);
+    ICUTransformFilterFactory factory = new ICUTransformFilterFactory(args);
     Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader);
     TokenStream stream = factory.create(tokenizer);
     assertTokenStreamContents(stream, new String[] { "简化字" });
@@ -46,10 +45,9 @@
   public void testDirection() throws Exception {
     // forward
     Reader reader = new StringReader("Российская Федерация");
-    ICUTransformFilterFactory factory = new ICUTransformFilterFactory();
-    Map args = new HashMap();
+    Map args = TEST_VERSION_CURRENT_MAP();
     args.put("id", "Cyrillic-Latin");
-    factory.init(args);
+    ICUTransformFilterFactory factory = new ICUTransformFilterFactory(args);
     Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader);
     TokenStream stream = factory.create(tokenizer);
     assertTokenStreamContents(stream, new String[] { "Rossijskaâ",  "Federaciâ" });
@@ -57,7 +55,7 @@
     // backward (invokes Latin-Cyrillic)
     reader = new StringReader("Rossijskaâ Federaciâ");
     args.put("direction", "reverse");
-    factory.init(args);
+    factory = new ICUTransformFilterFactory(args);
     tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader);
     stream = factory.create(tokenizer);
     assertTokenStreamContents(stream, new String[] { "Российская", "Федерация" });
Index: lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizerFactory.java
===================================================================
--- lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizerFactory.java	(revision 1365868)
+++ lucene/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizerFactory.java	(working copy)
@@ -27,7 +27,7 @@
 public class TestICUTokenizerFactory extends BaseTokenStreamTestCase {
   public void testMixedText() throws Exception {
     Reader reader = new StringReader("การที่ได้ต้องแสดงว่างานดี  This is a test ກວ່າດອກ");
-    ICUTokenizerFactory factory = new ICUTokenizerFactory();
+    ICUTokenizerFactory factory = new ICUTokenizerFactory(TEST_VERSION_CURRENT_MAP());
     TokenStream stream = factory.create(reader);
     assertTokenStreamContents(stream,
         new String[] { "การ", "ที่", "ได้", "ต้อง", "แสดง", "ว่า", "งาน", "ดี",
Index: lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUFoldingFilterFactory.java
===================================================================
--- lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUFoldingFilterFactory.java	(revision 1365868)
+++ lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUFoldingFilterFactory.java	(working copy)
@@ -6,6 +6,8 @@
 import org.apache.lucene.analysis.util.MultiTermAwareComponent;
 import org.apache.lucene.analysis.util.TokenFilterFactory;
 
+import java.util.Map;
+
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -26,6 +28,10 @@
 /** Factory for {@link ICUFoldingFilter} */
 public class ICUFoldingFilterFactory extends TokenFilterFactory implements MultiTermAwareComponent {
 
+  public ICUFoldingFilterFactory(Map args) {
+    super(args);
+  }
+
   @Override
   public TokenStream create(TokenStream input) {
     return new ICUFoldingFilter(input);
Index: lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUNormalizer2FilterFactory.java
===================================================================
--- lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUNormalizer2FilterFactory.java	(revision 1365868)
+++ lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUNormalizer2FilterFactory.java	(working copy)
@@ -49,9 +49,8 @@
   private Normalizer2 normalizer;
 
   // TODO: support custom normalization
-  @Override
-  public void init(Map args) {
-    super.init(args);
+  public ICUNormalizer2FilterFactory(Map args) {
+    super(args);
     String name = args.get("name");
     if (name == null)
       name = "nfkc_cf";
Index: lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUTransformFilterFactory.java
===================================================================
--- lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUTransformFilterFactory.java	(revision 1365868)
+++ lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/ICUTransformFilterFactory.java	(working copy)
@@ -41,9 +41,8 @@
   private Transliterator transliterator;
   
   // TODO: add support for custom rules
-  @Override
-  public void init(Map args) {
-    super.init(args);
+  public ICUTransformFilterFactory(Map args) {
+    super(args);
     String id = args.get("id");
     if (id == null) {
       throw new IllegalArgumentException("id is required.");
Index: lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizerFactory.java
===================================================================
--- lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizerFactory.java	(revision 1365868)
+++ lucene/analysis/icu/src/java/org/apache/lucene/analysis/icu/segmentation/ICUTokenizerFactory.java	(working copy)
@@ -18,6 +18,7 @@
  */
 
 import java.io.Reader;
+import java.util.Map;
 
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.icu.segmentation.ICUTokenizer;
@@ -25,6 +26,11 @@
 
 /** Factory for {@link ICUTokenizer} */
 public class ICUTokenizerFactory extends TokenizerFactory {
+
+  public ICUTokenizerFactory(Map args) {
+    super(args);
+  }
+
   // TODO: add support for custom configs
   @Override
   public Tokenizer create(Reader input) {
Index: lucene/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseFactories.java
===================================================================
--- lucene/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseFactories.java	(revision 1365868)
+++ lucene/analysis/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseFactories.java	(working copy)
@@ -33,7 +33,7 @@
   public void testSimple() throws Exception {
     String sentence = "我购买了道具和服装。";
     WhitespaceTokenizer ws = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(sentence));
-    SmartChineseWordTokenFilterFactory factory = new SmartChineseWordTokenFilterFactory();
+    SmartChineseWordTokenFilterFactory factory = new SmartChineseWordTokenFilterFactory(TEST_VERSION_CURRENT_MAP());
     TokenStream ts = factory.create(ws);
     // TODO: fix smart chinese to not emit punctuation tokens
     // at the moment: you have to clean up with WDF, or use the stoplist, etc
@@ -44,9 +44,9 @@
   /** Test showing the behavior with whitespace */
   public void testTokenizer() throws Exception {
     String sentence = "我购买了道具和服装。我购买了道具和服装。";
-    SmartChineseSentenceTokenizerFactory tokenizerFactory = new SmartChineseSentenceTokenizerFactory();
+    SmartChineseSentenceTokenizerFactory tokenizerFactory = new SmartChineseSentenceTokenizerFactory(TEST_VERSION_CURRENT_MAP());
     Tokenizer tokenizer = tokenizerFactory.create(new StringReader(sentence));
-    SmartChineseWordTokenFilterFactory factory = new SmartChineseWordTokenFilterFactory();
+    SmartChineseWordTokenFilterFactory factory = new SmartChineseWordTokenFilterFactory(TEST_VERSION_CURRENT_MAP());
     TokenStream ts = factory.create(tokenizer);
     // TODO: fix smart chinese to not emit punctuation tokens
     // at the moment: you have to clean up with WDF, or use the stoplist, etc
Index: lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/SmartChineseWordTokenFilterFactory.java
===================================================================
--- lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/SmartChineseWordTokenFilterFactory.java	(revision 1365868)
+++ lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/SmartChineseWordTokenFilterFactory.java	(working copy)
@@ -22,6 +22,8 @@
 import org.apache.lucene.analysis.cn.smart.WordTokenFilter;
 import org.apache.lucene.analysis.util.TokenFilterFactory;
 
+import java.util.Map;
+
 /**
  * Factory for the SmartChineseAnalyzer {@link WordTokenFilter}
  * 

@@ -32,6 +34,11 @@ * @lucene.experimental */ public class SmartChineseWordTokenFilterFactory extends TokenFilterFactory { + + public SmartChineseWordTokenFilterFactory(Map args) { + super(args); + } + public TokenFilter create(TokenStream input) { return new WordTokenFilter(input); } Index: lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/SmartChineseSentenceTokenizerFactory.java =================================================================== --- lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/SmartChineseSentenceTokenizerFactory.java (revision 1365868) +++ lucene/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/SmartChineseSentenceTokenizerFactory.java (working copy) @@ -18,6 +18,7 @@ */ import java.io.Reader; +import java.util.Map; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.cn.smart.SentenceTokenizer; @@ -28,6 +29,11 @@ * @lucene.experimental */ public class SmartChineseSentenceTokenizerFactory extends TokenizerFactory { + + public SmartChineseSentenceTokenizerFactory(Map args) { + super(args); + } + public Tokenizer create(Reader input) { return new SentenceTokenizer(input); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/commongrams/TestCommonGramsFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/commongrams/TestCommonGramsFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/commongrams/TestCommonGramsFilterFactory.java (working copy) @@ -41,12 +41,10 @@ public void testInform() throws Exception { ResourceLoader loader = new ResourceAsStreamResourceLoader(TestStopFilter.class); assertTrue("loader is null and it shouldn't be", loader != null); - CommonGramsFilterFactory factory = new CommonGramsFilterFactory(); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("words", 
"stop-1.txt"); args.put("ignoreCase", "true"); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(args); + CommonGramsFilterFactory factory = new CommonGramsFilterFactory(args); factory.inform(loader); CharArraySet words = factory.getCommonWords(); assertTrue("words is null and it shouldn't be", words != null); @@ -55,10 +53,8 @@ assertTrue(factory.isIgnoreCase() + " does not equal: " + true, factory .isIgnoreCase() == true); - factory = new CommonGramsFilterFactory(); args.put("words", "stop-1.txt, stop-2.txt"); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(args); + factory = new CommonGramsFilterFactory(args); factory.inform(loader); words = factory.getCommonWords(); assertTrue("words is null and it shouldn't be", words != null); @@ -67,11 +63,9 @@ assertTrue(factory.isIgnoreCase() + " does not equal: " + true, factory .isIgnoreCase() == true); - factory = new CommonGramsFilterFactory(); args.put("words", "stop-snowball.txt"); args.put("format", "snowball"); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(args); + factory = new CommonGramsFilterFactory(args); factory.inform(loader); words = factory.getCommonWords(); assertEquals(8, words.size()); @@ -91,10 +85,7 @@ public void testDefaults() throws Exception { ResourceLoader loader = new ResourceAsStreamResourceLoader(TestStopFilter.class); assertTrue("loader is null and it shouldn't be", loader != null); - CommonGramsFilterFactory factory = new CommonGramsFilterFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map args = Collections.emptyMap(); - factory.init(args); + CommonGramsFilterFactory factory = new CommonGramsFilterFactory(TEST_VERSION_CURRENT_MAP()); factory.inform(loader); CharArraySet words = factory.getCommonWords(); assertTrue("words is null and it shouldn't be", words != null); Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/commongrams/TestCommonGramsQueryFilterFactory.java 
=================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/commongrams/TestCommonGramsQueryFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/commongrams/TestCommonGramsQueryFilterFactory.java (working copy) @@ -41,12 +41,10 @@ public void testInform() throws Exception { ResourceLoader loader = new ResourceAsStreamResourceLoader(TestStopFilter.class); assertTrue("loader is null and it shouldn't be", loader != null); - CommonGramsQueryFilterFactory factory = new CommonGramsQueryFilterFactory(); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("words", "stop-1.txt"); args.put("ignoreCase", "true"); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(args); + CommonGramsQueryFilterFactory factory = new CommonGramsQueryFilterFactory(args); factory.inform(loader); CharArraySet words = factory.getCommonWords(); assertTrue("words is null and it shouldn't be", words != null); @@ -55,10 +53,8 @@ assertTrue(factory.isIgnoreCase() + " does not equal: " + true, factory .isIgnoreCase() == true); - factory = new CommonGramsQueryFilterFactory(); args.put("words", "stop-1.txt, stop-2.txt"); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(args); + factory = new CommonGramsQueryFilterFactory(args); factory.inform(loader); words = factory.getCommonWords(); assertTrue("words is null and it shouldn't be", words != null); @@ -67,11 +63,9 @@ assertTrue(factory.isIgnoreCase() + " does not equal: " + true, factory .isIgnoreCase() == true); - factory = new CommonGramsQueryFilterFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); args.put("words", "stop-snowball.txt"); args.put("format", "snowball"); - factory.init(args); + factory = new CommonGramsQueryFilterFactory(args); factory.inform(loader); words = factory.getCommonWords(); assertEquals(8, words.size()); @@ -91,10 +85,7 @@ public 
void testDefaults() throws Exception { ResourceLoader loader = new ResourceAsStreamResourceLoader(TestStopFilter.class); assertTrue("loader is null and it shouldn't be", loader != null); - CommonGramsQueryFilterFactory factory = new CommonGramsQueryFilterFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map args = Collections.emptyMap(); - factory.init(args); + CommonGramsQueryFilterFactory factory = new CommonGramsQueryFilterFactory(TEST_VERSION_CURRENT_MAP()); factory.inform(loader); CharArraySet words = factory.getCommonWords(); assertTrue("words is null and it shouldn't be", words != null); Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestHTMLStripCharFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestHTMLStripCharFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestHTMLStripCharFilterFactory.java (working copy) @@ -34,10 +34,9 @@ // 11111111112 // 012345678901234567890 final String text = "this is only a test."; - HTMLStripCharFilterFactory factory = new HTMLStripCharFilterFactory(); Map args = new HashMap(); args.put("escapedTags", "a, Title"); - factory.init(args); + HTMLStripCharFilterFactory factory = new HTMLStripCharFilterFactory(args); CharFilter cs = factory.create(new StringReader(text)); TokenStream ts = new MockTokenizer(cs, MockTokenizer.WHITESPACE, false); assertTokenStreamContents(ts, @@ -50,9 +49,7 @@ // 11111111112222222222333333333344 // 012345678901234567890123456789012345678901 final String text = "this is only a test."; - HTMLStripCharFilterFactory factory = new HTMLStripCharFilterFactory(); - Map args = new HashMap(); - factory.init(args); + HTMLStripCharFilterFactory factory = new HTMLStripCharFilterFactory(new HashMap()); CharFilter cs = factory.create(new StringReader(text)); TokenStream ts = new MockTokenizer(cs, 
MockTokenizer.WHITESPACE, false); assertTokenStreamContents(ts, @@ -65,10 +62,9 @@ // 11111111112222222222333333333344 // 012345678901234567890123456789012345678901 final String text = "this is only a test."; - HTMLStripCharFilterFactory factory = new HTMLStripCharFilterFactory(); Map args = new HashMap(); args.put("escapedTags", "U i"); - factory.init(args); + HTMLStripCharFilterFactory factory = new HTMLStripCharFilterFactory(args); CharFilter cs = factory.create(new StringReader(text)); TokenStream ts = new MockTokenizer(cs, MockTokenizer.WHITESPACE, false); assertTokenStreamContents(ts, @@ -81,10 +77,9 @@ // 11111111112222222222333333333344 // 012345678901234567890123456789012345678901 final String text = "this is only a test."; - HTMLStripCharFilterFactory factory = new HTMLStripCharFilterFactory(); Map args = new HashMap(); args.put("escapedTags", ",, , "); - factory.init(args); + HTMLStripCharFilterFactory factory = new HTMLStripCharFilterFactory(args); CharFilter cs = factory.create(new StringReader(text)); TokenStream ts = new MockTokenizer(cs, MockTokenizer.WHITESPACE, false); assertTokenStreamContents(ts, @@ -97,10 +92,9 @@ // 11111111112222222222333333333344 // 012345678901234567890123456789012345678901 final String text = "this is only a test."; - HTMLStripCharFilterFactory factory = new HTMLStripCharFilterFactory(); Map args = new HashMap(); args.put("escapedTags", ""); - factory.init(args); + HTMLStripCharFilterFactory factory = new HTMLStripCharFilterFactory(args); CharFilter cs = factory.create(new StringReader(text)); TokenStream ts = new MockTokenizer(cs, MockTokenizer.WHITESPACE, false); assertTokenStreamContents(ts, @@ -113,10 +107,9 @@ // 11111111112222222222333333333344 // 012345678901234567890123456789012345678901 final String text = "this is only a test."; - HTMLStripCharFilterFactory factory = new HTMLStripCharFilterFactory(); Map args = new HashMap(); args.put("escapedTags", ", B\r\n\t"); - factory.init(args); + HTMLStripCharFilterFactory 
factory = new HTMLStripCharFilterFactory(args); CharFilter cs = factory.create(new StringReader(text)); TokenStream ts = new MockTokenizer(cs, MockTokenizer.WHITESPACE, false); assertTokenStreamContents(ts, Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestMappingCharFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestMappingCharFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/charfilter/TestMappingCharFilterFactory.java (working copy) @@ -17,12 +17,14 @@ * limitations under the License. */ +import java.util.HashMap; + import org.apache.lucene.util.LuceneTestCase; public class TestMappingCharFilterFactory extends LuceneTestCase { public void testParseString() throws Exception { - MappingCharFilterFactory f = new MappingCharFilterFactory(); + MappingCharFilterFactory f = new MappingCharFilterFactory(new HashMap()); try { f.parseString( "\\" ); Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/compound/TestDictionaryCompoundWordTokenFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/compound/TestDictionaryCompoundWordTokenFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/compound/TestDictionaryCompoundWordTokenFilterFactory.java (working copy) @@ -39,12 +39,10 @@ public void testDecompounding() throws Exception { Reader reader = new StringReader("I like to play softball"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); - DictionaryCompoundWordTokenFilterFactory factory = new DictionaryCompoundWordTokenFilterFactory(); ResourceLoader loader = new ResourceAsStreamResourceLoader(getClass()); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); 
args.put("dictionary", "compoundDictionary.txt"); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(args); + DictionaryCompoundWordTokenFilterFactory factory = new DictionaryCompoundWordTokenFilterFactory(args); factory.inform(loader); TokenStream stream = factory.create(tokenizer); assertTokenStreamContents(stream, Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/compound/TestHyphenationCompoundWordTokenFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/compound/TestHyphenationCompoundWordTokenFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/compound/TestHyphenationCompoundWordTokenFilterFactory.java (working copy) @@ -39,13 +39,11 @@ public void testHyphenationWithDictionary() throws Exception { Reader reader = new StringReader("min veninde som er lidt af en læsehest"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); - HyphenationCompoundWordTokenFilterFactory factory = new HyphenationCompoundWordTokenFilterFactory(); ResourceLoader loader = new ResourceAsStreamResourceLoader(getClass()); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("hyphenator", "da_UTF8.xml"); args.put("dictionary", "da_compoundDictionary.txt"); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(args); + HyphenationCompoundWordTokenFilterFactory factory = new HyphenationCompoundWordTokenFilterFactory(args); factory.inform(loader); TokenStream stream = factory.create(tokenizer); @@ -63,14 +61,12 @@ public void testHyphenationOnly() throws Exception { Reader reader = new StringReader("basketballkurv"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); - HyphenationCompoundWordTokenFilterFactory factory = new HyphenationCompoundWordTokenFilterFactory(); ResourceLoader loader = new 
ResourceAsStreamResourceLoader(getClass()); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("hyphenator", "da_UTF8.xml"); args.put("minSubwordSize", "2"); args.put("maxSubwordSize", "4"); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(args); + HyphenationCompoundWordTokenFilterFactory factory = new HyphenationCompoundWordTokenFilterFactory(args); factory.inform(loader); TokenStream stream = factory.create(tokenizer); Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/fa/TestPersianNormalizationFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/fa/TestPersianNormalizationFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/fa/TestPersianNormalizationFilterFactory.java (working copy) @@ -35,7 +35,7 @@ public void testNormalization() throws Exception { Reader reader = new StringReader("های"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); - PersianNormalizationFilterFactory factory = new PersianNormalizationFilterFactory(); + PersianNormalizationFilterFactory factory = new PersianNormalizationFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(tokenizer); assertTokenStreamContents(stream, new String[] { "هاي" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianStemFilterFactory.java (working copy) @@ -35,7 +35,7 @@ public void testStemming() throws Exception { Reader reader = new StringReader("компютри"); Tokenizer tokenizer = new MockTokenizer(reader, 
MockTokenizer.WHITESPACE, false); - BulgarianStemFilterFactory factory = new BulgarianStemFilterFactory(); + BulgarianStemFilterFactory factory = new BulgarianStemFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(tokenizer); assertTokenStreamContents(stream, new String[] { "компютр" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestAllAnalyzersHaveFactories.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestAllAnalyzersHaveFactories.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestAllAnalyzersHaveFactories.java (working copy) @@ -27,6 +27,7 @@ import java.util.Map; import java.util.Set; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.CachingTokenFilter; import org.apache.lucene.analysis.CharFilter; import org.apache.lucene.analysis.EmptyTokenizer; @@ -117,11 +118,9 @@ String clazzName = c.getSimpleName(); assertTrue(clazzName.endsWith("Tokenizer")); String simpleName = clazzName.substring(0, clazzName.length() - 9); - TokenizerFactory instance = TokenizerFactory.forName(simpleName); - assertNotNull(instance); try { - instance.setLuceneMatchVersion(TEST_VERSION_CURRENT); - instance.init(Collections.emptyMap()); + TokenizerFactory instance = TokenizerFactory.forName(simpleName, BaseTokenStreamTestCase.TEST_VERSION_CURRENT_MAP()); + assertNotNull(instance); // TODO: provide fake ResourceLoader if (!(instance instanceof ResourceLoaderAware)) { assertSame(c, instance.create(new StringReader("")).getClass()); @@ -133,11 +132,9 @@ String clazzName = c.getSimpleName(); assertTrue(clazzName.endsWith("Filter")); String simpleName = clazzName.substring(0, clazzName.length() - (clazzName.endsWith("TokenFilter") ? 
11 : 6)); - TokenFilterFactory instance = TokenFilterFactory.forName(simpleName); - assertNotNull(instance); try { - instance.setLuceneMatchVersion(TEST_VERSION_CURRENT); - instance.init(Collections.emptyMap()); + TokenFilterFactory instance = TokenFilterFactory.forName(simpleName, BaseTokenStreamTestCase.TEST_VERSION_CURRENT_MAP()); + assertNotNull(instance); // TODO: provide fake ResourceLoader if (!(instance instanceof ResourceLoaderAware)) { Class createdClazz = instance.create(new KeywordTokenizer(new StringReader(""))).getClass(); @@ -153,11 +150,9 @@ String clazzName = c.getSimpleName(); assertTrue(clazzName.endsWith("CharFilter")); String simpleName = clazzName.substring(0, clazzName.length() - 10); - CharFilterFactory instance = CharFilterFactory.forName(simpleName); - assertNotNull(instance); try { - instance.setLuceneMatchVersion(TEST_VERSION_CURRENT); - instance.init(Collections.emptyMap()); + CharFilterFactory instance = CharFilterFactory.forName(simpleName, BaseTokenStreamTestCase.TEST_VERSION_CURRENT_MAP()); + assertNotNull(instance); // TODO: provide fake ResourceLoader if (!(instance instanceof ResourceLoaderAware)) { Class createdClazz = instance.create(new StringReader("")).getClass(); Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFactories.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFactories.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestFactories.java (working copy) @@ -57,7 +57,14 @@ } private void doTestTokenizer(String tokenizer) throws IOException { - TokenizerFactory factory = TokenizerFactory.forName(tokenizer); + TokenizerFactory factory; + try { + factory = TokenizerFactory.forName(tokenizer, TEST_VERSION_CURRENT_MAP()); + } catch (IllegalArgumentException iae) { + // its ok if we dont provide the right parameters to throw this + return; + } + if 
(initialize(factory)) { // we managed to fully create an instance. check a few more things: @@ -76,7 +83,14 @@ } private void doTestTokenFilter(String tokenfilter) throws IOException { - TokenFilterFactory factory = TokenFilterFactory.forName(tokenfilter); + TokenFilterFactory factory; + try { + factory = TokenFilterFactory.forName(tokenfilter, TEST_VERSION_CURRENT_MAP()); + } catch (IllegalArgumentException iae) { + // its ok if we dont provide the right parameters to throw this + return; + } + if (initialize(factory)) { // we managed to fully create an instance. check a few more things: @@ -95,7 +109,14 @@ } private void doTestCharFilter(String charfilter) throws IOException { - CharFilterFactory factory = CharFilterFactory.forName(charfilter); + CharFilterFactory factory; + try { + factory = CharFilterFactory.forName(charfilter, TEST_VERSION_CURRENT_MAP()); + } catch (IllegalArgumentException iae) { + // its ok if we dont provide the right parameters to throw this + return; + } + if (initialize(factory)) { // we managed to fully create an instance. 
check a few more things: @@ -115,14 +136,7 @@ /** tries to initialize a factory with no arguments */ private boolean initialize(AbstractAnalysisFactory factory) { - boolean success = false; - try { - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(Collections.emptyMap()); - success = true; - } catch (IllegalArgumentException ignored) { - // its ok if we dont provide the right parameters to throw this - } + boolean success = true; if (factory instanceof ResourceLoaderAware) { success = false; @@ -139,7 +153,7 @@ } // some silly classes just so we can use checkRandomData - private TokenizerFactory assertingTokenizer = new TokenizerFactory() { + private TokenizerFactory assertingTokenizer = new TokenizerFactory(TEST_VERSION_CURRENT_MAP()) { @Override public Tokenizer create(Reader input) { return new MockTokenizer(input); Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestTypeTokenFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestTypeTokenFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestTypeTokenFilterFactory.java (working copy) @@ -35,23 +35,20 @@ @Test public void testInform() throws Exception { ResourceLoader loader = new ResourceAsStreamResourceLoader(getClass()); - TypeTokenFilterFactory factory = new TypeTokenFilterFactory(); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("types", "stoptypes-1.txt"); args.put("enablePositionIncrements", "true"); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(args); + TypeTokenFilterFactory factory = new TypeTokenFilterFactory(args); factory.inform(loader); Set types = factory.getStopTypes(); assertTrue("types is null and it shouldn't be", types != null); assertTrue("types Size: " + types.size() + " is not: " + 2, types.size() == 2); 
assertTrue("enablePositionIncrements was set to true but not correctly parsed", factory.isEnablePositionIncrements()); - factory = new TypeTokenFilterFactory(); args.put("types", "stoptypes-1.txt, stoptypes-2.txt"); args.put("enablePositionIncrements", "false"); args.put("useWhitelist","true"); - factory.init(args); + factory = new TypeTokenFilterFactory(args); factory.inform(loader); types = factory.getStopTypes(); assertTrue("types is null and it shouldn't be", types != null); @@ -61,12 +58,10 @@ @Test public void testCreationWithBlackList() throws Exception { - TypeTokenFilterFactory typeTokenFilterFactory = new TypeTokenFilterFactory(); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("types", "stoptypes-1.txt, stoptypes-2.txt"); args.put("enablePositionIncrements", "false"); - typeTokenFilterFactory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - typeTokenFilterFactory.init(args); + TypeTokenFilterFactory typeTokenFilterFactory = new TypeTokenFilterFactory(args); NumericTokenStream input = new NumericTokenStream(); input.setIntValue(123); typeTokenFilterFactory.create(input); @@ -74,13 +69,11 @@ @Test public void testCreationWithWhiteList() throws Exception { - TypeTokenFilterFactory typeTokenFilterFactory = new TypeTokenFilterFactory(); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("types", "stoptypes-1.txt, stoptypes-2.txt"); args.put("enablePositionIncrements", "false"); args.put("useWhitelist","true"); - typeTokenFilterFactory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - typeTokenFilterFactory.init(args); + TypeTokenFilterFactory typeTokenFilterFactory = new TypeTokenFilterFactory(args); NumericTokenStream input = new NumericTokenStream(); input.setIntValue(123); typeTokenFilterFactory.create(input); @@ -89,11 +82,9 @@ @Test public void testMissingTypesParameter() throws Exception { try { - TypeTokenFilterFactory typeTokenFilterFactory = new TypeTokenFilterFactory(); - Map args = new HashMap(); 
+ Map args = TEST_VERSION_CURRENT_MAP(); args.put("enablePositionIncrements", "false"); - typeTokenFilterFactory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - typeTokenFilterFactory.init(args); + TypeTokenFilterFactory typeTokenFilterFactory = new TypeTokenFilterFactory(args); typeTokenFilterFactory.inform(new ResourceAsStreamResourceLoader(getClass())); fail("not supplying 'types' parameter should cause an IllegalArgumentException"); } catch (IllegalArgumentException e) { Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopFilterFactory.java (working copy) @@ -34,33 +34,27 @@ public void testInform() throws Exception { ResourceLoader loader = new ResourceAsStreamResourceLoader(getClass()); assertTrue("loader is null and it shouldn't be", loader != null); - StopFilterFactory factory = new StopFilterFactory(); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("words", "stop-1.txt"); args.put("ignoreCase", "true"); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(args); + StopFilterFactory factory = new StopFilterFactory(args); factory.inform(loader); CharArraySet words = factory.getStopWords(); assertTrue("words is null and it shouldn't be", words != null); assertTrue("words Size: " + words.size() + " is not: " + 2, words.size() == 2); assertTrue(factory.isIgnoreCase() + " does not equal: " + true, factory.isIgnoreCase() == true); - factory = new StopFilterFactory(); args.put("words", "stop-1.txt, stop-2.txt"); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(args); + factory = new StopFilterFactory(args); factory.inform(loader); words = factory.getStopWords(); assertTrue("words is null and it 
shouldn't be", words != null); assertTrue("words Size: " + words.size() + " is not: " + 4, words.size() == 4); assertTrue(factory.isIgnoreCase() + " does not equal: " + true, factory.isIgnoreCase() == true); - factory = new StopFilterFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); args.put("words", "stop-snowball.txt"); args.put("format", "snowball"); - factory.init(args); + factory = new StopFilterFactory(args); factory.inform(loader); words = factory.getStopWords(); assertEquals(8, words.size()); Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanStemFilterFactory.java (working copy) @@ -35,7 +35,7 @@ public void testStemming() throws Exception { Reader reader = new StringReader("Tischen"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); - GermanStemFilterFactory factory = new GermanStemFilterFactory(); + GermanStemFilterFactory factory = new GermanStemFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(tokenizer); assertTokenStreamContents(stream, new String[] { "tisch" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanMinimalStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanMinimalStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanMinimalStemFilterFactory.java (working copy) @@ -30,7 +30,7 @@ public class TestGermanMinimalStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("bilder"); 
- GermanMinimalStemFilterFactory factory = new GermanMinimalStemFilterFactory(); + GermanMinimalStemFilterFactory factory = new GermanMinimalStemFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "bild" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanLightStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanLightStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanLightStemFilterFactory.java (working copy) @@ -30,7 +30,7 @@ public class TestGermanLightStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("häuser"); - GermanLightStemFilterFactory factory = new GermanLightStemFilterFactory(); + GermanLightStemFilterFactory factory = new GermanLightStemFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "haus" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanNormalizationFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanNormalizationFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanNormalizationFilterFactory.java (working copy) @@ -30,7 +30,7 @@ public class TestGermanNormalizationFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("weißbier"); - GermanNormalizationFilterFactory factory = new GermanNormalizationFilterFactory(); 
+ GermanNormalizationFilterFactory factory = new GermanNormalizationFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "weissbier" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishLightStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishLightStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishLightStemFilterFactory.java (working copy) @@ -19,6 +19,7 @@ import java.io.Reader; import java.io.StringReader; +import java.util.HashMap; import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; @@ -30,7 +31,7 @@ public class TestSwedishLightStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("äpplen äpple"); - SwedishLightStemFilterFactory factory = new SwedishLightStemFilterFactory(); + SwedishLightStemFilterFactory factory = new SwedishLightStemFilterFactory(new HashMap()); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "äppl", "äppl" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/fi/TestFinnishLightStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/fi/TestFinnishLightStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/fi/TestFinnishLightStemFilterFactory.java (working copy) @@ -30,7 +30,7 @@ public class TestFinnishLightStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { 
Reader reader = new StringReader("aseistettujen"); - FinnishLightStemFilterFactory factory = new FinnishLightStemFilterFactory(); + FinnishLightStemFilterFactory factory = new FinnishLightStemFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "aseistet" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/hi/TestHindiFilters.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/hi/TestHindiFilters.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/hi/TestHindiFilters.java (working copy) @@ -37,13 +37,8 @@ */ public void testIndicNormalizer() throws Exception { Reader reader = new StringReader("ত্‍ अाैर"); - StandardTokenizerFactory factory = new StandardTokenizerFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - IndicNormalizationFilterFactory filterFactory = new IndicNormalizationFilterFactory(); - filterFactory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map args = Collections.emptyMap(); - factory.init(args); - filterFactory.init(args); + StandardTokenizerFactory factory = new StandardTokenizerFactory(TEST_VERSION_CURRENT_MAP()); + IndicNormalizationFilterFactory filterFactory = new IndicNormalizationFilterFactory(TEST_VERSION_CURRENT_MAP()); Tokenizer tokenizer = factory.create(reader); TokenStream stream = filterFactory.create(tokenizer); assertTokenStreamContents(stream, new String[] { "ৎ", "और" }); @@ -54,14 +49,9 @@ */ public void testHindiNormalizer() throws Exception { Reader reader = new StringReader("क़िताब"); - StandardTokenizerFactory factory = new StandardTokenizerFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - IndicNormalizationFilterFactory indicFilterFactory = new IndicNormalizationFilterFactory(); - HindiNormalizationFilterFactory 
hindiFilterFactory = new HindiNormalizationFilterFactory(); - hindiFilterFactory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map args = Collections.emptyMap(); - factory.init(args); - hindiFilterFactory.init(args); + StandardTokenizerFactory factory = new StandardTokenizerFactory(TEST_VERSION_CURRENT_MAP()); + IndicNormalizationFilterFactory indicFilterFactory = new IndicNormalizationFilterFactory(TEST_VERSION_CURRENT_MAP()); + HindiNormalizationFilterFactory hindiFilterFactory = new HindiNormalizationFilterFactory(TEST_VERSION_CURRENT_MAP()); Tokenizer tokenizer = factory.create(reader); TokenStream stream = indicFilterFactory.create(tokenizer); stream = hindiFilterFactory.create(stream); @@ -73,15 +63,10 @@ */ public void testStemmer() throws Exception { Reader reader = new StringReader("किताबें"); - StandardTokenizerFactory factory = new StandardTokenizerFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - IndicNormalizationFilterFactory indicFilterFactory = new IndicNormalizationFilterFactory(); - HindiNormalizationFilterFactory hindiFilterFactory = new HindiNormalizationFilterFactory(); - HindiStemFilterFactory stemFactory = new HindiStemFilterFactory(); - stemFactory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map args = Collections.emptyMap(); - factory.init(args); - stemFactory.init(args); + StandardTokenizerFactory factory = new StandardTokenizerFactory(TEST_VERSION_CURRENT_MAP()); + IndicNormalizationFilterFactory indicFilterFactory = new IndicNormalizationFilterFactory(TEST_VERSION_CURRENT_MAP()); + HindiNormalizationFilterFactory hindiFilterFactory = new HindiNormalizationFilterFactory(TEST_VERSION_CURRENT_MAP()); + HindiStemFilterFactory stemFactory = new HindiStemFilterFactory(TEST_VERSION_CURRENT_MAP()); Tokenizer tokenizer = factory.create(reader); TokenStream stream = indicFilterFactory.create(tokenizer); stream = hindiFilterFactory.create(stream); Index: 
lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestStandardFactories.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestStandardFactories.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestStandardFactories.java (working copy) @@ -42,10 +42,7 @@ */ public void testStandardTokenizer() throws Exception { Reader reader = new StringReader("Wha\u0301t's this thing do?"); - StandardTokenizerFactory factory = new StandardTokenizerFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map args = Collections.emptyMap(); - factory.init(args); + StandardTokenizerFactory factory = new StandardTokenizerFactory(TEST_VERSION_CURRENT_MAP()); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] {"Wha\u0301t's", "this", "thing", "do" }); @@ -59,11 +56,9 @@ String longWord = builder.toString(); String content = "one two three " + longWord + " four five six"; Reader reader = new StringReader(content); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("maxTokenLength", "1000"); - StandardTokenizerFactory factory = new StandardTokenizerFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(args); + StandardTokenizerFactory factory = new StandardTokenizerFactory(args); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] {"one", "two", "three", longWord, "four", "five", "six" }); @@ -74,10 +69,7 @@ */ public void testClassicTokenizer() throws Exception { Reader reader = new StringReader("What's this thing do?"); - ClassicTokenizerFactory factory = new ClassicTokenizerFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map args = Collections.emptyMap(); - factory.init(args); + ClassicTokenizerFactory factory = new ClassicTokenizerFactory(TEST_VERSION_CURRENT_MAP()); 
Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] {"What's", "this", "thing", "do" }); @@ -91,11 +83,9 @@ String longWord = builder.toString(); String content = "one two three " + longWord + " four five six"; Reader reader = new StringReader(content); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("maxTokenLength", "1000"); - ClassicTokenizerFactory factory = new ClassicTokenizerFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(args); + ClassicTokenizerFactory factory = new ClassicTokenizerFactory(args); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] {"one", "two", "three", longWord, "four", "five", "six" }); @@ -106,13 +96,8 @@ */ public void testStandardFilter() throws Exception { Reader reader = new StringReader("What's this thing do?"); - ClassicTokenizerFactory factory = new ClassicTokenizerFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map args = Collections.emptyMap(); - factory.init(args); - ClassicFilterFactory filterFactory = new ClassicFilterFactory(); - filterFactory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - filterFactory.init(args); + ClassicTokenizerFactory factory = new ClassicTokenizerFactory(TEST_VERSION_CURRENT_MAP()); + ClassicFilterFactory filterFactory = new ClassicFilterFactory(TEST_VERSION_CURRENT_MAP()); Tokenizer tokenizer = factory.create(reader); TokenStream stream = filterFactory.create(tokenizer); assertTokenStreamContents(stream, @@ -124,10 +109,7 @@ */ public void testKeywordTokenizer() throws Exception { Reader reader = new StringReader("What's this thing do?"); - KeywordTokenizerFactory factory = new KeywordTokenizerFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map args = Collections.emptyMap(); - factory.init(args); + KeywordTokenizerFactory factory = new KeywordTokenizerFactory(TEST_VERSION_CURRENT_MAP()); Tokenizer stream = 
factory.create(reader); assertTokenStreamContents(stream, new String[] {"What's this thing do?"}); @@ -138,10 +120,7 @@ */ public void testWhitespaceTokenizer() throws Exception { Reader reader = new StringReader("What's this thing do?"); - WhitespaceTokenizerFactory factory = new WhitespaceTokenizerFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map args = Collections.emptyMap(); - factory.init(args); + WhitespaceTokenizerFactory factory = new WhitespaceTokenizerFactory(TEST_VERSION_CURRENT_MAP()); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] {"What's", "this", "thing", "do?"}); @@ -152,10 +131,7 @@ */ public void testLetterTokenizer() throws Exception { Reader reader = new StringReader("What's this thing do?"); - LetterTokenizerFactory factory = new LetterTokenizerFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map args = Collections.emptyMap(); - factory.init(args); + LetterTokenizerFactory factory = new LetterTokenizerFactory(TEST_VERSION_CURRENT_MAP()); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] {"What", "s", "this", "thing", "do"}); @@ -166,10 +142,7 @@ */ public void testLowerCaseTokenizer() throws Exception { Reader reader = new StringReader("What's this thing do?"); - LowerCaseTokenizerFactory factory = new LowerCaseTokenizerFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map args = Collections.emptyMap(); - factory.init(args); + LowerCaseTokenizerFactory factory = new LowerCaseTokenizerFactory(TEST_VERSION_CURRENT_MAP()); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] {"what", "s", "this", "thing", "do"}); @@ -181,10 +154,7 @@ public void testASCIIFolding() throws Exception { Reader reader = new StringReader("Česká"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); - ASCIIFoldingFilterFactory factory = new 
ASCIIFoldingFilterFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map args = Collections.emptyMap(); - factory.init(args); + ASCIIFoldingFilterFactory factory = new ASCIIFoldingFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(tokenizer); assertTokenStreamContents(stream, new String[] { "Ceska" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestUAX29URLEmailTokenizerFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestUAX29URLEmailTokenizerFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/standard/TestUAX29URLEmailTokenizerFactory.java (working copy) @@ -34,10 +34,7 @@ public void testUAX29URLEmailTokenizer() throws Exception { Reader reader = new StringReader("Wha\u0301t's this thing do?"); - UAX29URLEmailTokenizerFactory factory = new UAX29URLEmailTokenizerFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map args = Collections.emptyMap(); - factory.init(args); + UAX29URLEmailTokenizerFactory factory = new UAX29URLEmailTokenizerFactory(TEST_VERSION_CURRENT_MAP()); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] {"Wha\u0301t's", "this", "thing", "do" }); @@ -45,10 +42,7 @@ public void testArabic() throws Exception { Reader reader = new StringReader("الفيلم الوثائقي الأول عن ويكيبيديا يسمى \"الحقيقة بالأرقام: قصة ويكيبيديا\" (بالإنجليزية: Truth in Numbers: The Wikipedia Story)، سيتم إطلاقه في 2008."); - UAX29URLEmailTokenizerFactory factory = new UAX29URLEmailTokenizerFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map args = Collections.emptyMap(); - factory.init(args); + UAX29URLEmailTokenizerFactory factory = new UAX29URLEmailTokenizerFactory(TEST_VERSION_CURRENT_MAP()); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new 
String[] {"الفيلم", "الوثائقي", "الأول", "عن", "ويكيبيديا", "يسمى", "الحقيقة", "بالأرقام", "قصة", "ويكيبيديا", @@ -57,10 +51,7 @@ public void testChinese() throws Exception { Reader reader = new StringReader("我是中国人。 1234 Tests "); - UAX29URLEmailTokenizerFactory factory = new UAX29URLEmailTokenizerFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map args = Collections.emptyMap(); - factory.init(args); + UAX29URLEmailTokenizerFactory factory = new UAX29URLEmailTokenizerFactory(TEST_VERSION_CURRENT_MAP()); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] {"我", "是", "中", "国", "人", "1234", "Tests"}); @@ -68,10 +59,7 @@ public void testKorean() throws Exception { Reader reader = new StringReader("안녕하세요 한글입니다"); - UAX29URLEmailTokenizerFactory factory = new UAX29URLEmailTokenizerFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map args = Collections.emptyMap(); - factory.init(args); + UAX29URLEmailTokenizerFactory factory = new UAX29URLEmailTokenizerFactory(TEST_VERSION_CURRENT_MAP()); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] {"안녕하세요", "한글입니다"}); @@ -79,10 +67,7 @@ public void testHyphen() throws Exception { Reader reader = new StringReader("some-dashed-phrase"); - UAX29URLEmailTokenizerFactory factory = new UAX29URLEmailTokenizerFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map args = Collections.emptyMap(); - factory.init(args); + UAX29URLEmailTokenizerFactory factory = new UAX29URLEmailTokenizerFactory(TEST_VERSION_CURRENT_MAP()); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] {"some", "dashed", "phrase"}); @@ -105,10 +90,7 @@ + " blah Sirrah woof " + "http://[a42:a7b6::]/qSmxSUU4z/%52qVl4\n"; Reader reader = new StringReader(textWithURLs); - UAX29URLEmailTokenizerFactory factory = new UAX29URLEmailTokenizerFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map 
args = Collections.emptyMap(); - factory.init(args); + UAX29URLEmailTokenizerFactory factory = new UAX29URLEmailTokenizerFactory(TEST_VERSION_CURRENT_MAP()); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] { @@ -147,10 +129,7 @@ + "lMahAA.j/5.RqUjS745.DtkcYdi@d2-4gb-l6.ae\n" + "lv'p@tqk.vj5s0tgl.0dlu7su3iyiaz.dqso.494.3hb76.XN--MGBAAM7A8H\n"; Reader reader = new StringReader(textWithEmails); - UAX29URLEmailTokenizerFactory factory = new UAX29URLEmailTokenizerFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map args = Collections.emptyMap(); - factory.init(args); + UAX29URLEmailTokenizerFactory factory = new UAX29URLEmailTokenizerFactory(TEST_VERSION_CURRENT_MAP()); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] { @@ -180,11 +159,9 @@ String longWord = builder.toString(); String content = "one two three " + longWord + " four five six"; Reader reader = new StringReader(content); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("maxTokenLength", "1000"); - UAX29URLEmailTokenizerFactory factory = new UAX29URLEmailTokenizerFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(args); + UAX29URLEmailTokenizerFactory factory = new UAX29URLEmailTokenizerFactory(args); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] {"one", "two", "three", longWord, "four", "five", "six" }); Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestCapitalizationFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestCapitalizationFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestCapitalizationFilterFactory.java (working copy) @@ -33,13 +33,11 @@ public void testCapitalization() throws 
Exception { - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put( CapitalizationFilterFactory.KEEP, "and the it BIG" ); args.put( CapitalizationFilterFactory.ONLY_FIRST_WORD, "true" ); - CapitalizationFilterFactory factory = new CapitalizationFilterFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init( args ); + CapitalizationFilterFactory factory = new CapitalizationFilterFactory(args); assertTokenStreamContents(factory.create( new MockTokenizer(new StringReader("kiTTEN"), MockTokenizer.WHITESPACE, false)), new String[] { "Kitten" }); @@ -94,10 +92,8 @@ new String[] { "Mckinley" }); // Now try some prefixes - factory = new CapitalizationFilterFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); args.put( "okPrefix", "McK" ); // all words - factory.init( args ); + factory = new CapitalizationFilterFactory(args); assertTokenStreamContents(factory.create( new MockTokenizer(new StringReader("McKinley"), MockTokenizer.WHITESPACE, false)), new String[] { "McKinley" }); @@ -116,14 +112,12 @@ } public void testKeepIgnoreCase() throws Exception { - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put( CapitalizationFilterFactory.KEEP, "kitten" ); args.put( CapitalizationFilterFactory.KEEP_IGNORE_CASE, "true" ); args.put( CapitalizationFilterFactory.ONLY_FIRST_WORD, "true" ); - CapitalizationFilterFactory factory = new CapitalizationFilterFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init( args ); + CapitalizationFilterFactory factory = new CapitalizationFilterFactory(args); factory.forceFirstLetter = true; assertTokenStreamContents(factory.create( new MockTokenizer(new StringReader("kiTTEN"), MockTokenizer.KEYWORD, false)), @@ -146,12 +140,10 @@ * This is very weird when combined with ONLY_FIRST_WORD!!! 
*/ public void testMinWordLength() throws Exception { - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put(CapitalizationFilterFactory.ONLY_FIRST_WORD, "true"); args.put(CapitalizationFilterFactory.MIN_WORD_LENGTH, "5"); - CapitalizationFilterFactory factory = new CapitalizationFilterFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(args); + CapitalizationFilterFactory factory = new CapitalizationFilterFactory(args); Tokenizer tokenizer = new MockTokenizer(new StringReader( "helo testing"), MockTokenizer.WHITESPACE, false); TokenStream ts = factory.create(tokenizer); @@ -163,11 +155,9 @@ * in each token (it should do nothing) */ public void testMaxWordCount() throws Exception { - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put(CapitalizationFilterFactory.MAX_WORD_COUNT, "2"); - CapitalizationFilterFactory factory = new CapitalizationFilterFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(args); + CapitalizationFilterFactory factory = new CapitalizationFilterFactory(args); Tokenizer tokenizer = new MockTokenizer(new StringReader( "one two three four"), MockTokenizer.WHITESPACE, false); TokenStream ts = factory.create(tokenizer); @@ -178,11 +168,9 @@ * Test CapitalizationFilterFactory's maxWordCount option when exceeded */ public void testMaxWordCount2() throws Exception { - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put(CapitalizationFilterFactory.MAX_WORD_COUNT, "2"); - CapitalizationFilterFactory factory = new CapitalizationFilterFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(args); + CapitalizationFilterFactory factory = new CapitalizationFilterFactory(args); Tokenizer tokenizer = new MockTokenizer(new StringReader( "one two three four"), MockTokenizer.KEYWORD, false); TokenStream ts = factory.create(tokenizer); @@ -195,11 +183,9 @@ * This is weird, it is not really a max, but inclusive 
(look at 'is') */ public void testMaxTokenLength() throws Exception { - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put(CapitalizationFilterFactory.MAX_TOKEN_LENGTH, "2"); - CapitalizationFilterFactory factory = new CapitalizationFilterFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(args); + CapitalizationFilterFactory factory = new CapitalizationFilterFactory(args); Tokenizer tokenizer = new MockTokenizer(new StringReader( "this is a test"), MockTokenizer.WHITESPACE, false); TokenStream ts = factory.create(tokenizer); @@ -210,12 +196,10 @@ * Test CapitalizationFilterFactory's forceFirstLetter option */ public void testForceFirstLetter() throws Exception { - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put(CapitalizationFilterFactory.KEEP, "kitten"); args.put(CapitalizationFilterFactory.FORCE_FIRST_LETTER, "true"); - CapitalizationFilterFactory factory = new CapitalizationFilterFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(args); + CapitalizationFilterFactory factory = new CapitalizationFilterFactory(args); Tokenizer tokenizer = new MockTokenizer(new StringReader("kitten"), MockTokenizer.WHITESPACE, false); TokenStream ts = factory.create(tokenizer); assertTokenStreamContents(ts, new String[] {"Kitten"}); Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestTrimFilterFactory.java (working copy) @@ -30,10 +30,9 @@ */ public class TestTrimFilterFactory extends BaseTokenStreamTestCase { public void testTrimming() throws Exception { - TrimFilterFactory factory = new TrimFilterFactory(); - Map args = new HashMap(); + Map args = 
TEST_VERSION_CURRENT_MAP(); args.put("updateOffsets", "false"); - factory.init(args); + TrimFilterFactory factory = new TrimFilterFactory(args); TokenStream ts = factory.create(new MockTokenizer(new StringReader("trim me "), MockTokenizer.KEYWORD, false)); assertTokenStreamContents(ts, new String[] { "trim me" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestRemoveDuplicatesTokenFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestRemoveDuplicatesTokenFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestRemoveDuplicatesTokenFilterFactory.java (working copy) @@ -43,7 +43,7 @@ throws Exception { final Iterator toks = Arrays.asList(tokens).iterator(); - RemoveDuplicatesTokenFilterFactory factory = new RemoveDuplicatesTokenFilterFactory(); + RemoveDuplicatesTokenFilterFactory factory = new RemoveDuplicatesTokenFilterFactory(TEST_VERSION_CURRENT_MAP()); final TokenStream ts = factory.create (new TokenStream() { CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestStemmerOverrideFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestStemmerOverrideFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestStemmerOverrideFilterFactory.java (working copy) @@ -39,12 +39,10 @@ // our stemdict stems dogs to 'cat' Reader reader = new StringReader("testing dogs"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); - StemmerOverrideFilterFactory factory = new StemmerOverrideFilterFactory(); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); 
ResourceLoader loader = new StringMockResourceLoader("dogs\tcat"); args.put("dictionary", "stemdict.txt"); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(args); + StemmerOverrideFilterFactory factory = new StemmerOverrideFilterFactory(args); factory.inform(loader); TokenStream ts = new PorterStemFilter(factory.create(tokenizer)); @@ -54,13 +52,11 @@ public void testKeywordsCaseInsensitive() throws IOException { Reader reader = new StringReader("testing DoGs"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); - StemmerOverrideFilterFactory factory = new StemmerOverrideFilterFactory(); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); ResourceLoader loader = new StringMockResourceLoader("dogs\tcat"); args.put("dictionary", "stemdict.txt"); args.put("ignoreCase", "true"); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(args); + StemmerOverrideFilterFactory factory = new StemmerOverrideFilterFactory(args); factory.inform(loader); TokenStream ts = new PorterStemFilter(factory.create(tokenizer)); Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLengthFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLengthFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLengthFilterFactory.java (working copy) @@ -28,22 +28,20 @@ public class TestLengthFilterFactory extends BaseTokenStreamTestCase { public void test() throws IOException { - LengthFilterFactory factory = new LengthFilterFactory(); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put(LengthFilterFactory.MIN_KEY, String.valueOf(4)); args.put(LengthFilterFactory.MAX_KEY, String.valueOf(10)); // default: args.put("enablePositionIncrements", "false"); - factory.init(args); + 
LengthFilterFactory factory = new LengthFilterFactory(args); String test = "foo foobar super-duper-trooper"; TokenStream stream = factory.create(new MockTokenizer(new StringReader(test), MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "foobar" }, new int[] { 1 }); - factory = new LengthFilterFactory(); - args = new HashMap(); + args = TEST_VERSION_CURRENT_MAP(); args.put(LengthFilterFactory.MIN_KEY, String.valueOf(4)); args.put(LengthFilterFactory.MAX_KEY, String.valueOf(10)); args.put("enablePositionIncrements", "true"); - factory.init(args); + factory = new LengthFilterFactory(args); stream = factory.create(new MockTokenizer(new StringReader(test), MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "foobar" }, new int[] { 2 }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeepFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeepFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeepFilterFactory.java (working copy) @@ -34,22 +34,18 @@ public void testInform() throws Exception { ResourceLoader loader = new ResourceAsStreamResourceLoader(getClass()); assertTrue("loader is null and it shouldn't be", loader != null); - KeepWordFilterFactory factory = new KeepWordFilterFactory(); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("words", "keep-1.txt"); args.put("ignoreCase", "true"); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(args); + KeepWordFilterFactory factory = new KeepWordFilterFactory(args); factory.inform(loader); CharArraySet words = factory.getWords(); assertTrue("words is null and it shouldn't be", words != null); assertTrue("words Size: " + words.size() + " is not: " + 2, words.size() == 2); - factory = new 
KeepWordFilterFactory(); args.put("words", "keep-1.txt, keep-2.txt"); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(args); + factory = new KeepWordFilterFactory(args); factory.inform(loader); words = factory.getWords(); assertTrue("words is null and it shouldn't be", words != null); Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeywordMarkerFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeywordMarkerFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestKeywordMarkerFilterFactory.java (working copy) @@ -38,12 +38,10 @@ public void testKeywords() throws IOException { Reader reader = new StringReader("dogs cats"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); - KeywordMarkerFilterFactory factory = new KeywordMarkerFilterFactory(); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); ResourceLoader loader = new StringMockResourceLoader("cats"); args.put("protected", "protwords.txt"); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(args); + KeywordMarkerFilterFactory factory = new KeywordMarkerFilterFactory(args); factory.inform(loader); TokenStream ts = new PorterStemFilter(factory.create(tokenizer)); @@ -53,13 +51,11 @@ public void testKeywordsCaseInsensitive() throws IOException { Reader reader = new StringReader("dogs cats Cats"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); - KeywordMarkerFilterFactory factory = new KeywordMarkerFilterFactory(); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); ResourceLoader loader = new StringMockResourceLoader("cats"); args.put("protected", "protwords.txt"); args.put("ignoreCase", "true"); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - 
factory.init(args); + KeywordMarkerFilterFactory factory = new KeywordMarkerFilterFactory(args); factory.inform(loader); TokenStream ts = new PorterStemFilter(factory.create(tokenizer)); Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemFilterFactory.java (working copy) @@ -35,7 +35,7 @@ public void testStemming() throws Exception { Reader reader = new StringReader("Brasília"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); - BrazilianStemFilterFactory factory = new BrazilianStemFilterFactory(); + BrazilianStemFilterFactory factory = new BrazilianStemFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(tokenizer); assertTokenStreamContents(stream, new String[] { "brasil" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/TestNGramFilters.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/TestNGramFilters.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/ngram/TestNGramFilters.java (working copy) @@ -36,9 +36,7 @@ */ public void testNGramTokenizer() throws Exception { Reader reader = new StringReader("test"); - Map args = new HashMap(); - NGramTokenizerFactory factory = new NGramTokenizerFactory(); - factory.init(args); + NGramTokenizerFactory factory = new NGramTokenizerFactory(TEST_VERSION_CURRENT_MAP()); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] { "t", "e", "s", "t", "te", "es", "st" }); @@ -48,11 +46,10 @@ */ public void testNGramTokenizer2() throws Exception { Reader reader = new 
StringReader("test"); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("minGramSize", "2"); args.put("maxGramSize", "3"); - NGramTokenizerFactory factory = new NGramTokenizerFactory(); - factory.init(args); + NGramTokenizerFactory factory = new NGramTokenizerFactory(args); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] { "te", "es", "st", "tes", "est" }); @@ -62,9 +59,7 @@ */ public void testNGramFilter() throws Exception { Reader reader = new StringReader("test"); - Map args = new HashMap(); - NGramFilterFactory factory = new NGramFilterFactory(); - factory.init(args); + NGramFilterFactory factory = new NGramFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "t", "e", "s", "t", "te", "es", "st" }); @@ -74,11 +69,10 @@ */ public void testNGramFilter2() throws Exception { Reader reader = new StringReader("test"); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("minGramSize", "2"); args.put("maxGramSize", "3"); - NGramFilterFactory factory = new NGramFilterFactory(); - factory.init(args); + NGramFilterFactory factory = new NGramFilterFactory(args); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "te", "es", "st", "tes", "est" }); @@ -88,9 +82,7 @@ */ public void testEdgeNGramTokenizer() throws Exception { Reader reader = new StringReader("test"); - Map args = new HashMap(); - EdgeNGramTokenizerFactory factory = new EdgeNGramTokenizerFactory(); - factory.init(args); + EdgeNGramTokenizerFactory factory = new EdgeNGramTokenizerFactory(TEST_VERSION_CURRENT_MAP()); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] { "t" }); @@ -100,11 +92,10 @@ */ public void testEdgeNGramTokenizer2() throws Exception 
{ Reader reader = new StringReader("test"); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("minGramSize", "1"); args.put("maxGramSize", "2"); - EdgeNGramTokenizerFactory factory = new EdgeNGramTokenizerFactory(); - factory.init(args); + EdgeNGramTokenizerFactory factory = new EdgeNGramTokenizerFactory(args); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] { "t", "te" }); @@ -114,10 +105,9 @@ */ public void testEdgeNGramTokenizer3() throws Exception { Reader reader = new StringReader("ready"); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("side", "back"); - EdgeNGramTokenizerFactory factory = new EdgeNGramTokenizerFactory(); - factory.init(args); + EdgeNGramTokenizerFactory factory = new EdgeNGramTokenizerFactory(args); Tokenizer stream = factory.create(reader); assertTokenStreamContents(stream, new String[] { "y" }); @@ -127,9 +117,7 @@ */ public void testEdgeNGramFilter() throws Exception { Reader reader = new StringReader("test"); - Map args = new HashMap(); - EdgeNGramFilterFactory factory = new EdgeNGramFilterFactory(); - factory.init(args); + EdgeNGramFilterFactory factory = new EdgeNGramFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "t" }); @@ -139,11 +127,10 @@ */ public void testEdgeNGramFilter2() throws Exception { Reader reader = new StringReader("test"); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("minGramSize", "1"); args.put("maxGramSize", "2"); - EdgeNGramFilterFactory factory = new EdgeNGramFilterFactory(); - factory.init(args); + EdgeNGramFilterFactory factory = new EdgeNGramFilterFactory(args); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "t", "te" }); @@ -153,10 +140,9 @@ 
*/ public void testEdgeNGramFilter3() throws Exception { Reader reader = new StringReader("ready"); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("side", "back"); - EdgeNGramFilterFactory factory = new EdgeNGramFilterFactory(); - factory.init(args); + EdgeNGramFilterFactory factory = new EdgeNGramFilterFactory(args); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "y" }); Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/fr/TestElisionFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/fr/TestElisionFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/fr/TestElisionFilterFactory.java (working copy) @@ -40,12 +40,10 @@ public void testElision() throws Exception { Reader reader = new StringReader("l'avion"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); - ElisionFilterFactory factory = new ElisionFilterFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); ResourceLoader loader = new ResourceAsStreamResourceLoader(getClass()); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("articles", "frenchArticles.txt"); - factory.init(args); + ElisionFilterFactory factory = new ElisionFilterFactory(args); factory.inform(loader); TokenStream stream = factory.create(tokenizer); assertTokenStreamContents(stream, new String[] { "avion" }); @@ -57,10 +55,7 @@ public void testDefaultArticles() throws Exception { Reader reader = new StringReader("l'avion"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); - ElisionFilterFactory factory = new ElisionFilterFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map args = Collections.emptyMap(); - factory.init(args); + 
ElisionFilterFactory factory = new ElisionFilterFactory(TEST_VERSION_CURRENT_MAP()); ResourceLoader loader = new ResourceAsStreamResourceLoader(getClass()); factory.inform(loader); TokenStream stream = factory.create(tokenizer); @@ -73,13 +68,11 @@ public void testCaseInsensitive() throws Exception { Reader reader = new StringReader("L'avion"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); - ElisionFilterFactory factory = new ElisionFilterFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); ResourceLoader loader = new ResourceAsStreamResourceLoader(getClass()); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("articles", "frenchArticles.txt"); args.put("ignoreCase", "true"); - factory.init(args); + ElisionFilterFactory factory = new ElisionFilterFactory(args); factory.inform(loader); TokenStream stream = factory.create(tokenizer); assertTokenStreamContents(stream, new String[] { "avion" }); Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchMinimalStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchMinimalStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchMinimalStemFilterFactory.java (working copy) @@ -30,7 +30,7 @@ public class TestFrenchMinimalStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("chevaux"); - FrenchMinimalStemFilterFactory factory = new FrenchMinimalStemFilterFactory(); + FrenchMinimalStemFilterFactory factory = new FrenchMinimalStemFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "cheval" }); } Index: 
lucene/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchLightStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchLightStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchLightStemFilterFactory.java (working copy) @@ -30,7 +30,7 @@ public class TestFrenchLightStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("administrativement"); - FrenchLightStemFilterFactory factory = new FrenchLightStemFilterFactory(); + FrenchLightStemFilterFactory factory = new FrenchLightStemFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "administratif" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/th/TestThaiWordFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/th/TestThaiWordFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/th/TestThaiWordFilterFactory.java (working copy) @@ -19,8 +19,6 @@ import java.io.Reader; import java.io.StringReader; -import java.util.Collections; -import java.util.Map; import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; @@ -39,10 +37,7 @@ assumeTrue("JRE does not support Thai dictionary-based BreakIterator", ThaiWordFilter.DBBI_AVAILABLE); Reader reader = new StringReader("การที่ได้ต้องแสดงว่างานดี"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); - ThaiWordFilterFactory factory = new ThaiWordFilterFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map args = 
Collections.emptyMap(); - factory.init(args); + ThaiWordFilterFactory factory = new ThaiWordFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(tokenizer); assertTokenStreamContents(stream, new String[] {"การ", "ที่", "ได้", "ต้อง", "แสดง", "ว่า", "งาน", "ดี"}); Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilterFactory.java (working copy) @@ -37,10 +37,7 @@ public void testReversing() throws Exception { Reader reader = new StringReader("simple test"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); - ReverseStringFilterFactory factory = new ReverseStringFilterFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map args = Collections.emptyMap(); - factory.init(args); + ReverseStringFilterFactory factory = new ReverseStringFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(tokenizer); assertTokenStreamContents(stream, new String[] { "elpmis", "tset" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/TestDelimitedPayloadTokenFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/TestDelimitedPayloadTokenFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/payloads/TestDelimitedPayloadTokenFilterFactory.java (working copy) @@ -34,10 +34,9 @@ public class TestDelimitedPayloadTokenFilterFactory extends BaseTokenStreamTestCase { public void testEncoder() throws Exception { - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); 
args.put(DelimitedPayloadTokenFilterFactory.ENCODER_ATTR, "float"); - DelimitedPayloadTokenFilterFactory factory = new DelimitedPayloadTokenFilterFactory(); - factory.init(args); + DelimitedPayloadTokenFilterFactory factory = new DelimitedPayloadTokenFilterFactory(args); ResourceLoader loader = new StringMockResourceLoader("solr/collection1"); factory.inform(loader); @@ -56,11 +55,10 @@ } public void testDelim() throws Exception { - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put(DelimitedPayloadTokenFilterFactory.ENCODER_ATTR, FloatEncoder.class.getName()); args.put(DelimitedPayloadTokenFilterFactory.DELIMITER_ATTR, "*"); - DelimitedPayloadTokenFilterFactory factory = new DelimitedPayloadTokenFilterFactory(); - factory.init(args); + DelimitedPayloadTokenFilterFactory factory = new DelimitedPayloadTokenFilterFactory(args); ResourceLoader loader = new StringMockResourceLoader("solr/collection1"); factory.inform(loader); Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianMinimalStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianMinimalStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianMinimalStemFilterFactory.java (working copy) @@ -30,7 +30,7 @@ public class TestNorwegianMinimalStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("eple eplet epler eplene eplets eplenes"); - NorwegianMinimalStemFilterFactory factory = new NorwegianMinimalStemFilterFactory(); + NorwegianMinimalStemFilterFactory factory = new NorwegianMinimalStemFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "epl", "epl", "epl", "epl", "epl", 
"epl" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianLightStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianLightStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/no/TestNorwegianLightStemFilterFactory.java (working copy) @@ -30,7 +30,7 @@ public class TestNorwegianLightStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("epler eple"); - NorwegianLightStemFilterFactory factory = new NorwegianLightStemFilterFactory(); + NorwegianLightStemFilterFactory factory = new NorwegianLightStemFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "epl", "epl" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/hu/TestHungarianLightStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/hu/TestHungarianLightStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/hu/TestHungarianLightStemFilterFactory.java (working copy) @@ -30,7 +30,7 @@ public class TestHungarianLightStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("házakat"); - HungarianLightStemFilterFactory factory = new HungarianLightStemFilterFactory(); + HungarianLightStemFilterFactory factory = new HungarianLightStemFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "haz" }); } Index: 
lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternTokenizerFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternTokenizerFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternTokenizerFactory.java (working copy) @@ -30,10 +30,9 @@ final String INPUT = "Günther Günther is here"; // create PatternTokenizer - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put( PatternTokenizerFactory.PATTERN, "[,;/\\s]+" ); - PatternTokenizerFactory tokFactory = new PatternTokenizerFactory(); - tokFactory.init( args ); + PatternTokenizerFactory tokFactory = new PatternTokenizerFactory(args); TokenStream stream = tokFactory.create( new StringReader(INPUT) ); assertTokenStreamContents(stream, new String[] { "Günther", "Günther", "is", "here" }); Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceCharFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceCharFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceCharFilterFactory.java (working copy) @@ -34,11 +34,10 @@ // this is test. 
public void testNothingChange() throws IOException { final String BLOCK = "this is test."; - PatternReplaceCharFilterFactory factory = new PatternReplaceCharFilterFactory(); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("pattern", "(aa)\\s+(bb)\\s+(cc)"); args.put("replacement", "$1$2$3"); - factory.init(args); + PatternReplaceCharFilterFactory factory = new PatternReplaceCharFilterFactory(args); CharFilter cs = factory.create( new StringReader( BLOCK ) ); TokenStream ts = new MockTokenizer(cs, MockTokenizer.WHITESPACE, false); @@ -52,10 +51,9 @@ // aa bb cc public void testReplaceByEmpty() throws IOException { final String BLOCK = "aa bb cc"; - PatternReplaceCharFilterFactory factory = new PatternReplaceCharFilterFactory(); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("pattern", "(aa)\\s+(bb)\\s+(cc)"); - factory.init(args); + PatternReplaceCharFilterFactory factory = new PatternReplaceCharFilterFactory(args); CharFilter cs = factory.create( new StringReader( BLOCK ) ); TokenStream ts = new MockTokenizer(cs, MockTokenizer.WHITESPACE, false); @@ -70,11 +68,10 @@ // aa#bb#cc public void test1block1matchSameLength() throws IOException { final String BLOCK = "aa bb cc"; - PatternReplaceCharFilterFactory factory = new PatternReplaceCharFilterFactory(); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("pattern", "(aa)\\s+(bb)\\s+(cc)"); args.put("replacement", "$1#$2#$3"); - factory.init(args); + PatternReplaceCharFilterFactory factory = new PatternReplaceCharFilterFactory(args); CharFilter cs = factory.create( new StringReader( BLOCK ) ); TokenStream ts = new MockTokenizer(cs, MockTokenizer.WHITESPACE, false); Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceFilterFactory.java =================================================================== --- 
lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/pattern/TestPatternReplaceFilterFactory.java (working copy) @@ -32,11 +32,10 @@ public void testReplaceAll() throws Exception { String input = "aabfooaabfooabfoob ab caaaaaaaaab"; - PatternReplaceFilterFactory factory = new PatternReplaceFilterFactory(); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("pattern", "a*b"); args.put("replacement", "-"); - factory.init(args); + PatternReplaceFilterFactory factory = new PatternReplaceFilterFactory(args); TokenStream ts = factory.create (new MockTokenizer(new StringReader(input), MockTokenizer.WHITESPACE, false)); Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/snowball/TestSnowballPorterFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/snowball/TestSnowballPorterFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/snowball/TestSnowballPorterFilterFactory.java (working copy) @@ -44,12 +44,9 @@ gold[i] = stemmer.getCurrent(); } - SnowballPorterFilterFactory factory = new SnowballPorterFilterFactory(); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("language", "English"); - - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(args); + SnowballPorterFilterFactory factory = new SnowballPorterFilterFactory(args); factory.inform(new StringMockResourceLoader("")); Tokenizer tokenizer = new MockTokenizer( new StringReader(join(test, ' ')), MockTokenizer.WHITESPACE, false); @@ -79,7 +76,7 @@ return lines; } - public T newInstance(String cname, Class expectedType, String... subpackages) { + public T newInstance(String cname, Class expectedType, Class[] argTypes, Object[] args, String... 
subpackages) { return null; } @@ -92,13 +89,11 @@ * Test the protected words mechanism of SnowballPorterFilterFactory */ public void testProtected() throws Exception { - SnowballPorterFilterFactory factory = new SnowballPorterFilterFactory(); ResourceLoader loader = new StringMockResourceLoader("ridding"); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("protected", "protwords.txt"); args.put("language", "English"); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(args); + SnowballPorterFilterFactory factory = new SnowballPorterFilterFactory(args); factory.inform(loader); Reader reader = new StringReader("ridding of some stemming"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/lv/TestLatvianStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/lv/TestLatvianStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/lv/TestLatvianStemFilterFactory.java (working copy) @@ -30,7 +30,7 @@ public class TestLatvianStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("tirgiem tirgus"); - LatvianStemFilterFactory factory = new LatvianStemFilterFactory(); + LatvianStemFilterFactory factory = new LatvianStemFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "tirg", "tirg" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseMinimalStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseMinimalStemFilterFactory.java 
(revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseMinimalStemFilterFactory.java (working copy) @@ -30,7 +30,7 @@ public class TestPortugueseMinimalStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("questões"); - PortugueseMinimalStemFilterFactory factory = new PortugueseMinimalStemFilterFactory(); + PortugueseMinimalStemFilterFactory factory = new PortugueseMinimalStemFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "questão" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseLightStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseLightStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseLightStemFilterFactory.java (working copy) @@ -30,7 +30,7 @@ public class TestPortugueseLightStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("evidentemente"); - PortugueseLightStemFilterFactory factory = new PortugueseLightStemFilterFactory(); + PortugueseLightStemFilterFactory factory = new PortugueseLightStemFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "evident" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseStemFilterFactory.java (revision 1365868) +++ 
lucene/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseStemFilterFactory.java (working copy) @@ -30,7 +30,7 @@ public class TestPortugueseStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("maluquice"); - PortugueseStemFilterFactory factory = new PortugueseStemFilterFactory(); + PortugueseStemFilterFactory factory = new PortugueseStemFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "maluc" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/tr/TestTurkishLowerCaseFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/tr/TestTurkishLowerCaseFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/tr/TestTurkishLowerCaseFilterFactory.java (working copy) @@ -19,6 +19,7 @@ import java.io.Reader; import java.io.StringReader; +import java.util.HashMap; import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; @@ -35,7 +36,7 @@ public void testCasing() throws Exception { Reader reader = new StringReader("AĞACI"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); - TurkishLowerCaseFilterFactory factory = new TurkishLowerCaseFilterFactory(); + TurkishLowerCaseFilterFactory factory = new TurkishLowerCaseFilterFactory(new HashMap()); TokenStream stream = factory.create(tokenizer); assertTokenStreamContents(stream, new String[] { "ağacı" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianLightStemFilterFactory.java =================================================================== --- 
lucene/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianLightStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianLightStemFilterFactory.java (working copy) @@ -30,7 +30,7 @@ public class TestRussianLightStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("журналы"); - RussianLightStemFilterFactory factory = new RussianLightStemFilterFactory(); + RussianLightStemFilterFactory factory = new RussianLightStemFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "журнал" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/ga/TestIrishLowerCaseFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/ga/TestIrishLowerCaseFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/ga/TestIrishLowerCaseFilterFactory.java (working copy) @@ -30,7 +30,7 @@ public class TestIrishLowerCaseFilterFactory extends BaseTokenStreamTestCase { public void testCasing() throws Exception { Reader reader = new StringReader("nAthair tUISCE hARD"); - IrishLowerCaseFilterFactory factory = new IrishLowerCaseFilterFactory(); + IrishLowerCaseFilterFactory factory = new IrishLowerCaseFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "n-athair", "t-uisce", "hard" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/TestShingleFilterFactory.java =================================================================== --- 
lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/TestShingleFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/shingle/TestShingleFilterFactory.java (working copy) @@ -35,9 +35,7 @@ */ public void testDefaults() throws Exception { Reader reader = new StringReader("this is a test"); - Map args = new HashMap(); - ShingleFilterFactory factory = new ShingleFilterFactory(); - factory.init(args); + ShingleFilterFactory factory = new ShingleFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] {"this", "this is", "is", "is a", "a", "a test", "test"}); @@ -48,10 +46,9 @@ */ public void testNoUnigrams() throws Exception { Reader reader = new StringReader("this is a test"); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("outputUnigrams", "false"); - ShingleFilterFactory factory = new ShingleFilterFactory(); - factory.init(args); + ShingleFilterFactory factory = new ShingleFilterFactory(args); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] {"this is", "is a", "a test"}); @@ -62,10 +59,9 @@ */ public void testMaxShingleSize() throws Exception { Reader reader = new StringReader("this is a test"); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("maxShingleSize", "3"); - ShingleFilterFactory factory = new ShingleFilterFactory(); - factory.init(args); + ShingleFilterFactory factory = new ShingleFilterFactory(args); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] {"this", "this is", "this is a", "is", @@ -77,11 +73,10 @@ */ public void testMinShingleSize() throws Exception { Reader reader = new StringReader("this is a test"); - Map 
args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("minShingleSize", "3"); args.put("maxShingleSize", "4"); - ShingleFilterFactory factory = new ShingleFilterFactory(); - factory.init(args); + ShingleFilterFactory factory = new ShingleFilterFactory(args); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "this", "this is a", "this is a test", @@ -93,12 +88,11 @@ */ public void testMinShingleSizeNoUnigrams() throws Exception { Reader reader = new StringReader("this is a test"); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("minShingleSize", "3"); args.put("maxShingleSize", "4"); args.put("outputUnigrams", "false"); - ShingleFilterFactory factory = new ShingleFilterFactory(); - factory.init(args); + ShingleFilterFactory factory = new ShingleFilterFactory(args); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "this is a", "this is a test", "is a test" }); @@ -109,11 +103,10 @@ */ public void testEqualMinAndMaxShingleSize() throws Exception { Reader reader = new StringReader("this is a test"); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("minShingleSize", "3"); args.put("maxShingleSize", "3"); - ShingleFilterFactory factory = new ShingleFilterFactory(); - factory.init(args); + ShingleFilterFactory factory = new ShingleFilterFactory(args); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "this", "this is a", "is", "is a test", "a", "test" }); @@ -124,12 +117,11 @@ */ public void testEqualMinAndMaxShingleSizeNoUnigrams() throws Exception { Reader reader = new StringReader("this is a test"); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("minShingleSize", "3"); 
args.put("maxShingleSize", "3"); args.put("outputUnigrams", "false"); - ShingleFilterFactory factory = new ShingleFilterFactory(); - factory.init(args); + ShingleFilterFactory factory = new ShingleFilterFactory(args); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "this is a", "is a test" }); @@ -140,10 +132,9 @@ */ public void testTokenSeparator() throws Exception { Reader reader = new StringReader("this is a test"); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("tokenSeparator", "=BLAH="); - ShingleFilterFactory factory = new ShingleFilterFactory(); - factory.init(args); + ShingleFilterFactory factory = new ShingleFilterFactory(args); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "this", "this=BLAH=is", "is", "is=BLAH=a", @@ -155,11 +146,10 @@ */ public void testTokenSeparatorNoUnigrams() throws Exception { Reader reader = new StringReader("this is a test"); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("tokenSeparator", "=BLAH="); args.put("outputUnigrams", "false"); - ShingleFilterFactory factory = new ShingleFilterFactory(); - factory.init(args); + ShingleFilterFactory factory = new ShingleFilterFactory(args); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "this=BLAH=is", "is=BLAH=a", "a=BLAH=test" }); @@ -170,10 +160,9 @@ */ public void testEmptyTokenSeparator() throws Exception { Reader reader = new StringReader("this is a test"); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("tokenSeparator", ""); - ShingleFilterFactory factory = new ShingleFilterFactory(); - factory.init(args); + ShingleFilterFactory factory = new ShingleFilterFactory(args); TokenStream stream = 
factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "this", "thisis", "is", "isa", "a", "atest", "test" }); @@ -185,12 +174,11 @@ */ public void testMinShingleSizeAndTokenSeparator() throws Exception { Reader reader = new StringReader("this is a test"); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("minShingleSize", "3"); args.put("maxShingleSize", "4"); args.put("tokenSeparator", "=BLAH="); - ShingleFilterFactory factory = new ShingleFilterFactory(); - factory.init(args); + ShingleFilterFactory factory = new ShingleFilterFactory(args); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "this", "this=BLAH=is=BLAH=a", @@ -205,13 +193,12 @@ */ public void testMinShingleSizeAndTokenSeparatorNoUnigrams() throws Exception { Reader reader = new StringReader("this is a test"); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("minShingleSize", "3"); args.put("maxShingleSize", "4"); args.put("tokenSeparator", "=BLAH="); args.put("outputUnigrams", "false"); - ShingleFilterFactory factory = new ShingleFilterFactory(); - factory.init(args); + ShingleFilterFactory factory = new ShingleFilterFactory(args); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "this=BLAH=is=BLAH=a", "this=BLAH=is=BLAH=a=BLAH=test", @@ -228,11 +215,10 @@ */ public void testOutputUnigramsIfNoShingles() throws Exception { Reader reader = new StringReader("test"); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("outputUnigrams", "false"); args.put("outputUnigramsIfNoShingles", "true"); - ShingleFilterFactory factory = new ShingleFilterFactory(); - factory.init(args); + ShingleFilterFactory factory = new ShingleFilterFactory(args); TokenStream stream = 
factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "test" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestHunspellStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestHunspellStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestHunspellStemFilterFactory.java (working copy) @@ -32,12 +32,10 @@ */ public class TestHunspellStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { - HunspellStemFilterFactory factory = new HunspellStemFilterFactory(); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("dictionary", "test.dic"); args.put("affix", "test.aff"); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(args); + HunspellStemFilterFactory factory = new HunspellStemFilterFactory(args); factory.inform(new ResourceAsStreamResourceLoader(getClass())); Reader reader = new StringReader("abc"); Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/id/TestIndonesianStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/id/TestIndonesianStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/id/TestIndonesianStemFilterFactory.java (working copy) @@ -37,9 +37,7 @@ public void testStemming() throws Exception { Reader reader = new StringReader("dibukukannya"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); - IndonesianStemFilterFactory factory = new IndonesianStemFilterFactory(); - Map args = new HashMap(); - factory.init(args); + IndonesianStemFilterFactory factory = new 
IndonesianStemFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(tokenizer); assertTokenStreamContents(stream, new String[] { "buku" }); } @@ -50,10 +48,9 @@ public void testStemmingInflectional() throws Exception { Reader reader = new StringReader("dibukukannya"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); - IndonesianStemFilterFactory factory = new IndonesianStemFilterFactory(); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("stemDerivational", "false"); - factory.init(args); + IndonesianStemFilterFactory factory = new IndonesianStemFilterFactory(args); TokenStream stream = factory.create(tokenizer); assertTokenStreamContents(stream, new String[] { "dibukukan" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/el/TestGreekLowerCaseFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/el/TestGreekLowerCaseFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/el/TestGreekLowerCaseFilterFactory.java (working copy) @@ -37,10 +37,7 @@ public void testNormalization() throws Exception { Reader reader = new StringReader("Μάϊος ΜΆΪΟΣ"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); - GreekLowerCaseFilterFactory factory = new GreekLowerCaseFilterFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map args = Collections.emptyMap(); - factory.init(args); + GreekLowerCaseFilterFactory factory = new GreekLowerCaseFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(tokenizer); assertTokenStreamContents(stream, new String[] { "μαιοσ", "μαιοσ" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/el/TestGreekStemFilterFactory.java =================================================================== --- 
lucene/analysis/common/src/test/org/apache/lucene/analysis/el/TestGreekStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/el/TestGreekStemFilterFactory.java (working copy) @@ -34,7 +34,7 @@ Reader reader = new StringReader("άνθρωπος"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); TokenStream normalized = new GreekLowerCaseFilter(TEST_VERSION_CURRENT, tokenizer); - GreekStemFilterFactory factory = new GreekStemFilterFactory(); + GreekStemFilterFactory factory = new GreekStemFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(normalized); assertTokenStreamContents(stream, new String[] { "ανθρωπ" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/gl/TestGalicianMinimalStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/gl/TestGalicianMinimalStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/gl/TestGalicianMinimalStemFilterFactory.java (working copy) @@ -30,7 +30,7 @@ public class TestGalicianMinimalStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("elefantes"); - GalicianMinimalStemFilterFactory factory = new GalicianMinimalStemFilterFactory(); + GalicianMinimalStemFilterFactory factory = new GalicianMinimalStemFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "elefante" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/gl/TestGalicianStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/gl/TestGalicianStemFilterFactory.java (revision 1365868) 
+++ lucene/analysis/common/src/test/org/apache/lucene/analysis/gl/TestGalicianStemFilterFactory.java (working copy) @@ -30,7 +30,7 @@ public class TestGalicianStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("cariñosa"); - GalicianStemFilterFactory factory = new GalicianStemFilterFactory(); + GalicianStemFilterFactory factory = new GalicianStemFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "cariñ" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/ar/TestArabicFilters.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/ar/TestArabicFilters.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/ar/TestArabicFilters.java (working copy) @@ -38,13 +38,8 @@ */ public void testNormalizer() throws Exception { Reader reader = new StringReader("الذين مَلكت أيمانكم"); - StandardTokenizerFactory factory = new StandardTokenizerFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - ArabicNormalizationFilterFactory filterFactory = new ArabicNormalizationFilterFactory(); - filterFactory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map args = Collections.emptyMap(); - factory.init(args); - filterFactory.init(args); + StandardTokenizerFactory factory = new StandardTokenizerFactory(TEST_VERSION_CURRENT_MAP()); + ArabicNormalizationFilterFactory filterFactory = new ArabicNormalizationFilterFactory(TEST_VERSION_CURRENT_MAP()); Tokenizer tokenizer = factory.create(reader); TokenStream stream = filterFactory.create(tokenizer); assertTokenStreamContents(stream, new String[] {"الذين", "ملكت", "ايمانكم"}); @@ -55,14 +50,9 @@ */ public void testStemmer() throws Exception { Reader reader = new StringReader("الذين مَلكت 
أيمانكم"); - StandardTokenizerFactory factory = new StandardTokenizerFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - ArabicNormalizationFilterFactory normFactory = new ArabicNormalizationFilterFactory(); - normFactory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - ArabicStemFilterFactory stemFactory = new ArabicStemFilterFactory(); - Map args = Collections.emptyMap(); - factory.init(args); - normFactory.init(args); + StandardTokenizerFactory factory = new StandardTokenizerFactory(TEST_VERSION_CURRENT_MAP()); + ArabicNormalizationFilterFactory normFactory = new ArabicNormalizationFilterFactory(TEST_VERSION_CURRENT_MAP()); + ArabicStemFilterFactory stemFactory = new ArabicStemFilterFactory(TEST_VERSION_CURRENT_MAP()); Tokenizer tokenizer = factory.create(reader); TokenStream stream = normFactory.create(tokenizer); stream = stemFactory.create(stream); @@ -74,11 +64,8 @@ */ public void testPersianCharFilter() throws Exception { Reader reader = new StringReader("می‌خورد"); - PersianCharFilterFactory charfilterFactory = new PersianCharFilterFactory(); - StandardTokenizerFactory tokenizerFactory = new StandardTokenizerFactory(); - tokenizerFactory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map args = Collections.emptyMap(); - tokenizerFactory.init(args); + PersianCharFilterFactory charfilterFactory = new PersianCharFilterFactory(TEST_VERSION_CURRENT_MAP()); + StandardTokenizerFactory tokenizerFactory = new StandardTokenizerFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = tokenizerFactory.create(charfilterFactory.create(reader)); assertTokenStreamContents(stream, new String[] { "می", "خورد" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/en/TestKStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/en/TestKStemFilterFactory.java (revision 1365868) +++ 
lucene/analysis/common/src/test/org/apache/lucene/analysis/en/TestKStemFilterFactory.java (working copy) @@ -30,7 +30,7 @@ public class TestKStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("bricks"); - KStemFilterFactory factory = new KStemFilterFactory(); + KStemFilterFactory factory = new KStemFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "brick" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/en/TestPorterStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/en/TestPorterStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/en/TestPorterStemFilterFactory.java (working copy) @@ -35,7 +35,7 @@ public void testStemming() throws Exception { Reader reader = new StringReader("dogs"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); - PorterStemFilterFactory factory = new PorterStemFilterFactory(); + PorterStemFilterFactory factory = new PorterStemFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(tokenizer); assertTokenStreamContents(stream, new String[] { "dog" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/en/TestEnglishMinimalStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/en/TestEnglishMinimalStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/en/TestEnglishMinimalStemFilterFactory.java (working copy) @@ -30,7 +30,7 @@ public class TestEnglishMinimalStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() 
throws Exception { Reader reader = new StringReader("bricks"); - EnglishMinimalStemFilterFactory factory = new EnglishMinimalStemFilterFactory(); + EnglishMinimalStemFilterFactory factory = new EnglishMinimalStemFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "brick" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/TestWikipediaTokenizerFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/TestWikipediaTokenizerFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/wikipedia/TestWikipediaTokenizerFactory.java (working copy) @@ -20,6 +20,7 @@ import java.io.IOException; import java.io.Reader; import java.io.StringReader; +import java.util.HashMap; import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.Tokenizer; @@ -31,7 +32,7 @@ public class TestWikipediaTokenizerFactory extends BaseTokenStreamTestCase { public void testTokenizer() throws IOException { Reader reader = new StringReader("This is a [[Category:foo]]"); - WikipediaTokenizerFactory factory = new WikipediaTokenizerFactory(); + WikipediaTokenizerFactory factory = new WikipediaTokenizerFactory(new HashMap()); Tokenizer tokenizer = factory.create(reader); assertTokenStreamContents(tokenizer, new String[] { "This", "is", "a", "foo" }, Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKWidthFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKWidthFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKWidthFilterFactory.java (working copy) @@ -30,7 +30,7 @@ public class 
TestCJKWidthFilterFactory extends BaseTokenStreamTestCase { public void test() throws Exception { Reader reader = new StringReader("Test 1234"); - CJKWidthFilterFactory factory = new CJKWidthFilterFactory(); + CJKWidthFilterFactory factory = new CJKWidthFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "Test", "1234" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKBigramFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKBigramFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/cjk/TestCJKBigramFilterFactory.java (working copy) @@ -33,10 +33,7 @@ public class TestCJKBigramFilterFactory extends BaseTokenStreamTestCase { public void testDefaults() throws Exception { Reader reader = new StringReader("多くの学生が試験に落ちた。"); - CJKBigramFilterFactory factory = new CJKBigramFilterFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map args = Collections.emptyMap(); - factory.init(args); + CJKBigramFilterFactory factory = new CJKBigramFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(new StandardTokenizer(TEST_VERSION_CURRENT, reader)); assertTokenStreamContents(stream, new String[] { "多く", "くの", "の学", "学生", "生が", "が試", "試験", "験に", "に落", "落ち", "ちた" }); @@ -44,10 +41,9 @@ public void testHanOnly() throws Exception { Reader reader = new StringReader("多くの学生が試験に落ちた。"); - CJKBigramFilterFactory factory = new CJKBigramFilterFactory(); Map args = new HashMap(); args.put("hiragana", "false"); - factory.init(args); + CJKBigramFilterFactory factory = new CJKBigramFilterFactory(args); TokenStream stream = factory.create(new StandardTokenizer(TEST_VERSION_CURRENT, reader)); assertTokenStreamContents(stream, new String[] 
{ "多", "く", "の", "学生", "が", "試験", "に", "落", "ち", "た" }); Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/es/TestSpanishLightStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/es/TestSpanishLightStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/es/TestSpanishLightStemFilterFactory.java (working copy) @@ -30,7 +30,7 @@ public class TestSpanishLightStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("sociedades"); - SpanishLightStemFilterFactory factory = new SpanishLightStemFilterFactory(); + SpanishLightStemFilterFactory factory = new SpanishLightStemFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "sociedad" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/it/TestItalianLightStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/it/TestItalianLightStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/it/TestItalianLightStemFilterFactory.java (working copy) @@ -30,7 +30,7 @@ public class TestItalianLightStemFilterFactory extends BaseTokenStreamTestCase { public void testStemming() throws Exception { Reader reader = new StringReader("ragazzo ragazzi"); - ItalianLightStemFilterFactory factory = new ItalianLightStemFilterFactory(); + ItalianLightStemFilterFactory factory = new ItalianLightStemFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(stream, new String[] { "ragazz", "ragazz" }); } Index: 
lucene/analysis/common/src/test/org/apache/lucene/analysis/cz/TestCzechStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/cz/TestCzechStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/cz/TestCzechStemFilterFactory.java (working copy) @@ -35,7 +35,7 @@ public void testStemming() throws Exception { Reader reader = new StringReader("angličtí"); Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); - CzechStemFilterFactory factory = new CzechStemFilterFactory(); + CzechStemFilterFactory factory = new CzechStemFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream stream = factory.create(tokenizer); assertTokenStreamContents(stream, new String[] { "anglick" }); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymFilterFactory.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymFilterFactory.java (working copy) @@ -18,7 +18,6 @@ */ import java.io.StringReader; -import java.util.HashMap; import java.util.Map; import org.apache.lucene.analysis.BaseTokenStreamTestCase; @@ -31,11 +30,9 @@ public class TestSynonymFilterFactory extends BaseTokenStreamTestCase { /** test that we can parse and use the solr syn file */ public void testSynonyms() throws Exception { - SynonymFilterFactory factory = new SynonymFilterFactory(); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("synonyms", "synonyms.txt"); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(args); + SynonymFilterFactory factory = new SynonymFilterFactory(args); factory.inform(new ResourceAsStreamResourceLoader(getClass())); TokenStream 
ts = factory.create(new MockTokenizer(new StringReader("GB"), MockTokenizer.WHITESPACE, false)); assertTrue(ts instanceof SynonymFilter); @@ -46,11 +43,9 @@ /** if the synonyms are completely empty, test that we still analyze correctly */ public void testEmptySynonyms() throws Exception { - SynonymFilterFactory factory = new SynonymFilterFactory(); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("synonyms", "synonyms.txt"); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(args); + SynonymFilterFactory factory = new SynonymFilterFactory(args); factory.inform(new StringMockResourceLoader("")); // empty file! TokenStream ts = factory.create(new MockTokenizer(new StringReader("GB"), MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(ts, new String[] { "GB" }); Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/util/StringMockResourceLoader.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/util/StringMockResourceLoader.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/util/StringMockResourceLoader.java (working copy) @@ -20,6 +20,7 @@ import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; +import java.lang.reflect.Constructor; import java.util.Arrays; import java.util.List; @@ -36,10 +37,11 @@ } // TODO: do this subpackages thing... wtf is that? - public T newInstance(String cname, Class expectedType, String... subpackages) { + public T newInstance(String cname, Class expectedType, Class[] argTypes, Object[] args, String... 
subpackages) { try { Class clazz = Class.forName(cname).asSubclass(expectedType); - return clazz.newInstance(); + Constructor constructor = clazz.getConstructor(argTypes); + return constructor.newInstance(args); } catch (Exception e) { throw new RuntimeException(e); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/util/ResourceAsStreamResourceLoader.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/util/ResourceAsStreamResourceLoader.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/util/ResourceAsStreamResourceLoader.java (working copy) @@ -21,6 +21,7 @@ import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; +import java.lang.reflect.Constructor; import java.nio.charset.CharacterCodingException; import java.nio.charset.CodingErrorAction; import java.util.ArrayList; @@ -73,10 +74,11 @@ // TODO: do this subpackages thing... wtf is that? @Override - public T newInstance(String cname, Class expectedType, String... subpackages) { + public T newInstance(String cname, Class expectedType, Class[] argTypes, Object[] args, String... subpackages) { try { Class clazz = Class.forName(cname).asSubclass(expectedType); - return clazz.newInstance(); + Constructor constructor = clazz.getConstructor(argTypes); + return constructor.newInstance(args); } catch (Exception e) { throw new RuntimeException(e); } Index: lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestAnalysisSPILoader.java =================================================================== --- lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestAnalysisSPILoader.java (revision 1365868) +++ lucene/analysis/common/src/test/org/apache/lucene/analysis/util/TestAnalysisSPILoader.java (working copy) @@ -17,30 +17,31 @@ * limitations under the License. 
*/ +import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.charfilter.HTMLStripCharFilterFactory; import org.apache.lucene.analysis.core.LowerCaseFilterFactory; import org.apache.lucene.analysis.core.WhitespaceTokenizerFactory; import org.apache.lucene.analysis.miscellaneous.RemoveDuplicatesTokenFilterFactory; import org.apache.lucene.util.LuceneTestCase; -public class TestAnalysisSPILoader extends LuceneTestCase { +public class TestAnalysisSPILoader extends BaseTokenStreamTestCase { public void testLookupTokenizer() { - assertSame(WhitespaceTokenizerFactory.class, TokenizerFactory.forName("Whitespace").getClass()); - assertSame(WhitespaceTokenizerFactory.class, TokenizerFactory.forName("WHITESPACE").getClass()); - assertSame(WhitespaceTokenizerFactory.class, TokenizerFactory.forName("whitespace").getClass()); + assertSame(WhitespaceTokenizerFactory.class, TokenizerFactory.forName("Whitespace", TEST_VERSION_CURRENT_MAP()).getClass()); + assertSame(WhitespaceTokenizerFactory.class, TokenizerFactory.forName("WHITESPACE", TEST_VERSION_CURRENT_MAP()).getClass()); + assertSame(WhitespaceTokenizerFactory.class, TokenizerFactory.forName("whitespace", TEST_VERSION_CURRENT_MAP()).getClass()); } public void testBogusLookupTokenizer() { try { - TokenizerFactory.forName("sdfsdfsdfdsfsdfsdf"); + TokenizerFactory.forName("sdfsdfsdfdsfsdfsdf", TEST_VERSION_CURRENT_MAP()); fail(); } catch (IllegalArgumentException expected) { // } try { - TokenizerFactory.forName("!(**#$U*#$*"); + TokenizerFactory.forName("!(**#$U*#$*", TEST_VERSION_CURRENT_MAP()); fail(); } catch (IllegalArgumentException expected) { // @@ -74,25 +75,25 @@ } public void testLookupTokenFilter() { - assertSame(LowerCaseFilterFactory.class, TokenFilterFactory.forName("Lowercase").getClass()); - assertSame(LowerCaseFilterFactory.class, TokenFilterFactory.forName("LOWERCASE").getClass()); - assertSame(LowerCaseFilterFactory.class, 
TokenFilterFactory.forName("lowercase").getClass()); + assertSame(LowerCaseFilterFactory.class, TokenFilterFactory.forName("Lowercase", TEST_VERSION_CURRENT_MAP()).getClass()); + assertSame(LowerCaseFilterFactory.class, TokenFilterFactory.forName("LOWERCASE", TEST_VERSION_CURRENT_MAP()).getClass()); + assertSame(LowerCaseFilterFactory.class, TokenFilterFactory.forName("lowercase", TEST_VERSION_CURRENT_MAP()).getClass()); - assertSame(RemoveDuplicatesTokenFilterFactory.class, TokenFilterFactory.forName("RemoveDuplicates").getClass()); - assertSame(RemoveDuplicatesTokenFilterFactory.class, TokenFilterFactory.forName("REMOVEDUPLICATES").getClass()); - assertSame(RemoveDuplicatesTokenFilterFactory.class, TokenFilterFactory.forName("removeduplicates").getClass()); + assertSame(RemoveDuplicatesTokenFilterFactory.class, TokenFilterFactory.forName("RemoveDuplicates", TEST_VERSION_CURRENT_MAP()).getClass()); + assertSame(RemoveDuplicatesTokenFilterFactory.class, TokenFilterFactory.forName("REMOVEDUPLICATES", TEST_VERSION_CURRENT_MAP()).getClass()); + assertSame(RemoveDuplicatesTokenFilterFactory.class, TokenFilterFactory.forName("removeduplicates", TEST_VERSION_CURRENT_MAP()).getClass()); } public void testBogusLookupTokenFilter() { try { - TokenFilterFactory.forName("sdfsdfsdfdsfsdfsdf"); + TokenFilterFactory.forName("sdfsdfsdfdsfsdfsdf", TEST_VERSION_CURRENT_MAP()); fail(); } catch (IllegalArgumentException expected) { // } try { - TokenFilterFactory.forName("!(**#$U*#$*"); + TokenFilterFactory.forName("!(**#$U*#$*", TEST_VERSION_CURRENT_MAP()); fail(); } catch (IllegalArgumentException expected) { // @@ -131,21 +132,21 @@ } public void testLookupCharFilter() { - assertSame(HTMLStripCharFilterFactory.class, CharFilterFactory.forName("HTMLStrip").getClass()); - assertSame(HTMLStripCharFilterFactory.class, CharFilterFactory.forName("HTMLSTRIP").getClass()); - assertSame(HTMLStripCharFilterFactory.class, CharFilterFactory.forName("htmlstrip").getClass()); + 
assertSame(HTMLStripCharFilterFactory.class, CharFilterFactory.forName("HTMLStrip", TEST_VERSION_CURRENT_MAP()).getClass()); + assertSame(HTMLStripCharFilterFactory.class, CharFilterFactory.forName("HTMLSTRIP", TEST_VERSION_CURRENT_MAP()).getClass()); + assertSame(HTMLStripCharFilterFactory.class, CharFilterFactory.forName("htmlstrip", TEST_VERSION_CURRENT_MAP()).getClass()); } public void testBogusLookupCharFilter() { try { - CharFilterFactory.forName("sdfsdfsdfdsfsdfsdf"); + CharFilterFactory.forName("sdfsdfsdfdsfsdfsdf", TEST_VERSION_CURRENT_MAP()); fail(); } catch (IllegalArgumentException expected) { // } try { - CharFilterFactory.forName("!(**#$U*#$*"); + CharFilterFactory.forName("!(**#$U*#$*", TEST_VERSION_CURRENT_MAP()); fail(); } catch (IllegalArgumentException expected) { // Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/commongrams/CommonGramsFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/commongrams/CommonGramsFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/commongrams/CommonGramsFilterFactory.java (working copy) @@ -18,6 +18,7 @@ */ import java.io.IOException; +import java.util.Map; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.commongrams.CommonGramsFilter; @@ -42,6 +43,10 @@ public class CommonGramsFilterFactory extends TokenFilterFactory implements ResourceLoaderAware { + public CommonGramsFilterFactory(Map args) { + super(args); + } + public void inform(ResourceLoader loader) throws IOException { String commonWordFiles = args.get("words"); ignoreCase = getBoolean("ignoreCase", false); @@ -76,4 +81,4 @@ } - \ No newline at end of file + Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/commongrams/CommonGramsQueryFilterFactory.java =================================================================== --- 
lucene/analysis/common/src/java/org/apache/lucene/analysis/commongrams/CommonGramsQueryFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/commongrams/CommonGramsQueryFilterFactory.java (working copy) @@ -44,9 +44,8 @@ public class CommonGramsQueryFilterFactory extends TokenFilterFactory implements ResourceLoaderAware { - @Override - public void init(Map args) { - super.init(args); + public CommonGramsQueryFilterFactory(Map args) { + super(args); assureMatchVersion(); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/HTMLStripCharFilterFactory.java (working copy) @@ -53,9 +53,8 @@ return charFilter; } - @Override - public void init(Map args) { - super.init(args); + public HTMLStripCharFilterFactory(Map args) { + super(args); String escapedTagsArg = args.get("escapedTags"); if (null != escapedTagsArg) { Matcher matcher = TAG_NAME_PATTERN.matcher(escapedTagsArg); Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/MappingCharFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/MappingCharFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/charfilter/MappingCharFilterFactory.java (working copy) @@ -22,6 +22,7 @@ import java.io.Reader; import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -49,6 +50,10 @@ protected NormalizeCharMap normMap; private String mapping; + public MappingCharFilterFactory(Map args) { + super(args); + } + // 
TODO: this should use inputstreams from the loader, not File! public void inform(ResourceLoader loader) throws IOException { mapping = args.get("mapping"); Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/DictionaryCompoundWordTokenFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/DictionaryCompoundWordTokenFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/DictionaryCompoundWordTokenFilterFactory.java (working copy) @@ -37,14 +37,14 @@ */ public class DictionaryCompoundWordTokenFilterFactory extends TokenFilterFactory implements ResourceLoaderAware { private CharArraySet dictionary; - private String dictFile; - private int minWordSize; - private int minSubwordSize; - private int maxSubwordSize; - private boolean onlyLongestMatch; - @Override - public void init(Map args) { - super.init(args); + private final String dictFile; + private final int minWordSize; + private final int minSubwordSize; + private final int maxSubwordSize; + private final boolean onlyLongestMatch; + + public DictionaryCompoundWordTokenFilterFactory(Map args) { + super(args); assureMatchVersion(); dictFile = args.get("dictionary"); if (null == dictFile) { Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java (working copy) @@ -59,21 +59,22 @@ public class HyphenationCompoundWordTokenFilterFactory extends TokenFilterFactory implements ResourceLoaderAware { private CharArraySet dictionary; private HyphenationTree 
hyphenator; - private String dictFile; - private String hypFile; - private String encoding; - private int minWordSize; - private int minSubwordSize; - private int maxSubwordSize; - private boolean onlyLongestMatch; + private final String dictFile; + private final String hypFile; + private final String encoding; + private final int minWordSize; + private final int minSubwordSize; + private final int maxSubwordSize; + private final boolean onlyLongestMatch; - @Override - public void init(Map args) { - super.init(args); + public HyphenationCompoundWordTokenFilterFactory(Map args) { + super(args); assureMatchVersion(); dictFile = args.get("dictionary"); if (args.containsKey("encoding")) encoding = args.get("encoding"); + else + encoding = null; hypFile = args.get("hyphenator"); if (null == hypFile) { throw new IllegalArgumentException("Missing required parameter: hyphenator"); Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/fa/PersianNormalizationFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/fa/PersianNormalizationFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/fa/PersianNormalizationFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. 
*/ +import java.util.Map; + import org.apache.lucene.analysis.fa.PersianNormalizationFilter; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.util.AbstractAnalysisFactory; @@ -36,6 +38,11 @@ * */ public class PersianNormalizationFilterFactory extends TokenFilterFactory implements MultiTermAwareComponent { + + public PersianNormalizationFilterFactory(Map args) { + super(args); + } + public PersianNormalizationFilter create(TokenStream input) { return new PersianNormalizationFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/fa/PersianCharFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/fa/PersianCharFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/fa/PersianCharFilterFactory.java (working copy) @@ -18,6 +18,7 @@ */ import java.io.Reader; +import java.util.Map; import org.apache.lucene.analysis.CharFilter; import org.apache.lucene.analysis.fa.PersianCharFilter; @@ -38,6 +39,10 @@ */ public class PersianCharFilterFactory extends CharFilterFactory implements MultiTermAwareComponent { + public PersianCharFilterFactory(Map args) { + super(args); + } + @Override public CharFilter create(Reader input) { return new PersianCharFilter(input); Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/bg/BulgarianStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/bg/BulgarianStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/bg/BulgarianStemFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. 
*/ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.bg.BulgarianStemFilter; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -34,6 +36,11 @@ * */ public class BulgarianStemFilterFactory extends TokenFilterFactory { + + public BulgarianStemFilterFactory(Map args) { + super(args); + } + public TokenStream create(TokenStream input) { return new BulgarianStemFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LowerCaseTokenizerFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LowerCaseTokenizerFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LowerCaseTokenizerFactory.java (working copy) @@ -36,9 +36,9 @@ * */ public class LowerCaseTokenizerFactory extends TokenizerFactory implements MultiTermAwareComponent { - @Override - public void init(Map args) { - super.init(args); + + public LowerCaseTokenizerFactory(Map args) { + super(args); assureMatchVersion(); } @@ -48,9 +48,6 @@ @Override public AbstractAnalysisFactory getMultiTermComponent() { - LowerCaseFilterFactory filt = new LowerCaseFilterFactory(); - filt.setLuceneMatchVersion(luceneMatchVersion); - filt.init(args); - return filt; + return new LowerCaseFilterFactory(args); } } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/core/KeywordTokenizerFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/core/KeywordTokenizerFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/core/KeywordTokenizerFactory.java (working copy) @@ -21,6 +21,7 @@ import org.apache.lucene.analysis.util.TokenizerFactory; import java.io.Reader; +import java.util.Map; /** * Factory for {@link KeywordTokenizer}. 
@@ -33,6 +34,11 @@ * */ public class KeywordTokenizerFactory extends TokenizerFactory { + + public KeywordTokenizerFactory(Map args) { + super(args); + } + public KeywordTokenizer create(Reader input) { return new KeywordTokenizer(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/core/WhitespaceTokenizerFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/core/WhitespaceTokenizerFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/core/WhitespaceTokenizerFactory.java (working copy) @@ -34,9 +34,9 @@ * */ public class WhitespaceTokenizerFactory extends TokenizerFactory { - @Override - public void init(Map args) { - super.init(args); + + public WhitespaceTokenizerFactory(Map args) { + super(args); assureMatchVersion(); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/core/TypeTokenFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/core/TypeTokenFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/core/TypeTokenFilterFactory.java (working copy) @@ -26,6 +26,7 @@ import java.io.IOException; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; /** @@ -41,6 +42,10 @@ */ public class TypeTokenFilterFactory extends TokenFilterFactory implements ResourceLoaderAware { + public TypeTokenFilterFactory(Map args) { + super(args); + } + @Override public void inform(ResourceLoader loader) throws IOException { String stopTypesFiles = args.get("types"); Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LowerCaseFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LowerCaseFilterFactory.java (revision 
1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LowerCaseFilterFactory.java (working copy) @@ -37,9 +37,9 @@ * */ public class LowerCaseFilterFactory extends TokenFilterFactory implements MultiTermAwareComponent { - @Override - public void init(Map args) { - super.init(args); + + public LowerCaseFilterFactory(Map args) { + super(args); assureMatchVersion(); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LetterTokenizerFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LetterTokenizerFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/core/LetterTokenizerFactory.java (working copy) @@ -35,9 +35,8 @@ */ public class LetterTokenizerFactory extends TokenizerFactory { - @Override - public void init(Map args) { - super.init(args); + public LetterTokenizerFactory(Map args) { + super(args); assureMatchVersion(); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/core/StopFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/core/StopFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/core/StopFilterFactory.java (working copy) @@ -39,9 +39,8 @@ */ public class StopFilterFactory extends TokenFilterFactory implements ResourceLoaderAware { - @Override - public void init(Map args) { - super.init(args); + public StopFilterFactory(Map args) { + super(args); assureMatchVersion(); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanStemFilterFactory.java (revision 1365868) +++ 
lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanStemFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. */ +import java.util.Map; + import org.apache.lucene.analysis.de.GermanStemFilter; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -34,6 +36,11 @@ * */ public class GermanStemFilterFactory extends TokenFilterFactory { + + public GermanStemFilterFactory(Map args) { + super(args); + } + public GermanStemFilter create(TokenStream in) { return new GermanStemFilter(in); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanMinimalStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanMinimalStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanMinimalStemFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. */ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.de.GermanMinimalStemFilter; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -34,6 +36,11 @@ * */ public class GermanMinimalStemFilterFactory extends TokenFilterFactory { + + public GermanMinimalStemFilterFactory(Map args) { + super(args); + } + public TokenStream create(TokenStream input) { return new GermanMinimalStemFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanLightStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanLightStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanLightStemFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. 
*/ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.de.GermanLightStemFilter; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -34,6 +36,11 @@ * */ public class GermanLightStemFilterFactory extends TokenFilterFactory { + + public GermanLightStemFilterFactory(Map args) { + super(args); + } + public TokenStream create(TokenStream input) { return new GermanLightStemFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanNormalizationFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanNormalizationFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/de/GermanNormalizationFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. */ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.de.GermanNormalizationFilter; import org.apache.lucene.analysis.util.AbstractAnalysisFactory; @@ -36,6 +38,10 @@ */ public class GermanNormalizationFilterFactory extends TokenFilterFactory implements MultiTermAwareComponent { + public GermanNormalizationFilterFactory(Map args) { + super(args); + } + public TokenStream create(TokenStream input) { return new GermanNormalizationFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/sv/SwedishLightStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/sv/SwedishLightStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/sv/SwedishLightStemFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. 
*/ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.sv.SwedishLightStemFilter; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -34,6 +36,11 @@ * */ public class SwedishLightStemFilterFactory extends TokenFilterFactory { + + public SwedishLightStemFilterFactory(Map args) { + super(args); + } + public TokenStream create(TokenStream input) { return new SwedishLightStemFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/fi/FinnishLightStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/fi/FinnishLightStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/fi/FinnishLightStemFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. */ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.fi.FinnishLightStemFilter; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -34,6 +36,11 @@ * */ public class FinnishLightStemFilterFactory extends TokenFilterFactory { + + public FinnishLightStemFilterFactory(Map args) { + super(args); + } + public TokenStream create(TokenStream input) { return new FinnishLightStemFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/hi/HindiNormalizationFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/hi/HindiNormalizationFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/hi/HindiNormalizationFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. 
*/ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.hi.HindiNormalizationFilter; import org.apache.lucene.analysis.util.AbstractAnalysisFactory; @@ -35,6 +37,11 @@ * */ public class HindiNormalizationFilterFactory extends TokenFilterFactory implements MultiTermAwareComponent { + + public HindiNormalizationFilterFactory(Map args) { + super(args); + } + public TokenStream create(TokenStream input) { return new HindiNormalizationFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/hi/HindiStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/hi/HindiStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/hi/HindiStemFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. */ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.hi.HindiStemFilter; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -33,6 +35,11 @@ * */ public class HindiStemFilterFactory extends TokenFilterFactory { + + public HindiStemFilterFactory(Map args) { + super(args); + } + public TokenStream create(TokenStream input) { return new HindiStemFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/ClassicTokenizerFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/ClassicTokenizerFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/ClassicTokenizerFactory.java (working copy) @@ -39,11 +39,10 @@ public class ClassicTokenizerFactory extends TokenizerFactory { - private int maxTokenLength; + private final int maxTokenLength; - @Override - public void init(Map args) { - super.init(args); + public 
ClassicTokenizerFactory(Map args) { + super(args); assureMatchVersion(); maxTokenLength = getInt("maxTokenLength", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH); Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/ClassicFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/ClassicFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/ClassicFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. */ +import java.util.Map; + import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -35,6 +37,11 @@ * */ public class ClassicFilterFactory extends TokenFilterFactory { + + public ClassicFilterFactory(Map args) { + super(args); + } + public TokenFilter create(TokenStream input) { return new ClassicFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerFactory.java (working copy) @@ -37,11 +37,10 @@ public class StandardTokenizerFactory extends TokenizerFactory { - private int maxTokenLength; + private final int maxTokenLength; - @Override - public void init(Map args) { - super.init(args); + public StandardTokenizerFactory(Map args) { + super(args); assureMatchVersion(); maxTokenLength = getInt("maxTokenLength", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH); Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizerFactory.java =================================================================== --- 
lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizerFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizerFactory.java (working copy) @@ -38,11 +38,10 @@ public class UAX29URLEmailTokenizerFactory extends TokenizerFactory { - private int maxTokenLength; + private final int maxTokenLength; - @Override - public void init(Map args) { - super.init(args); + public UAX29URLEmailTokenizerFactory(Map args) { + super(args); assureMatchVersion(); maxTokenLength = getInt("maxTokenLength", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH); Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardFilterFactory.java (working copy) @@ -35,9 +35,9 @@ * */ public class StandardFilterFactory extends TokenFilterFactory { - @Override - public void init(Map args) { - super.init(args); + + public StandardFilterFactory(Map args) { + super(args); assureMatchVersion(); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/HyphenatedWordsFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/HyphenatedWordsFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/HyphenatedWordsFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. 
*/ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.miscellaneous.HyphenatedWordsFilter; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -33,7 +35,12 @@ * */ public class HyphenatedWordsFilterFactory extends TokenFilterFactory { - public HyphenatedWordsFilter create(TokenStream input) { + + public HyphenatedWordsFilterFactory(Map args) { + super(args); + } + + public HyphenatedWordsFilter create(TokenStream input) { return new HyphenatedWordsFilter(input); } } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/WordDelimiterFilterFactory.java (working copy) @@ -74,9 +74,8 @@ private int flags; byte[] typeTable = null; - @Override - public void init(Map args) { - super.init(args); + public WordDelimiterFilterFactory(Map args) { + super(args); if (getInt("generateWordParts", 1) != 0) { flags |= GENERATE_WORD_PARTS; } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CapitalizationFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CapitalizationFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/CapitalizationFilterFactory.java (working copy) @@ -78,9 +78,8 @@ boolean onlyFirstWord = true; boolean forceFirstLetter = true; // make sure the first letter is capitol even if it is in the keep list - @Override - public void init(Map args) { - super.init(args); + public CapitalizationFilterFactory(Map args) { + super(args); assureMatchVersion(); String k = args.get(KEEP); 
Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/TrimFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/TrimFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/TrimFilterFactory.java (working copy) @@ -39,9 +39,8 @@ protected boolean updateOffsets = false; - @Override - public void init(Map args) { - super.init( args ); + public TrimFilterFactory(Map args) { + super(args); String v = args.get( "updateOffsets" ); if (v != null) { Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. 
*/ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.miscellaneous.RemoveDuplicatesTokenFilter; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -33,6 +35,11 @@ * */ public class RemoveDuplicatesTokenFilterFactory extends TokenFilterFactory { + + public RemoveDuplicatesTokenFilterFactory(Map args) { + super(args); + } + public RemoveDuplicatesTokenFilter create(TokenStream input) { return new RemoveDuplicatesTokenFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/StemmerOverrideFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/StemmerOverrideFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/StemmerOverrideFilterFactory.java (working copy) @@ -19,6 +19,7 @@ import java.io.IOException; import java.util.List; +import java.util.Map; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.miscellaneous.StemmerOverrideFilter; @@ -39,6 +40,10 @@ private CharArrayMap dictionary = null; private boolean ignoreCase; + public StemmerOverrideFilterFactory(Map args) { + super(args); + } + public void inform(ResourceLoader loader) throws IOException { String dictionaryFiles = args.get("dictionary"); ignoreCase = getBoolean("ignoreCase", false); Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/KeepWordFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/KeepWordFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/KeepWordFilterFactory.java (working copy) @@ -38,9 +38,8 @@ */ public class KeepWordFilterFactory extends TokenFilterFactory implements ResourceLoaderAware { 
- @Override - public void init(Map args) { - super.init(args); + public KeepWordFilterFactory(Map args) { + super(args); assureMatchVersion(); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LengthFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LengthFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LengthFilterFactory.java (working copy) @@ -35,14 +35,14 @@ * */ public class LengthFilterFactory extends TokenFilterFactory { - int min,max; - boolean enablePositionIncrements; + final int min; + final int max; + final boolean enablePositionIncrements; public static final String MIN_KEY = "min"; public static final String MAX_KEY = "max"; - @Override - public void init(Map args) { - super.init(args); + public LengthFilterFactory(Map args) { + super(args); String minKey = args.get(MIN_KEY); String maxKey = args.get(MAX_KEY); if (minKey == null || maxKey == null) { Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/KeywordMarkerFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/KeywordMarkerFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/KeywordMarkerFilterFactory.java (working copy) @@ -18,6 +18,7 @@ */ import java.io.IOException; +import java.util.Map; import org.apache.lucene.analysis.miscellaneous.KeywordMarkerFilter; import org.apache.lucene.analysis.util.*; @@ -39,6 +40,10 @@ private CharArraySet protectedWords; private boolean ignoreCase; + public KeywordMarkerFilterFactory(Map args) { + super(args); + } + public void inform(ResourceLoader loader) throws IOException { String wordFiles = args.get(PROTECTED_TOKENS); ignoreCase = 
getBoolean("ignoreCase", false); Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilterFactory.java (working copy) @@ -36,11 +36,10 @@ */ public class LimitTokenCountFilterFactory extends TokenFilterFactory { - int maxTokenCount; + final int maxTokenCount; - @Override - public void init(Map args) { - super.init( args ); + public LimitTokenCountFilterFactory(Map args) { + super(args); String maxTokenCountArg = args.get("maxTokenCount"); if (maxTokenCountArg == null) { throw new IllegalArgumentException("maxTokenCount is mandatory."); Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. 
*/ +import java.util.Map; + import org.apache.lucene.analysis.util.AbstractAnalysisFactory; import org.apache.lucene.analysis.util.MultiTermAwareComponent; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -35,6 +37,11 @@ * */ public class ASCIIFoldingFilterFactory extends TokenFilterFactory implements MultiTermAwareComponent { + + public ASCIIFoldingFilterFactory(Map args) { + super(args); + } + public ASCIIFoldingFilter create(TokenStream input) { return new ASCIIFoldingFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/br/BrazilianStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/br/BrazilianStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/br/BrazilianStemFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. */ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.br.BrazilianStemFilter; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -34,6 +36,11 @@ * */ public class BrazilianStemFilterFactory extends TokenFilterFactory { + + public BrazilianStemFilterFactory(Map args) { + super(args); + } + public BrazilianStemFilter create(TokenStream in) { return new BrazilianStemFilter(in); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramFilterFactory.java (working copy) @@ -34,14 +34,13 @@ * */ public class NGramFilterFactory extends TokenFilterFactory { - private int maxGramSize = 0; + private final int maxGramSize; - private int minGramSize = 0; + private final int minGramSize; 
/** Initialize the n-gram min and max sizes and the side from which one should start tokenizing. */ - @Override - public void init(Map args) { - super.init(args); + public NGramFilterFactory(Map args) { + super(args); String maxArg = args.get("maxGramSize"); maxGramSize = (maxArg != null ? Integer.parseInt(maxArg) : NGramTokenFilter.DEFAULT_MAX_NGRAM_SIZE); Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramFilterFactory.java (working copy) @@ -34,15 +34,14 @@ * */ public class EdgeNGramFilterFactory extends TokenFilterFactory { - private int maxGramSize = 0; + private final int maxGramSize; - private int minGramSize = 0; + private final int minGramSize; private String side; - @Override - public void init(Map args) { - super.init(args); + public EdgeNGramFilterFactory(Map args) { + super(args); String maxArg = args.get("maxGramSize"); maxGramSize = (maxArg != null ? Integer.parseInt(maxArg) : EdgeNGramTokenFilter.DEFAULT_MAX_GRAM_SIZE); Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenizerFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenizerFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenizerFactory.java (working copy) @@ -35,13 +35,12 @@ * */ public class NGramTokenizerFactory extends TokenizerFactory { - private int maxGramSize = 0; - private int minGramSize = 0; + private final int maxGramSize; + private final int minGramSize; /** Initializes the n-gram min and max sizes and the side from which one should start tokenizing. 
*/ - @Override - public void init(Map args) { - super.init(args); + public NGramTokenizerFactory(Map args) { + super(args); String maxArg = args.get("maxGramSize"); maxGramSize = (maxArg != null ? Integer.parseInt(maxArg) : NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerFactory.java (working copy) @@ -34,15 +34,14 @@ * */ public class EdgeNGramTokenizerFactory extends TokenizerFactory { - private int maxGramSize = 0; + private final int maxGramSize; - private int minGramSize = 0; + private final int minGramSize; private String side; - @Override - public void init(Map args) { - super.init(args); + public EdgeNGramTokenizerFactory(Map args) { + super(args); String maxArg = args.get("maxGramSize"); maxGramSize = (maxArg != null ? 
Integer.parseInt(maxArg) : EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE); Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/fr/ElisionFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/fr/ElisionFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/fr/ElisionFilterFactory.java (working copy) @@ -20,6 +20,8 @@ import org.apache.lucene.analysis.util.*; import java.io.IOException; +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; /** @@ -37,6 +39,10 @@ */ public class ElisionFilterFactory extends TokenFilterFactory implements ResourceLoaderAware { + public ElisionFilterFactory(Map args) { + super(args); + } + private CharArraySet articles; public void inform(ResourceLoader loader) throws IOException { Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchMinimalStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchMinimalStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchMinimalStemFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. 
*/ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.fr.FrenchMinimalStemFilter; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -35,6 +37,11 @@ * */ public class FrenchMinimalStemFilterFactory extends TokenFilterFactory { + + public FrenchMinimalStemFilterFactory(Map args) { + super(args); + } + public TokenStream create(TokenStream input) { return new FrenchMinimalStemFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchLightStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchLightStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchLightStemFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. */ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.fr.FrenchLightStemFilter; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -35,6 +37,11 @@ * */ public class FrenchLightStemFilterFactory extends TokenFilterFactory { + + public FrenchLightStemFilterFactory(Map args) { + super(args); + } + public TokenStream create(TokenStream input) { return new FrenchLightStemFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/th/ThaiWordFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/th/ThaiWordFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/th/ThaiWordFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. 
*/ +import java.util.Map; + import org.apache.lucene.analysis.th.ThaiWordFilter; import org.apache.lucene.analysis.TokenStream; @@ -34,6 +36,11 @@ * */ public class ThaiWordFilterFactory extends TokenFilterFactory { + + public ThaiWordFilterFactory(Map args) { + super(args); + } + public ThaiWordFilter create(TokenStream input) { assureMatchVersion(); return new ThaiWordFilter(luceneMatchVersion, input); Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/reverse/ReverseStringFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/reverse/ReverseStringFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/reverse/ReverseStringFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. */ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.reverse.ReverseStringFilter; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -35,8 +37,13 @@ * @since solr 1.4 */ public class ReverseStringFilterFactory extends TokenFilterFactory { + + public ReverseStringFilterFactory(Map args) { + super(args); + assureMatchVersion(); + } + public ReverseStringFilter create(TokenStream in) { - assureMatchVersion(); return new ReverseStringFilter(luceneMatchVersion,in); } } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/TokenOffsetPayloadTokenFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/TokenOffsetPayloadTokenFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/TokenOffsetPayloadTokenFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. 
*/ +import java.util.Map; + import org.apache.lucene.analysis.payloads.TokenOffsetPayloadTokenFilter; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -33,6 +35,11 @@ * */ public class TokenOffsetPayloadTokenFilterFactory extends TokenFilterFactory { + + public TokenOffsetPayloadTokenFilterFactory(Map args) { + super(args); + } + public TokenOffsetPayloadTokenFilter create(TokenStream input) { return new TokenOffsetPayloadTokenFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/DelimitedPayloadTokenFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/DelimitedPayloadTokenFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/DelimitedPayloadTokenFilterFactory.java (working copy) @@ -53,9 +53,8 @@ return new DelimitedPayloadTokenFilter(input, delimiter, encoder); } - @Override - public void init(Map args) { - super.init(args); + public DelimitedPayloadTokenFilterFactory(Map args) { + super(args); } public void inform(ResourceLoader loader) { @@ -70,7 +69,7 @@ } else if (encoderClass.equals("identity")){ encoder = new IdentityEncoder(); } else { - encoder = loader.newInstance(encoderClass, PayloadEncoder.class); + encoder = loader.newInstance(encoderClass, PayloadEncoder.class, new Class[0], new Object[0]); } String delim = args.get(DELIMITER_ATTR); @@ -82,4 +81,4 @@ } } } -} \ No newline at end of file +} Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilterFactory.java (revision 1365868) +++ 
lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilterFactory.java (working copy) @@ -34,11 +34,11 @@ * */ public class NumericPayloadTokenFilterFactory extends TokenFilterFactory { - private float payload; - private String typeMatch; - @Override - public void init(Map args) { - super.init(args); + private final float payload; + private final String typeMatch; + + public NumericPayloadTokenFilterFactory(Map args) { + super(args); String payloadArg = args.get("payload"); typeMatch = args.get("typeMatch"); if (payloadArg == null || typeMatch == null) { Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. 
*/ +import java.util.Map; + import org.apache.lucene.analysis.payloads.TypeAsPayloadTokenFilter; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -33,6 +35,11 @@ * */ public class TypeAsPayloadTokenFilterFactory extends TokenFilterFactory { + + public TypeAsPayloadTokenFilterFactory(Map args) { + super(args); + } + public TypeAsPayloadTokenFilter create(TokenStream input) { return new TypeAsPayloadTokenFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianMinimalStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianMinimalStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianMinimalStemFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. */ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.no.NorwegianMinimalStemFilter; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -33,6 +35,11 @@ * </fieldType>

*/ public class NorwegianMinimalStemFilterFactory extends TokenFilterFactory { + + public NorwegianMinimalStemFilterFactory(Map args) { + super(args); + } + public TokenStream create(TokenStream input) { return new NorwegianMinimalStemFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianLightStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianLightStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianLightStemFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. */ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.no.NorwegianLightStemFilter; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -33,6 +35,11 @@ * </fieldType> */ public class NorwegianLightStemFilterFactory extends TokenFilterFactory { + + public NorwegianLightStemFilterFactory(Map args) { + super(args); + } + public TokenStream create(TokenStream input) { return new NorwegianLightStemFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/hu/HungarianLightStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/hu/HungarianLightStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/hu/HungarianLightStemFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. 
*/ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.hu.HungarianLightStemFilter; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -34,6 +36,11 @@ * */ public class HungarianLightStemFilterFactory extends TokenFilterFactory { + + public HungarianLightStemFilterFactory(Map args) { + super(args); + } + public TokenStream create(TokenStream input) { return new HungarianLightStemFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTokenizerFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTokenizerFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternTokenizerFactory.java (working copy) @@ -74,10 +74,9 @@ /** * Require a configured pattern */ - @Override - public void init(Map args) + public PatternTokenizerFactory(Map args) { - super.init(args); + super(args); pattern = getPattern( PATTERN ); group = -1; // use 'split' Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternReplaceCharFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternReplaceCharFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternReplaceCharFilterFactory.java (working copy) @@ -44,9 +44,8 @@ private Pattern p; private String replacement; - @Override - public void init(Map args) { - super.init( args ); + public PatternReplaceCharFilterFactory(Map args) { + super( args ); p = getPattern("pattern"); replacement = args.get( "replacement" ); if( replacement == null ) Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternReplaceFilterFactory.java 
=================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternReplaceFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/pattern/PatternReplaceFilterFactory.java (working copy) @@ -43,9 +43,8 @@ String replacement; boolean all = true; - @Override - public void init(Map args) { - super.init(args); + public PatternReplaceFilterFactory(Map args) { + super(args); p = getPattern("pattern"); replacement = args.get("replacement"); Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/snowball/SnowballPorterFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/snowball/SnowballPorterFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/snowball/SnowballPorterFilterFactory.java (working copy) @@ -46,7 +46,7 @@ public static final String PROTECTED_TOKENS = "protected"; private String language = "English"; - private Class stemClass; + private final Class stemClass; public void inform(ResourceLoader loader) throws IOException { @@ -58,14 +58,13 @@ private CharArraySet protectedWords = null; - @Override - public void init(Map args) { - super.init(args); + public SnowballPorterFilterFactory(Map args) { + super(args); final String cfgLanguage = args.get("language"); if(cfgLanguage!=null) language = cfgLanguage; try { - stemClass = Class.forName("org.tartarus.snowball.ext." + language + "Stemmer"); + stemClass = Class.forName("org.tartarus.snowball.ext." 
+ language + "Stemmer").asSubclass(SnowballProgram.class); } catch (ClassNotFoundException e) { throw new IllegalArgumentException("Can't find class for stemmer language " + language, e); } @@ -74,7 +73,7 @@ public TokenFilter create(TokenStream input) { SnowballProgram program; try { - program = (SnowballProgram)stemClass.newInstance(); + program = stemClass.newInstance(); } catch (Exception e) { throw new RuntimeException("Error instantiating stemmer for language " + language + "from class " + stemClass, e); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/lv/LatvianStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/lv/LatvianStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/lv/LatvianStemFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. */ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.lv.LatvianStemFilter; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -33,6 +35,11 @@ * </fieldType> */ public class LatvianStemFilterFactory extends TokenFilterFactory { + + public LatvianStemFilterFactory(Map args) { + super(args); + } + public TokenStream create(TokenStream input) { return new LatvianStemFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseMinimalStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseMinimalStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseMinimalStemFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. 
*/ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.pt.PortugueseMinimalStemFilter; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -34,6 +36,11 @@ * */ public class PortugueseMinimalStemFilterFactory extends TokenFilterFactory { + + public PortugueseMinimalStemFilterFactory(Map args) { + super(args); + } + public TokenStream create(TokenStream input) { return new PortugueseMinimalStemFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseLightStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseLightStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseLightStemFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. */ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.pt.PortugueseLightStemFilter; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -34,6 +36,11 @@ * */ public class PortugueseLightStemFilterFactory extends TokenFilterFactory { + + public PortugueseLightStemFilterFactory(Map args) { + super(args); + } + public TokenStream create(TokenStream input) { return new PortugueseLightStemFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseStemFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. 
*/ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.pt.PortugueseStemFilter; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -34,6 +36,11 @@ * */ public class PortugueseStemFilterFactory extends TokenFilterFactory { + + public PortugueseStemFilterFactory(Map args) { + super(args); + } + public TokenStream create(TokenStream input) { return new PortugueseStemFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/tr/TurkishLowerCaseFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/tr/TurkishLowerCaseFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/tr/TurkishLowerCaseFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. */ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tr.TurkishLowerCaseFilter; import org.apache.lucene.analysis.util.AbstractAnalysisFactory; @@ -35,6 +37,11 @@ * */ public class TurkishLowerCaseFilterFactory extends TokenFilterFactory implements MultiTermAwareComponent { + + public TurkishLowerCaseFilterFactory(Map args) { + super(args); + } + public TokenStream create(TokenStream input) { return new TurkishLowerCaseFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/ru/RussianLightStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/ru/RussianLightStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/ru/RussianLightStemFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. 
*/ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.ru.RussianLightStemFilter; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -34,6 +36,11 @@ * */ public class RussianLightStemFilterFactory extends TokenFilterFactory { + + public RussianLightStemFilterFactory(Map args) { + super(args); + } + public TokenStream create(TokenStream input) { return new RussianLightStemFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/ga/IrishLowerCaseFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/ga/IrishLowerCaseFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/ga/IrishLowerCaseFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. */ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.ga.IrishLowerCaseFilter; import org.apache.lucene.analysis.util.AbstractAnalysisFactory; @@ -36,6 +38,10 @@ */ public class IrishLowerCaseFilterFactory extends TokenFilterFactory implements MultiTermAwareComponent { + public IrishLowerCaseFilterFactory(Map args) { + super(args); + } + @Override public TokenStream create(TokenStream input) { return new IrishLowerCaseFilter(input); Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilterFactory.java (working copy) @@ -36,15 +36,14 @@ * */ public class ShingleFilterFactory extends TokenFilterFactory { - private int minShingleSize; - private int maxShingleSize; - private boolean outputUnigrams; - private boolean 
outputUnigramsIfNoShingles; - private String tokenSeparator; + private final int minShingleSize; + private final int maxShingleSize; + private final boolean outputUnigrams; + private final boolean outputUnigramsIfNoShingles; + private final String tokenSeparator; - @Override - public void init(Map args) { - super.init(args); + public ShingleFilterFactory(Map args) { + super(args); maxShingleSize = getInt("maxShingleSize", ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE); if (maxShingleSize < 2) { Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/HunspellStemFilterFactory.java (working copy) @@ -22,6 +22,7 @@ import java.text.ParseException; import java.util.ArrayList; import java.util.List; +import java.util.Map; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.hunspell.HunspellDictionary; @@ -62,6 +63,10 @@ private HunspellDictionary dictionary; private boolean ignoreCase = false; + public HunspellStemFilterFactory(Map args) { + super(args); + } + /** * Loads the hunspell dictionary and affix files defined in the configuration * Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/id/IndonesianStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/id/IndonesianStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/id/IndonesianStemFilterFactory.java (working copy) @@ -36,11 +36,10 @@ * */ public class IndonesianStemFilterFactory extends TokenFilterFactory { - private boolean stemDerivational = true; + private final boolean stemDerivational; - @Override - public void 
init(Map args) { - super.init(args); + public IndonesianStemFilterFactory(Map args) { + super(args); stemDerivational = getBoolean("stemDerivational", true); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/path/PathHierarchyTokenizerFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/path/PathHierarchyTokenizerFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/path/PathHierarchyTokenizerFactory.java (working copy) @@ -45,9 +45,8 @@ /** * Require a configured pattern */ - @Override - public void init(Map args){ - super.init( args ); + public PathHierarchyTokenizerFactory(Map args){ + super( args ); String v = args.get( "delimiter" ); if( v != null ){ Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/el/GreekLowerCaseFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/el/GreekLowerCaseFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/el/GreekLowerCaseFilterFactory.java (working copy) @@ -38,9 +38,8 @@ */ public class GreekLowerCaseFilterFactory extends TokenFilterFactory implements MultiTermAwareComponent { - @Override - public void init(Map args) { - super.init(args); + public GreekLowerCaseFilterFactory(Map args) { + super(args); assureMatchVersion(); if (args.containsKey("charset")) throw new IllegalArgumentException( Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/el/GreekStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/el/GreekStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/el/GreekStemFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. 
*/ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.el.GreekStemFilter; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -35,6 +37,10 @@ */ public class GreekStemFilterFactory extends TokenFilterFactory { + public GreekStemFilterFactory(Map args) { + super(args); + } + public TokenStream create(TokenStream input) { return new GreekStemFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/gl/GalicianMinimalStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/gl/GalicianMinimalStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/gl/GalicianMinimalStemFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. */ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.gl.GalicianMinimalStemFilter; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -34,6 +36,11 @@ * */ public class GalicianMinimalStemFilterFactory extends TokenFilterFactory { + + public GalicianMinimalStemFilterFactory(Map args) { + super(args); + } + public TokenStream create(TokenStream input) { return new GalicianMinimalStemFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/gl/GalicianStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/gl/GalicianStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/gl/GalicianStemFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. 
*/ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.gl.GalicianStemFilter; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -34,6 +36,11 @@ * */ public class GalicianStemFilterFactory extends TokenFilterFactory { + + public GalicianStemFilterFactory(Map args) { + super(args); + } + public TokenStream create(TokenStream input) { return new GalicianStemFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicNormalizationFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicNormalizationFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicNormalizationFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. */ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.ar.ArabicNormalizationFilter; import org.apache.lucene.analysis.util.AbstractAnalysisFactory; @@ -37,6 +39,10 @@ */ public class ArabicNormalizationFilterFactory extends TokenFilterFactory implements MultiTermAwareComponent { + public ArabicNormalizationFilterFactory(Map args) { + super(args); + } + public ArabicNormalizationFilter create(TokenStream input) { return new ArabicNormalizationFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicStemFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. 
*/ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.ar.ArabicStemFilter; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -36,6 +38,9 @@ */ public class ArabicStemFilterFactory extends TokenFilterFactory { + public ArabicStemFilterFactory(Map args) { + super(args); + } public ArabicStemFilter create(TokenStream input) { return new ArabicStemFilter(input); Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/en/PorterStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/en/PorterStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/en/PorterStemFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. */ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.en.PorterStemFilter; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -34,6 +36,11 @@ * */ public class PorterStemFilterFactory extends TokenFilterFactory { + + public PorterStemFilterFactory(Map args) { + super(args); + } + public PorterStemFilter create(TokenStream input) { return new PorterStemFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/en/EnglishPossessiveFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/en/EnglishPossessiveFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/en/EnglishPossessiveFilterFactory.java (working copy) @@ -37,9 +37,8 @@ */ public class EnglishPossessiveFilterFactory extends TokenFilterFactory { - @Override - public void init(Map args) { - super.init(args); + public EnglishPossessiveFilterFactory(Map args) { + super(args); assureMatchVersion(); } Index: 
lucene/analysis/common/src/java/org/apache/lucene/analysis/en/KStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/en/KStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/en/KStemFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. */ +import java.util.Map; + import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.en.KStemFilter; @@ -27,6 +29,10 @@ */ public class KStemFilterFactory extends TokenFilterFactory { + public KStemFilterFactory(Map args) { + super(args); + } + public TokenFilter create(TokenStream input) { return new KStemFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/en/EnglishMinimalStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/en/EnglishMinimalStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/en/EnglishMinimalStemFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. 
*/ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.en.EnglishMinimalStemFilter; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -34,6 +36,11 @@ * */ public class EnglishMinimalStemFilterFactory extends TokenFilterFactory { + + public EnglishMinimalStemFilterFactory(Map args) { + super(args); + } + public TokenStream create(TokenStream input) { return new EnglishMinimalStemFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/position/PositionFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/position/PositionFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/position/PositionFilterFactory.java (working copy) @@ -40,11 +40,10 @@ * @since solr 1.4 */ public class PositionFilterFactory extends TokenFilterFactory { - private int positionIncrement; + private final int positionIncrement; - @Override - public void init(Map args) { - super.init(args); + public PositionFilterFactory(Map args) { + super(args); positionIncrement = getInt("positionIncrement", 0); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/in/IndicNormalizationFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/in/IndicNormalizationFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/in/IndicNormalizationFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. 
*/ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.in.IndicNormalizationFilter; import org.apache.lucene.analysis.util.AbstractAnalysisFactory; @@ -35,6 +37,11 @@ * */ public class IndicNormalizationFilterFactory extends TokenFilterFactory implements MultiTermAwareComponent { + + public IndicNormalizationFilterFactory(Map args) { + super(args); + } + public TokenStream create(TokenStream input) { return new IndicNormalizationFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerFactory.java (working copy) @@ -18,6 +18,7 @@ */ import java.io.Reader; +import java.util.Map; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.util.TokenizerFactory; @@ -34,6 +35,11 @@ * */ public class WikipediaTokenizerFactory extends TokenizerFactory { + + public WikipediaTokenizerFactory(Map args) { + super(args); + } + // TODO: add support for WikipediaTokenizer's advanced options. public Tokenizer create(Reader input) { return new WikipediaTokenizer(input); Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKWidthFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKWidthFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKWidthFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. 
*/ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.cjk.CJKWidthFilter; import org.apache.lucene.analysis.util.AbstractAnalysisFactory; @@ -38,6 +40,10 @@ public class CJKWidthFilterFactory extends TokenFilterFactory implements MultiTermAwareComponent { + public CJKWidthFilterFactory(Map args) { + super(args); + } + @Override public TokenStream create(TokenStream input) { return new CJKWidthFilter(input); Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKBigramFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKBigramFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKBigramFilterFactory.java (working copy) @@ -40,9 +40,8 @@ public class CJKBigramFilterFactory extends TokenFilterFactory { int flags; - @Override - public void init(Map args) { - super.init(args); + public CJKBigramFilterFactory(Map args) { + super(args); flags = 0; if (getBoolean("han", true)) { flags |= CJKBigramFilter.HAN; Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/es/SpanishLightStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/es/SpanishLightStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/es/SpanishLightStemFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. 
*/ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.es.SpanishLightStemFilter; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -34,6 +36,11 @@ * */ public class SpanishLightStemFilterFactory extends TokenFilterFactory { + + public SpanishLightStemFilterFactory(Map args) { + super(args); + } + public TokenStream create(TokenStream input) { return new SpanishLightStemFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/it/ItalianLightStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/it/ItalianLightStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/it/ItalianLightStemFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. */ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.it.ItalianLightStemFilter; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -34,6 +36,11 @@ * */ public class ItalianLightStemFilterFactory extends TokenFilterFactory { + + public ItalianLightStemFilterFactory(Map args) { + super(args); + } + public TokenStream create(TokenStream input) { return new ItalianLightStemFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/cz/CzechStemFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/cz/CzechStemFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/cz/CzechStemFilterFactory.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. 
*/ +import java.util.Map; + import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.cz.CzechStemFilter; import org.apache.lucene.analysis.util.TokenFilterFactory; @@ -33,6 +35,11 @@ * </fieldType> */ public class CzechStemFilterFactory extends TokenFilterFactory { + + public CzechStemFilterFactory(Map args) { + super(args); + } + public TokenStream create(TokenStream input) { return new CzechStemFilter(input); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilterFactory.java (working copy) @@ -26,6 +26,7 @@ import java.nio.charset.CodingErrorAction; import java.text.ParseException; import java.util.List; +import java.util.Map; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; @@ -55,6 +56,10 @@ private SynonymMap map; private boolean ignoreCase; + public SynonymFilterFactory(Map args) { + super(args); + } + @Override public TokenStream create(TokenStream input) { // if the fst is null, it means there's actually no synonyms... 
just return the original stream @@ -154,9 +159,10 @@ // (there are no tests for this functionality) private TokenizerFactory loadTokenizerFactory(ResourceLoader loader, String cname) throws IOException { - TokenizerFactory tokFactory = loader.newInstance(cname, TokenizerFactory.class); - tokFactory.setLuceneMatchVersion(luceneMatchVersion); - tokFactory.init(args); + TokenizerFactory tokFactory = loader.newInstance(cname, TokenizerFactory.class, new Class[] {Map.class}, new Object[] {args}); + // nocommit, we must fix resourceloader to take args to fix this + // tokFactory.setLuceneMatchVersion(luceneMatchVersion); + // tokFactory.init(args); if (tokFactory instanceof ResourceLoaderAware) { ((ResourceLoaderAware) tokFactory).inform(loader); } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/util/AbstractAnalysisFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/util/AbstractAnalysisFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/util/AbstractAnalysisFactory.java (working copy) @@ -41,13 +41,15 @@ public abstract class AbstractAnalysisFactory { /** The init args */ - protected Map args; + protected final Map args; /** the luceneVersion arg */ - protected Version luceneMatchVersion = null; + protected final Version luceneMatchVersion; - public void init(Map args) { + public AbstractAnalysisFactory(Map args) { this.args = args; + String luceneMatchVersionArg = args.get("luceneMatchVersion"); + this.luceneMatchVersion = luceneMatchVersionArg == null ?
null : Version.parseLeniently(luceneMatchVersionArg); } public Map getArgs() { @@ -64,10 +66,6 @@ } } - public void setLuceneMatchVersion(Version luceneMatchVersion) { - this.luceneMatchVersion = luceneMatchVersion; - } - public Version getLuceneMatchVersion() { return this.luceneMatchVersion; } Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/util/TokenizerFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/util/TokenizerFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/util/TokenizerFactory.java (working copy) @@ -20,6 +20,7 @@ import org.apache.lucene.analysis.Tokenizer; import java.io.Reader; +import java.util.Map; import java.util.Set; /** @@ -28,6 +29,10 @@ */ public abstract class TokenizerFactory extends AbstractAnalysisFactory { + public TokenizerFactory(Map args) { + super(args); + } + private static final AnalysisSPILoader loader = getSPILoader(Thread.currentThread().getContextClassLoader()); @@ -41,8 +46,8 @@ } /** looks up a tokenizer by name from context classpath */ - public static TokenizerFactory forName(String name) { - return loader.newInstance(name); + public static TokenizerFactory forName(String name, Map args) { + return loader.newInstance(name, args); } /** looks up a tokenizer class by name from context classpath */ Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/util/CharFilterFactory.java (working copy) @@ -18,6 +18,7 @@ */ import java.io.Reader; +import java.util.Map; import java.util.Set; import org.apache.lucene.analysis.CharFilter; @@ -28,6 +29,10 @@ */ public abstract class CharFilterFactory extends 
AbstractAnalysisFactory { + public CharFilterFactory(Map args) { + super(args); + } + private static final AnalysisSPILoader loader = getSPILoader(Thread.currentThread().getContextClassLoader()); @@ -41,8 +46,8 @@ } /** looks up a charfilter by name from context classpath */ - public static CharFilterFactory forName(String name) { - return loader.newInstance(name); + public static CharFilterFactory forName(String name, Map args) { + return loader.newInstance(name, args); } /** looks up a charfilter class by name from context classpath */ Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/util/ResourceLoader.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/util/ResourceLoader.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/util/ResourceLoader.java (working copy) @@ -43,5 +43,5 @@ */ public List getLines(String resource) throws IOException; - public T newInstance(String cname, Class expectedType, String ... subpackages); + public T newInstance(String cname, Class expectedType, Class[] argTypes, Object[] args, String ... subpackages); } \ No newline at end of file Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/util/TokenFilterFactory.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/util/TokenFilterFactory.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/util/TokenFilterFactory.java (working copy) @@ -17,6 +17,7 @@ * limitations under the License. 
*/ +import java.util.Map; import java.util.Set; import org.apache.lucene.analysis.TokenStream; @@ -27,6 +28,10 @@ */ public abstract class TokenFilterFactory extends AbstractAnalysisFactory { + public TokenFilterFactory(Map args) { + super(args); + } + private static final AnalysisSPILoader loader = getSPILoader(Thread.currentThread().getContextClassLoader()); @@ -41,8 +46,8 @@ } /** looks up a tokenfilter by name from context classpath */ - public static TokenFilterFactory forName(String name) { - return loader.newInstance(name); + public static TokenFilterFactory forName(String name, Map args) { + return loader.newInstance(name, args); } /** looks up a tokenfilter class by name from context classpath */ Index: lucene/analysis/common/src/java/org/apache/lucene/analysis/util/AnalysisSPILoader.java =================================================================== --- lucene/analysis/common/src/java/org/apache/lucene/analysis/util/AnalysisSPILoader.java (revision 1365868) +++ lucene/analysis/common/src/java/org/apache/lucene/analysis/util/AnalysisSPILoader.java (working copy) @@ -76,10 +76,11 @@ this.services = Collections.unmodifiableMap(services); } - public S newInstance(String name) { + public S newInstance(String name, Map args) { final Class service = lookupClass(name); try { - return service.newInstance(); + // TODO: can we just populate the map with ctors? + return service.getConstructor(Map.class).newInstance(args); } catch (Exception e) { throw new IllegalArgumentException("SPI class of type "+clazz.getName()+" with name '"+name+"' cannot be instantiated. 
" + "This is likely due to a misconfiguration of the java class '" + service.getName() + "': ", e); Index: lucene/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikFilterFactory.java =================================================================== --- lucene/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikFilterFactory.java (revision 1365868) +++ lucene/analysis/morfologik/src/test/org/apache/lucene/analysis/morfologik/TestMorfologikFilterFactory.java (working copy) @@ -31,12 +31,10 @@ public class TestMorfologikFilterFactory extends BaseTokenStreamTestCase { public void testCreateDictionary() throws Exception { StringReader reader = new StringReader("rowery bilety"); - Map initParams = new HashMap(); + Map initParams = TEST_VERSION_CURRENT_MAP(); initParams.put(MorfologikFilterFactory.DICTIONARY_SCHEMA_ATTRIBUTE, "morfologik"); - MorfologikFilterFactory factory = new MorfologikFilterFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - factory.init(initParams); + MorfologikFilterFactory factory = new MorfologikFilterFactory(initParams); TokenStream ts = factory.create(new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader)); assertTokenStreamContents(ts, new String[] {"rower", "bilet"}); Index: lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikFilterFactory.java =================================================================== --- lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikFilterFactory.java (revision 1365868) +++ lucene/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikFilterFactory.java (working copy) @@ -62,9 +62,8 @@ /** * {@inheritDoc} */ - @Override - public void init(Map args) { - super.init(args); + public MorfologikFilterFactory(Map args) { + super(args); String dictionaryName = args.get(DICTIONARY_SCHEMA_ATTRIBUTE); if (dictionaryName != null && !dictionaryName.isEmpty()) { 
try { Index: lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestPhoneticFilterFactory.java =================================================================== --- lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestPhoneticFilterFactory.java (revision 1365868) +++ lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestPhoneticFilterFactory.java (working copy) @@ -43,21 +43,19 @@ */ public void testFactory() { - Map args = new HashMap(); - - PhoneticFilterFactory ff = new PhoneticFilterFactory(); - + Map args = TEST_VERSION_CURRENT_MAP(); + args.put( PhoneticFilterFactory.ENCODER, "Metaphone" ); - ff.init( args ); + PhoneticFilterFactory ff = new PhoneticFilterFactory(args); assertTrue( ff.getEncoder() instanceof Metaphone ); assertTrue( ff.inject ); // default args.put( PhoneticFilterFactory.INJECT, "false" ); - ff.init( args ); + ff = new PhoneticFilterFactory(args); assertFalse( ff.inject ); args.put( PhoneticFilterFactory.MAX_CODE_LENGTH, "2"); - ff.init( args ); + ff = new PhoneticFilterFactory(args); assertEquals(2,((Metaphone) ff.getEncoder()).getMaxCodeLen()); } @@ -66,23 +64,22 @@ */ public void testFactoryCaseFailure() { - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); - PhoneticFilterFactory ff = new PhoneticFilterFactory(); try { - ff.init( args ); + PhoneticFilterFactory ff = new PhoneticFilterFactory(args); fail( "missing encoder parameter" ); } catch( Exception ex ) {} args.put( PhoneticFilterFactory.ENCODER, "XXX" ); try { - ff.init( args ); + PhoneticFilterFactory ff = new PhoneticFilterFactory(args); fail( "unknown encoder parameter" ); } catch( Exception ex ) {} args.put( PhoneticFilterFactory.ENCODER, "org.apache.commons.codec.language.NonExistence" ); try { - ff.init( args ); + PhoneticFilterFactory ff = new PhoneticFilterFactory(args); fail( "unknown encoder parameter" ); } catch( Exception ex ) {} @@ -93,25 +90,23 @@ */ public void testFactoryCaseReflection() { 
- Map args = new HashMap(); - - PhoneticFilterFactory ff = new PhoneticFilterFactory(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put( PhoneticFilterFactory.ENCODER, "org.apache.commons.codec.language.Metaphone" ); - ff.init( args ); + PhoneticFilterFactory ff = new PhoneticFilterFactory(args); assertTrue( ff.getEncoder() instanceof Metaphone ); assertTrue( ff.inject ); // default // we use "Caverphone2" as it is registered in the REGISTRY as Caverphone, // so this effectively tests reflection without package name args.put( PhoneticFilterFactory.ENCODER, "Caverphone2" ); - ff.init( args ); + ff = new PhoneticFilterFactory(args); assertTrue( ff.getEncoder() instanceof Caverphone2 ); assertTrue( ff.inject ); // default // cross check with registry args.put( PhoneticFilterFactory.ENCODER, "Caverphone" ); - ff.init( args ); + ff = new PhoneticFilterFactory(args); assertTrue( ff.getEncoder() instanceof Caverphone2 ); assertTrue( ff.inject ); // default } @@ -153,11 +148,10 @@ static void assertAlgorithm(String algName, String inject, String input, String[] expected) throws Exception { Tokenizer tokenizer = new MockTokenizer(new StringReader(input), MockTokenizer.WHITESPACE, false); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("encoder", algName); args.put("inject", inject); - PhoneticFilterFactory factory = new PhoneticFilterFactory(); - factory.init(args); + PhoneticFilterFactory factory = new PhoneticFilterFactory(args); TokenStream stream = factory.create(tokenizer); assertTokenStreamContents(stream, expected); } Index: lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestDoubleMetaphoneFilterFactory.java =================================================================== --- lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestDoubleMetaphoneFilterFactory.java (revision 1365868) +++ lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestDoubleMetaphoneFilterFactory.java 
(working copy) @@ -30,8 +30,7 @@ public class TestDoubleMetaphoneFilterFactory extends BaseTokenStreamTestCase { public void testDefaults() throws Exception { - DoubleMetaphoneFilterFactory factory = new DoubleMetaphoneFilterFactory(); - factory.init(new HashMap()); + DoubleMetaphoneFilterFactory factory = new DoubleMetaphoneFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream inputStream = new MockTokenizer(new StringReader("international"), MockTokenizer.WHITESPACE, false); TokenStream filteredStream = factory.create(inputStream); @@ -40,11 +39,10 @@ } public void testSettingSizeAndInject() throws Exception { - DoubleMetaphoneFilterFactory factory = new DoubleMetaphoneFilterFactory(); - Map parameters = new HashMap(); + Map parameters = TEST_VERSION_CURRENT_MAP(); parameters.put("inject", "false"); parameters.put("maxCodeLength", "8"); - factory.init(parameters); + DoubleMetaphoneFilterFactory factory = new DoubleMetaphoneFilterFactory(parameters); TokenStream inputStream = new MockTokenizer(new StringReader("international"), MockTokenizer.WHITESPACE, false); @@ -57,8 +55,7 @@ * Ensure that reset() removes any state (buffered tokens) */ public void testReset() throws Exception { - DoubleMetaphoneFilterFactory factory = new DoubleMetaphoneFilterFactory(); - factory.init(new HashMap()); + DoubleMetaphoneFilterFactory factory = new DoubleMetaphoneFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream inputStream = new MockTokenizer(new StringReader("international"), MockTokenizer.WHITESPACE, false); TokenStream filteredStream = factory.create(inputStream); Index: lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestBeiderMorseFilterFactory.java =================================================================== --- lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestBeiderMorseFilterFactory.java (revision 1365868) +++ lucene/analysis/phonetic/src/test/org/apache/lucene/analysis/phonetic/TestBeiderMorseFilterFactory.java 
(working copy) @@ -29,10 +29,7 @@ /** Simple tests for {@link BeiderMorseFilterFactory} */ public class TestBeiderMorseFilterFactory extends BaseTokenStreamTestCase { public void testBasics() throws Exception { - BeiderMorseFilterFactory factory = new BeiderMorseFilterFactory(); - factory.setLuceneMatchVersion(TEST_VERSION_CURRENT); - Map args = Collections.emptyMap(); - factory.init(args); + BeiderMorseFilterFactory factory = new BeiderMorseFilterFactory(TEST_VERSION_CURRENT_MAP()); TokenStream ts = factory.create(new MockTokenizer(new StringReader("Weinberg"), MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(ts, new String[] { "vDnbirk", "vanbirk", "vinbirk", "wDnbirk", "wanbirk", "winbirk" }, @@ -42,10 +39,9 @@ } public void testLanguageSet() throws Exception { - BeiderMorseFilterFactory factory = new BeiderMorseFilterFactory(); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("languageSet", "polish"); - factory.init(args); + BeiderMorseFilterFactory factory = new BeiderMorseFilterFactory(args); TokenStream ts = factory.create(new MockTokenizer(new StringReader("Weinberg"), MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(ts, new String[] { "vDmbYrk", "vDmbirk", "vambYrk", "vambirk", "vimbYrk", "vimbirk" }, @@ -55,11 +51,10 @@ } public void testOptions() throws Exception { - BeiderMorseFilterFactory factory = new BeiderMorseFilterFactory(); - Map args = new HashMap(); + Map args = TEST_VERSION_CURRENT_MAP(); args.put("nameType", "ASHKENAZI"); args.put("ruleType", "EXACT"); - factory.init(args); + BeiderMorseFilterFactory factory = new BeiderMorseFilterFactory(args); TokenStream ts = factory.create(new MockTokenizer(new StringReader("Weinberg"), MockTokenizer.WHITESPACE, false)); assertTokenStreamContents(ts, new String[] { "vajnberk" }, Index: lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/PhoneticFilterFactory.java =================================================================== --- 
lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/PhoneticFilterFactory.java (revision 1365868) +++ lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/PhoneticFilterFactory.java (working copy) @@ -81,9 +81,8 @@ protected Method setMaxCodeLenMethod = null; protected Integer maxCodeLength = null; - @Override - public void init(Map args) { - super.init( args ); + public PhoneticFilterFactory(Map args) { + super(args); inject = getBoolean(INJECT, true); Index: lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/DoubleMetaphoneFilterFactory.java =================================================================== --- lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/DoubleMetaphoneFilterFactory.java (revision 1365868) +++ lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/DoubleMetaphoneFilterFactory.java (working copy) @@ -44,9 +44,8 @@ private boolean inject = true; private int maxCodeLength = DEFAULT_MAX_CODE_LENGTH; - @Override - public void init(Map args) { - super.init(args); + public DoubleMetaphoneFilterFactory(Map args) { + super(args); inject = getBoolean(INJECT, true); Index: lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/BeiderMorseFilterFactory.java =================================================================== --- lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/BeiderMorseFilterFactory.java (revision 1365868) +++ lucene/analysis/phonetic/src/java/org/apache/lucene/analysis/phonetic/BeiderMorseFilterFactory.java (working copy) @@ -47,8 +47,8 @@ private PhoneticEngine engine; private LanguageSet languageSet; - public void init(Map args) { - super.init(args); + public BeiderMorseFilterFactory(Map args) { + super(args); // PhoneticEngine = NameType + RuleType + concat // we use common-codec's defaults: GENERIC + APPROX + true Index: 
lucene/analysis/stempel/src/test/org/apache/lucene/analysis/stempel/TestStempelPolishStemFilterFactory.java =================================================================== --- lucene/analysis/stempel/src/test/org/apache/lucene/analysis/stempel/TestStempelPolishStemFilterFactory.java (revision 1365868) +++ lucene/analysis/stempel/src/test/org/apache/lucene/analysis/stempel/TestStempelPolishStemFilterFactory.java (working copy) @@ -29,7 +29,7 @@ public class TestStempelPolishStemFilterFactory extends BaseTokenStreamTestCase { public void testBasics() throws Exception { StringReader document = new StringReader("studenta studenci"); - StempelPolishStemFilterFactory factory = new StempelPolishStemFilterFactory(); + StempelPolishStemFilterFactory factory = new StempelPolishStemFilterFactory(TEST_VERSION_CURRENT_MAP()); factory.inform(new ResourceAsStreamResourceLoader(getClass())); TokenStream ts = factory.create(new WhitespaceTokenizer(TEST_VERSION_CURRENT, document)); assertTokenStreamContents(ts, Index: lucene/analysis/stempel/src/test/org/apache/lucene/analysis/stempel/ResourceAsStreamResourceLoader.java =================================================================== --- lucene/analysis/stempel/src/test/org/apache/lucene/analysis/stempel/ResourceAsStreamResourceLoader.java (revision 1365868) +++ lucene/analysis/stempel/src/test/org/apache/lucene/analysis/stempel/ResourceAsStreamResourceLoader.java (working copy) @@ -21,6 +21,7 @@ import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; +import java.lang.reflect.Constructor; import java.nio.charset.CharacterCodingException; import java.nio.charset.CodingErrorAction; import java.util.ArrayList; @@ -74,10 +75,11 @@ // TODO: do this subpackages thing... wtf is that? @Override - public T newInstance(String cname, Class expectedType, String... subpackages) { + public T newInstance(String cname, Class expectedType, Class[] argTypes, Object[] args, String... 
subpackages) { try { Class clazz = Class.forName(cname).asSubclass(expectedType); - return clazz.newInstance(); + Constructor constructor = clazz.getConstructor(argTypes); + return constructor.newInstance(args); } catch (Exception e) { throw new RuntimeException(e); } Index: lucene/analysis/stempel/src/java/org/apache/lucene/analysis/stempel/StempelPolishStemFilterFactory.java =================================================================== --- lucene/analysis/stempel/src/java/org/apache/lucene/analysis/stempel/StempelPolishStemFilterFactory.java (revision 1365868) +++ lucene/analysis/stempel/src/java/org/apache/lucene/analysis/stempel/StempelPolishStemFilterFactory.java (working copy) @@ -18,6 +18,7 @@ */ import java.io.IOException; +import java.util.Map; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.stempel.StempelFilter; @@ -33,7 +34,11 @@ public class StempelPolishStemFilterFactory extends TokenFilterFactory implements ResourceLoaderAware { private Trie stemmer = null; private static final String STEMTABLE = "/org/apache/lucene/analysis/pl/stemmer_20000.tbl"; - + + public StempelPolishStemFilterFactory(Map args) { + super(args); + } + public TokenStream create(TokenStream input) { return new StempelFilter(input, new StempelStemmer(stemmer)); } Index: lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/UIMAAnnotationsTokenizerFactory.java =================================================================== --- lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/UIMAAnnotationsTokenizerFactory.java (revision 1365868) +++ lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/UIMAAnnotationsTokenizerFactory.java (working copy) @@ -32,9 +32,8 @@ private String descriptorPath; private String tokenType; - @Override - public void init(Map args) { - super.init(args); + public UIMAAnnotationsTokenizerFactory(Map args) { + super(args); descriptorPath = args.get("descriptorPath"); tokenType = 
args.get("tokenType"); if (descriptorPath == null || tokenType == null) { Index: lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/UIMATypeAwareAnnotationsTokenizerFactory.java =================================================================== --- lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/UIMATypeAwareAnnotationsTokenizerFactory.java (revision 1365868) +++ lucene/analysis/uima/src/java/org/apache/lucene/analysis/uima/UIMATypeAwareAnnotationsTokenizerFactory.java (working copy) @@ -33,9 +33,8 @@ private String tokenType; private String featurePath; - @Override - public void init(Map args) { - super.init(args); + public UIMATypeAwareAnnotationsTokenizerFactory(Map args) { + super(args); descriptorPath = args.get("descriptorPath"); tokenType = args.get("tokenType"); featurePath = args.get("featurePath"); Index: lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java =================================================================== --- lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java (revision 1365868) +++ lucene/test-framework/src/java/org/apache/lucene/analysis/BaseTokenStreamTestCase.java (working copy) @@ -906,4 +906,10 @@ } return ret; } + + public static Map TEST_VERSION_CURRENT_MAP() { + Map map = new HashMap(); + map.put("luceneMatchVersion", TEST_VERSION_CURRENT.toString()); + return map; + } }