Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/ar/ArabicAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/ar/ArabicAnalyzer.java	(revision 826601)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/ar/ArabicAnalyzer.java	(working copy)
@@ -64,7 +64,7 @@
   /**
    * Contains the stopwords used with the StopFilter.
    */
-  private Set stoptable = new HashSet();
+  private final Set stoptable;
   /**
    * The comment character in the stopwords file. All lines prefixed with this will be ignored
    */
@@ -119,15 +119,15 @@
   /**
    * Builds an analyzer with the given stop words.
    */
-  public ArabicAnalyzer( String[] stopwords ) {
+  public ArabicAnalyzer( String... stopwords ) {
     stoptable = StopFilter.makeStopSet( stopwords );
   }
 
   /**
    * Builds an analyzer with the given stop words.
    */
-  public ArabicAnalyzer( Hashtable stopwords ) {
-    stoptable = new HashSet(stopwords.keySet());
+  public ArabicAnalyzer( Hashtable stopwords ) {
+    stoptable = new HashSet( stopwords.keySet() );
   }
 
   /**
@@ -149,7 +149,7 @@
     TokenStream result = new ArabicLetterTokenizer( reader );
     result = new LowerCaseFilter(result);
     // the order here is important: the stopword list is not normalized!
-    result = new StopFilter( result, stoptable );
+    result = new StopFilter(false, result, stoptable );
     result = new ArabicNormalizationFilter( result );
     result = new ArabicStemFilter( result );
 
@@ -177,7 +177,7 @@
       streams.source = new ArabicLetterTokenizer(reader);
       streams.result = new LowerCaseFilter(streams.source);
       // the order here is important: the stopword list is not normalized!
-      streams.result = new StopFilter(streams.result, stoptable);
+      streams.result = new StopFilter(false, streams.result, stoptable);
       streams.result = new ArabicNormalizationFilter(streams.result);
       streams.result = new ArabicStemFilter(streams.result);
       setPreviousTokenStream(streams);
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java	(revision 826601)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java	(working copy)
@@ -23,6 +23,7 @@
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
+import java.util.Collections;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.LowerCaseFilter;
@@ -71,12 +72,12 @@
   /**
    * Contains the stopwords used with the {@link StopFilter}.
    */
-  private Set stoptable = new HashSet();
+  private Set stoptable = Collections.emptySet();
 
   /**
    * Contains words that should be indexed but not stemmed.
    */
-  private Set excltable = new HashSet();
+  private Set excltable = Collections.emptySet();
 
   /**
    * Builds an analyzer with the default stop words ({@link #BRAZILIAN_STOP_WORDS}).
@@ -88,7 +89,7 @@
   /**
    * Builds an analyzer with the given stop words.
    */
-  public BrazilianAnalyzer( String[] stopwords ) {
+  public BrazilianAnalyzer( String... stopwords ) {
     stoptable = StopFilter.makeStopSet( stopwords );
   }
 
@@ -109,7 +110,7 @@
   /**
    * Builds an exclusionlist from an array of Strings.
    */
-  public void setStemExclusionTable( String[] exclusionlist ) {
+  public void setStemExclusionTable( String... exclusionlist ) {
     excltable = StopFilter.makeStopSet( exclusionlist );
     setPreviousTokenStream(null); // force a new stemmer to be created
   }
@@ -139,7 +140,7 @@
     TokenStream result = new StandardTokenizer( reader );
     result = new LowerCaseFilter( result );
     result = new StandardFilter( result );
-    result = new StopFilter( result, stoptable );
+    result = new StopFilter( false, result, stoptable );
     result = new BrazilianStemFilter( result, excltable );
     return result;
   }
@@ -165,7 +166,7 @@
       streams.source = new StandardTokenizer(reader);
       streams.result = new LowerCaseFilter(streams.source);
       streams.result = new StandardFilter(streams.result);
-      streams.result = new StopFilter(streams.result, stoptable);
+      streams.result = new StopFilter(false, streams.result, stoptable);
       streams.result = new BrazilianStemFilter(streams.result, excltable);
       setPreviousTokenStream(streams);
     } else {
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/cjk/CJKAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/cjk/CJKAnalyzer.java	(revision 826601)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/cjk/CJKAnalyzer.java	(working copy)
@@ -55,7 +55,7 @@
   /**
    * stop word list
    */
-  private Set stopTable;
+  private final Set stopTable;
 
   //~ Constructors -----------------------------------------------------------
@@ -71,7 +71,7 @@
    *
    * @param stopWords stop word array
    */
-  public CJKAnalyzer(String[] stopWords) {
+  public CJKAnalyzer(String... stopWords) {
     stopTable = StopFilter.makeStopSet(stopWords);
   }
 
@@ -86,7 +86,7 @@
    * {@link StopFilter}
    */
   public final TokenStream tokenStream(String fieldName, Reader reader) {
-    return new StopFilter(new CJKTokenizer(reader), stopTable);
+    return new StopFilter(false, new CJKTokenizer(reader), stopTable);
   }
 
   private class SavedStreams {
@@ -109,7 +109,7 @@
     if (streams == null) {
       streams = new SavedStreams();
       streams.source = new CJKTokenizer(reader);
-      streams.result = new StopFilter(streams.source, stopTable);
+      streams.result = new StopFilter(false, streams.source, stopTable);
       setPreviousTokenStream(streams);
     } else {
       streams.source.reset(reader);
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/cz/CzechAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/cz/CzechAnalyzer.java	(revision 826601)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/cz/CzechAnalyzer.java	(working copy)
@@ -29,6 +29,7 @@
 import java.io.*;
 import java.util.HashSet;
 import java.util.Set;
+import java.util.Collections;
 
 /**
  * {@link Analyzer} for Czech language.
@@ -79,7 +80,7 @@
   /**
    * Builds an analyzer with the given stop words.
    */
-  public CzechAnalyzer( String[] stopwords ) {
+  public CzechAnalyzer( String... stopwords ) {
    stoptable = StopFilter.makeStopSet( stopwords );
  }
 
@@ -107,7 +108,7 @@
     }
     try {
       // clear any previous table (if present)
-      stoptable = new HashSet();
+      stoptable = Collections.emptySet();
 
       InputStreamReader isr;
       if (encoding == null)
@@ -115,16 +116,11 @@
       else
         isr = new InputStreamReader(wordfile, encoding);
 
-      LineNumberReader lnr = new LineNumberReader(isr);
-      String word;
-      while ( ( word = lnr.readLine() ) != null ) {
-        stoptable.add(word);
-      }
-
+      stoptable = WordlistLoader.getWordSet(isr);
     } catch ( IOException e ) {
       // clear any previous table (if present)
       // TODO: throw IOException
-      stoptable = new HashSet();
+      stoptable = Collections.emptySet();
     }
   }
 
@@ -138,7 +134,7 @@
     TokenStream result = new StandardTokenizer( reader );
     result = new StandardFilter( result );
     result = new LowerCaseFilter( result );
-    result = new StopFilter( result, stoptable );
+    result = new StopFilter(false, result, stoptable );
     return result;
   }
@@ -162,7 +158,7 @@
       streams.source = new StandardTokenizer(reader);
       streams.result = new StandardFilter(streams.source);
       streams.result = new LowerCaseFilter(streams.result);
-      streams.result = new StopFilter(streams.result, stoptable);
+      streams.result = new StopFilter(false, streams.result, stoptable);
       setPreviousTokenStream(streams);
     } else {
       streams.source.reset(reader);
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/de/GermanAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/de/GermanAnalyzer.java	(revision 826601)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/de/GermanAnalyzer.java	(working copy)
@@ -86,7 +86,7 @@
   /**
    * Builds an analyzer with the given stop words.
    */
-  public GermanAnalyzer(String[] stopwords) {
+  public GermanAnalyzer(String... stopwords) {
     stopSet = StopFilter.makeStopSet(stopwords);
     setOverridesTokenStreamMethod(GermanAnalyzer.class);
   }
@@ -142,7 +142,7 @@
     TokenStream result = new StandardTokenizer(reader);
     result = new StandardFilter(result);
     result = new LowerCaseFilter(result);
-    result = new StopFilter(result, stopSet);
+    result = new StopFilter(false, result, stopSet);
     result = new GermanStemFilter(result, exclusionSet);
     return result;
   }
@@ -174,7 +174,7 @@
       streams.source = new StandardTokenizer(reader);
       streams.result = new StandardFilter(streams.source);
       streams.result = new LowerCaseFilter(streams.result);
-      streams.result = new StopFilter(streams.result, stopSet);
+      streams.result = new StopFilter(false, streams.result, stopSet);
       streams.result = new GermanStemFilter(streams.result, exclusionSet);
       setPreviousTokenStream(streams);
     } else {
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/el/GreekAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/el/GreekAnalyzer.java	(revision 826601)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/el/GreekAnalyzer.java	(working copy)
@@ -67,7 +67,7 @@
      * Builds an analyzer with the given stop words.
      * @param stopwords Array of stopwords to use.
      */
-    public GreekAnalyzer(String [] stopwords)
+    public GreekAnalyzer(String... stopwords)
     {
         super();
         stopSet = StopFilter.makeStopSet(stopwords);
@@ -92,7 +92,7 @@
     {
         TokenStream result = new StandardTokenizer(reader);
         result = new GreekLowerCaseFilter(result);
-        result = new StopFilter(result, stopSet);
+        result = new StopFilter(false, result, stopSet);
         return result;
     }
@@ -115,7 +115,7 @@
             streams = new SavedStreams();
             streams.source = new StandardTokenizer(reader);
             streams.result = new GreekLowerCaseFilter(streams.source);
-            streams.result = new StopFilter(streams.result, stopSet);
+            streams.result = new StopFilter(false, streams.result, stopSet);
             setPreviousTokenStream(streams);
         } else {
             streams.source.reset(reader);
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/fr/FrenchAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/fr/FrenchAnalyzer.java	(revision 826601)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/fr/FrenchAnalyzer.java	(working copy)
@@ -92,7 +92,7 @@
   /**
    * Builds an analyzer with the given stop words.
    */
-  public FrenchAnalyzer(String[] stopwords) {
+  public FrenchAnalyzer(String... stopwords) {
     stoptable = StopFilter.makeStopSet(stopwords);
   }
 
@@ -107,7 +107,7 @@
   /**
    * Builds an exclusionlist from an array of Strings.
    */
-  public void setStemExclusionTable(String[] exclusionlist) {
+  public void setStemExclusionTable(String... exclusionlist) {
     excltable = StopFilter.makeStopSet(exclusionlist);
     setPreviousTokenStream(null); // force a new stemmer to be created
   }
@@ -144,7 +144,7 @@
     TokenStream result = new StandardTokenizer(reader);
     result = new StandardFilter(result);
-    result = new StopFilter(result, stoptable);
+    result = new StopFilter(false, result, stoptable);
     result = new FrenchStemFilter(result, excltable);
     // Convert to lowercase after stemming!
     result = new LowerCaseFilter(result);
@@ -171,7 +171,7 @@
       streams = new SavedStreams();
       streams.source = new StandardTokenizer(reader);
       streams.result = new StandardFilter(streams.source);
-      streams.result = new StopFilter(streams.result, stoptable);
+      streams.result = new StopFilter(false, streams.result, stoptable);
       streams.result = new FrenchStemFilter(streams.result, excltable);
       // Convert to lowercase after stemming!
       streams.result = new LowerCaseFilter(streams.result);
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/nl/DutchAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/nl/DutchAnalyzer.java	(revision 826601)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/nl/DutchAnalyzer.java	(working copy)
@@ -94,7 +94,7 @@
    *
    * @param stopwords
    */
-  public DutchAnalyzer(String[] stopwords) {
+  public DutchAnalyzer(String... stopwords) {
     setOverridesTokenStreamMethod(DutchAnalyzer.class);
     stoptable = StopFilter.makeStopSet(stopwords);
   }
@@ -129,7 +129,7 @@
    *
    * @param exclusionlist
    */
-  public void setStemExclusionTable(String[] exclusionlist) {
+  public void setStemExclusionTable(String... exclusionlist) {
    excltable = StopFilter.makeStopSet(exclusionlist);
    setPreviousTokenStream(null); // force a new stemmer to be created
  }
@@ -181,7 +181,7 @@
   public TokenStream tokenStream(String fieldName, Reader reader) {
     TokenStream result = new StandardTokenizer(reader);
     result = new StandardFilter(result);
-    result = new StopFilter(result, stoptable);
+    result = new StopFilter(false, result, stoptable);
     result = new DutchStemFilter(result, excltable, stemdict);
     return result;
   }
@@ -213,7 +213,7 @@
       streams = new SavedStreams();
       streams.source = new StandardTokenizer(reader);
       streams.result = new StandardFilter(streams.source);
-      streams.result = new StopFilter(streams.result, stoptable);
+      streams.result = new StopFilter(false, streams.result, stoptable);
       streams.result = new DutchStemFilter(streams.result, excltable, stemdict);
       setPreviousTokenStream(streams);
     } else {
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java	(revision 826601)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java	(working copy)
@@ -175,7 +175,7 @@
     }
     HashSet stopWords = (HashSet) stopWordsPerField.get(fieldName);
     if (stopWords != null) {
-      result = new StopFilter(result, stopWords);
+      result = new StopFilter(false, result, stopWords);
     }
     return result;
   }
@@ -217,7 +217,7 @@
       /* if there are any stopwords for the field, save the stopfilter */
       HashSet stopWords = (HashSet) stopWordsPerField.get(fieldName);
       if (stopWords != null)
-        streams.withStopFilter = new StopFilter(streams.wrapped, stopWords);
+        streams.withStopFilter = new StopFilter(false, streams.wrapped, stopWords);
       else
         streams.withStopFilter = streams.wrapped;
 
@@ -238,7 +238,7 @@
       streams.wrapped = result;
 
       HashSet stopWords = (HashSet) stopWordsPerField.get(fieldName);
       if (stopWords != null)
-        streams.withStopFilter = new StopFilter(streams.wrapped, stopWords);
+        streams.withStopFilter = new StopFilter(false, streams.wrapped, stopWords);
       else
         streams.withStopFilter = streams.wrapped;
     }
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/ru/RussianAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/ru/RussianAnalyzer.java	(revision 826601)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/ru/RussianAnalyzer.java	(working copy)
@@ -67,7 +67,7 @@
     /**
      * Builds an analyzer with the given stop words.
      */
-    public RussianAnalyzer(String[] stopwords)
+    public RussianAnalyzer(String... stopwords)
    {
        super();
        stopSet = StopFilter.makeStopSet(stopwords);
@@ -96,7 +96,7 @@
    {
        TokenStream result = new RussianLetterTokenizer(reader);
        result = new LowerCaseFilter(result);
-       result = new StopFilter(result, stopSet);
+       result = new StopFilter(false, result, stopSet);
        result = new RussianStemFilter(result);
        return result;
    }
@@ -122,7 +122,7 @@
            streams = new SavedStreams();
            streams.source = new RussianLetterTokenizer(reader);
            streams.result = new LowerCaseFilter(streams.source);
-           streams.result = new StopFilter(streams.result, stopSet);
+           streams.result = new StopFilter(false, streams.result, stopSet);
            streams.result = new RussianStemFilter(streams.result);
            setPreviousTokenStream(streams);
        } else {
            streams.source.reset(reader);
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapper.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapper.java	(revision 826601)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapper.java	(working copy)
@@ -23,6 +23,7 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.util.Version;
 
 /**
  * A ShingleAnalyzerWrapper wraps a {@link ShingleFilter} around another {@link Analyzer}.
@@ -50,14 +51,17 @@
   /**
    * Wraps {@link StandardAnalyzer}.
    */
-  public ShingleAnalyzerWrapper() {
+  public ShingleAnalyzerWrapper(Version matchVersion) {
     super();
-    this.defaultAnalyzer = new StandardAnalyzer();
+    this.defaultAnalyzer = new StandardAnalyzer(matchVersion);
     setOverridesTokenStreamMethod(ShingleAnalyzerWrapper.class);
   }
 
-  public ShingleAnalyzerWrapper(int nGramSize) {
-    this();
+  /**
+   * Wraps {@link StandardAnalyzer}.
+   */
+  public ShingleAnalyzerWrapper(Version matchVersion, int nGramSize) {
+    this(matchVersion);
     this.maxShingleSize = nGramSize;
   }
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/th/ThaiAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/th/ThaiAnalyzer.java	(revision 826601)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/th/ThaiAnalyzer.java	(working copy)
@@ -40,7 +40,7 @@
     TokenStream ts = new StandardTokenizer(reader);
     ts = new StandardFilter(ts);
     ts = new ThaiWordFilter(ts);
-    ts = new StopFilter(ts, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
+    ts = new StopFilter(false, ts, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
     return ts;
   }
 
@@ -63,7 +63,7 @@
       streams.source = new StandardTokenizer(reader);
       streams.result = new StandardFilter(streams.source);
      streams.result = new ThaiWordFilter(streams.result);
-      streams.result = new StopFilter(streams.result, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
+      streams.result = new StopFilter(false, streams.result, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
       setPreviousTokenStream(streams);
     } else {
       streams.source.reset(reader);
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java	(revision 826601)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java	(working copy)
@@ -216,6 +216,10 @@
    * subclass that acts just like whitespace analyzer for testing
    */
   private class ShingleWrapperSubclassAnalyzer extends ShingleAnalyzerWrapper {
+    public ShingleWrapperSubclassAnalyzer() {
+      super(org.apache.lucene.util.Version.LUCENE_CURRENT);
+    }
+
     public TokenStream tokenStream(String fieldName, Reader reader) {
       return new WhitespaceTokenizer(reader);
     }
Index: contrib/ant/src/test/org/apache/lucene/ant/IndexTaskTest.java
===================================================================
--- contrib/ant/src/test/org/apache/lucene/ant/IndexTaskTest.java	(revision 826601)
+++ contrib/ant/src/test/org/apache/lucene/ant/IndexTaskTest.java	(working copy)
@@ -69,7 +69,7 @@
 
         dir = FSDirectory.open(indexDir);
         searcher = new IndexSearcher(dir, true);
-        analyzer = new StopAnalyzer();
+        analyzer = new StopAnalyzer(false);
     }
 
Index: contrib/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SimpleQQParser.java
===================================================================
--- contrib/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SimpleQQParser.java	(revision 826601)
+++ contrib/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SimpleQQParser.java	(working copy)
@@ -22,6 +22,7 @@
 import org.apache.lucene.queryParser.ParseException;
 import org.apache.lucene.queryParser.QueryParser;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.util.Version;
 
 /**
  * Simplistic quality query parser. A Lucene query is created by passing
@@ -49,7 +50,7 @@
   public Query parse(QualityQuery qq) throws ParseException {
     QueryParser qp = (QueryParser) queryParser.get();
     if (qp==null) {
-      qp = new QueryParser(indexField, new StandardAnalyzer());
+      qp = new QueryParser(indexField, new StandardAnalyzer(Version.LUCENE_CURRENT));
       queryParser.set(qp);
     }
     return qp.parse(qq.getValue(qqName));
Index: contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
===================================================================
--- contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java	(revision 826601)
+++ contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java	(working copy)
@@ -83,13 +83,16 @@
  *
  */
 public class HighlighterTest extends BaseTokenStreamTestCase implements Formatter {
+  // TODO: change to CURRENT, does not work because posIncr:
+  static final Version TEST_VERSION = Version.LUCENE_24;
+
   private IndexReader reader;
   static final String FIELD_NAME = "contents";
   private Query query;
   RAMDirectory ramDir;
   public IndexSearcher searcher = null;
   int numHighlights = 0;
-  Analyzer analyzer = new StandardAnalyzer();
+  final Analyzer analyzer = new StandardAnalyzer(TEST_VERSION);
   TopDocs hits;
 
   String[] texts = {
@@ -140,7 +143,7 @@
 
     String s1 = "I call our world Flatland, not because we call it so,";
 
-    QueryParser parser = new QueryParser(FIELD_NAME, new StandardAnalyzer(Version.LUCENE_CURRENT));
+    QueryParser parser = new QueryParser(FIELD_NAME, new StandardAnalyzer(TEST_VERSION));
 
     // Verify that a query against the default field results in text being
     // highlighted
@@ -172,7 +175,7 @@
    */
  private static String highlightField(Query query, String fieldName, String text)
      throws IOException, InvalidTokenOffsetsException {
-    TokenStream tokenStream = new StandardAnalyzer(Version.LUCENE_CURRENT).tokenStream(fieldName, new StringReader(text));
+    TokenStream tokenStream = new StandardAnalyzer(TEST_VERSION).tokenStream(fieldName, new StringReader(text));
    // Assuming "<B>", "</B>" used to highlight
    SimpleHTMLFormatter formatter = new SimpleHTMLFormatter();
    QueryScorer scorer = new QueryScorer(query, fieldName, FIELD_NAME);
@@ -542,7 +545,7 @@
     // Need to explicitly set the QueryParser property to use TermRangeQuery
     // rather
     // than RangeFilters
-    QueryParser parser = new QueryParser(FIELD_NAME, new StandardAnalyzer());
+    QueryParser parser = new QueryParser(FIELD_NAME, analyzer);
     parser.setUseOldRangeQuery(true);
     query = parser.parse(queryString);
     doSearching(query);
@@ -693,7 +696,7 @@
     hg.setTextFragmenter(new NullFragmenter());
 
     String match = null;
-    match = hg.getBestFragment(new StandardAnalyzer(), "data", "help me [54-65]");
+    match = hg.getBestFragment(analyzer, "data", "help me [54-65]");
     assertEquals("help me [54-65]", match);
 
   }
@@ -1004,13 +1007,13 @@
       sb.append(stopWords.iterator().next());
     }
     SimpleHTMLFormatter fm = new SimpleHTMLFormatter();
-    Highlighter hg = getHighlighter(query, "data", new StandardAnalyzer(stopWords).tokenStream(
+    Highlighter hg = getHighlighter(query, "data", new StandardAnalyzer(TEST_VERSION, stopWords).tokenStream(
        "data", new StringReader(sb.toString())), fm);// new Highlighter(fm,
    // new
    // QueryTermScorer(query));
     hg.setTextFragmenter(new NullFragmenter());
     hg.setMaxDocBytesToAnalyze(100);
-    match = hg.getBestFragment(new StandardAnalyzer(stopWords), "data", sb.toString());
+    match = hg.getBestFragment(new StandardAnalyzer(TEST_VERSION, stopWords), "data", sb.toString());
     assertTrue("Matched text should be no more than 100 chars in length ", match.length() < hg
        .getMaxDocBytesToAnalyze());
 
@@ -1021,7 +1024,7 @@
       // + whitespace)
       sb.append(" ");
       sb.append(goodWord);
-      match = hg.getBestFragment(new StandardAnalyzer(stopWords), "data", sb.toString());
+      match = hg.getBestFragment(new StandardAnalyzer(TEST_VERSION, stopWords), "data", sb.toString());
       assertTrue("Matched text should be no more than 100 chars in length ", match.length() < hg
           .getMaxDocBytesToAnalyze());
     }
@@ -1041,11 +1044,11 @@
 
     String text = "this is a text with searchterm in it";
     SimpleHTMLFormatter fm = new SimpleHTMLFormatter();
-    Highlighter hg = getHighlighter(query, "text", new StandardAnalyzer(
+    Highlighter hg = getHighlighter(query, "text", new StandardAnalyzer(TEST_VERSION,
         stopWords).tokenStream("text", new StringReader(text)), fm);
     hg.setTextFragmenter(new NullFragmenter());
     hg.setMaxDocCharsToAnalyze(36);
-    String match = hg.getBestFragment(new StandardAnalyzer(stopWords), "text", text);
+    String match = hg.getBestFragment(new StandardAnalyzer(TEST_VERSION, stopWords), "text", text);
     assertTrue(
         "Matched text should contain remainder of text after highlighted query ",
         match.endsWith("in it"));
@@ -1061,7 +1064,7 @@
     numHighlights = 0;
     // test to show how rewritten query can still be used
     searcher = new IndexSearcher(ramDir, true);
-    Analyzer analyzer = new StandardAnalyzer();
+    Analyzer analyzer = new StandardAnalyzer(TEST_VERSION);
 
     QueryParser parser = new QueryParser(FIELD_NAME, analyzer);
     Query query = parser.parse("JF? or Kenned*");
@@ -1074,7 +1077,7 @@
     // highlighted text
     // QueryHighlightExtractor highlighter = new
     // QueryHighlightExtractor(this,
-    // query, new StandardAnalyzer());
+    // query, new StandardAnalyzer(TEST_VERSION));
 
     int maxNumFragmentsRequired = 3;
 
@@ -1173,7 +1176,7 @@
   public void testMultiSearcher() throws Exception {
     // setup index 1
     RAMDirectory ramDir1 = new RAMDirectory();
-    IndexWriter writer1 = new IndexWriter(ramDir1, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter writer1 = new IndexWriter(ramDir1, new StandardAnalyzer(TEST_VERSION), true, IndexWriter.MaxFieldLength.UNLIMITED);
     Document d = new Document();
     Field f = new Field(FIELD_NAME, "multiOne", Field.Store.YES, Field.Index.ANALYZED);
     d.add(f);
@@ -1184,7 +1187,7 @@
 
     // setup index 2
     RAMDirectory ramDir2 = new RAMDirectory();
-    IndexWriter writer2 = new IndexWriter(ramDir2, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter writer2 = new IndexWriter(ramDir2, new StandardAnalyzer(TEST_VERSION), true, IndexWriter.MaxFieldLength.UNLIMITED);
     d = new Document();
     f = new Field(FIELD_NAME, "multiTwo", Field.Store.YES, Field.Index.ANALYZED);
     d.add(f);
@@ -1197,14 +1200,14 @@
     searchers[0] = new IndexSearcher(ramDir1, true);
     searchers[1] = new IndexSearcher(ramDir2, true);
     MultiSearcher multiSearcher = new MultiSearcher(searchers);
-    QueryParser parser = new QueryParser(FIELD_NAME, new StandardAnalyzer());
+    QueryParser parser = new QueryParser(FIELD_NAME, new StandardAnalyzer(TEST_VERSION));
     parser.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
     query = parser.parse("multi*");
     System.out.println("Searching for: " + query.toString(FIELD_NAME));
     // at this point the multisearcher calls combine(query[])
     hits = multiSearcher.search(query, null, 1000);
 
-    // query = QueryParser.parse("multi*", FIELD_NAME, new StandardAnalyzer());
+    // query = QueryParser.parse("multi*", FIELD_NAME, new StandardAnalyzer(TEST_VERSION));
     Query expandedQueries[] = new Query[2];
     expandedQueries[0] = query.rewrite(reader1);
     expandedQueries[1] = query.rewrite(reader2);
@@ -1527,7 +1530,7 @@
   }
 
   public void doSearching(String queryString) throws Exception {
-    QueryParser parser = new QueryParser(FIELD_NAME, new StandardAnalyzer());
+    QueryParser parser = new QueryParser(FIELD_NAME, analyzer);
     parser.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
     query = parser.parse(queryString);
     doSearching(query);
@@ -1564,7 +1567,7 @@
   protected void setUp() throws Exception {
     super.setUp();
     ramDir = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(ramDir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter writer = new IndexWriter(ramDir, new StandardAnalyzer(TEST_VERSION), true, IndexWriter.MaxFieldLength.UNLIMITED);
     for (int i = 0; i < texts.length; i++) {
       addDoc(writer, texts[i]);
     }
Index: contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java
===================================================================
--- contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java	(revision 826601)
+++ contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java	(working copy)
@@ -63,7 +63,7 @@
     RAMDirectory dir = new RAMDirectory();
 
     // create dir data
-    IndexWriter indexWriter = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter indexWriter = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
     for (int i = 0; i < 20; i++) {
       Document document = new Document();
       assembleDocument(document, i);
@@ -87,7 +87,7 @@
     InstantiatedIndex ii = new InstantiatedIndex();
 
     // create dir data
-    IndexWriter indexWriter = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter indexWriter = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
     for (int i = 0; i < 500; i++) {
       Document document = new Document();
       assembleDocument(document, i);
@@ -96,7 +96,7 @@
     indexWriter.close();
 
     // test ii writer
-    InstantiatedIndexWriter instantiatedIndexWriter = ii.indexWriterFactory(new StandardAnalyzer(), true);
+    InstantiatedIndexWriter instantiatedIndexWriter = ii.indexWriterFactory(new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true);
     for (int i = 0; i < 500; i++) {
       Document document = new Document();
       assembleDocument(document, i);
Index: contrib/lucli/src/java/lucli/LuceneMethods.java
===================================================================
--- contrib/lucli/src/java/lucli/LuceneMethods.java	(revision 826601)
+++ contrib/lucli/src/java/lucli/LuceneMethods.java	(working copy)
@@ -57,6 +57,7 @@
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Searcher;
 import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.util.Version;
 
 /**
  * Various methods that interact with Lucene and provide info about the
@@ -80,18 +81,18 @@
   }
 
   private Analyzer createAnalyzer() {
-    if (analyzerClassFQN == null) return new StandardAnalyzer();
+    if (analyzerClassFQN == null) return new StandardAnalyzer(Version.LUCENE_CURRENT);
     try {
       Class aClass = Class.forName(analyzerClassFQN);
       Object obj = aClass.newInstance();
       if (!(obj instanceof Analyzer)) {
         message("Given class is not an Analyzer: " + analyzerClassFQN);
-        return new StandardAnalyzer();
+        return new StandardAnalyzer(Version.LUCENE_CURRENT);
      }
      return (Analyzer)obj;
    } catch (Exception e) {
      message("Unable to use Analyzer " + analyzerClassFQN);
-      return new StandardAnalyzer();
+      return new StandardAnalyzer(Version.LUCENE_CURRENT);
    }
  }
Index: contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
===================================================================
--- contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java	(revision 826601)
+++ contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java	(working copy)
@@ -277,8 +277,8 @@
 
     Analyzer[] analyzers = new Analyzer[] {
         new SimpleAnalyzer(),
-        new StopAnalyzer(),
-        new StandardAnalyzer(),
+        new StopAnalyzer(true),
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
         PatternAnalyzer.DEFAULT_ANALYZER,
//        new WhitespaceAnalyzer(),
//        new PatternAnalyzer(PatternAnalyzer.NON_WORD_PATTERN, false, null),
Index: contrib/misc/src/test/org/apache/lucene/index/TestTermVectorAccessor.java
===================================================================
--- contrib/misc/src/test/org/apache/lucene/index/TestTermVectorAccessor.java	(revision 826601)
+++ contrib/misc/src/test/org/apache/lucene/index/TestTermVectorAccessor.java	(working copy)
@@ -6,6 +6,7 @@
 import org.apache.lucene.document.Field;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.Version;
 import java.util.Collections;
 
 /*
@@ -29,7 +30,7 @@
   public void test() throws Exception {
 
     Directory dir = new RAMDirectory();
-    IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(Collections.EMPTY_SET), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(Version.LUCENE_CURRENT, Collections.emptySet()), true, IndexWriter.MaxFieldLength.UNLIMITED);
 
     Document doc;
Index: contrib/misc/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java
===================================================================
--- contrib/misc/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java	(revision 826601)
+++ contrib/misc/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java	(working copy)
@@ -36,7 +36,7 @@
 
 public class TestComplexPhraseQuery extends TestCase {
 
-  Analyzer analyzer = new StandardAnalyzer();
+  Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
 
   DocData docsContent[] = { new DocData("john smith", "1"),
       new DocData("johathon smith", "2"),
Index: contrib/misc/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java
===================================================================
--- contrib/misc/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java	(revision 826601)
+++ contrib/misc/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java	(working copy)
@@ -44,6 +44,7 @@
 import java.util.Calendar;
 import java.util.GregorianCalendar;
 import java.util.HashSet;
+import java.util.Collections;
 
 public class TestPrecedenceQueryParser extends LocalizedTestCase {
 
@@ -233,7 +234,7 @@
     assertQueryEquals("+title:(dog OR cat) -author:\"bob dole\"", null,
        "+(title:dog title:cat) -author:\"bob dole\"");
 
-    PrecedenceQueryParser qp = new PrecedenceQueryParser("field", new StandardAnalyzer());
+    PrecedenceQueryParser qp = new PrecedenceQueryParser("field", new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
    // make sure OR is the default:
    assertEquals(PrecedenceQueryParser.OR_OPERATOR, qp.getDefaultOperator());
    qp.setDefaultOperator(PrecedenceQueryParser.AND_OPERATOR);
@@ -267,7 +268,7 @@
     assertQueryEquals("term 1.0 1 2", null, "term");
     assertQueryEquals("term term1 term2", null, "term term term");
 
-    Analyzer a = new StandardAnalyzer();
+    Analyzer a = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
     assertQueryEquals("3", a, "3");
     assertQueryEquals("term 1.0 1 2", a, "term 1.0 1 2");
     assertQueryEquals("term term1 term2", a, "term term1 term2");
@@ -505,7 +506,7 @@
 
   public void testBoost() throws Exception {
-    StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(new String[]{"on"});
+    StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT, Collections.singleton("on"));
     PrecedenceQueryParser qp = new PrecedenceQueryParser("field", oneStopAnalyzer);
     Query q = qp.parse("on^1.0");
     assertNotNull(q);
@@ -518,7 +519,7 @@
     q = qp.parse("\"on\"^1.0");
     assertNotNull(q);
 
-    q = getParser(new StandardAnalyzer()).parse("the^3");
+    q = getParser(new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT)).parse("the^3");
     assertNotNull(q);
   }
 
Index: contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java
===================================================================
--- contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java	(revision 826601)
+++ contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java	(working copy)
@@ -49,6 +49,7 @@
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.FSDirectory;
 import org.apache.lucene.util.PriorityQueue;
+import org.apache.lucene.util.Version;
 
 /**
@@ -160,7 +161,7 @@
     * Default analyzer to parse source doc with.
     * @see #getAnalyzer
     */
-    public static final Analyzer DEFAULT_ANALYZER = new StandardAnalyzer();
+    public static final Analyzer DEFAULT_ANALYZER = new StandardAnalyzer(Version.LUCENE_CURRENT);
 
    /**
     * Ignore terms with less than this frequency in the source doc.
Index: contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java
===================================================================
--- contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java	(revision 826601)
+++ contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java	(working copy)
@@ -42,7 +42,7 @@
   protected void setUp() throws Exception {
     directory = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(directory, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter writer = new IndexWriter(directory, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
 
     //Add series of docs with filterable fields : url, text and dates flags
     addDoc(writer, "http://lucene.apache.org", "lucene 1.4.3 available", "20040101");
Index: contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java
===================================================================
--- contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java	(revision 826601)
+++ contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java	(working copy)
@@ -43,7 +43,7 @@
   protected void setUp() throws Exception {
     directory = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(directory, new StandardAnalyzer(),
+    IndexWriter writer = new IndexWriter(directory, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
         true, MaxFieldLength.UNLIMITED);
 
     // Add series of docs with specific information for MoreLikeThis
Index: contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java
===================================================================
--- contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java	(revision 826601)
+++ contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java	(working copy)
@@ -83,7 +83,7 @@
     String[] fields = { "b", "t" };
     StandardQueryParser mfqp = new StandardQueryParser();
     mfqp.setMultiFields(fields);
-    mfqp.setAnalyzer(new StandardAnalyzer());
+    mfqp.setAnalyzer(new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
 
     Query q = mfqp.parse("one", null);
     assertEquals("b:one t:one", q.toString());
@@ -153,7 +153,7 @@
     StandardQueryParser mfqp = new StandardQueryParser();
     mfqp.setMultiFields(fields);
     mfqp.setFieldsBoost(boosts);
-    mfqp.setAnalyzer(new StandardAnalyzer());
+    mfqp.setAnalyzer(new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
 
     // Check for simple
     Query q = mfqp.parse("one", null);
@@ -181,24 +181,24 @@
   public void testStaticMethod1() throws QueryNodeException {
     String[] fields = { "b", "t" };
     String[] queries = { "one", "two" };
-    Query q = QueryParserUtil.parse(queries, fields, new StandardAnalyzer());
+    Query q = QueryParserUtil.parse(queries, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("b:one t:two", q.toString());
 
     String[] queries2 = { "+one", "+two" };
-    q = QueryParserUtil.parse(queries2, fields, new StandardAnalyzer());
+    q = QueryParserUtil.parse(queries2, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("(+b:one) (+t:two)", q.toString());
 
     String[] queries3 = { "one", "+two" };
-    q = QueryParserUtil.parse(queries3, fields, new StandardAnalyzer());
+    q = QueryParserUtil.parse(queries3, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("b:one (+t:two)", q.toString());
 
     String[] queries4 = { "one +more", "+two" };
-    q = QueryParserUtil.parse(queries4, fields, new StandardAnalyzer());
+    q = QueryParserUtil.parse(queries4, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("(b:one +b:more) (+t:two)", q.toString());
 
     String[] queries5 = { "blah" };
     try {
-      q = QueryParserUtil.parse(queries5, fields, new StandardAnalyzer());
+      q = QueryParserUtil.parse(queries5, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
       fail();
     } catch (IllegalArgumentException e) {
       // expected exception, array length differs
@@ -222,15 +222,15 @@
     BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
         BooleanClause.Occur.MUST_NOT };
     Query q = QueryParserUtil.parse("one", fields, flags,
-        new StandardAnalyzer());
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("+b:one -t:one", q.toString());
 
-    q = QueryParserUtil.parse("one two", fields, flags, new StandardAnalyzer());
+    q = QueryParserUtil.parse("one two", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
 
     try {
       BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
-      q = QueryParserUtil.parse("blah", fields, flags2, new StandardAnalyzer());
+      q = QueryParserUtil.parse("blah", fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
       fail();
     } catch (IllegalArgumentException e) {
       // expected exception, array length differs
@@ -243,19 +243,19 @@
         BooleanClause.Occur.MUST_NOT };
     StandardQueryParser parser = new StandardQueryParser();
     parser.setMultiFields(fields);
-    parser.setAnalyzer(new StandardAnalyzer());
+    parser.setAnalyzer(new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
 
     Query q = QueryParserUtil.parse("one", fields, flags,
-        new StandardAnalyzer());// , fields, flags, new
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));// , fields, flags, new
     // StandardAnalyzer());
     assertEquals("+b:one -t:one", q.toString());
 
-    q = QueryParserUtil.parse("one two", fields, flags, new StandardAnalyzer());
+    q = QueryParserUtil.parse("one two", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
 
     try {
       BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
-      q = QueryParserUtil.parse("blah", fields, flags2, new StandardAnalyzer());
+      q = QueryParserUtil.parse("blah", fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
       fail();
     } catch (IllegalArgumentException e) {
       // expected exception, array length differs
@@ -268,13 +268,13 @@
     BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
         BooleanClause.Occur.MUST_NOT, BooleanClause.Occur.SHOULD };
     Query q = QueryParserUtil.parse(queries, fields, flags,
-        new StandardAnalyzer());
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("+f1:one -f2:two f3:three", q.toString());
 
     try {
       BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
       q = QueryParserUtil
-          .parse(queries, fields, flags2, new StandardAnalyzer());
+          .parse(queries, fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
       fail();
     } catch (IllegalArgumentException e) {
       // expected exception, array length differs
@@ -287,13 +287,13 @@
     BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
        BooleanClause.Occur.MUST_NOT };
     Query q = QueryParserUtil.parse(queries, fields, flags,
-        new StandardAnalyzer());
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("+b:one -t:two", q.toString());
 
     try {
       BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
       q = QueryParserUtil
-          .parse(queries, fields, flags2, new StandardAnalyzer());
+          .parse(queries, fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
       fail();
     } catch (IllegalArgumentException e) {
       // expected exception, array length differs
@@ -319,7 +319,7 @@
   }
 
   public void testStopWordSearching() throws Exception {
-    Analyzer analyzer = new StandardAnalyzer();
+    Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
     Directory ramDir = new RAMDirectory();
     IndexWriter iw = new IndexWriter(ramDir, analyzer, true,
         IndexWriter.MaxFieldLength.LIMITED);
@@ -345,7 +345,7 @@
    * Return empty tokens for field "f1".
    */
   private static class AnalyzerReturningNull extends Analyzer {
-    StandardAnalyzer stdAnalyzer = new StandardAnalyzer();
+    StandardAnalyzer stdAnalyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
 
     public AnalyzerReturningNull() {
     }
Index: contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQueryParserWrapper.java
===================================================================
--- contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQueryParserWrapper.java	(revision 826601)
+++ contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQueryParserWrapper.java	(working copy)
@@ -78,7 +78,7 @@
   public void testSimple() throws Exception {
     String[] fields = { "b", "t" };
     MultiFieldQueryParserWrapper mfqp = new MultiFieldQueryParserWrapper(
-        fields, new StandardAnalyzer());
+        fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
 
     Query q = mfqp.parse("one");
     assertEquals("b:one t:one", q.toString());
@@ -146,7 +146,7 @@
     boosts.put("t", Float.valueOf(10));
     String[] fields = { "b", "t" };
     MultiFieldQueryParserWrapper mfqp = new MultiFieldQueryParserWrapper(
-        fields, new StandardAnalyzer(), boosts);
+        fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), boosts);
 
     // Check for simple
     Query q = mfqp.parse("one");
@@ -175,28 +175,28 @@
     String[] fields = { "b", "t" };
     String[] queries = { "one", "two" };
     Query q = MultiFieldQueryParserWrapper.parse(queries, fields,
-        new StandardAnalyzer());
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("b:one t:two", q.toString());
 
     String[] queries2 = { "+one", "+two" };
     q = MultiFieldQueryParserWrapper.parse(queries2, fields,
-        new StandardAnalyzer());
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("(+b:one) (+t:two)", q.toString());
 
     String[] queries3 = { "one", "+two" };
     q = MultiFieldQueryParserWrapper.parse(queries3, fields,
-        new StandardAnalyzer());
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("b:one (+t:two)", q.toString());
 
     String[] queries4 = { "one +more", "+two" };
     q = MultiFieldQueryParserWrapper.parse(queries4, fields,
-        new StandardAnalyzer());
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("(b:one +b:more) (+t:two)", q.toString());
 
     String[] queries5 = { "blah" };
     try {
       q = MultiFieldQueryParserWrapper.parse(queries5, fields,
-          new StandardAnalyzer());
+          new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
       fail();
     } catch (IllegalArgumentException e) {
       // expected exception, array length differs
@@ -220,17 +220,17 @@
     BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
        BooleanClause.Occur.MUST_NOT };
     Query q = MultiFieldQueryParserWrapper.parse("one", fields, flags,
-        new StandardAnalyzer());
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("+b:one -t:one", q.toString());
 
     q = MultiFieldQueryParserWrapper.parse("one two", fields, flags,
-        new StandardAnalyzer());
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
 
     try {
       BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
       q = MultiFieldQueryParserWrapper.parse("blah", fields, flags2,
-          new StandardAnalyzer());
+          new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
       fail();
     } catch (IllegalArgumentException e) {
       // expected exception, array length differs
@@ -244,20 +244,20 @@
     BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
        BooleanClause.Occur.MUST_NOT };
     MultiFieldQueryParserWrapper parser = new MultiFieldQueryParserWrapper(
-        fields, new StandardAnalyzer());
+        fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
 
     Query q = MultiFieldQueryParserWrapper.parse("one", fields, flags,
-        new StandardAnalyzer());// , fields, flags, new StandardAnalyzer());
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));// , fields, flags, new StandardAnalyzer());
     assertEquals("+b:one -t:one", q.toString());
 
     q = MultiFieldQueryParserWrapper.parse("one two", fields, flags,
-        new StandardAnalyzer());
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
 
     try {
       BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
       q = MultiFieldQueryParserWrapper.parse("blah", fields, flags2,
-          new StandardAnalyzer());
+          new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
       fail();
     } catch (IllegalArgumentException e) {
       // expected exception, array length differs
@@ -270,13 +270,13 @@
     BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
         BooleanClause.Occur.MUST_NOT, BooleanClause.Occur.SHOULD };
     Query q = MultiFieldQueryParserWrapper.parse(queries, fields, flags,
-        new StandardAnalyzer());
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("+f1:one -f2:two f3:three", q.toString());
 
     try {
       BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
       q = MultiFieldQueryParserWrapper.parse(queries, fields, flags2,
-          new StandardAnalyzer());
+          new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
       fail();
     } catch (IllegalArgumentException e) {
       // expected exception, array length differs
@@ -289,13 +289,13 @@
     BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
        BooleanClause.Occur.MUST_NOT };
     Query q = MultiFieldQueryParserWrapper.parse(queries, fields, flags,
-        new StandardAnalyzer());
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("+b:one -t:two", q.toString());
 
     try {
       BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
       q = MultiFieldQueryParserWrapper.parse(queries, fields, flags2,
-          new StandardAnalyzer());
+          new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
       fail();
     } catch (IllegalArgumentException e) {
       // expected exception, array length differs
@@ -319,7 +319,7 @@
   }
 
   public void testStopWordSearching() throws Exception {
-    Analyzer analyzer = new StandardAnalyzer();
+    Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
     Directory ramDir = new RAMDirectory();
     IndexWriter iw = new IndexWriter(ramDir, analyzer, true,
         IndexWriter.MaxFieldLength.LIMITED);
@@ -343,7 +343,7 @@
    * Return empty tokens for field "f1".
    */
   private static class AnalyzerReturningNull extends Analyzer {
-    StandardAnalyzer stdAnalyzer = new StandardAnalyzer();
+    StandardAnalyzer stdAnalyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
 
     public AnalyzerReturningNull() {
     }
Index: contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java
===================================================================
--- contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java	(revision 826601)
+++ contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java	(working copy)
@@ -30,6 +30,7 @@
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
+import java.util.Collections;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.KeywordAnalyzer;
@@ -77,6 +78,7 @@
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.store.MockRAMDirectory;
 import org.apache.lucene.util.LocalizedTestCase;
+import org.apache.lucene.util.Version;
 
 /**
  * This test case is a copy of the core Lucene query parser test, it was adapted
@@ -410,7 +412,7 @@
     assertQueryEquals("term 1.0 1 2", null, "term");
     assertQueryEquals("term term1 term2", null, "term term term");
 
-    Analyzer a = new StandardAnalyzer();
+    Analyzer a = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
     assertQueryEquals("3", a, "3");
     assertQueryEquals("term 1.0 1 2", a, "term 1.0 1 2");
     assertQueryEquals("term term1 term2", a, "term term1 term2");
@@ -896,8 +898,7 @@
   }
 
   public void testBoost() throws Exception {
-    StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(
-        new String[] { "on" });
+    StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(Version.LUCENE_CURRENT, Collections.singleton("on"));
     StandardQueryParser qp = new StandardQueryParser();
     qp.setAnalyzer(oneStopAnalyzer);
@@ -913,7 +914,7 @@
     assertNotNull(q);
 
     StandardQueryParser qp2 = new StandardQueryParser();
-    qp2.setAnalyzer(new StandardAnalyzer());
+    qp2.setAnalyzer(new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
 
     q = qp2.parse("the^3", "field");
     // "the" is a stop word so the result is an empty query:
@@ -1069,7 +1070,7 @@
   public void testStopwords() throws Exception {
     StandardQueryParser qp = new StandardQueryParser();
     qp.setAnalyzer(
-        new StopAnalyzer(new String[] { "the", "foo" }));
+        new StopAnalyzer(StopFilter.makeStopSet("the", "foo" ), true));
 
     Query result = qp.parse("a:the OR a:foo", "a");
     assertNotNull("result is null and it shouldn't be", result);
@@ -1090,31 +1091,24 @@
   }
 
   public void testPositionIncrement() throws Exception {
-    boolean dflt = StopFilter.getEnablePositionIncrementsDefault();
-    StopFilter.setEnablePositionIncrementsDefault(true);
-    try {
-      StandardQueryParser qp = new StandardQueryParser();
-      qp.setAnalyzer(
-          new StopAnalyzer(new String[] { "the", "in", "are", "this" }));
+    StandardQueryParser qp = new StandardQueryParser();
+    qp.setAnalyzer(
+        new StopAnalyzer(StopFilter.makeStopSet("the", "in", "are", "this" ), true));
 
-      qp.setEnablePositionIncrements(true);
+    qp.setEnablePositionIncrements(true);
 
-      String qtxt = "\"the words in poisitions pos02578 are stopped in this phrasequery\"";
-      //               0        2                      5       7  8
-      int expectedPositions[] = { 1, 3, 4, 6, 9 };
-      PhraseQuery pq = (PhraseQuery) qp.parse(qtxt, "a");
-      // System.out.println("Query text: "+qtxt);
-      // System.out.println("Result: "+pq);
-      Term t[] = pq.getTerms();
-      int pos[] = pq.getPositions();
-      for (int i = 0; i < t.length; i++) {
-        // System.out.println(i+". "+t[i]+" pos: "+pos[i]);
-        assertEquals("term " + i + " = " + t[i] + " has wrong term-position!",
-            expectedPositions[i], pos[i]);
-      }
-
-    } finally {
-      StopFilter.setEnablePositionIncrementsDefault(dflt);
+    String qtxt = "\"the words in poisitions pos02578 are stopped in this phrasequery\"";
+    //               0        2                      5       7  8
+    int expectedPositions[] = { 1, 3, 4, 6, 9 };
+    PhraseQuery pq = (PhraseQuery) qp.parse(qtxt, "a");
+    // System.out.println("Query text: "+qtxt);
+    // System.out.println("Result: "+pq);
+    Term t[] = pq.getTerms();
+    int pos[] = pq.getPositions();
+    for (int i = 0; i < t.length; i++) {
+      // System.out.println(i+". "+t[i]+" pos: "+pos[i]);
+      assertEquals("term " + i + " = " + t[i] + " has wrong term-position!",
+          expectedPositions[i], pos[i]);
     }
   }
Index: contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQueryParserWrapper.java
===================================================================
--- contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQueryParserWrapper.java	(revision 826601)
+++ contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQueryParserWrapper.java	(working copy)
@@ -28,6 +28,7 @@
 import java.util.HashSet;
 import java.util.List;
 import java.util.Locale;
+import java.util.Collections;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.KeywordAnalyzer;
@@ -72,6 +73,7 @@
 import org.apache.lucene.search.WildcardQuery;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LocalizedTestCase;
+import org.apache.lucene.util.Version;
 
 /**
  * This test case is a copy of the core Lucene query parser test, it was adapted
@@ -375,7 +377,7 @@
        "+(title:dog title:cat) -author:\"bob dole\"");
 
     QueryParserWrapper qp = new QueryParserWrapper("field",
-        new StandardAnalyzer());
+        new StandardAnalyzer(Version.LUCENE_CURRENT));
     // make sure OR is the default:
     assertEquals(QueryParserWrapper.OR_OPERATOR, qp.getDefaultOperator());
     qp.setDefaultOperator(QueryParserWrapper.AND_OPERATOR);
@@ -406,7 +408,7 @@
     assertQueryEquals("term 1.0 1 2", null, "term");
     assertQueryEquals("term term1 term2", null, "term term term");
 
-    Analyzer a = new StandardAnalyzer();
+    Analyzer a = new StandardAnalyzer(Version.LUCENE_CURRENT);
     assertQueryEquals("3", a, "3");
     assertQueryEquals("term 1.0 1 2", a, "term 1.0 1 2");
     assertQueryEquals("term term1 term2", a, "term term1 term2");
@@ -882,8 +884,7 @@
   }
 
   public void testBoost() throws Exception {
-    StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(
-        new String[] { "on" });
+    StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(Version.LUCENE_CURRENT, Collections.singleton("on"));
     QueryParserWrapper qp = new QueryParserWrapper("field", oneStopAnalyzer);
     Query q = qp.parse("on^1.0");
     assertNotNull(q);
@@ -897,7 +898,7 @@
     assertNotNull(q);
 
     QueryParserWrapper qp2 = new QueryParserWrapper("field",
-        new StandardAnalyzer());
+        new StandardAnalyzer(Version.LUCENE_CURRENT));
     q = qp2.parse("the^3");
     // "the" is a stop word so the result is an empty query:
     assertNotNull(q);
@@ -1047,8 +1048,7 @@
   }
 
   public void testStopwords() throws Exception {
-    QueryParserWrapper qp = new QueryParserWrapper("a", new StopAnalyzer(
-        new String[] { "the", "foo" }));
+    QueryParserWrapper qp = new QueryParserWrapper("a", new StopAnalyzer(StopFilter.makeStopSet("the", "foo"), false));
     Query result = qp.parse("a:the OR a:foo");
     assertNotNull("result is null and it shouldn't be", result);
     assertTrue("result is not a BooleanQuery", result instanceof BooleanQuery);
@@ -1067,28 +1067,20 @@
   }
 
   public void testPositionIncrement() throws Exception {
-    boolean dflt = StopFilter.getEnablePositionIncrementsDefault();
-    StopFilter.setEnablePositionIncrementsDefault(true);
-    try {
-      QueryParserWrapper qp = new QueryParserWrapper("a", new StopAnalyzer(
-          new String[] { "the", "in", "are", "this" }));
-      qp.setEnablePositionIncrements(true);
-      String qtxt = "\"the words in poisitions pos02578 are stopped in this phrasequery\"";
-      //               0        2                      5       7  8
-      int expectedPositions[] = { 1, 3, 4, 6, 9 };
-      PhraseQuery pq = (PhraseQuery) qp.parse(qtxt);
-      // System.out.println("Query text: "+qtxt);
-      // System.out.println("Result: "+pq);
-      Term t[] = pq.getTerms();
-      int pos[] = pq.getPositions();
-      for (int i = 0; i < t.length; i++) {
-        // System.out.println(i+". "+t[i]+" pos: "+pos[i]);
-        assertEquals("term " + i + " = " + t[i] + " has wrong term-position!",
-            expectedPositions[i], pos[i]);
-      }
-
-    } finally {
-      StopFilter.setEnablePositionIncrementsDefault(dflt);
+    QueryParserWrapper qp = new QueryParserWrapper("a", new StopAnalyzer(StopFilter.makeStopSet("the", "in", "are", "this"), true));
+    qp.setEnablePositionIncrements(true);
+    String qtxt = "\"the words in poisitions pos02578 are stopped in this phrasequery\"";
+    //               0        2                      5       7  8
+    int expectedPositions[] = { 1, 3, 4, 6, 9 };
+    PhraseQuery pq = (PhraseQuery) qp.parse(qtxt);
+    // System.out.println("Query text: "+qtxt);
+    // System.out.println("Result: "+pq);
+    Term t[] = pq.getTerms();
+    int pos[] = pq.getPositions();
+    for (int i = 0; i < t.length; i++) {
+      // System.out.println(i+". "+t[i]+" pos: "+pos[i]);
+      assertEquals("term " + i + " = " + t[i] + " has wrong term-position!",
+          expectedPositions[i], pos[i]);
     }
   }
Index: contrib/regex/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java
===================================================================
--- contrib/regex/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java	(revision 826601)
+++ contrib/regex/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java	(working copy)
@@ -109,14 +109,14 @@
        Field.Index.ANALYZED_NO_NORMS));
 
     // creating first index writer
-    IndexWriter writerA = new IndexWriter(indexStoreA, new StandardAnalyzer(),
+    IndexWriter writerA = new IndexWriter(indexStoreA, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
        true, IndexWriter.MaxFieldLength.LIMITED);
     writerA.addDocument(lDoc);
     writerA.optimize();
     writerA.close();
 
     // creating second index writer
-    IndexWriter writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(),
+    IndexWriter writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
        true, IndexWriter.MaxFieldLength.LIMITED);
     writerB.addDocument(lDoc2);
     writerB.optimize();
Index: contrib/snowball/src/java/org/apache/lucene/analysis/snowball/SnowballAnalyzer.java
===================================================================
--- contrib/snowball/src/java/org/apache/lucene/analysis/snowball/SnowballAnalyzer.java	(revision 826601)
+++ contrib/snowball/src/java/org/apache/lucene/analysis/snowball/SnowballAnalyzer.java	(working copy)
@@ -55,7 +55,7 @@
     result = new StandardFilter(result);
     result = new LowerCaseFilter(result);
     if (stopSet != null)
-      result = new StopFilter(result, stopSet);
+      result = new StopFilter(false, result, stopSet);
     result = new SnowballFilter(result, name);
     return result;
   }
@@ -84,7 +84,7 @@
       streams.result = new StandardFilter(streams.source);
       streams.result = new LowerCaseFilter(streams.result);
       if (stopSet != null)
-        streams.result = new StopFilter(streams.result, stopSet);
+        streams.result = new StopFilter(false, streams.result, stopSet);
       streams.result = new SnowballFilter(streams.result, name);
       setPreviousTokenStream(streams);
     } else {
Index: contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java
===================================================================
--- contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java	(revision 826601)
+++ contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java	(working copy)
@@ -42,6 +42,7 @@
 import org.apache.lucene.search.Searcher;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.FSDirectory;
+import org.apache.lucene.util.Version;
 
 /**
@@ -77,7 +78,7 @@
 		String query = args[1];
 		String field = "contents";
 
-		Query q = expand( query, searcher, new StandardAnalyzer(), field, 0.9f);
+		Query q = expand( query, searcher, new StandardAnalyzer(Version.LUCENE_CURRENT), field, 0.9f);
 		System.out.println( "Query: " + q.toString( field));
 
@@ -112,7 +113,7 @@
 		final Set already = new HashSet(); // avoid dups
 		List top = new LinkedList(); // needs to be separately listed..
 		final String field = ( f == null) ?
"contents" : f; - if ( a == null) a = new StandardAnalyzer(); + if ( a == null) a = new StandardAnalyzer(Version.LUCENE_CURRENT); // [1] Parse query into separate words so that when we expand we can avoid dups TokenStream ts = a.tokenStream( field, new StringReader( query)); Index: contrib/wordnet/src/java/org/apache/lucene/wordnet/Syns2Index.java =================================================================== --- contrib/wordnet/src/java/org/apache/lucene/wordnet/Syns2Index.java (revision 826601) +++ contrib/wordnet/src/java/org/apache/lucene/wordnet/Syns2Index.java (working copy) @@ -36,6 +36,7 @@ import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.store.FSDirectory; +import org.apache.lucene.util.Version; /** * Convert the prolog file wn_s.pl from the WordNet prolog download @@ -88,7 +89,7 @@ /** * */ - private static final Analyzer ana = new StandardAnalyzer(); + private static final Analyzer ana = new StandardAnalyzer(Version.LUCENE_CURRENT); /** * Takes arg of prolog file name and index directory. Index: contrib/xml-query-parser/src/demo/java/org/apache/lucene/xmlparser/webdemo/FormBasedXmlQueryDemo.java =================================================================== --- contrib/xml-query-parser/src/demo/java/org/apache/lucene/xmlparser/webdemo/FormBasedXmlQueryDemo.java (revision 826601) +++ contrib/xml-query-parser/src/demo/java/org/apache/lucene/xmlparser/webdemo/FormBasedXmlQueryDemo.java (working copy) @@ -51,7 +51,7 @@ private QueryTemplateManager queryTemplateManager; private CorePlusExtensionsParser xmlParser; private IndexSearcher searcher; - private Analyzer analyzer=new StandardAnalyzer(); + private Analyzer analyzer=new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT); public void init(ServletConfig config) throws ServletException { super.init(config); Index: contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java =================================================================== --- contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java (revision 826601) +++ contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java (working copy) @@ -39,7 +39,8 @@ CoreParser builder; static Directory dir; - Analyzer analyzer=new StandardAnalyzer(); + // TODO: change to CURRENT and rewrite test (this needs to set QueryParser.enablePositionIncrements, too, for work with CURRENT): + Analyzer analyzer=new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_24); IndexReader reader; private IndexSearcher searcher; Index: contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestQueryTemplateManager.java =================================================================== --- contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestQueryTemplateManager.java (revision 826601) +++ contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestQueryTemplateManager.java (working copy) @@ -42,7 +42,7 @@ public class TestQueryTemplateManager extends TestCase { CoreParser builder; - Analyzer analyzer=new StandardAnalyzer(); + Analyzer analyzer=new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT); private IndexSearcher searcher; //A collection of documents' field values for use in our tests Index: src/java/org/apache/lucene/analysis/Analyzer.java =================================================================== --- src/java/org/apache/lucene/analysis/Analyzer.java (revision 826601) +++ 
src/java/org/apache/lucene/analysis/Analyzer.java (working copy) @@ -84,25 +84,18 @@ } } - protected boolean overridesTokenStreamMethod; + /** @deprecated */ + protected boolean overridesTokenStreamMethod = false; /** @deprecated This is only present to preserve * back-compat of classes that subclass a core analyzer * and override tokenStream but not reusableTokenStream */ - protected void setOverridesTokenStreamMethod(Class baseClass) { - - final Class[] params = new Class[2]; - params[0] = String.class; - params[1] = Reader.class; - + protected void setOverridesTokenStreamMethod(Class baseClass) { try { - Method m = this.getClass().getMethod("tokenStream", params); - if (m != null) { - overridesTokenStreamMethod = m.getDeclaringClass() != baseClass; - } else { - overridesTokenStreamMethod = false; - } + Method m = this.getClass().getMethod("tokenStream", String.class, Reader.class); + overridesTokenStreamMethod = m.getDeclaringClass() != baseClass; } catch (NoSuchMethodException nsme) { + // cannot happen, as baseClass is subclass of Analyzer through generics overridesTokenStreamMethod = false; } } @@ -121,8 +114,7 @@ * @param fieldName Fieldable name being indexed. * @return position increment gap, added to the next token emitted from {@link #tokenStream(String,Reader)} */ - public int getPositionIncrementGap(String fieldName) - { + public int getPositionIncrementGap(String fieldName) { return 0; } Index: src/java/org/apache/lucene/analysis/standard/StandardAnalyzer.java =================================================================== --- src/java/org/apache/lucene/analysis/standard/StandardAnalyzer.java (revision 826601) +++ src/java/org/apache/lucene/analysis/standard/StandardAnalyzer.java (working copy) @@ -36,86 +36,25 @@ * */ public class StandardAnalyzer extends Analyzer { - private Set stopSet; + private Set<?> stopSet; /** * Specifies whether deprecated acronyms should be replaced with HOST type. - * This is false by default to support backward compatibility. - * - * @deprecated this should be removed in the next release (3.0). - * - * See https://issues.apache.org/jira/browse/LUCENE-1068 + * See <a href="https://issues.apache.org/jira/browse/LUCENE-1068">LUCENE-1068</a> */ - private boolean replaceInvalidAcronym = defaultReplaceInvalidAcronym; + private final boolean replaceInvalidAcronym, enableStopPositionIncrements; - private static boolean defaultReplaceInvalidAcronym; - private boolean enableStopPositionIncrements; - - // @deprecated - private boolean useDefaultStopPositionIncrements; - - // Default to true (fixed the bug), unless the system prop is set - static { - final String v = System.getProperty("org.apache.lucene.analysis.standard.StandardAnalyzer.replaceInvalidAcronym"); - if (v == null || v.equals("true")) - defaultReplaceInvalidAcronym = true; - else - defaultReplaceInvalidAcronym = false; - } - - /** - * - * @return true if new instances of StandardTokenizer will - * replace mischaracterized acronyms - * - * See https://issues.apache.org/jira/browse/LUCENE-1068 - * @deprecated This will be removed (hardwired to true) in 3.0 - */ - public static boolean getDefaultReplaceInvalidAcronym() { - return defaultReplaceInvalidAcronym; - } - - /** - * - * @param replaceInvalidAcronym Set to true to have new - * instances of StandardTokenizer replace mischaracterized - * acronyms by default. Set to false to preserve the - * previous (before 2.4) buggy behavior.
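// Sketch (illustrative, not part of the patch): the simplified check above
// asks reflection for the most-derived declaration of tokenStream(String,
// Reader); a subclass that overrides it reports itself as the declaring class:
//
//   Method m = getClass().getMethod("tokenStream", String.class, Reader.class);
//   boolean overrides = (m.getDeclaringClass() != Analyzer.class);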
Alternatively, - * set the system property - * org.apache.lucene.analysis.standard.StandardAnalyzer.replaceInvalidAcronym - * to false. - * - * See https://issues.apache.org/jira/browse/LUCENE-1068 - * @deprecated This will be removed (hardwired to true) in 3.0 - */ - public static void setDefaultReplaceInvalidAcronym(boolean replaceInvalidAcronym) { - defaultReplaceInvalidAcronym = replaceInvalidAcronym; - } - - - /** An array containing some common English words that are usually not - useful for searching. - @deprecated Use {@link #STOP_WORDS_SET} instead */ - public static final String[] STOP_WORDS = StopAnalyzer.ENGLISH_STOP_WORDS; - /** An unmodifiable set containing some common English words that are usually not useful for searching. */ - public static final Set/**/ STOP_WORDS_SET = StopAnalyzer.ENGLISH_STOP_WORDS_SET; + public static final Set<?> STOP_WORDS_SET = StopAnalyzer.ENGLISH_STOP_WORDS_SET; /** Builds an analyzer with the default stop words ({@link * #STOP_WORDS_SET}). - * @deprecated Use {@link #StandardAnalyzer(Version)} instead. */ - public StandardAnalyzer() { - this(Version.LUCENE_24, STOP_WORDS_SET); - } - - /** Builds an analyzer with the default stop words ({@link - * #STOP_WORDS}). * @param matchVersion Lucene version to match See {@link * above} */ @@ -124,141 +63,34 @@ } /** Builds an analyzer with the given stop words. - * @deprecated Use {@link #StandardAnalyzer(Version, Set)} - * instead */ - public StandardAnalyzer(Set stopWords) { - this(Version.LUCENE_24, stopWords); - } - - /** Builds an analyzer with the given stop words. * @param matchVersion Lucene version to match See {@link * above} * @param stopWords stop words */ - public StandardAnalyzer(Version matchVersion, Set stopWords) { + public StandardAnalyzer(Version matchVersion, Set<?> stopWords) { stopSet = stopWords; - init(matchVersion); + setOverridesTokenStreamMethod(StandardAnalyzer.class); + enableStopPositionIncrements = matchVersion.onOrAfter(Version.LUCENE_29); + replaceInvalidAcronym = matchVersion.onOrAfter(Version.LUCENE_24); } - /** Builds an analyzer with the given stop words. - * @deprecated Use {@link #StandardAnalyzer(Version, Set)} instead */ - public StandardAnalyzer(String[] stopWords) { - this(Version.LUCENE_24, StopFilter.makeStopSet(stopWords)); - } - /** Builds an analyzer with the stop words from the given file. * @see WordlistLoader#getWordSet(File) - * @deprecated Use {@link #StandardAnalyzer(Version, File)} - * instead - */ - public StandardAnalyzer(File stopwords) throws IOException { - this(Version.LUCENE_24, stopwords); - } - - /** Builds an analyzer with the stop words from the given file. - * @see WordlistLoader#getWordSet(File) * @param matchVersion Lucene version to match See {@link * above} * @param stopwords File to read stop words from */ public StandardAnalyzer(Version matchVersion, File stopwords) throws IOException { - stopSet = WordlistLoader.getWordSet(stopwords); - init(matchVersion); + this(matchVersion, WordlistLoader.getWordSet(stopwords)); } /** Builds an analyzer with the stop words from the given reader. * @see WordlistLoader#getWordSet(Reader) - * @deprecated Use {@link #StandardAnalyzer(Version, Reader)} - * instead - */ - public StandardAnalyzer(Reader stopwords) throws IOException { - this(Version.LUCENE_24, stopwords); - } - - /** Builds an analyzer with the stop words from the given reader.
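// Usage sketch for the Version-based constructors (illustrative; variable
// names are made up, flags as wired in the constructor above):
//
//   StandardAnalyzer a = new StandardAnalyzer(Version.LUCENE_CURRENT,
//       StandardAnalyzer.STOP_WORDS_SET);
//   // matchVersion.onOrAfter(LUCENE_29): removed stop words leave position gaps
//   // matchVersion.onOrAfter(LUCENE_24): invalid acronyms handled per LUCENE-1068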
- * @see WordlistLoader#getWordSet(Reader) * @param matchVersion Lucene version to match See {@link * above} * @param stopwords Reader to read stop words from */ public StandardAnalyzer(Version matchVersion, Reader stopwords) throws IOException { - stopSet = WordlistLoader.getWordSet(stopwords); - init(matchVersion); + this(matchVersion, WordlistLoader.getWordSet(stopwords)); } - /** - * - * @param replaceInvalidAcronym Set to true if this analyzer should replace mischaracterized acronyms in the StandardTokenizer - * - * See https://issues.apache.org/jira/browse/LUCENE-1068 - * - * @deprecated Remove in 3.X and make true the only valid value - */ - public StandardAnalyzer(boolean replaceInvalidAcronym) { - this(Version.LUCENE_24, STOP_WORDS_SET); - this.replaceInvalidAcronym = replaceInvalidAcronym; - useDefaultStopPositionIncrements = true; - } - - /** - * @param stopwords The stopwords to use - * @param replaceInvalidAcronym Set to true if this analyzer should replace mischaracterized acronyms in the StandardTokenizer - * - * See https://issues.apache.org/jira/browse/LUCENE-1068 - * - * @deprecated Remove in 3.X and make true the only valid value - */ - public StandardAnalyzer(Reader stopwords, boolean replaceInvalidAcronym) throws IOException{ - this(Version.LUCENE_24, stopwords); - this.replaceInvalidAcronym = replaceInvalidAcronym; - } - - /** - * @param stopwords The stopwords to use - * @param replaceInvalidAcronym Set to true if this analyzer should replace mischaracterized acronyms in the StandardTokenizer - * - * See https://issues.apache.org/jira/browse/LUCENE-1068 - * - * @deprecated Remove in 3.X and make true the only valid value - */ - public StandardAnalyzer(File stopwords, boolean replaceInvalidAcronym) throws IOException{ - this(Version.LUCENE_24, stopwords); - this.replaceInvalidAcronym = replaceInvalidAcronym; - } - - /** - * - * @param stopwords The stopwords to use - * @param replaceInvalidAcronym Set to true if this analyzer should replace mischaracterized acronyms in the StandardTokenizer - * - * See https://issues.apache.org/jira/browse/LUCENE-1068 - * - * @deprecated Remove in 3.X and make true the only valid value - */ - public StandardAnalyzer(String [] stopwords, boolean replaceInvalidAcronym) throws IOException{ - this(Version.LUCENE_24, StopFilter.makeStopSet(stopwords)); - this.replaceInvalidAcronym = replaceInvalidAcronym; - } - - /** - * @param stopwords The stopwords to use - * @param replaceInvalidAcronym Set to true if this analyzer should replace mischaracterized acronyms in the StandardTokenizer - * - * See https://issues.apache.org/jira/browse/LUCENE-1068 - * - * @deprecated Remove in 3.X and make true the only valid value - */ - public StandardAnalyzer(Set stopwords, boolean replaceInvalidAcronym) throws IOException{ - this(Version.LUCENE_24, stopwords); - this.replaceInvalidAcronym = replaceInvalidAcronym; - } - - private final void init(Version matchVersion) { - setOverridesTokenStreamMethod(StandardAnalyzer.class); - if (matchVersion.onOrAfter(Version.LUCENE_29)) { - enableStopPositionIncrements = true; - } else { - useDefaultStopPositionIncrements = true; - } - } - /** Constructs a {@link StandardTokenizer} filtered by a {@link StandardFilter}, a {@link LowerCaseFilter} and a {@link StopFilter}. 
*/ public TokenStream tokenStream(String fieldName, Reader reader) { @@ -266,11 +98,7 @@ tokenStream.setMaxTokenLength(maxTokenLength); TokenStream result = new StandardFilter(tokenStream); result = new LowerCaseFilter(result); - if (useDefaultStopPositionIncrements) { - result = new StopFilter(result, stopSet); - } else { - result = new StopFilter(enableStopPositionIncrements, result, stopSet); - } + result = new StopFilter(enableStopPositionIncrements, result, stopSet); return result; } @@ -301,7 +129,6 @@ return maxTokenLength; } - /** @deprecated Use {@link #tokenStream} instead */ public TokenStream reusableTokenStream(String fieldName, Reader reader) throws IOException { if (overridesTokenStreamMethod) { // LUCENE-1678: force fallback to tokenStream() if we @@ -316,11 +143,7 @@ streams.tokenStream = new StandardTokenizer(reader); streams.filteredTokenStream = new StandardFilter(streams.tokenStream); streams.filteredTokenStream = new LowerCaseFilter(streams.filteredTokenStream); - if (useDefaultStopPositionIncrements) { - streams.filteredTokenStream = new StopFilter(streams.filteredTokenStream, stopSet); - } else { - streams.filteredTokenStream = new StopFilter(enableStopPositionIncrements, streams.filteredTokenStream, stopSet); - } + streams.filteredTokenStream = new StopFilter(enableStopPositionIncrements, streams.filteredTokenStream, stopSet); } else { streams.tokenStream.reset(reader); } @@ -330,26 +153,4 @@ return streams.filteredTokenStream; } - - /** - * - * @return true if this Analyzer is replacing mischaracterized acronyms in the StandardTokenizer - * - * See https://issues.apache.org/jira/browse/LUCENE-1068 - * @deprecated This will be removed (hardwired to true) in 3.0 - */ - public boolean isReplaceInvalidAcronym() { - return replaceInvalidAcronym; - } - - /** - * - * @param replaceInvalidAcronym Set to true if this Analyzer is replacing mischaracterized acronyms in the StandardTokenizer - * - * See https://issues.apache.org/jira/browse/LUCENE-1068 - * @deprecated This will be removed (hardwired to true) in 3.0 - */ - public void setReplaceInvalidAcronym(boolean replaceInvalidAcronym) { - this.replaceInvalidAcronym = replaceInvalidAcronym; - } } Index: src/java/org/apache/lucene/analysis/StopAnalyzer.java =================================================================== --- src/java/org/apache/lucene/analysis/StopAnalyzer.java (revision 826601) +++ src/java/org/apache/lucene/analysis/StopAnalyzer.java (working copy) @@ -22,147 +22,72 @@ import java.io.Reader; import java.util.Arrays; import java.util.Set; +import java.util.List; /** Filters {@link LetterTokenizer} with {@link LowerCaseFilter} and {@link StopFilter}. */ public final class StopAnalyzer extends Analyzer { - private final Set/**/ stopWords; - // @deprecated - private final boolean useDefaultStopPositionIncrement; + private final Set stopWords; private final boolean enablePositionIncrements; - - /** An array containing some common English words that are not usually useful - for searching. 
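// The single-path tokenStream above amounts to this fixed chain (sketch,
// assuming a Reader named reader and the fields set in the constructor):
//
//   TokenStream result = new StandardFilter(new StandardTokenizer(reader));
//   result = new LowerCaseFilter(result);
//   result = new StopFilter(enableStopPositionIncrements, result, stopSet);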
- @deprecated Use {@link #ENGLISH_STOP_WORDS_SET} instead */ - public static final String[] ENGLISH_STOP_WORDS = { - "a", "an", "and", "are", "as", "at", "be", "but", "by", - "for", "if", "in", "into", "is", "it", - "no", "not", "of", "on", "or", "such", - "that", "the", "their", "then", "there", "these", - "they", "this", "to", "was", "will", "with" - }; /** An unmodifiable set containing some common English words that are not usually useful for searching.*/ - public static final Set/**/ ENGLISH_STOP_WORDS_SET; + public static final Set<?> ENGLISH_STOP_WORDS_SET; static { - final String[] stopWords = new String[]{ - "a", "an", "and", "are", "as", "at", "be", "but", "by", - "for", "if", "in", "into", "is", "it", - "no", "not", "of", "on", "or", "such", - "that", "the", "their", "then", "there", "these", - "they", "this", "to", "was", "will", "with" - }; - final CharArraySet stopSet = new CharArraySet(stopWords.length, false); - stopSet.addAll(Arrays.asList(stopWords)); - ENGLISH_STOP_WORDS_SET = CharArraySet.unmodifiableSet(stopSet); + final List<String> stopWords = Arrays.asList( + "a", "an", "and", "are", "as", "at", "be", "but", "by", + "for", "if", "in", "into", "is", "it", + "no", "not", "of", "on", "or", "such", + "that", "the", "their", "then", "there", "these", + "they", "this", "to", "was", "will", "with" + ); + final CharArraySet stopSet = new CharArraySet(stopWords.size(), false); + stopSet.addAll(stopWords); + ENGLISH_STOP_WORDS_SET = CharArraySet.unmodifiableSet(stopSet); } /** Builds an analyzer which removes words in - * ENGLISH_STOP_WORDS. - * @deprecated Use {@link #StopAnalyzer(boolean)} instead */ - public StopAnalyzer() { - stopWords = ENGLISH_STOP_WORDS_SET; - useDefaultStopPositionIncrement = true; - enablePositionIncrements = false; - } - - /** Builds an analyzer which removes words in - * ENGLISH_STOP_WORDS. + * {@link #ENGLISH_STOP_WORDS_SET}. * @param enablePositionIncrements See {@link * StopFilter#setEnablePositionIncrements} */ public StopAnalyzer(boolean enablePositionIncrements) { stopWords = ENGLISH_STOP_WORDS_SET; this.enablePositionIncrements = enablePositionIncrements; - useDefaultStopPositionIncrement = false; } /** Builds an analyzer with the stop words from the given set. - * @deprecated Use {@link #StopAnalyzer(Set, boolean)} instead */ - public StopAnalyzer(Set stopWords) { - this.stopWords = stopWords; - useDefaultStopPositionIncrement = true; - enablePositionIncrements = false; - } - - /** Builds an analyzer with the stop words from the given set. * @param stopWords Set of stop words * @param enablePositionIncrements See {@link * StopFilter#setEnablePositionIncrements} */ - public StopAnalyzer(Set stopWords, boolean enablePositionIncrements) { + public StopAnalyzer(Set<?> stopWords, boolean enablePositionIncrements) { this.stopWords = stopWords; this.enablePositionIncrements = enablePositionIncrements; - useDefaultStopPositionIncrement = false; } - /** Builds an analyzer which removes words in the provided array. - * @deprecated Use {@link #StopAnalyzer(Set, boolean)} instead */ - public StopAnalyzer(String[] stopWords) { - this.stopWords = StopFilter.makeStopSet(stopWords); - useDefaultStopPositionIncrement = true; - enablePositionIncrements = false; - } - - /** Builds an analyzer which removes words in the provided array.
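// Migration sketch for callers of the removed String[] constructors
// (illustrative; mirrors the test updates elsewhere in this patch):
//
//   // before: new StopAnalyzer(new String[] { "the", "foo" })
//   StopAnalyzer a = new StopAnalyzer(StopFilter.makeStopSet("the", "foo"), true);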
- * @param stopWords Array of stop words - * @param enablePositionIncrements See {@link - * StopFilter#setEnablePositionIncrements} - * @deprecated Use {@link #StopAnalyzer(Set, boolean)} instead*/ - public StopAnalyzer(String[] stopWords, boolean enablePositionIncrements) { - this.stopWords = StopFilter.makeStopSet(stopWords); - this.enablePositionIncrements = enablePositionIncrements; - useDefaultStopPositionIncrement = false; - } - /** Builds an analyzer with the stop words from the given file. * @see WordlistLoader#getWordSet(File) - * @deprecated Use {@link #StopAnalyzer(File, boolean)} instead */ - public StopAnalyzer(File stopwordsFile) throws IOException { - stopWords = WordlistLoader.getWordSet(stopwordsFile); - useDefaultStopPositionIncrement = true; - enablePositionIncrements = false; - } - - /** Builds an analyzer with the stop words from the given file. - * @see WordlistLoader#getWordSet(File) * @param stopwordsFile File to load stop words from * @param enablePositionIncrements See {@link * StopFilter#setEnablePositionIncrements} */ public StopAnalyzer(File stopwordsFile, boolean enablePositionIncrements) throws IOException { stopWords = WordlistLoader.getWordSet(stopwordsFile); this.enablePositionIncrements = enablePositionIncrements; - useDefaultStopPositionIncrement = false; } /** Builds an analyzer with the stop words from the given reader. * @see WordlistLoader#getWordSet(Reader) - * @deprecated Use {@link #StopAnalyzer(Reader, boolean)} instead - */ - public StopAnalyzer(Reader stopwords) throws IOException { - stopWords = WordlistLoader.getWordSet(stopwords); - useDefaultStopPositionIncrement = true; - enablePositionIncrements = false; - } - - /** Builds an analyzer with the stop words from the given reader. - * @see WordlistLoader#getWordSet(Reader) * @param stopwords Reader to load stop words from * @param enablePositionIncrements See {@link * StopFilter#setEnablePositionIncrements} */ public StopAnalyzer(Reader stopwords, boolean enablePositionIncrements) throws IOException { stopWords = WordlistLoader.getWordSet(stopwords); this.enablePositionIncrements = enablePositionIncrements; - useDefaultStopPositionIncrement = false; } /** Filters LowerCaseTokenizer with StopFilter. */ public TokenStream tokenStream(String fieldName, Reader reader) { - if (useDefaultStopPositionIncrement) { - return new StopFilter(new LowerCaseTokenizer(reader), stopWords); - } else { - return new StopFilter(enablePositionIncrements, new LowerCaseTokenizer(reader), stopWords); - } + return new StopFilter(enablePositionIncrements, new LowerCaseTokenizer(reader), stopWords); } /** Filters LowerCaseTokenizer with StopFilter. 
*/ @@ -175,11 +100,7 @@ if (streams == null) { streams = new SavedStreams(); streams.source = new LowerCaseTokenizer(reader); - if (useDefaultStopPositionIncrement) { - streams.result = new StopFilter(streams.source, stopWords); - } else { - streams.result = new StopFilter(enablePositionIncrements, streams.source, stopWords); - } + streams.result = new StopFilter(enablePositionIncrements, streams.source, stopWords); setPreviousTokenStream(streams); } else streams.source.reset(reader); Index: src/java/org/apache/lucene/analysis/StopFilter.java =================================================================== --- src/java/org/apache/lucene/analysis/StopFilter.java (revision 826601) +++ src/java/org/apache/lucene/analysis/StopFilter.java (working copy) @@ -32,64 +32,14 @@ public final class StopFilter extends TokenFilter { - // deprecated - private static boolean ENABLE_POSITION_INCREMENTS_DEFAULT = false; - private final CharArraySet stopWords; - private boolean enablePositionIncrements = ENABLE_POSITION_INCREMENTS_DEFAULT; + private boolean enablePositionIncrements = false; private TermAttribute termAtt; private PositionIncrementAttribute posIncrAtt; - - /** - * Construct a token stream filtering the given input. - * @deprecated Use {@link #StopFilter(boolean, TokenStream, String[])} instead - */ - public StopFilter(TokenStream input, String [] stopWords) - { - this(ENABLE_POSITION_INCREMENTS_DEFAULT, input, stopWords, false); - } /** * Construct a token stream filtering the given input. - * @param enablePositionIncrements true if token positions should record the removed stop words - * @param input input TokenStream - * @param stopWords array of stop words - * @deprecated Use {@link #StopFilter(boolean, TokenStream, Set)} instead. - */ - public StopFilter(boolean enablePositionIncrements, TokenStream input, String [] stopWords) - { - this(enablePositionIncrements, input, stopWords, false); - } - - /** - * Constructs a filter which removes words from the input - * TokenStream that are named in the array of words. - * @deprecated Use {@link #StopFilter(boolean, TokenStream, String[], boolean)} instead - */ - public StopFilter(TokenStream in, String[] stopWords, boolean ignoreCase) { - this(ENABLE_POSITION_INCREMENTS_DEFAULT, in, stopWords, ignoreCase); - } - - /** - * Constructs a filter which removes words from the input - * TokenStream that are named in the array of words. - * @param enablePositionIncrements true if token positions should record the removed stop words - * @param in input TokenStream - * @param stopWords array of stop words - * @param ignoreCase true if case is ignored - * @deprecated Use {@link #StopFilter(boolean, TokenStream, Set, boolean)} instead. - */ - public StopFilter(boolean enablePositionIncrements, TokenStream in, String[] stopWords, boolean ignoreCase) { - super(in); - this.stopWords = (CharArraySet)makeStopSet(stopWords, ignoreCase); - this.enablePositionIncrements = enablePositionIncrements; - init(); - } - - - /** - * Construct a token stream filtering the given input. * If stopWords is an instance of {@link CharArraySet} (true if * makeStopSet() was used to construct the set) it will be directly used * and ignoreCase will be ignored since CharArraySet @@ -99,33 +49,13 @@ * a new CharArraySet will be constructed and ignoreCase will be * used to specify the case sensitivity of that set. * - * @param input - * @param stopWords The set of Stop Words. - * @param ignoreCase -Ignore case when stopping. 
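// All removed StopFilter overloads funnel into the two surviving constructors;
// usage sketch (reader and the tokenizer choice are stand-ins):
//
//   TokenStream ts = new StopFilter(true, new WhitespaceTokenizer(reader),
//       StopFilter.makeStopSet("is", "the"));
//   // first argument: enablePositionIncrements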
- * @deprecated Use {@link #StopFilter(boolean, TokenStream, Set, boolean)} instead - */ - public StopFilter(TokenStream input, Set stopWords, boolean ignoreCase) - { - this(ENABLE_POSITION_INCREMENTS_DEFAULT, input, stopWords, ignoreCase); - } - - /** - * Construct a token stream filtering the given input. - * If stopWords is an instance of {@link CharArraySet} (true if - * makeStopSet() was used to construct the set) it will be directly used - * and ignoreCase will be ignored since CharArraySet - * directly controls case sensitivity. - *
- If stopWords is not an instance of {@link CharArraySet}, - * a new CharArraySet will be constructed and ignoreCase will be - * used to specify the case sensitivity of that set. - * * @param enablePositionIncrements true if token positions should record the removed stop words * @param input Input TokenStream - * @param stopWords The set of Stop Words. - * @param ignoreCase -Ignore case when stopping. + * @param stopWords A Set of Strings or char[] or any other toString()-able set representing the stopwords + * @param ignoreCase if true, all words are lower cased first */ - public StopFilter(boolean enablePositionIncrements, TokenStream input, Set stopWords, boolean ignoreCase) + public StopFilter(boolean enablePositionIncrements, TokenStream input, Set<?> stopWords, boolean ignoreCase) { super(input); if (stopWords instanceof CharArraySet) { @@ -135,37 +65,22 @@ this.stopWords.addAll(stopWords); } this.enablePositionIncrements = enablePositionIncrements; - init(); + termAtt = addAttribute(TermAttribute.class); + posIncrAtt = addAttribute(PositionIncrementAttribute.class); } /** * Constructs a filter which removes words from the input * TokenStream that are named in the Set. * - * @see #makeStopSet(java.lang.String[]) - * @deprecated Use {@link #StopFilter(boolean, TokenStream, Set)} instead - */ - public StopFilter(TokenStream in, Set stopWords) { - this(ENABLE_POSITION_INCREMENTS_DEFAULT, in, stopWords, false); - } - - /** - * Constructs a filter which removes words from the input - * TokenStream that are named in the Set. - * * @param enablePositionIncrements true if token positions should record the removed stop words * @param in Input stream - * @param stopWords The set of Stop Words. + * @param stopWords A Set of Strings or char[] or any other toString()-able set representing the stopwords * @see #makeStopSet(java.lang.String[]) */ - public StopFilter(boolean enablePositionIncrements, TokenStream in, Set stopWords) { + public StopFilter(boolean enablePositionIncrements, TokenStream in, Set<?> stopWords) { this(enablePositionIncrements, in, stopWords, false); } - - public void init() { - termAtt = addAttribute(TermAttribute.class); - posIncrAtt = addAttribute(PositionIncrementAttribute.class); - } /** * Builds a Set from an array of stop words, * appropriate for passing into the StopFilter constructor. * This permits this stopWords construction to be cached once when * an Analyzer is constructed. * * @see #makeStopSet(java.lang.String[], boolean) passing false to ignoreCase */ - public static final Set makeStopSet(String[] stopWords) { + public static final Set<Object> makeStopSet(String... stopWords) { return makeStopSet(stopWords, false); } /** * Builds a Set from an array of stop words, * appropriate for passing into the StopFilter constructor. * This permits this stopWords construction to be cached once when * an Analyzer is constructed. - * + * @param stopWords A List of Strings or char[] or any other toString()-able list representing the stopwords + * @return A Set ({@link CharArraySet}) containing the words * @see #makeStopSet(java.lang.String[], boolean) passing false to ignoreCase */ - public static final Set makeStopSet(List/**/ stopWords) { + public static final Set<Object> makeStopSet(List<?> stopWords) { return makeStopSet(stopWords, false); } /** * Builds a Set from an array of stop words, * appropriate for passing into the StopFilter constructor. * This permits this stopWords construction to be cached once when * an Analyzer is constructed. * @param ignoreCase If true, all words are lower cased first.
* @return a Set containing the words */ - public static final Set makeStopSet(String[] stopWords, boolean ignoreCase) { + public static final Set<Object> makeStopSet(String[] stopWords, boolean ignoreCase) { CharArraySet stopSet = new CharArraySet(stopWords.length, ignoreCase); stopSet.addAll(Arrays.asList(stopWords)); return stopSet; @@ -205,11 +121,11 @@ /** * - * @param stopWords A List of Strings representing the stopwords + * @param stopWords A List of Strings or char[] or any other toString()-able list representing the stopwords * @param ignoreCase if true, all words are lower cased first - * @return A Set containing the words + * @return A Set ({@link CharArraySet}) containing the words */ - public static final Set makeStopSet(List/**/ stopWords, boolean ignoreCase){ + public static final Set<Object> makeStopSet(List<?> stopWords, boolean ignoreCase){ CharArraySet stopSet = new CharArraySet(stopWords.size(), ignoreCase); stopSet.addAll(stopWords); return stopSet; @@ -230,35 +146,11 @@ } skippedPositions += posIncrAtt.getPositionIncrement(); } - // reached EOS -- return null + // reached EOS -- return false return false; } /** - * @see #setEnablePositionIncrementsDefault(boolean). - * @deprecated Please specify this when you create the StopFilter - */ - public static boolean getEnablePositionIncrementsDefault() { - return ENABLE_POSITION_INCREMENTS_DEFAULT; - } - - /** - * Set the default position increments behavior of every StopFilter created from now on. - *
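// makeStopSet wraps the words in a CharArraySet, so the filter can test the
// term's char[] buffer without allocating a String per token; sketch:
//
//   Set stops = StopFilter.makeStopSet(Arrays.asList("the", "in"), true);
//   // ignoreCase=true: entries are lower cased and matched case-insensitively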
- * Note: behavior of a single StopFilter instance can be modified - * with {@link #setEnablePositionIncrements(boolean)}. - * This static method allows control over behavior of classes using StopFilters internally, - * for example {@link org.apache.lucene.analysis.standard.StandardAnalyzer StandardAnalyzer}. - *
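// With the static default gone, the behavior is chosen per instance, either
// via the constructor flag or the surviving setter; sketch:
//
//   StopFilter f = new StopFilter(false, input, stopWords);
//   f.setEnablePositionIncrements(true); // replaces setEnablePositionIncrementsDefault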
- * Default : false. - * @see #setEnablePositionIncrements(boolean). - * @deprecated Please specify this when you create the StopFilter - */ - public static void setEnablePositionIncrementsDefault(boolean defaultValue) { - ENABLE_POSITION_INCREMENTS_DEFAULT = defaultValue; - } - - /** * @see #setEnablePositionIncrements(boolean). */ public boolean getEnablePositionIncrements() { Index: src/java/org/apache/lucene/util/Version.java =================================================================== --- src/java/org/apache/lucene/util/Version.java (revision 826601) +++ src/java/org/apache/lucene/util/Version.java (working copy) @@ -37,6 +37,18 @@ */ public static final Version LUCENE_CURRENT = new Version("LUCENE_CURRENT", 0); + /** Match settings and bugs in Lucene's 2.0 release. */ + public static final Version LUCENE_20 = new Version("LUCENE_20", 2000); + + /** Match settings and bugs in Lucene's 2.1 release. */ + public static final Version LUCENE_21 = new Version("LUCENE_21", 2100); + + /** Match settings and bugs in Lucene's 2.2 release. */ + public static final Version LUCENE_22 = new Version("LUCENE_22", 2200); + + /** Match settings and bugs in Lucene's 2.3 release. */ + public static final Version LUCENE_23 = new Version("LUCENE_23", 2300); + /** Match settings and bugs in Lucene's 2.4 release. */ public static final Version LUCENE_24 = new Version("LUCENE_24", 2400); Index: src/test/org/apache/lucene/analysis/TestAnalyzers.java =================================================================== --- src/test/org/apache/lucene/analysis/TestAnalyzers.java (revision 826601) +++ src/test/org/apache/lucene/analysis/TestAnalyzers.java (working copy) @@ -74,7 +74,7 @@ } public void testStop() throws Exception { - Analyzer a = new StopAnalyzer(); + Analyzer a = new StopAnalyzer(true); assertAnalyzesTo(a, "foo bar FOO BAR", new String[] { "foo", "bar", "foo", "bar" }); assertAnalyzesTo(a, "foo a bar such FOO THESE BAR", @@ -120,6 +120,10 @@ } private static class MyStandardAnalyzer extends StandardAnalyzer { + public MyStandardAnalyzer() { + super(org.apache.lucene.util.Version.LUCENE_CURRENT); + } + public TokenStream tokenStream(String field, Reader reader) { return new WhitespaceAnalyzer().tokenStream(field, reader); } Index: src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java =================================================================== --- src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java (revision 826601) +++ src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java (working copy) @@ -26,16 +26,16 @@ public class TestStandardAnalyzer extends BaseTokenStreamTestCase { - private Analyzer a = new StandardAnalyzer(); + private Analyzer a = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT); public void testMaxTermLength() throws Exception { - StandardAnalyzer sa = new StandardAnalyzer(); + StandardAnalyzer sa = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT); sa.setMaxTokenLength(5); assertAnalyzesTo(sa, "ab cd toolong xy z", new String[]{"ab", "cd", "xy", "z"}); } public void testMaxTermLength2() throws Exception { - StandardAnalyzer sa = new StandardAnalyzer(); + StandardAnalyzer sa = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT); assertAnalyzesTo(sa, "ab cd toolong xy z", new String[]{"ab", "cd", "toolong", "xy", "z"}); sa.setMaxTokenLength(5); @@ -99,23 +99,23 @@ public void testLucene1140() throws Exception { try { - StandardAnalyzer analyzer = new StandardAnalyzer(true); + StandardAnalyzer 
analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT); assertAnalyzesTo(analyzer, "www.nutch.org.", new String[]{ "www.nutch.org" }, new String[] { "" }); } catch (NullPointerException e) { - assertTrue("Should not throw an NPE and it did", false); + fail("Should not throw an NPE and it did"); } } public void testDomainNames() throws Exception { // Don't reuse a because we alter its state (setReplaceInvalidAcronym) - StandardAnalyzer a2 = new StandardAnalyzer(); + StandardAnalyzer a2 = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT); // domain names assertAnalyzesTo(a2, "www.nutch.org", new String[]{"www.nutch.org"}); //Notice the trailing . See https://issues.apache.org/jira/browse/LUCENE-1068. // the following should be recognized as HOST: assertAnalyzesTo(a2, "www.nutch.org.", new String[]{ "www.nutch.org" }, new String[] { "" }); - a2.setReplaceInvalidAcronym(false); + a2 = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_23); assertAnalyzesTo(a2, "www.nutch.org.", new String[]{ "wwwnutchorg" }, new String[] { "" }); } @@ -220,11 +220,4 @@ "", "", "", "", "", "", ""}); } - - /** @deprecated this should be removed in the 3.0. */ - public void testDeprecatedAcronyms() throws Exception { - // test backward compatibility for applications that require the old behavior. - // this should be removed once replaceDepAcronym is removed. - assertAnalyzesTo(a, "lucene.apache.org.", new String[]{ "lucene.apache.org" }, new String[] { "" }); - } } Index: src/test/org/apache/lucene/analysis/TestStopAnalyzer.java =================================================================== --- src/test/org/apache/lucene/analysis/TestStopAnalyzer.java (revision 826601) +++ src/test/org/apache/lucene/analysis/TestStopAnalyzer.java (working copy) @@ -61,7 +61,7 @@ stopWordsSet.add("good"); stopWordsSet.add("test"); stopWordsSet.add("analyzer"); - StopAnalyzer newStop = new StopAnalyzer((String[])stopWordsSet.toArray(new String[3])); + StopAnalyzer newStop = new StopAnalyzer(stopWordsSet, false); StringReader reader = new StringReader("This is a good test of the english stop analyzer"); TokenStream stream = newStop.tokenStream("test", reader); assertNotNull(stream); @@ -76,29 +76,23 @@ } public void testStopListPositions() throws IOException { - boolean defaultEnable = StopFilter.getEnablePositionIncrementsDefault(); - StopFilter.setEnablePositionIncrementsDefault(true); - try { - Set stopWordsSet = new HashSet(); - stopWordsSet.add("good"); - stopWordsSet.add("test"); - stopWordsSet.add("analyzer"); - StopAnalyzer newStop = new StopAnalyzer((String[])stopWordsSet.toArray(new String[3])); - StringReader reader = new StringReader("This is a good test of the english stop analyzer with positions"); - int expectedIncr[] = { 1, 1, 1, 3, 1, 1, 1, 2, 1}; - TokenStream stream = newStop.tokenStream("test", reader); - assertNotNull(stream); - int i = 0; - TermAttribute termAtt = stream.getAttribute(TermAttribute.class); - PositionIncrementAttribute posIncrAtt = stream.addAttribute(PositionIncrementAttribute.class); + Set stopWordsSet = new HashSet(); + stopWordsSet.add("good"); + stopWordsSet.add("test"); + stopWordsSet.add("analyzer"); + StopAnalyzer newStop = new StopAnalyzer(stopWordsSet, true); + StringReader reader = new StringReader("This is a good test of the english stop analyzer with positions"); + int expectedIncr[] = { 1, 1, 1, 3, 1, 1, 1, 2, 1}; + TokenStream stream = newStop.tokenStream("test", reader); + assertNotNull(stream); + int i = 0; + TermAttribute 
termAtt = stream.getAttribute(TermAttribute.class); + PositionIncrementAttribute posIncrAtt = stream.addAttribute(PositionIncrementAttribute.class); - while (stream.incrementToken()) { - String text = termAtt.term(); - assertFalse(stopWordsSet.contains(text)); - assertEquals(expectedIncr[i++],posIncrAtt.getPositionIncrement()); - } - } finally { - StopFilter.setEnablePositionIncrementsDefault(defaultEnable); + while (stream.incrementToken()) { + String text = termAtt.term(); + assertFalse(stopWordsSet.contains(text)); + assertEquals(expectedIncr[i++],posIncrAtt.getPositionIncrement()); } } Index: src/test/org/apache/lucene/analysis/TestStopFilter.java =================================================================== --- src/test/org/apache/lucene/analysis/TestStopFilter.java (revision 826601) +++ src/test/org/apache/lucene/analysis/TestStopFilter.java (working copy) @@ -23,7 +23,9 @@ import java.io.IOException; import java.io.StringReader; import java.util.ArrayList; +import java.util.Arrays; import java.util.Set; +import java.util.HashSet; public class TestStopFilter extends BaseTokenStreamTestCase { @@ -34,8 +36,8 @@ public void testExactCase() throws IOException { StringReader reader = new StringReader("Now is The Time"); - String[] stopWords = new String[] { "is", "the", "Time" }; - TokenStream stream = new StopFilter(false, new WhitespaceTokenizer(reader), stopWords); + Set stopWords = new HashSet(Arrays.asList("is", "the", "Time")); + TokenStream stream = new StopFilter(false, new WhitespaceTokenizer(reader), stopWords, false); final TermAttribute termAtt = stream.getAttribute(TermAttribute.class); assertTrue(stream.incrementToken()); assertEquals("Now", termAtt.term()); @@ -46,7 +48,7 @@ public void testIgnoreCase() throws IOException { StringReader reader = new StringReader("Now is The Time"); - String[] stopWords = new String[] { "is", "the", "Time" }; + Set stopWords = new HashSet(Arrays.asList( "is", "the", "Time" )); TokenStream stream = new StopFilter(false, new WhitespaceTokenizer(reader), stopWords, true); final TermAttribute termAtt = stream.getAttribute(TermAttribute.class); assertTrue(stream.incrementToken()); Index: src/test/org/apache/lucene/document/TestBinaryDocument.java =================================================================== --- src/test/org/apache/lucene/document/TestBinaryDocument.java (revision 826601) +++ src/test/org/apache/lucene/document/TestBinaryDocument.java (working copy) @@ -59,7 +59,7 @@ /** add the doc to a ram index */ MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); writer.addDocument(doc); writer.close(); @@ -97,7 +97,7 @@ /** add the doc to a ram index */ MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); writer.addDocument(doc); writer.close(); Index: src/test/org/apache/lucene/document/TestDocument.java =================================================================== --- src/test/org/apache/lucene/document/TestDocument.java (revision 826601) +++ src/test/org/apache/lucene/document/TestDocument.java (working 
copy) @@ -154,7 +154,7 @@ public void testGetValuesForIndexedDocument() throws Exception { RAMDirectory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); writer.addDocument(makeDocumentWithFields()); writer.close(); @@ -225,7 +225,7 @@ doc.add(new Field("keyword", "test", Field.Store.YES, Field.Index.NOT_ANALYZED)); RAMDirectory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); writer.addDocument(doc); field.setValue("id2"); writer.addDocument(doc); Index: src/test/org/apache/lucene/index/TestDirectoryReader.java =================================================================== --- src/test/org/apache/lucene/index/TestDirectoryReader.java (revision 826601) +++ src/test/org/apache/lucene/index/TestDirectoryReader.java (working copy) @@ -193,7 +193,7 @@ } private void addDoc(RAMDirectory ramDir1, String s, boolean create) throws IOException { - IndexWriter iw = new IndexWriter(ramDir1, new StandardAnalyzer(), create, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter iw = new IndexWriter(ramDir1, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), create, IndexWriter.MaxFieldLength.LIMITED); Document doc = new Document(); doc.add(new Field("body", s, Field.Store.YES, Field.Index.ANALYZED)); iw.addDocument(doc); Index: src/test/org/apache/lucene/index/TestDocumentWriter.java =================================================================== --- src/test/org/apache/lucene/index/TestDocumentWriter.java (revision 826601) +++ src/test/org/apache/lucene/index/TestDocumentWriter.java (working copy) @@ -259,7 +259,7 @@ doc.add(new Field("f2", "v1", Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS)); doc.add(new Field("f2", "v2", Store.YES, Index.NOT_ANALYZED, TermVector.NO)); - IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); writer.addDocument(doc); writer.close(); @@ -292,7 +292,7 @@ doc.add(f); doc.add(new Field("f2", "v2", Store.YES, Index.NO)); - IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); writer.addDocument(doc); writer.optimize(); // be sure to have a single segment writer.close(); Index: src/test/org/apache/lucene/index/TestIndexReader.java =================================================================== --- src/test/org/apache/lucene/index/TestIndexReader.java (revision 826601) +++ src/test/org/apache/lucene/index/TestIndexReader.java (working copy) @@ -76,7 +76,7 @@ commitUserData.put("foo", "fighters"); // set up writer - IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, 
IndexWriter.MaxFieldLength.LIMITED); writer.setMaxBufferedDocs(2); for(int i=0;i<27;i++) addDocumentWithFields(writer); @@ -98,7 +98,7 @@ assertTrue(c.equals(r.getIndexCommit())); // Change the index - writer = new IndexWriter(d, new StandardAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED); writer.setMaxBufferedDocs(2); for(int i=0;i<7;i++) addDocumentWithFields(writer); @@ -109,7 +109,7 @@ assertFalse(r2.getIndexCommit().isOptimized()); r3.close(); - writer = new IndexWriter(d, new StandardAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED); writer.optimize(); writer.close(); @@ -123,19 +123,19 @@ public void testIsCurrent() throws Exception { RAMDirectory d = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); addDocumentWithFields(writer); writer.close(); // set up reader: IndexReader reader = IndexReader.open(d, false); assertTrue(reader.isCurrent()); // modify index by adding another document: - writer = new IndexWriter(d, new StandardAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED); addDocumentWithFields(writer); writer.close(); assertFalse(reader.isCurrent()); // re-create index: - writer = new IndexWriter(d, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); addDocumentWithFields(writer); writer.close(); assertFalse(reader.isCurrent()); @@ -151,7 +151,7 @@ { RAMDirectory d = new MockRAMDirectory(); // set up writer - IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); addDocumentWithFields(writer); writer.close(); // set up reader @@ -163,7 +163,7 @@ assertTrue(fieldNames.contains("unstored")); reader.close(); // add more documents - writer = new IndexWriter(d, new StandardAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED); // want to get some more segments here for (int i = 0; i < 5*writer.getMergeFactor(); i++) { @@ -243,7 +243,7 @@ public void testTermVectors() throws Exception { RAMDirectory d = new MockRAMDirectory(); // set up writer - IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); // want to get some more segments here // new termvector fields for (int i = 0; i < 5 * writer.getMergeFactor(); i++) { @@ -1418,7 +1418,7 @@ RAMDirectory d = new MockRAMDirectory(); // set up writer - IndexWriter 
writer = new IndexWriter(d, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); writer.setMaxBufferedDocs(2); for(int i=0;i<27;i++) addDocumentWithFields(writer); @@ -1434,7 +1434,7 @@ assertTrue(c.equals(r.getIndexCommit())); // Change the index - writer = new IndexWriter(d, new StandardAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED); writer.setMaxBufferedDocs(2); for(int i=0;i<7;i++) addDocumentWithFields(writer); @@ -1445,7 +1445,7 @@ assertFalse(r2.getIndexCommit().isOptimized()); r2.close(); - writer = new IndexWriter(d, new StandardAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED); writer.optimize(); writer.close(); @@ -1459,7 +1459,7 @@ public void testReadOnly() throws Throwable { RAMDirectory d = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); addDocumentWithFields(writer); writer.commit(); addDocumentWithFields(writer); @@ -1473,7 +1473,7 @@ // expected } - writer = new IndexWriter(d, new StandardAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED); addDocumentWithFields(writer); writer.close(); @@ -1490,7 +1490,7 @@ // expected } - writer = new IndexWriter(d, new StandardAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED); writer.optimize(); writer.close(); @@ -1508,7 +1508,7 @@ } // Make sure write lock isn't held - writer = new IndexWriter(d, new StandardAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(d, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED); writer.close(); r3.close(); @@ -1518,7 +1518,7 @@ // LUCENE-1474 public void testIndexReader() throws Exception { Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), + IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); writer.addDocument(createDocument("a")); writer.addDocument(createDocument("b")); @@ -1536,7 +1536,7 @@ public void testIndexReaderUnDeleteAll() throws Exception { MockRAMDirectory dir = new MockRAMDirectory(); dir.setPreventDoubleWrite(false); - IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), + IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED); writer.addDocument(createDocument("a")); writer.addDocument(createDocument("b")); @@ -1578,7 +1578,7 @@ Directory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), + IndexWriter writer = 
+    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
                                          IndexWriter.MaxFieldLength.LIMITED);
     writer.setMaxBufferedDocs(2);
Index: src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java (revision 826601)
+++ src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java (working copy)
@@ -69,7 +69,7 @@
   protected void setUp() throws Exception {
     super.setUp();
     similarityOne = new SimilarityOne();
-    anlzr = new StandardAnalyzer();
+    anlzr = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
   }
 
   /**
Index: src/test/org/apache/lucene/index/TestIndexReaderReopen.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexReaderReopen.java (revision 826601)
+++ src/test/org/apache/lucene/index/TestIndexReaderReopen.java (working copy)
@@ -687,7 +687,7 @@
     final Directory dir = new MockRAMDirectory();
     final int n = 150;
-    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
     for (int i = 0; i < n; i++) {
       writer.addDocument(createDocument(i, 3));
     }
@@ -705,7 +705,7 @@
         modifier.deleteDocument(i % modifier.maxDoc());
         modifier.close();
       } else {
-        IndexWriter modifier = new IndexWriter(dir, new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
+        IndexWriter modifier = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
         modifier.addDocument(createDocument(n + i, 6));
         modifier.close();
       }
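
The hunks above and below repeat one mechanical substitution: the deprecated no-argument StandardAnalyzer constructor becomes the Version-aware constructor, which makes backwards-compatibility behavior an explicit argument instead of an implicit default. A minimal sketch of the pattern in isolation (illustrative only, not part of the patch; it assumes the Lucene 2.9 API and imports Version rather than fully qualifying it as the patch does):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

public class VersionedAnalyzerSketch {
  public static void main(String[] args) throws Exception {
    RAMDirectory dir = new RAMDirectory();
    // The Version argument pins analyzer behavior across releases;
    // LUCENE_CURRENT opts in to the defaults of the running release.
    StandardAnalyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT);
    IndexWriter writer = new IndexWriter(dir, analyzer, true,
                                         IndexWriter.MaxFieldLength.LIMITED);
    writer.close();
  }
}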
Index: src/test/org/apache/lucene/index/TestIndexWriter.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriter.java (revision 826601)
+++ src/test/org/apache/lucene/index/TestIndexWriter.java (working copy)
@@ -543,7 +543,7 @@
    */
   public void testWickedLongTerm() throws IOException {
     RAMDirectory dir = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
 
     char[] chars = new char[DocumentsWriter.CHAR_BLOCK_SIZE-1];
     Arrays.fill(chars, 'x');
@@ -587,7 +587,7 @@
     // maximum length term, and search on that term:
     doc = new Document();
     doc.add(new Field("content", bigTerm, Field.Store.NO, Field.Index.ANALYZED));
-    StandardAnalyzer sa = new StandardAnalyzer();
+    StandardAnalyzer sa = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
     sa.setMaxTokenLength(100000);
     writer = new IndexWriter(dir, sa, IndexWriter.MaxFieldLength.LIMITED);
     writer.addDocument(doc);
@@ -1573,7 +1573,7 @@
    */
   public void testBadSegment() throws IOException {
     MockRAMDirectory dir = new MockRAMDirectory();
-    IndexWriter ir = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter ir = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     Document document = new Document();
     document.add(new Field("tvtest", "", Field.Store.NO, Field.Index.ANALYZED,
@@ -1586,7 +1586,7 @@
   // LUCENE-1008
   public void testNoTermVectorAfterTermVector() throws IOException {
     MockRAMDirectory dir = new MockRAMDirectory();
-    IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     Document document = new Document();
     document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
         Field.TermVector.YES));
@@ -1612,7 +1612,7 @@
   // LUCENE-1010
   public void testNoTermVectorAfterTermVectorMerge() throws IOException {
     MockRAMDirectory dir = new MockRAMDirectory();
-    IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     Document document = new Document();
     document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
         Field.TermVector.YES));
@@ -1644,7 +1644,7 @@
     int pri = Thread.currentThread().getPriority();
     try {
       MockRAMDirectory dir = new MockRAMDirectory();
-      IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+      IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
       Document document = new Document();
       document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
           Field.TermVector.YES));
@@ -1682,7 +1682,7 @@
   // LUCENE-1013
   public void testSetMaxMergeDocs() throws IOException {
     MockRAMDirectory dir = new MockRAMDirectory();
-    IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     iw.setMergeScheduler(new MyMergeScheduler());
     iw.setMaxMergeDocs(20);
     iw.setMaxBufferedDocs(2);
@@ -2730,7 +2730,7 @@
     Directory dir = new MockRAMDirectory();
     for(int iter=0;iter<2;iter++) {
       IndexWriter writer = new IndexWriter(dir,
-          new StandardAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
+          new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setMaxBufferedDocs(2);
       writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
       writer.setMergeScheduler(new SerialMergeScheduler());
@@ -2763,7 +2763,7 @@
       reader.close();
 
       writer = new IndexWriter(dir,
-          new StandardAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
+          new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setMaxBufferedDocs(2);
       writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
       writer.setMergeScheduler(new SerialMergeScheduler());
@@ -2782,7 +2782,7 @@
     Directory dir = new MockRAMDirectory();
     for(int iter=0;iter<2;iter++) {
       IndexWriter writer = new IndexWriter(dir,
-          new StandardAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
+          new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setMaxBufferedDocs(2);
       writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
       writer.setMergeScheduler(new SerialMergeScheduler());
@@ -2819,7 +2819,7 @@
   public void testTermVectorCorruption3() throws IOException {
     Directory dir = new MockRAMDirectory();
     IndexWriter writer = new IndexWriter(dir,
-        new StandardAnalyzer(),
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
         IndexWriter.MaxFieldLength.LIMITED);
     writer.setMaxBufferedDocs(2);
     writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
@@ -2841,7 +2841,7 @@
     writer.close();
 
     writer = new IndexWriter(dir,
-        new StandardAnalyzer(),
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
         IndexWriter.MaxFieldLength.LIMITED);
     writer.setMaxBufferedDocs(2);
     writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
@@ -2889,7 +2889,7 @@
   public void testExpungeDeletes() throws IOException {
     Directory dir = new MockRAMDirectory();
     IndexWriter writer = new IndexWriter(dir,
-        new StandardAnalyzer(),
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
         IndexWriter.MaxFieldLength.LIMITED);
     writer.setMaxBufferedDocs(2);
     writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
@@ -2917,7 +2917,7 @@
     ir.close();
 
     writer = new IndexWriter(dir,
-        new StandardAnalyzer(),
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
         IndexWriter.MaxFieldLength.LIMITED);
     assertEquals(8, writer.numDocs());
     assertEquals(10, writer.maxDoc());
@@ -2935,7 +2935,7 @@
   public void testExpungeDeletes2() throws IOException {
     Directory dir = new MockRAMDirectory();
     IndexWriter writer = new IndexWriter(dir,
-        new StandardAnalyzer(),
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
         IndexWriter.MaxFieldLength.LIMITED);
     writer.setMaxBufferedDocs(2);
     writer.setMergeFactor(50);
@@ -2964,7 +2964,7 @@
     ir.close();
 
     writer = new IndexWriter(dir,
-        new StandardAnalyzer(),
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
         IndexWriter.MaxFieldLength.LIMITED);
     writer.setMergeFactor(3);
     assertEquals(49, writer.numDocs());
@@ -2982,7 +2982,7 @@
   public void testExpungeDeletes3() throws IOException {
     Directory dir = new MockRAMDirectory();
     IndexWriter writer = new IndexWriter(dir,
-        new StandardAnalyzer(),
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
         IndexWriter.MaxFieldLength.LIMITED);
     writer.setMaxBufferedDocs(2);
     writer.setMergeFactor(50);
@@ -3011,7 +3011,7 @@
     ir.close();
 
     writer = new IndexWriter(dir,
-        new StandardAnalyzer(),
+        new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
         IndexWriter.MaxFieldLength.LIMITED);
     // Force many merges to happen
     writer.setMergeFactor(3);
@@ -4014,7 +4014,7 @@
 
     final List thrown = new ArrayList();
 
-    final IndexWriter writer = new IndexWriter(new MockRAMDirectory(), new StandardAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED) {
+    final IndexWriter writer = new IndexWriter(new MockRAMDirectory(), new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED) {
         public void message(final String message) {
           if (message.startsWith("now flush at close") && 0 == thrown.size()) {
             thrown.add(null);
@@ -4167,7 +4167,7 @@
   // LUCENE-1448
   public void testEndOffsetPositionStopFilter() throws Exception {
     MockRAMDirectory dir = new MockRAMDirectory();
-    IndexWriter w = new IndexWriter(dir, new StopAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter w = new IndexWriter(dir, new StopAnalyzer(true), IndexWriter.MaxFieldLength.LIMITED);
     Document doc = new Document();
     Field f = new Field("field", "abcd the", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
     doc.add(f);
@@ -4189,7 +4189,7 @@
   // LUCENE-1448
   public void testEndOffsetPositionStandard() throws Exception {
     MockRAMDirectory dir = new MockRAMDirectory();
-    IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
     Document doc = new Document();
     Field f = new Field("field", "abcd the ", Field.Store.NO,
         Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
@@ -4219,7 +4219,7 @@
   // LUCENE-1448
   public void testEndOffsetPositionStandardEmptyField() throws Exception {
     MockRAMDirectory dir = new MockRAMDirectory();
-    IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
     Document doc = new Document();
     Field f = new Field("field", "", Field.Store.NO,
         Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
@@ -4246,7 +4246,7 @@
   // LUCENE-1448
   public void testEndOffsetPositionStandardEmptyField2() throws Exception {
     MockRAMDirectory dir = new MockRAMDirectory();
-    IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
     Document doc = new Document();
 
     Field f = new Field("field", "abcd", Field.Store.NO,
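
Besides the StandardAnalyzer swap, the testEndOffsetPositionStopFilter hunk above moves StopAnalyzer onto a constructor that takes an explicit enablePositionIncrements flag, replacing reliance on the old JVM-wide StopFilter default. A short sketch of the two constructor forms used in this patch (illustrative; class and variable names are invented):

import java.util.Set;
import org.apache.lucene.analysis.StopAnalyzer;
import org.apache.lucene.analysis.StopFilter;

public class StopAnalyzerSketch {
  public static void main(String[] args) {
    // Default English stop set; true = keep position gaps where
    // stop words were removed, instead of closing them up.
    StopAnalyzer standard = new StopAnalyzer(true);

    // Custom stop set built via the (now varargs) makeStopSet helper:
    Set stopSet = StopFilter.makeStopSet("the", "foo");
    StopAnalyzer custom = new StopAnalyzer(stopSet, true);
  }
}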
Index: src/test/org/apache/lucene/index/TestIndexWriterLockRelease.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriterLockRelease.java (revision 826601)
+++ src/test/org/apache/lucene/index/TestIndexWriterLockRelease.java (working copy)
@@ -75,10 +75,10 @@
     IndexWriter im;
     FSDirectory dir = FSDirectory.open(this.__test_dir);
     try {
-      im = new IndexWriter(dir, new org.apache.lucene.analysis.standard.StandardAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
+      im = new IndexWriter(dir, new org.apache.lucene.analysis.standard.StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
     } catch (FileNotFoundException e) {
       try {
-        im = new IndexWriter(dir, new org.apache.lucene.analysis.standard.StandardAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
+        im = new IndexWriter(dir, new org.apache.lucene.analysis.standard.StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
       } catch (FileNotFoundException e1) {
       }
     } finally {
Index: src/test/org/apache/lucene/index/TestIndexWriterMerging.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriterMerging.java (revision 826601)
+++ src/test/org/apache/lucene/index/TestIndexWriterMerging.java (working copy)
@@ -56,7 +56,7 @@
 
     Directory merged = new MockRAMDirectory();
 
-    IndexWriter writer = new IndexWriter(merged, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(merged, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     writer.setMergeFactor(2);
 
     writer.addIndexesNoOptimize(new Directory[]{indexA, indexB});
@@ -93,7 +93,7 @@
 
   private void fillIndex(Directory dir, int start, int numDocs) throws IOException {
 
-    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     writer.setMergeFactor(2);
     writer.setMaxBufferedDocs(2);
Index: src/test/org/apache/lucene/index/TestNorms.java
===================================================================
--- src/test/org/apache/lucene/index/TestNorms.java (revision 826601)
+++ src/test/org/apache/lucene/index/TestNorms.java (working copy)
@@ -63,7 +63,7 @@
   protected void setUp() throws Exception {
     super.setUp();
     similarityOne = new SimilarityOne();
-    anlzr = new StandardAnalyzer();
+    anlzr = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
   }
 
   /**
Index: src/test/org/apache/lucene/index/TestOmitTf.java
===================================================================
--- src/test/org/apache/lucene/index/TestOmitTf.java (revision 826601)
+++ src/test/org/apache/lucene/index/TestOmitTf.java (working copy)
@@ -57,7 +57,7 @@
   // omitTermFreqAndPositions bit in the FieldInfo
   public void testOmitTermFreqAndPositions() throws Exception {
     Directory ram = new MockRAMDirectory();
-    Analyzer analyzer = new StandardAnalyzer();
+    Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
     IndexWriter writer = new IndexWriter(ram, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
     Document d = new Document();
 
@@ -103,7 +103,7 @@
   // omitTermFreqAndPositions for the same field works
   public void testMixedMerge() throws Exception {
     Directory ram = new MockRAMDirectory();
-    Analyzer analyzer = new StandardAnalyzer();
+    Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
     IndexWriter writer = new IndexWriter(ram, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
     writer.setMaxBufferedDocs(3);
     writer.setMergeFactor(2);
@@ -156,7 +156,7 @@
   // field,
   public void testMixedRAM() throws Exception {
     Directory ram = new MockRAMDirectory();
-    Analyzer analyzer = new StandardAnalyzer();
+    Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
     IndexWriter writer = new IndexWriter(ram, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
     writer.setMaxBufferedDocs(10);
     writer.setMergeFactor(2);
@@ -204,7 +204,7 @@
   // Verifies no *.prx exists when all fields omit term freq:
   public void testNoPrxFile() throws Throwable {
     Directory ram = new MockRAMDirectory();
-    Analyzer analyzer = new StandardAnalyzer();
+    Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
     IndexWriter writer = new IndexWriter(ram, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
     writer.setMaxBufferedDocs(3);
     writer.setMergeFactor(2);
@@ -235,7 +235,7 @@
   // Test scores with one field with Term Freqs and one without, otherwise with equal content
  public void testBasic() throws Exception {
     Directory dir = new MockRAMDirectory();
-    Analyzer analyzer = new StandardAnalyzer();
+    Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
     IndexWriter writer = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
     writer.setMergeFactor(2);
     writer.setMaxBufferedDocs(2);
Index: src/test/org/apache/lucene/index/TestParallelReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestParallelReader.java (revision 826601)
+++ src/test/org/apache/lucene/index/TestParallelReader.java (working copy)
@@ -105,7 +105,7 @@
     // one document only:
     Directory dir2 = new MockRAMDirectory();
-    IndexWriter w2 = new IndexWriter(dir2, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter w2 = new IndexWriter(dir2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     Document d3 = new Document();
     d3.add(new Field("f3", "v1", Field.Store.YES, Field.Index.ANALYZED));
     w2.addDocument(d3);
@@ -150,13 +150,13 @@
     Directory dir2 = getDir2();
 
     // add another document to ensure that the indexes are not optimized
-    IndexWriter modifier = new IndexWriter(dir1, new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter modifier = new IndexWriter(dir1, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
     Document d = new Document();
     d.add(new Field("f1", "v1", Field.Store.YES, Field.Index.ANALYZED));
     modifier.addDocument(d);
     modifier.close();
 
-    modifier = new IndexWriter(dir2, new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
+    modifier = new IndexWriter(dir2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
     d = new Document();
     d.add(new Field("f2", "v2", Field.Store.YES, Field.Index.ANALYZED));
     modifier.addDocument(d);
@@ -169,7 +169,7 @@
     assertFalse(pr.isOptimized());
     pr.close();
 
-    modifier = new IndexWriter(dir1, new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
+    modifier = new IndexWriter(dir1, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
     modifier.optimize();
     modifier.close();
@@ -181,7 +181,7 @@
 
     pr.close();
 
-    modifier = new IndexWriter(dir2, new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
+    modifier = new IndexWriter(dir2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
     modifier.optimize();
     modifier.close();
@@ -232,7 +232,7 @@
   // Fields 1-4 indexed together:
   private Searcher single() throws IOException {
     Directory dir = new MockRAMDirectory();
-    IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     Document d1 = new Document();
     d1.add(new Field("f1", "v1", Field.Store.YES, Field.Index.ANALYZED));
     d1.add(new Field("f2", "v1", Field.Store.YES, Field.Index.ANALYZED));
@@ -262,7 +262,7 @@
 
   private Directory getDir1() throws IOException {
     Directory dir1 = new MockRAMDirectory();
-    IndexWriter w1 = new IndexWriter(dir1, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter w1 = new IndexWriter(dir1, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     Document d1 = new Document();
     d1.add(new Field("f1", "v1", Field.Store.YES, Field.Index.ANALYZED));
     d1.add(new Field("f2", "v1", Field.Store.YES, Field.Index.ANALYZED));
@@ -277,7 +277,7 @@
 
   private Directory getDir2() throws IOException {
     Directory dir2 = new RAMDirectory();
-    IndexWriter w2 = new IndexWriter(dir2, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter w2 = new IndexWriter(dir2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     Document d3 = new Document();
     d3.add(new Field("f3", "v1", Field.Store.YES, Field.Index.ANALYZED));
     d3.add(new Field("f4", "v1", Field.Store.YES, Field.Index.ANALYZED));
Index: src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java
===================================================================
--- src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java (revision 826601)
+++ src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java (working copy)
@@ -70,7 +70,7 @@
 
   public void testSimple() throws Exception {
     String[] fields = {"b", "t"};
-    MultiFieldQueryParser mfqp = new MultiFieldQueryParser(fields, new StandardAnalyzer());
+    MultiFieldQueryParser mfqp = new MultiFieldQueryParser(fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
 
     Query q = mfqp.parse("one");
     assertEquals("b:one t:one", q.toString());
@@ -133,7 +133,7 @@
     boosts.put("b", Float.valueOf(5));
     boosts.put("t", Float.valueOf(10));
     String[] fields = {"b", "t"};
-    MultiFieldQueryParser mfqp = new MultiFieldQueryParser(fields, new StandardAnalyzer(), boosts);
+    MultiFieldQueryParser mfqp = new MultiFieldQueryParser(fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), boosts);
 
     //Check for simple
@@ -159,24 +159,24 @@
   public void testStaticMethod1() throws ParseException {
     String[] fields = {"b", "t"};
     String[] queries = {"one", "two"};
-    Query q = MultiFieldQueryParser.parse(queries, fields, new StandardAnalyzer());
+    Query q = MultiFieldQueryParser.parse(queries, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("b:one t:two", q.toString());
 
     String[] queries2 = {"+one", "+two"};
-    q = MultiFieldQueryParser.parse(queries2, fields, new StandardAnalyzer());
+    q = MultiFieldQueryParser.parse(queries2, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("(+b:one) (+t:two)", q.toString());
 
     String[] queries3 = {"one", "+two"};
-    q = MultiFieldQueryParser.parse(queries3, fields, new StandardAnalyzer());
+    q = MultiFieldQueryParser.parse(queries3, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("b:one (+t:two)", q.toString());
 
     String[] queries4 = {"one +more", "+two"};
-    q = MultiFieldQueryParser.parse(queries4, fields, new StandardAnalyzer());
+    q = MultiFieldQueryParser.parse(queries4, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("(b:one +b:more) (+t:two)", q.toString());
 
     String[] queries5 = {"blah"};
     try {
-      q = MultiFieldQueryParser.parse(queries5, fields, new StandardAnalyzer());
+      q = MultiFieldQueryParser.parse(queries5, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
       fail();
     } catch(IllegalArgumentException e) {
       // expected exception, array length differs
@@ -198,15 +198,15 @@
   public void testStaticMethod2() throws ParseException {
     String[] fields = {"b", "t"};
     BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT};
-    Query q = MultiFieldQueryParser.parse("one", fields, flags, new StandardAnalyzer());
+    Query q = MultiFieldQueryParser.parse("one", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("+b:one -t:one", q.toString());
 
-    q = MultiFieldQueryParser.parse("one two", fields, flags, new StandardAnalyzer());
+    q = MultiFieldQueryParser.parse("one two", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
 
     try {
       BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST};
-      q = MultiFieldQueryParser.parse("blah", fields, flags2, new StandardAnalyzer());
+      q = MultiFieldQueryParser.parse("blah", fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
       fail();
     } catch(IllegalArgumentException e) {
       // expected exception, array length differs
@@ -217,17 +217,17 @@
     String[] fields = {"b", "t"};
     //int[] flags = {MultiFieldQueryParser.REQUIRED_FIELD, MultiFieldQueryParser.PROHIBITED_FIELD};
     BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT};
-    MultiFieldQueryParser parser = new MultiFieldQueryParser(fields, new StandardAnalyzer());
+    MultiFieldQueryParser parser = new MultiFieldQueryParser(fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
 
-    Query q = MultiFieldQueryParser.parse("one", fields, flags, new StandardAnalyzer());//, fields, flags, new StandardAnalyzer());
+    Query q = MultiFieldQueryParser.parse("one", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));//, fields, flags, new StandardAnalyzer());
     assertEquals("+b:one -t:one", q.toString());
 
-    q = MultiFieldQueryParser.parse("one two", fields, flags, new StandardAnalyzer());
+    q = MultiFieldQueryParser.parse("one two", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
 
     try {
       BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST};
-      q = MultiFieldQueryParser.parse("blah", fields, flags2, new StandardAnalyzer());
+      q = MultiFieldQueryParser.parse("blah", fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
       fail();
     } catch(IllegalArgumentException e) {
       // expected exception, array length differs
@@ -239,12 +239,12 @@
     String[] fields = {"f1", "f2", "f3"};
     BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST,
         BooleanClause.Occur.MUST_NOT, BooleanClause.Occur.SHOULD};
-    Query q = MultiFieldQueryParser.parse(queries, fields, flags, new StandardAnalyzer());
+    Query q = MultiFieldQueryParser.parse(queries, fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("+f1:one -f2:two f3:three", q.toString());
 
     try {
       BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST};
-      q = MultiFieldQueryParser.parse(queries, fields, flags2, new StandardAnalyzer());
+      q = MultiFieldQueryParser.parse(queries, fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
       fail();
     } catch(IllegalArgumentException e) {
       // expected exception, array length differs
@@ -255,12 +255,12 @@
     String[] queries = {"one", "two"};
     String[] fields = {"b", "t"};
     BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT};
-    Query q = MultiFieldQueryParser.parse(queries, fields, flags, new StandardAnalyzer());
+    Query q = MultiFieldQueryParser.parse(queries, fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     assertEquals("+b:one -t:two", q.toString());
 
     try {
       BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST};
-      q = MultiFieldQueryParser.parse(queries, fields, flags2, new StandardAnalyzer());
+      q = MultiFieldQueryParser.parse(queries, fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
       fail();
     } catch(IllegalArgumentException e) {
       // expected exception, array length differs
@@ -282,7 +282,7 @@
   }
 
   public void testStopWordSearching() throws Exception {
-    Analyzer analyzer = new StandardAnalyzer();
+    Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
     Directory ramDir = new RAMDirectory();
     IndexWriter iw = new IndexWriter(ramDir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
     Document doc = new Document();
@@ -304,7 +304,7 @@
    * Return empty tokens for field "f1".
    */
   private static class AnalyzerReturningNull extends Analyzer {
-    StandardAnalyzer stdAnalyzer = new StandardAnalyzer();
+    StandardAnalyzer stdAnalyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
 
     public AnalyzerReturningNull() {
     }
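
The TestMultiFieldQueryParser hunks above apply the same substitution inside parser construction and the static parse helpers. A minimal usage sketch of the two-argument constructor exercised by testSimple (illustrative; the printed form comes from the assertion in that test):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryParser.MultiFieldQueryParser;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.Version;

public class MultiFieldSketch {
  public static void main(String[] args) throws Exception {
    String[] fields = {"b", "t"};
    MultiFieldQueryParser mfqp = new MultiFieldQueryParser(
        fields, new StandardAnalyzer(Version.LUCENE_CURRENT));
    // A bare term is expanded across every configured field:
    Query q = mfqp.parse("one");
    System.out.println(q); // prints: b:one t:one
  }
}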
Index: src/test/org/apache/lucene/queryParser/TestQueryParser.java
===================================================================
--- src/test/org/apache/lucene/queryParser/TestQueryParser.java (revision 826601)
+++ src/test/org/apache/lucene/queryParser/TestQueryParser.java (working copy)
@@ -68,11 +68,11 @@
 public class TestQueryParser extends LocalizedTestCase {
 
   public TestQueryParser(String name) {
-    super(name, new HashSet(Arrays.asList(new String[]{
+    super(name, new HashSet(Arrays.asList(
       "testLegacyDateRange", "testDateRange",
       "testCJK", "testNumber", "testFarsiRangeCollating",
       "testLocalDateFormat"
-    })));
+    )));
   }
 
   public static Analyzer qpAnalyzer = new QPTestAnalyzer();
@@ -291,7 +291,7 @@
     assertQueryEquals("+title:(dog OR cat) -author:\"bob dole\"", null,
                       "+(title:dog title:cat) -author:\"bob dole\"");
 
-    QueryParser qp = new QueryParser("field", new StandardAnalyzer());
+    QueryParser qp = new QueryParser("field", new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     // make sure OR is the default:
     assertEquals(QueryParser.OR_OPERATOR, qp.getDefaultOperator());
     qp.setDefaultOperator(QueryParser.AND_OPERATOR);
@@ -321,7 +321,7 @@
     assertQueryEquals("term 1.0 1 2", null, "term");
     assertQueryEquals("term term1 term2", null, "term term term");
 
-    Analyzer a = new StandardAnalyzer();
+    Analyzer a = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
     assertQueryEquals("3", a, "3");
     assertQueryEquals("term 1.0 1 2", a, "term 1.0 1 2");
     assertQueryEquals("term term1 term2", a, "term term1 term2");
@@ -791,7 +791,7 @@
       throws Exception {
     Set stopWords = new HashSet(1);
     stopWords.add("on");
-    StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(stopWords);
+    StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT, stopWords);
     QueryParser qp = new QueryParser("field", oneStopAnalyzer);
     Query q = qp.parse("on^1.0");
     assertNotNull(q);
@@ -804,7 +804,7 @@
     q = qp.parse("\"on\"^1.0");
     assertNotNull(q);
 
-    QueryParser qp2 = new QueryParser("field", new StandardAnalyzer());
+    QueryParser qp2 = new QueryParser("field", new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
     q = qp2.parse("the^3");
     // "the" is a stop word so the result is an empty query:
     assertNotNull(q);
@@ -944,7 +944,7 @@
   }
 
   public void testStopwords() throws Exception {
-    QueryParser qp = new QueryParser("a", new StopAnalyzer(new String[]{"the", "foo"}));
+    QueryParser qp = new QueryParser("a", new StopAnalyzer(StopFilter.makeStopSet("the", "foo"), true));
     Query result = qp.parse("a:the OR a:foo");
     assertNotNull("result is null and it shouldn't be", result);
     assertTrue("result is not a BooleanQuery", result instanceof BooleanQuery);
@@ -960,25 +960,19 @@
   }
 
   public void testPositionIncrement() throws Exception {
-    boolean dflt = StopFilter.getEnablePositionIncrementsDefault();
-    StopFilter.setEnablePositionIncrementsDefault(true);
-    try {
-      QueryParser qp = new QueryParser("a", new StopAnalyzer(new String[]{"the", "in", "are", "this"}));
-      qp.setEnablePositionIncrements(true);
-      String qtxt = "\"the words in poisitions pos02578 are stopped in this phrasequery\"";
-      //               0         2                      5       7  8
-      int expectedPositions[] = {1,3,4,6,9};
-      PhraseQuery pq = (PhraseQuery) qp.parse(qtxt);
-      //System.out.println("Query text: "+qtxt);
-      //System.out.println("Result: "+pq);
-      Term t[] = pq.getTerms();
-      int pos[] = pq.getPositions();
-      for (int i = 0; i < t.length; i++) {
-        //System.out.println(i+". "+t[i]+" pos: "+pos[i]);
-        assertEquals("term "+i+" = "+t[i]+" has wrong term-position!",expectedPositions[i],pos[i]);
-      }
-    } finally {
-      StopFilter.setEnablePositionIncrementsDefault(dflt);
+    QueryParser qp = new QueryParser("a", new StopAnalyzer(StopFilter.makeStopSet("the", "in", "are", "this"), true));
+    qp.setEnablePositionIncrements(true);
+    String qtxt = "\"the words in poisitions pos02578 are stopped in this phrasequery\"";
+    //               0         2                      5       7  8
+    int expectedPositions[] = {1,3,4,6,9};
+    PhraseQuery pq = (PhraseQuery) qp.parse(qtxt);
+    //System.out.println("Query text: "+qtxt);
+    //System.out.println("Result: "+pq);
+    Term t[] = pq.getTerms();
+    int pos[] = pq.getPositions();
+    for (int i = 0; i < t.length; i++) {
+      //System.out.println(i+". "+t[i]+" pos: "+pos[i]);
+      assertEquals("term "+i+" = "+t[i]+" has wrong term-position!",expectedPositions[i],pos[i]);
     }
   }
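
The rewritten testPositionIncrement above is the clearest picture of the behavioral side of this patch: the save-and-restore of the global StopFilter.setEnablePositionIncrementsDefault disappears, and the flag travels with the StopAnalyzer and QueryParser instances instead. A sketch of the resulting pattern (illustrative; the phrase and stop set here are invented, and the constructors are the ones shown in the hunk):

import org.apache.lucene.analysis.StopAnalyzer;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.PhraseQuery;

public class PositionIncrementSketch {
  public static void main(String[] args) throws Exception {
    StopAnalyzer analyzer =
        new StopAnalyzer(StopFilter.makeStopSet("the", "in"), true);
    QueryParser qp = new QueryParser("a", analyzer);
    qp.setEnablePositionIncrements(true);
    // "the" and "in" are dropped, but their positions survive as gaps,
    // so the remaining terms keep their original phrase positions.
    PhraseQuery pq = (PhraseQuery) qp.parse("\"the quick in parts\"");
    int[] positions = pq.getPositions(); // {1, 3} rather than {0, 1}
  }
}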
Index: src/test/org/apache/lucene/search/function/FunctionTestSetup.java
===================================================================
--- src/test/org/apache/lucene/search/function/FunctionTestSetup.java (revision 826601)
+++ src/test/org/apache/lucene/search/function/FunctionTestSetup.java (working copy)
@@ -86,7 +86,7 @@
     // prepare a small index with just a few documents.
     super.setUp();
     dir = new RAMDirectory();
-    anlzr = new StandardAnalyzer();
+    anlzr = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
     IndexWriter iw = new IndexWriter(dir, anlzr, IndexWriter.MaxFieldLength.LIMITED);
     // add docs not exactly in natural ID order, to verify we do check the order of docs by scores
Index: src/test/org/apache/lucene/search/spans/TestSpans.java
===================================================================
--- src/test/org/apache/lucene/search/spans/TestSpans.java (revision 826601)
+++ src/test/org/apache/lucene/search/spans/TestSpans.java (working copy)
@@ -39,7 +39,7 @@
 import org.apache.lucene.util.LuceneTestCase;
 
 import java.io.IOException;
-import java.util.HashSet;
+import java.util.Collections;
 
 public class TestSpans extends LuceneTestCase {
   private IndexSearcher searcher;
@@ -449,7 +449,7 @@
   // LUCENE-1404
   public void testNPESpanQuery() throws Throwable {
     final Directory dir = new MockRAMDirectory();
-    final IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(new HashSet(0)), IndexWriter.MaxFieldLength.LIMITED);
+    final IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT, Collections.emptySet()), IndexWriter.MaxFieldLength.LIMITED);
 
     // Add documents
     addDoc(writer, "1", "the big dogs went running to the market");
Index: src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java
===================================================================
--- src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java (revision 826601)
+++ src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java (working copy)
@@ -55,7 +55,7 @@
 
     // create test index
     mDirectory = new RAMDirectory();
-    final IndexWriter writer = new IndexWriter(mDirectory, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+    final IndexWriter writer = new IndexWriter(mDirectory, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     addDocument(writer, "1", "I think it should work.");
     addDocument(writer, "2", "I think it should work.");
     addDocument(writer, "3", "I think it should work.");
Index: src/test/org/apache/lucene/search/spans/TestSpansAdvanced2.java
===================================================================
--- src/test/org/apache/lucene/search/spans/TestSpansAdvanced2.java (revision 826601)
+++ src/test/org/apache/lucene/search/spans/TestSpansAdvanced2.java (working copy)
@@ -39,7 +39,7 @@
     super.setUp();
 
     // create test index
-    final IndexWriter writer = new IndexWriter(mDirectory, new StandardAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
+    final IndexWriter writer = new IndexWriter(mDirectory, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
     addDocument(writer, "A", "Should we, could we, would we?");
     addDocument(writer, "B", "It should. Should it?");
     addDocument(writer, "C", "It shouldn't.");
Index: src/test/org/apache/lucene/search/TestBooleanOr.java
===================================================================
--- src/test/org/apache/lucene/search/TestBooleanOr.java (revision 826601)
+++ src/test/org/apache/lucene/search/TestBooleanOr.java (working copy)
@@ -135,7 +135,7 @@
     RAMDirectory rd = new RAMDirectory();
 
     //
-    IndexWriter writer = new IndexWriter(rd, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(rd, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
 
     //
     Document d = new Document();
Index: src/test/org/apache/lucene/search/TestCachingWrapperFilter.java
===================================================================
--- src/test/org/apache/lucene/search/TestCachingWrapperFilter.java (revision 826601)
+++ src/test/org/apache/lucene/search/TestCachingWrapperFilter.java (working copy)
@@ -32,7 +32,7 @@
 public class TestCachingWrapperFilter extends LuceneTestCase {
   public void testCachingWorks() throws Exception {
     Directory dir = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     writer.close();
 
     IndexReader reader = IndexReader.open(dir, true);
@@ -71,7 +71,7 @@
 
   public void testIsCacheAble() throws Exception {
     Directory dir = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     writer.close();
 
     IndexReader reader = IndexReader.open(dir, true);
Index: src/test/org/apache/lucene/search/TestCustomSearcherSort.java
===================================================================
--- src/test/org/apache/lucene/search/TestCustomSearcherSort.java (revision 826601)
+++ src/test/org/apache/lucene/search/TestCustomSearcherSort.java (working copy)
@@ -70,7 +70,7 @@
   private Directory getIndex() throws IOException {
     RAMDirectory indexStore = new RAMDirectory ();
-    IndexWriter writer = new IndexWriter (indexStore, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter writer = new IndexWriter (indexStore, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
     RandomGen random = new RandomGen(newRandom());
     for (int i=0; i