Index: modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanStemFilter.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanStemFilter.java (revision 1160117) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanStemFilter.java (revision ) @@ -25,7 +25,6 @@ import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.KeywordTokenizer; import org.apache.lucene.analysis.core.LowerCaseFilter; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import static org.apache.lucene.analysis.VocabularyAssert.*; @@ -36,7 +35,7 @@ * */ public class TestGermanStemFilter extends BaseTokenStreamTestCase { - Analyzer analyzer = new ReusableAnalyzerBase() { + Analyzer analyzer = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Index: modules/analysis/common/src/java/org/apache/lucene/analysis/hi/HindiAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/hi/HindiAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/hi/HindiAnalyzer.java (revision ) @@ -106,10 +106,10 @@ /** * Creates - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * used to tokenize all the text in the provided {@link Reader}. * - * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * @return {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * built from a {@link IndicTokenizer} filtered with * {@link LowerCaseFilter}, {@link IndicNormalizationFilter}, * {@link HindiNormalizationFilter}, {@link KeywordMarkerFilter} Index: modules/analysis/common/src/test/org/apache/lucene/collation/TestCollationKeyFilter.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/collation/TestCollationKeyFilter.java (revision 1169607) +++ modules/analysis/common/src/test/org/apache/lucene/collation/TestCollationKeyFilter.java (revision ) @@ -52,7 +52,7 @@ (collator.getCollationKey(secondRangeEndOriginal).toByteArray())); - public final class TestAnalyzer extends ReusableAnalyzerBase { + public final class TestAnalyzer extends Analyzer { private Collator _collator; TestAnalyzer(Collator collator) { Index: modules/analysis/common/src/java/org/apache/lucene/analysis/da/DanishAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/da/DanishAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/da/DanishAnalyzer.java (revision ) @@ -106,11 +106,11 @@ /** * Creates a - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * which tokenizes all the text in the provided {@link Reader}. 
* * @return A - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * built from an {@link StandardTokenizer} filtered with * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter} * , {@link KeywordMarkerFilter} if a stem exclusion set is Index: modules/analysis/common/src/java/org/apache/lucene/analysis/cn/ChineseAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/cn/ChineseAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/cn/ChineseAnalyzer.java (revision ) @@ -20,7 +20,6 @@ import java.io.Reader; import org.apache.lucene.analysis.standard.StandardAnalyzer; // javadoc @link -import org.apache.lucene.analysis.ReusableAnalyzerBase; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.Tokenizer; @@ -31,14 +30,14 @@ * This analyzer will be removed in Lucene 5.0 */ @Deprecated -public final class ChineseAnalyzer extends ReusableAnalyzerBase { +public final class ChineseAnalyzer extends Analyzer { /** * Creates - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * used to tokenize all the text in the provided {@link Reader}. * - * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * @return {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * built from a {@link ChineseTokenizer} filtered with * {@link ChineseFilter} */ Index: modules/analysis/common/src/java/org/apache/lucene/analysis/id/IndonesianAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/id/IndonesianAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/id/IndonesianAnalyzer.java (revision ) @@ -106,10 +106,10 @@ /** * Creates - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * used to tokenize all the text in the provided {@link Reader}. 
* - * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * @return {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * built from an {@link StandardTokenizer} filtered with * {@link StandardFilter}, {@link LowerCaseFilter}, * {@link StopFilter}, {@link KeywordMarkerFilter} Index: lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java (revision 1169607) +++ lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java (revision ) @@ -103,7 +103,7 @@ } public void testPositionIncrementGap() throws IOException { - Analyzer analyzer = new ReusableAnalyzerBase() { + Analyzer analyzer = new Analyzer() { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { return new TokenStreamComponents(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); @@ -138,7 +138,7 @@ } public void testTokenReuse() throws IOException { - Analyzer analyzer = new ReusableAnalyzerBase() { + Analyzer analyzer = new Analyzer() { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); Index: modules/analysis/common/src/test/org/apache/lucene/analysis/commongrams/CommonGramsFilterTest.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/commongrams/CommonGramsFilterTest.java (revision 1169607) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/commongrams/CommonGramsFilterTest.java (revision ) @@ -84,7 +84,7 @@ * @return Map */ public void testCommonGramsQueryFilter() throws Exception { - Analyzer a = new ReusableAnalyzerBase() { + Analyzer a = new Analyzer() { @Override public TokenStreamComponents createComponents(String field, Reader in) { Tokenizer tokenizer = new MockTokenizer(in, MockTokenizer.WHITESPACE, false); @@ -154,7 +154,7 @@ } public void testCommonGramsFilter() throws Exception { - Analyzer a = new ReusableAnalyzerBase() { + Analyzer a = new Analyzer() { @Override public TokenStreamComponents createComponents(String field, Reader in) { Tokenizer tokenizer = new MockTokenizer(in, MockTokenizer.WHITESPACE, false); Index: modules/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikAnalyzer.java =================================================================== --- modules/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikAnalyzer.java (revision 1160117) +++ modules/analysis/morfologik/src/java/org/apache/lucene/analysis/morfologik/MorfologikAnalyzer.java (revision ) @@ -20,10 +20,10 @@ import java.io.Reader; +import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.standard.StandardFilter; import org.apache.lucene.analysis.standard.StandardTokenizer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import org.apache.lucene.util.Version; import morfologik.stemming.PolishStemmer.DICTIONARY; @@ -32,7 +32,7 @@ * {@link org.apache.lucene.analysis.Analyzer} using Morfologik library. 
* @see Morfologik project page */ -public class MorfologikAnalyzer extends ReusableAnalyzerBase { +public class MorfologikAnalyzer extends Analyzer { private final DICTIONARY dictionary; private final Version version; @@ -62,14 +62,14 @@ /** * Creates a - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * which tokenizes all the text in the provided {@link Reader}. * * @param field ignored field name * @param reader source of tokens * * @return A - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * built from an {@link StandardTokenizer} filtered with * {@link StandardFilter} and {@link MorfologikFilter}. */ Index: modules/analysis/common/src/java/org/apache/lucene/analysis/ca/CatalanAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/ca/CatalanAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/ca/CatalanAnalyzer.java (revision ) @@ -105,11 +105,11 @@ /** * Creates a - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * which tokenizes all the text in the provided {@link Reader}. * * @return A - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * built from an {@link StandardTokenizer} filtered with * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter} * , {@link KeywordMarkerFilter} if a stem exclusion set is Index: lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java (revision 1169607) +++ lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java (revision ) @@ -64,7 +64,7 @@ private static final byte[] payloadMultiField2 = new byte[]{4}; protected static Directory directory; - private static class PayloadAnalyzer extends ReusableAnalyzerBase { + private static class PayloadAnalyzer extends Analyzer { private PayloadAnalyzer() { super(new PerFieldReuseStrategy()); Index: modules/analysis/common/src/test/org/apache/lucene/analysis/lv/TestLatvianStemmer.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/lv/TestLatvianStemmer.java (revision 1160117) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/lv/TestLatvianStemmer.java (revision ) @@ -24,13 +24,12 @@ import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.Tokenizer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; /** * Basic tests for {@link LatvianStemmer} */ public class TestLatvianStemmer extends BaseTokenStreamTestCase { - private Analyzer a = new ReusableAnalyzerBase() { + private Analyzer a = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); Index: modules/facet/src/test/org/apache/lucene/facet/search/CategoryListIteratorTest.java 
=================================================================== --- modules/facet/src/test/org/apache/lucene/facet/search/CategoryListIteratorTest.java (revision 1169607) +++ modules/facet/src/test/org/apache/lucene/facet/search/CategoryListIteratorTest.java (revision ) @@ -137,7 +137,7 @@ DataTokenStream dts2 = new DataTokenStream("2",new SortingIntEncoder( new UniqueValuesIntEncoder(new DGapIntEncoder(new VInt8IntEncoder())))); // this test requires that no payloads ever be randomly present! - final Analyzer noPayloadsAnalyzer = new ReusableAnalyzerBase() { + final Analyzer noPayloadsAnalyzer = new Analyzer() { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { return new TokenStreamComponents(new MockTokenizer(reader, MockTokenizer.KEYWORD, false)); Index: lucene/src/test-framework/org/apache/lucene/analysis/MockAnalyzer.java =================================================================== --- lucene/src/test-framework/org/apache/lucene/analysis/MockAnalyzer.java (revision 1169607) +++ lucene/src/test-framework/org/apache/lucene/analysis/MockAnalyzer.java (revision ) @@ -42,7 +42,7 @@ * * @see MockTokenizer */ -public final class MockAnalyzer extends ReusableAnalyzerBase { +public final class MockAnalyzer extends Analyzer { private final CharacterRunAutomaton runAutomaton; private final boolean lowerCase; private final CharacterRunAutomaton filter; Index: modules/analysis/common/src/java/org/apache/lucene/analysis/gl/GalicianAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/gl/GalicianAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/gl/GalicianAnalyzer.java (revision ) @@ -104,11 +104,11 @@ /** * Creates a - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * which tokenizes all the text in the provided {@link Reader}. 
* * @return A - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * built from an {@link StandardTokenizer} filtered with * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter} * , {@link KeywordMarkerFilter} if a stem exclusion set is Index: lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java (revision 1169607) +++ lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java (revision ) @@ -56,7 +56,7 @@ final static boolean VERBOSE = false; public void testSetPosition() throws Exception { - Analyzer analyzer = new ReusableAnalyzerBase() { + Analyzer analyzer = new Analyzer() { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { return new TokenStreamComponents(new Tokenizer() { Index: lucene/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java (revision 1169607) +++ lucene/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java (revision ) @@ -20,7 +20,7 @@ import java.io.IOException; import java.io.Reader; -import org.apache.lucene.analysis.ReusableAnalyzerBase; +import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; @@ -63,7 +63,7 @@ } } -final class BugReproAnalyzer extends ReusableAnalyzerBase { +final class BugReproAnalyzer extends Analyzer { @Override public TokenStreamComponents createComponents(String arg0, Reader arg1) { return new TokenStreamComponents(new BugReproAnalyzerTokenizer()); Index: modules/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianLightStemFilter.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianLightStemFilter.java (revision 1160117) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/ru/TestRussianLightStemFilter.java (revision ) @@ -24,7 +24,6 @@ import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.Tokenizer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import static org.apache.lucene.analysis.VocabularyAssert.*; @@ -32,7 +31,7 @@ * Simple tests for {@link RussianLightStemFilter} */ public class TestRussianLightStemFilter extends BaseTokenStreamTestCase { - private Analyzer analyzer = new ReusableAnalyzerBase() { + private Analyzer analyzer = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Index: modules/analysis/common/src/java/org/apache/lucene/analysis/th/ThaiAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/th/ThaiAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/th/ThaiAnalyzer.java (revision ) @@ -27,7 +27,6 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.analysis.standard.StandardFilter; import 
org.apache.lucene.analysis.standard.StandardTokenizer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import org.apache.lucene.util.Version; /** @@ -36,7 +35,7 @@ * NOTE: This class uses the same {@link Version} * dependent settings as {@link StandardAnalyzer}.
*/ -public final class ThaiAnalyzer extends ReusableAnalyzerBase { +public final class ThaiAnalyzer extends Analyzer { private final Version matchVersion; public ThaiAnalyzer(Version matchVersion) { @@ -45,10 +44,10 @@ /** * Creates - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * used to tokenize all the text in the provided {@link Reader}. * - * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * @return {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * built from a {@link StandardTokenizer} filtered with * {@link StandardFilter}, {@link LowerCaseFilter}, {@link ThaiWordFilter}, and * {@link StopFilter} Index: modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailTokenizer.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailTokenizer.java (revision 1160117) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestUAX29URLEmailTokenizer.java (revision ) @@ -8,7 +8,6 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer; import org.apache.lucene.analysis.tokenattributes.TypeAttribute; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import org.apache.lucene.util.Version; import java.io.BufferedReader; @@ -50,7 +49,7 @@ BaseTokenStreamTestCase.assertTokenStreamContents(tokenizer, new String[] { "testing", "1234" }); } - private Analyzer a = new ReusableAnalyzerBase() { + private Analyzer a = new Analyzer() { @Override protected TokenStreamComponents createComponents (String fieldName, Reader reader) { @@ -99,7 +98,7 @@ } } - private Analyzer urlAnalyzer = new ReusableAnalyzerBase() { + private Analyzer urlAnalyzer = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { UAX29URLEmailTokenizer tokenizer = new UAX29URLEmailTokenizer(TEST_VERSION_CURRENT, reader); @@ -109,7 +108,7 @@ } }; - private Analyzer emailAnalyzer = new ReusableAnalyzerBase() { + private Analyzer emailAnalyzer = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { UAX29URLEmailTokenizer tokenizer = new UAX29URLEmailTokenizer(TEST_VERSION_CURRENT, reader); @@ -431,7 +430,7 @@ /** @deprecated remove this and sophisticated backwards layer in 5.0 */ @Deprecated public void testCombiningMarksBackwards() throws Exception { - Analyzer a = new ReusableAnalyzerBase() { + Analyzer a = new Analyzer() { @Override protected TokenStreamComponents createComponents (String fieldName, Reader reader) { Index: modules/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicAnalyzer.java (revision ) @@ -126,10 +126,10 @@ /** * Creates - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * used to tokenize all the text in the provided {@link Reader}. 
* - * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * @return {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * built from an {@link StandardTokenizer} filtered with * {@link LowerCaseFilter}, {@link StopFilter}, * {@link ArabicNormalizationFilter}, {@link KeywordMarkerFilter} Index: modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestStandardAnalyzer.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestStandardAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestStandardAnalyzer.java (revision ) @@ -5,7 +5,6 @@ import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.analysis.standard.StandardTokenizer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import org.apache.lucene.util.Version; import java.io.IOException; @@ -43,7 +42,7 @@ BaseTokenStreamTestCase.assertTokenStreamContents(tokenizer, new String[] { "testing", "1234" }); } - private Analyzer a = new ReusableAnalyzerBase() { + private Analyzer a = new Analyzer() { @Override protected TokenStreamComponents createComponents (String fieldName, Reader reader) { Index: modules/analysis/common/src/test/org/apache/lucene/analysis/gl/TestGalicianStemFilter.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/gl/TestGalicianStemFilter.java (revision 1160117) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/gl/TestGalicianStemFilter.java (revision ) @@ -28,13 +28,12 @@ import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.LowerCaseFilter; import org.apache.lucene.analysis.standard.StandardTokenizer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; /** * Simple tests for {@link GalicianStemFilter} */ public class TestGalicianStemFilter extends BaseTokenStreamTestCase { - private Analyzer analyzer = new ReusableAnalyzerBase() { + private Analyzer analyzer = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Index: modules/analysis/common/src/java/org/apache/lucene/analysis/hy/ArmenianAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/hy/ArmenianAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/hy/ArmenianAnalyzer.java (revision ) @@ -105,11 +105,11 @@ /** * Creates a - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * which tokenizes all the text in the provided {@link Reader}. 
* * @return A - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * built from an {@link StandardTokenizer} filtered with * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter} * , {@link KeywordMarkerFilter} if a stem exclusion set is Index: modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiPhraseQueryParsing.java =================================================================== --- modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiPhraseQueryParsing.java (revision 1169607) +++ modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiPhraseQueryParsing.java (revision ) @@ -18,7 +18,6 @@ */ import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; @@ -42,7 +41,7 @@ } } - private static class CannedAnalyzer extends ReusableAnalyzerBase { + private static class CannedAnalyzer extends Analyzer { private final TokenAndPos[] tokens; public CannedAnalyzer(TokenAndPos[] tokens) { Index: modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseStemFilter.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseStemFilter.java (revision 1160117) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseStemFilter.java (revision ) @@ -28,13 +28,12 @@ import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.LowerCaseFilter; import org.apache.lucene.analysis.standard.StandardTokenizer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; /** * Simple tests for {@link PortugueseStemFilter} */ public class TestPortugueseStemFilter extends BaseTokenStreamTestCase { - private Analyzer analyzer = new ReusableAnalyzerBase() { + private Analyzer analyzer = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Index: solr/core/src/java/org/apache/solr/schema/IndexSchema.java =================================================================== --- solr/core/src/java/org/apache/solr/schema/IndexSchema.java (revision 1173776) +++ solr/core/src/java/org/apache/solr/schema/IndexSchema.java (revision ) @@ -18,7 +18,7 @@ package org.apache.solr.schema; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.AnalyzerWrapper; import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.similarities.DefaultSimilarity; import org.apache.lucene.search.similarities.Similarity; @@ -41,8 +41,6 @@ import javax.xml.xpath.XPath; import javax.xml.xpath.XPathConstants; import javax.xml.xpath.XPathExpressionException; -import java.io.Reader; -import java.io.IOException; import java.util.*; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -292,50 +290,38 @@ queryAnalyzer = new SolrQueryAnalyzer(); } - private class SolrIndexAnalyzer extends Analyzer { + private class SolrIndexAnalyzer extends AnalyzerWrapper { - protected final HashMap<String, Analyzer> analyzers; + protected final HashMap<String, Analyzer> analyzers; SolrIndexAnalyzer() { analyzers = analyzerCache(); } - protected HashMap<String, Analyzer> analyzerCache() { + protected HashMap<String, Analyzer> analyzerCache() { - HashMap<String, Analyzer> cache
= new HashMap<String, Analyzer>(); + HashMap<String, Analyzer> cache = new HashMap<String, Analyzer>(); - for (SchemaField f : getFields().values()) { + for (SchemaField f : getFields().values()) { Analyzer analyzer = f.getType().getAnalyzer(); cache.put(f.getName(), analyzer); } return cache; } - protected Analyzer getAnalyzer(String fieldName) - { + @Override + protected Analyzer getWrappedAnalyzer(String fieldName) { Analyzer analyzer = analyzers.get(fieldName); - return analyzer!=null ? analyzer : getDynamicFieldType(fieldName).getAnalyzer(); + return analyzer != null ? analyzer : getDynamicFieldType(fieldName).getAnalyzer(); } @Override - public TokenStream tokenStream(String fieldName, Reader reader) - { - return getAnalyzer(fieldName).tokenStream(fieldName,reader); + protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) { + return components; } - - @Override - public TokenStream reusableTokenStream(String fieldName, Reader reader) throws IOException { - return getAnalyzer(fieldName).reusableTokenStream(fieldName,reader); - } + } - @Override - public int getPositionIncrementGap(String fieldName) { - return getAnalyzer(fieldName).getPositionIncrementGap(fieldName); - } - } - - private class SolrQueryAnalyzer extends SolrIndexAnalyzer { @Override - protected HashMap<String, Analyzer> analyzerCache() { + protected HashMap<String, Analyzer> analyzerCache() { - HashMap<String, Analyzer> cache = new HashMap<String, Analyzer>(); + HashMap<String, Analyzer> cache = new HashMap<String, Analyzer>(); for (SchemaField f : getFields().values()) { Analyzer analyzer = f.getType().getQueryAnalyzer(); cache.put(f.getName(), analyzer); @@ -344,10 +330,9 @@ } @Override - protected Analyzer getAnalyzer(String fieldName) - { + protected Analyzer getWrappedAnalyzer(String fieldName) { Analyzer analyzer = analyzers.get(fieldName); - return analyzer!=null ? analyzer : getDynamicFieldType(fieldName).getQueryAnalyzer(); + return analyzer != null ?
analyzer : getDynamicFieldType(fieldName).getQueryAnalyzer(); } } Index: modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanMinimalStemFilter.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanMinimalStemFilter.java (revision 1160117) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanMinimalStemFilter.java (revision ) @@ -24,7 +24,6 @@ import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.Tokenizer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import static org.apache.lucene.analysis.VocabularyAssert.*; @@ -32,7 +31,7 @@ * Simple tests for {@link GermanMinimalStemFilter} */ public class TestGermanMinimalStemFilter extends BaseTokenStreamTestCase { - private Analyzer analyzer = new ReusableAnalyzerBase() { + private Analyzer analyzer = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Index: lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java (revision 1169607) +++ lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java (revision ) @@ -111,7 +111,7 @@ assertEquals("Wrong payload for the target " + target + ": " + b.bytes[b.offset], (byte) target, b.bytes[b.offset]); } - private static class PayloadAnalyzer extends ReusableAnalyzerBase { + private static class PayloadAnalyzer extends Analyzer { private final AtomicInteger payloadCount = new AtomicInteger(-1); @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { Index: lucene/src/test/org/apache/lucene/TestAssertions.java =================================================================== --- lucene/src/test/org/apache/lucene/TestAssertions.java (revision 1169607) +++ lucene/src/test/org/apache/lucene/TestAssertions.java (revision ) @@ -19,7 +19,6 @@ import java.io.Reader; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; @@ -35,7 +34,7 @@ } } - static class TestAnalyzer1 extends ReusableAnalyzerBase { + static class TestAnalyzer1 extends Analyzer { @Override protected TokenStreamComponents createComponents(String fieldName, Reader aReader) { @@ -43,7 +42,7 @@ } } - static final class TestAnalyzer2 extends ReusableAnalyzerBase { + static final class TestAnalyzer2 extends Analyzer { @Override protected TokenStreamComponents createComponents(String fieldName, Reader aReader) { @@ -51,7 +50,7 @@ } } - static class TestAnalyzer3 extends ReusableAnalyzerBase { + static class TestAnalyzer3 extends Analyzer { @Override protected TokenStreamComponents createComponents(String fieldName, Reader aReader) { @@ -59,7 +58,7 @@ } } - static class TestAnalyzer4 extends ReusableAnalyzerBase { + static class TestAnalyzer4 extends Analyzer { @Override protected TokenStreamComponents createComponents(String fieldName, Reader aReader) { Index: modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiAnalyzer.java =================================================================== --- modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiAnalyzer.java (revision 1169607) +++ 
modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiAnalyzer.java (revision ) @@ -122,7 +122,7 @@ * Expands "multi" to "multi" and "multi2", both at the same position, * and expands "triplemulti" to "triplemulti", "multi3", and "multi2". */ - private class MultiAnalyzer extends ReusableAnalyzerBase { + private class MultiAnalyzer extends Analyzer { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { @@ -192,7 +192,7 @@ * Analyzes "the quick brown" as: quick(incr=2) brown(incr=1). * Does not work correctly for input other than "the quick brown ...". */ - private class PosIncrementAnalyzer extends ReusableAnalyzerBase { + private class PosIncrementAnalyzer extends Analyzer { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { Index: modules/analysis/common/src/java/org/apache/lucene/analysis/hu/HungarianAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/hu/HungarianAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/hu/HungarianAnalyzer.java (revision ) @@ -106,11 +106,11 @@ /** * Creates a - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * which tokenizes all the text in the provided {@link Reader}. * * @return A - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * built from an {@link StandardTokenizer} filtered with * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter} * , {@link KeywordMarkerFilter} if a stem exclusion set is Index: modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUTransformFilter.java =================================================================== --- modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUTransformFilter.java (revision 1160117) +++ modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUTransformFilter.java (revision ) @@ -26,7 +26,6 @@ import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.KeywordTokenizer; import org.apache.lucene.analysis.core.WhitespaceTokenizer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import org.apache.lucene.analysis.TokenStream; import com.ibm.icu.text.Transliterator; @@ -92,7 +91,7 @@ /** blast some random strings through the analyzer */ public void testRandomStrings() throws Exception { final Transliterator transform = Transliterator.getInstance("Any-Latin"); - Analyzer a = new ReusableAnalyzerBase() { + Analyzer a = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader); Index: modules/analysis/common/src/test/org/apache/lucene/analysis/snowball/TestSnowballVocab.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/snowball/TestSnowballVocab.java (revision 1160117) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/snowball/TestSnowballVocab.java (revision ) @@ -23,7 +23,6 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.KeywordTokenizer; -import 
org.apache.lucene.analysis.ReusableAnalyzerBase; import org.apache.lucene.util.LuceneTestCase; import static org.apache.lucene.analysis.VocabularyAssert.*; @@ -69,7 +68,7 @@ throws IOException { if (VERBOSE) System.out.println("checking snowball language: " + snowballLanguage); - Analyzer a = new ReusableAnalyzerBase() { + Analyzer a = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Index: modules/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchAnalyzer.java (revision ) @@ -168,10 +168,10 @@ /** * Creates - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * used to tokenize all the text in the provided {@link Reader}. * - * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * @return {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * built from a {@link StandardTokenizer} filtered with * {@link StandardFilter}, {@link ElisionFilter}, * {@link LowerCaseFilter}, {@link StopFilter}, Index: modules/analysis/common/src/java/org/apache/lucene/analysis/core/SimpleAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/core/SimpleAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/core/SimpleAnalyzer.java (revision ) @@ -21,7 +21,6 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.util.CharTokenizer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import org.apache.lucene.util.Version; /** An {@link Analyzer} that filters {@link LetterTokenizer} @@ -36,7 +35,7 @@ * *
**/ -public final class SimpleAnalyzer extends ReusableAnalyzerBase { +public final class SimpleAnalyzer extends Analyzer { private final Version matchVersion; Index: lucene/src/test/org/apache/lucene/search/spans/TestBasics.java =================================================================== --- lucene/src/test/org/apache/lucene/search/spans/TestBasics.java (revision 1169654) +++ lucene/src/test/org/apache/lucene/search/spans/TestBasics.java (revision ) @@ -96,7 +96,7 @@ } } - static final Analyzer simplePayloadAnalyzer = new ReusableAnalyzerBase() { + static final Analyzer simplePayloadAnalyzer = new Analyzer() { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { Index: modules/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapper.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapper.java (revision 1170942) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapper.java (revision ) @@ -17,11 +17,8 @@ * limitations under the License. */ -import java.io.IOException; -import java.io.Reader; - import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.AnalyzerWrapper; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.util.Version; @@ -31,7 +28,7 @@ * A shingle is another name for a token based n-gram. *
*/ -public final class ShingleAnalyzerWrapper extends Analyzer { +public final class ShingleAnalyzerWrapper extends AnalyzerWrapper { private final Analyzer defaultAnalyzer; private final int maxShingleSize; @@ -140,48 +137,18 @@ } @Override - public TokenStream tokenStream(String fieldName, Reader reader) { - TokenStream wrapped; - try { - wrapped = defaultAnalyzer.reusableTokenStream(fieldName, reader); - } catch (IOException e) { - wrapped = defaultAnalyzer.tokenStream(fieldName, reader); + protected Analyzer getWrappedAnalyzer(String fieldName) { + return defaultAnalyzer; - } + } - ShingleFilter filter = new ShingleFilter(wrapped, minShingleSize, maxShingleSize); + + @Override + protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) { + ShingleFilter filter = new ShingleFilter(components.getTokenStream(), minShingleSize, maxShingleSize); filter.setMinShingleSize(minShingleSize); filter.setMaxShingleSize(maxShingleSize); filter.setTokenSeparator(tokenSeparator); filter.setOutputUnigrams(outputUnigrams); filter.setOutputUnigramsIfNoShingles(outputUnigramsIfNoShingles); - return filter; + return new TokenStreamComponents(components.getTokenizer(), filter); } - - private class SavedStreams { - TokenStream wrapped; - ShingleFilter shingle; - } +} - - @Override - public TokenStream reusableTokenStream(String fieldName, Reader reader) throws IOException { - SavedStreams streams = (SavedStreams) getPreviousTokenStream(); - if (streams == null) { - streams = new SavedStreams(); - streams.wrapped = defaultAnalyzer.reusableTokenStream(fieldName, reader); - streams.shingle = new ShingleFilter(streams.wrapped); - setPreviousTokenStream(streams); - } else { - TokenStream result = defaultAnalyzer.reusableTokenStream(fieldName, reader); - if (result != streams.wrapped) { - /* the wrapped analyzer did not, create a new shingle around the new one */ - streams.wrapped = result; - streams.shingle = new ShingleFilter(streams.wrapped); - } - } - streams.shingle.setMaxShingleSize(maxShingleSize); - streams.shingle.setMinShingleSize(minShingleSize); - streams.shingle.setTokenSeparator(tokenSeparator); - streams.shingle.setOutputUnigrams(outputUnigrams); - streams.shingle.setOutputUnigramsIfNoShingles(outputUnigramsIfNoShingles); - return streams.shingle; - } -} Index: solr/core/src/java/org/apache/solr/schema/FieldType.java =================================================================== --- solr/core/src/java/org/apache/solr/schema/FieldType.java (revision 1169470) +++ solr/core/src/java/org/apache/solr/schema/FieldType.java (revision ) @@ -389,7 +389,7 @@ } @Override - public TokenStreamInfo getStream(String fieldName, Reader reader) { + public TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer ts = new Tokenizer(reader) { final char[] cbuf = new char[maxChars]; final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); @@ -406,7 +406,7 @@ } }; - return new TokenStreamInfo(ts, ts); + return new TokenStreamComponents(ts); } } Index: modules/analysis/common/src/java/org/apache/lucene/analysis/tr/TurkishAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/tr/TurkishAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/tr/TurkishAnalyzer.java (revision ) @@ -109,11 +109,11 @@ /** * Creates a - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link 
org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * which tokenizes all the text in the provided {@link Reader}. * * @return A - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * built from an {@link StandardTokenizer} filtered with * {@link StandardFilter}, {@link TurkishLowerCaseFilter}, * {@link StopFilter}, {@link KeywordMarkerFilter} if a stem Index: modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PerFieldAnalyzerWrapper.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PerFieldAnalyzerWrapper.java (revision 1170942) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PerFieldAnalyzerWrapper.java (revision ) @@ -18,14 +18,10 @@ */ import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.index.IndexableField; +import org.apache.lucene.analysis.AnalyzerWrapper; -import java.io.Reader; -import java.io.IOException; import java.util.Collections; import java.util.Map; -import java.util.HashMap; /** * This analyzer is used to facilitate scenarios where different @@ -50,7 +46,7 @@ *
A PerFieldAnalyzerWrapper can be used like any other analyzer, for both indexing * and query parsing. */ -public final class PerFieldAnalyzerWrapper extends Analyzer { +public final class PerFieldAnalyzerWrapper extends AnalyzerWrapper { private final Analyzer defaultAnalyzer; private final Map<String, Analyzer> fieldAnalyzers; @@ -74,50 +70,23 @@ * used for those fields */ public PerFieldAnalyzerWrapper(Analyzer defaultAnalyzer, - Map<String, Analyzer> fieldAnalyzers) { + Map<String, Analyzer> fieldAnalyzers) { this.defaultAnalyzer = defaultAnalyzer; this.fieldAnalyzers = (fieldAnalyzers != null) ? fieldAnalyzers : Collections.<String, Analyzer>emptyMap(); } @Override - public TokenStream tokenStream(String fieldName, Reader reader) { + protected Analyzer getWrappedAnalyzer(String fieldName) { Analyzer analyzer = fieldAnalyzers.get(fieldName); - if (analyzer == null) { - analyzer = defaultAnalyzer; + return (analyzer != null) ? analyzer : defaultAnalyzer; - } + } - return analyzer.tokenStream(fieldName, reader); - } - @Override - public TokenStream reusableTokenStream(String fieldName, Reader reader) throws IOException { - Analyzer analyzer = fieldAnalyzers.get(fieldName); - if (analyzer == null) - analyzer = defaultAnalyzer; - - return analyzer.reusableTokenStream(fieldName, reader); + protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) { + return components; } - /** Return the positionIncrementGap from the analyzer assigned to fieldName */ @Override - public int getPositionIncrementGap(String fieldName) { - Analyzer analyzer = fieldAnalyzers.get(fieldName); - if (analyzer == null) - analyzer = defaultAnalyzer; - return analyzer.getPositionIncrementGap(fieldName); - } - - /** Return the offsetGap from the analyzer assigned to field */ - @Override - public int getOffsetGap(IndexableField field) { - Analyzer analyzer = fieldAnalyzers.get(field.name()); - if (analyzer == null) { - analyzer = defaultAnalyzer; - } - return analyzer.getOffsetGap(field); - } - - @Override public String toString() { return "PerFieldAnalyzerWrapper(" + fieldAnalyzers + ", default=" + defaultAnalyzer + ")"; } Index: modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java =================================================================== --- modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java (revision 1169607) +++ modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiFieldQPHelper.java (revision ) @@ -338,7 +338,7 @@ /** * Return empty tokens for field "f1".
*/ - private static final class AnalyzerReturningNull extends ReusableAnalyzerBase { + private static final class AnalyzerReturningNull extends Analyzer { MockAnalyzer stdAnalyzer = new MockAnalyzer(random); public AnalyzerReturningNull() { Index: lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java (revision 1169607) +++ lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java (revision ) @@ -55,7 +55,7 @@ private static byte[] payload2 = new byte[]{2}; private static byte[] payload4 = new byte[]{4}; - private static class PayloadAnalyzer extends ReusableAnalyzerBase { + private static class PayloadAnalyzer extends Analyzer { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer result = new MockTokenizer(reader, MockTokenizer.SIMPLE, true); Index: modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseLightStemFilter.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseLightStemFilter.java (revision 1160117) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseLightStemFilter.java (revision ) @@ -26,7 +26,6 @@ import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.LowerCaseFilter; import org.apache.lucene.analysis.standard.StandardTokenizer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import static org.apache.lucene.analysis.VocabularyAssert.*; @@ -34,7 +33,7 @@ * Simple tests for {@link PortugueseLightStemFilter} */ public class TestPortugueseLightStemFilter extends BaseTokenStreamTestCase { - private Analyzer analyzer = new ReusableAnalyzerBase() { + private Analyzer analyzer = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Index: lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java (revision 1169607) +++ lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java (revision ) @@ -68,7 +68,7 @@ private void createIndex(int numHits) throws IOException { int numDocs = 500; - final Analyzer analyzer = new ReusableAnalyzerBase() { + final Analyzer analyzer = new Analyzer() { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { return new TokenStreamComponents(new MockTokenizer(reader, MockTokenizer.WHITESPACE, true)); Index: modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java =================================================================== --- modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java (revision 1169607) +++ modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java (revision ) @@ -128,7 +128,7 @@ } } - public static final class QPTestAnalyzer extends ReusableAnalyzerBase { + public static final class QPTestAnalyzer extends Analyzer { /** Filters MockTokenizer with StopFilter. 
*/ @Override @@ -345,7 +345,7 @@ } } - private class SimpleCJKAnalyzer extends ReusableAnalyzerBase { + private class SimpleCJKAnalyzer extends Analyzer { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { return new TokenStreamComponents(new SimpleCJKTokenizer(reader)); @@ -1242,7 +1242,7 @@ } } - private class CannedAnalyzer extends ReusableAnalyzerBase { + private class CannedAnalyzer extends Analyzer { @Override public TokenStreamComponents createComponents(String ignored, Reader alsoIgnored) { return new TokenStreamComponents(new CannedTokenStream()); Index: lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java =================================================================== --- lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java (revision 1169607) +++ lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java (revision ) @@ -194,7 +194,7 @@ return phraseQuery; } - static final class BigramAnalyzer extends ReusableAnalyzerBase { + static final class BigramAnalyzer extends Analyzer { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { return new TokenStreamComponents(new BasicNGramTokenizer(reader)); Index: modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestKStemmer.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestKStemmer.java (revision 1160117) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestKStemmer.java (revision ) @@ -25,13 +25,12 @@ import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.Tokenizer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; /** * Tests for {@link KStemmer} */ public class TestKStemmer extends BaseTokenStreamTestCase { - Analyzer a = new ReusableAnalyzerBase() { + Analyzer a = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, true); Index: modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanLightStemFilter.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanLightStemFilter.java (revision 1160117) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/de/TestGermanLightStemFilter.java (revision ) @@ -24,7 +24,6 @@ import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.Tokenizer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import static org.apache.lucene.analysis.VocabularyAssert.*; @@ -32,7 +31,7 @@ * Simple tests for {@link GermanLightStemFilter} */ public class TestGermanLightStemFilter extends BaseTokenStreamTestCase { - private Analyzer analyzer = new ReusableAnalyzerBase() { + private Analyzer analyzer = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Index: lucene/src/test-framework/org/apache/lucene/analysis/MockPayloadAnalyzer.java =================================================================== --- lucene/src/test-framework/org/apache/lucene/analysis/MockPayloadAnalyzer.java (revision 1169607) 
+++ lucene/src/test-framework/org/apache/lucene/analysis/MockPayloadAnalyzer.java (revision ) @@ -30,7 +30,7 @@ * * **/ -public final class MockPayloadAnalyzer extends ReusableAnalyzerBase { +public final class MockPayloadAnalyzer extends Analyzer { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { Index: modules/analysis/common/src/java/org/apache/lucene/analysis/nl/DutchAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/nl/DutchAnalyzer.java (revision 1161484) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/nl/DutchAnalyzer.java (revision ) @@ -29,7 +29,6 @@ import org.apache.lucene.analysis.standard.StandardTokenizer; import org.apache.lucene.analysis.standard.StandardAnalyzer; // for javadoc import org.apache.lucene.analysis.util.CharArraySet; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import org.apache.lucene.analysis.util.WordlistLoader; import org.apache.lucene.util.Version; @@ -66,7 +65,7 @@ *
NOTE: This class uses the same {@link Version} * dependent settings as {@link StandardAnalyzer}.
*/ -public final class DutchAnalyzer extends ReusableAnalyzerBase { +public final class DutchAnalyzer extends Analyzer { /** File containing default Dutch stopwords. */ public final static String DEFAULT_STOPWORD_FILE = "dutch_stop.txt"; Index: lucene/src/test/org/apache/lucene/index/TestIndexWriterCommit.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestIndexWriterCommit.java (revision 1169607) +++ lucene/src/test/org/apache/lucene/index/TestIndexWriterCommit.java (revision ) @@ -175,7 +175,7 @@ Analyzer analyzer; if (random.nextBoolean()) { // no payloads - analyzer = new ReusableAnalyzerBase() { + analyzer = new Analyzer() { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { return new TokenStreamComponents(new MockTokenizer(reader, MockTokenizer.WHITESPACE, true)); @@ -184,7 +184,7 @@ } else { // fixed length payloads final int length = random.nextInt(200); - analyzer = new ReusableAnalyzerBase() { + analyzer = new Analyzer() { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, true); Index: modules/analysis/common/src/java/org/apache/lucene/analysis/en/EnglishAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/en/EnglishAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/en/EnglishAnalyzer.java (revision ) @@ -89,11 +89,11 @@ /** * Creates a - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * which tokenizes all the text in the provided {@link Reader}. 
* * @return A - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * built from an {@link StandardTokenizer} filtered with * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter} * , {@link KeywordMarkerFilter} if a stem exclusion set is Index: lucene/src/test/org/apache/lucene/index/TestTermVectorsReader.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestTermVectorsReader.java (revision 1169607) +++ lucene/src/test/org/apache/lucene/index/TestTermVectorsReader.java (revision ) @@ -175,7 +175,7 @@ } } - private class MyAnalyzer extends ReusableAnalyzerBase { + private class MyAnalyzer extends Analyzer { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { return new TokenStreamComponents(new MyTokenStream()); Index: modules/analysis/common/src/test/org/apache/lucene/analysis/es/TestSpanishLightStemFilter.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/es/TestSpanishLightStemFilter.java (revision 1160117) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/es/TestSpanishLightStemFilter.java (revision ) @@ -24,7 +24,6 @@ import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.Tokenizer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import static org.apache.lucene.analysis.VocabularyAssert.*; @@ -32,7 +31,7 @@ * Simple tests for {@link SpanishLightStemFilter} */ public class TestSpanishLightStemFilter extends BaseTokenStreamTestCase { - private Analyzer analyzer = new ReusableAnalyzerBase() { + private Analyzer analyzer = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Index: modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2Filter.java =================================================================== --- modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2Filter.java (revision 1169607) +++ modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUNormalizer2Filter.java (revision ) @@ -29,7 +29,7 @@ * Tests the ICUNormalizer2Filter */ public class TestICUNormalizer2Filter extends BaseTokenStreamTestCase { - Analyzer a = new ReusableAnalyzerBase() { + Analyzer a = new Analyzer() { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader); @@ -59,7 +59,7 @@ } public void testAlternate() throws IOException { - Analyzer a = new ReusableAnalyzerBase() { + Analyzer a = new Analyzer() { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader); Index: lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java (revision 1169607) +++ lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java (revision ) @@ -386,7 +386,7 @@ doc.add(newField("field", "a field", TextField.TYPE_STORED)); w.addDocument(doc); - Analyzer analyzer = new ReusableAnalyzerBase(new 
ReusableAnalyzerBase.PerFieldReuseStrategy()) { + Analyzer analyzer = new Analyzer(new Analyzer.PerFieldReuseStrategy()) { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { MockTokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); @@ -454,7 +454,7 @@ // LUCENE-1072 public void testExceptionFromTokenStream() throws IOException { Directory dir = newDirectory(); - IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new ReusableAnalyzerBase() { + IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new Analyzer() { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { @@ -591,7 +591,7 @@ } public void testDocumentsWriterExceptions() throws IOException { - Analyzer analyzer = new ReusableAnalyzerBase(new ReusableAnalyzerBase.PerFieldReuseStrategy()) { + Analyzer analyzer = new Analyzer(new Analyzer.PerFieldReuseStrategy()) { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { MockTokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); @@ -687,7 +687,7 @@ } public void testDocumentsWriterExceptionThreads() throws Exception { - Analyzer analyzer = new ReusableAnalyzerBase(new ReusableAnalyzerBase.PerFieldReuseStrategy()) { + Analyzer analyzer = new Analyzer(new Analyzer.PerFieldReuseStrategy()) { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { MockTokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); Index: modules/analysis/stempel/src/java/org/apache/lucene/analysis/pl/PolishAnalyzer.java =================================================================== --- modules/analysis/stempel/src/java/org/apache/lucene/analysis/pl/PolishAnalyzer.java (revision 1160117) +++ modules/analysis/stempel/src/java/org/apache/lucene/analysis/pl/PolishAnalyzer.java (revision ) @@ -121,11 +121,11 @@ /** * Creates a - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * which tokenizes all the text in the provided {@link Reader}. * * @return A - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * built from an {@link StandardTokenizer} filtered with * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter} * , {@link KeywordMarkerFilter} if a stem exclusion set is Index: modules/analysis/common/src/java/org/apache/lucene/analysis/bg/BulgarianAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/bg/BulgarianAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/bg/BulgarianAnalyzer.java (revision ) @@ -107,11 +107,11 @@ /** * Creates a - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * which tokenizes all the text in the provided {@link Reader}. 
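The TestIndexWriterExceptions hunks above show the migration for analyzers that need per-field reuse: the ReuseStrategy is now handed straight to the Analyzer constructor. A minimal sketch of the post-patch idiom, reusing MockTokenizer from Lucene's test framework:

  Analyzer analyzer = new Analyzer(new Analyzer.PerFieldReuseStrategy()) {
    @Override
    public TokenStreamComponents createComponents(String fieldName, Reader reader) {
      // one cached tokenizer chain per field name (and per thread)
      MockTokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
      return new TokenStreamComponents(tokenizer);
    }
  };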
* * @return A - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * built from an {@link StandardTokenizer} filtered with * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter} * , {@link KeywordMarkerFilter} if a stem exclusion set is Index: modules/analysis/common/src/java/org/apache/lucene/analysis/cz/CzechAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/cz/CzechAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/cz/CzechAnalyzer.java (revision ) @@ -26,7 +26,6 @@ import org.apache.lucene.analysis.standard.StandardFilter; import org.apache.lucene.analysis.standard.StandardTokenizer; import org.apache.lucene.analysis.util.CharArraySet; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import org.apache.lucene.analysis.util.StopwordAnalyzerBase; import org.apache.lucene.analysis.util.WordlistLoader; import org.apache.lucene.util.Version; @@ -122,10 +121,10 @@ /** * Creates - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * used to tokenize all the text in the provided {@link Reader}. * - * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * @return {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * built from a {@link StandardTokenizer} filtered with * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter} * , and {@link CzechStemFilter} (only if version is >= LUCENE_31). If @@ -135,7 +134,7 @@ * {@link CzechStemFilter}. */ @Override - protected ReusableAnalyzerBase.TokenStreamComponents createComponents(String fieldName, + protected TokenStreamComponents createComponents(String fieldName, Reader reader) { final Tokenizer source = new StandardTokenizer(matchVersion, reader); TokenStream result = new StandardFilter(matchVersion, source); Index: modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java =================================================================== --- modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java (revision 1169607) +++ modules/queryparser/src/test/org/apache/lucene/queryparser/classic/TestMultiFieldQueryParser.java (revision ) @@ -300,7 +300,7 @@ /** * Return empty tokens for field "f1". 
*/ - private static class AnalyzerReturningNull extends ReusableAnalyzerBase { + private static class AnalyzerReturningNull extends Analyzer { MockAnalyzer stdAnalyzer = new MockAnalyzer(random); public AnalyzerReturningNull() { Index: modules/analysis/icu/src/java/org/apache/lucene/collation/ICUCollationKeyAnalyzer.java =================================================================== --- modules/analysis/icu/src/java/org/apache/lucene/collation/ICUCollationKeyAnalyzer.java (revision 1160117) +++ modules/analysis/icu/src/java/org/apache/lucene/collation/ICUCollationKeyAnalyzer.java (revision ) @@ -19,8 +19,8 @@ import com.ibm.icu.text.Collator; +import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.core.KeywordTokenizer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import org.apache.lucene.collation.CollationKeyAnalyzer; // javadocs import org.apache.lucene.util.IndexableBinaryStringTools; // javadocs import org.apache.lucene.util.Version; @@ -75,7 +75,7 @@ * versions will encode the bytes with {@link IndexableBinaryStringTools}. * */ -public final class ICUCollationKeyAnalyzer extends ReusableAnalyzerBase { +public final class ICUCollationKeyAnalyzer extends Analyzer { private final Collator collator; private final ICUCollationAttributeFactory factory; private final Version matchVersion; Index: lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/IndexTimeSynonymTest.java =================================================================== --- lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/IndexTimeSynonymTest.java (revision 1169607) +++ lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/IndexTimeSynonymTest.java (revision ) @@ -290,7 +290,7 @@ return token; } - public static final class TokenArrayAnalyzer extends ReusableAnalyzerBase { + public static final class TokenArrayAnalyzer extends Analyzer { final Token[] tokens; public TokenArrayAnalyzer(Token... tokens) { this.tokens = tokens; Index: modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestAnalyzers.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestAnalyzers.java (revision 1169607) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestAnalyzers.java (revision ) @@ -117,7 +117,7 @@ String[] y = StandardTokenizer.TOKEN_TYPES; } - private static class LowerCaseWhitespaceAnalyzer extends ReusableAnalyzerBase { + private static class LowerCaseWhitespaceAnalyzer extends Analyzer { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { Index: modules/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java =================================================================== --- modules/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java (revision 1169607) +++ modules/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java (revision ) @@ -98,7 +98,7 @@ } - public static final class QPTestAnalyzer extends ReusableAnalyzerBase { + public static final class QPTestAnalyzer extends Analyzer { /** Filters MockTokenizer with StopFilter. 
*/ @Override @@ -240,7 +240,7 @@ } } - private class SimpleCJKAnalyzer extends ReusableAnalyzerBase { + private class SimpleCJKAnalyzer extends Analyzer { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { return new TokenStreamComponents(new SimpleCJKTokenizer(reader)); @@ -343,7 +343,7 @@ assertQueryEquals("a OR -b", null, "a -b"); // +,-,! should be directly adjacent to operand (i.e. not separated by whitespace) to be treated as an operator - Analyzer a = new ReusableAnalyzerBase() { + Analyzer a = new Analyzer() { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { return new TokenStreamComponents(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); @@ -1157,7 +1157,7 @@ } /** whitespace+lowercase analyzer with synonyms */ - private class Analyzer1 extends ReusableAnalyzerBase { + private class Analyzer1 extends Analyzer { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, true); @@ -1166,7 +1166,7 @@ } /** whitespace+lowercase analyzer without synonyms */ - private class Analyzer2 extends ReusableAnalyzerBase { + private class Analyzer2 extends Analyzer { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { return new TokenStreamComponents(new MockTokenizer(reader, MockTokenizer.WHITESPACE, true)); @@ -1231,7 +1231,7 @@ } } - private class MockCollationAnalyzer extends ReusableAnalyzerBase { + private class MockCollationAnalyzer extends Analyzer { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, true); Index: modules/analysis/common/src/java/org/apache/lucene/analysis/el/GreekAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/el/GreekAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/el/GreekAnalyzer.java (revision ) @@ -101,10 +101,10 @@ /** * Creates - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * used to tokenize all the text in the provided {@link Reader}. 
* - * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * @return {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * built from a {@link StandardTokenizer} filtered with * {@link GreekLowerCaseFilter}, {@link StandardFilter}, * {@link StopFilter}, and {@link GreekStemFilter} Index: solr/core/src/java/org/apache/solr/analysis/TokenizerChain.java =================================================================== --- solr/core/src/java/org/apache/solr/analysis/TokenizerChain.java (revision 1144761) +++ solr/core/src/java/org/apache/solr/analysis/TokenizerChain.java (revision ) @@ -17,10 +17,7 @@ package org.apache.solr.analysis; -import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.CharStream; -import org.apache.lucene.analysis.CharReader; -import org.apache.lucene.analysis.Tokenizer; +import org.apache.lucene.analysis.*; import java.io.Reader; @@ -52,11 +49,11 @@ public TokenFilterFactory[] getTokenFilterFactories() { return filters; } @Override - public Reader charStream(Reader reader){ + public Reader initReader(Reader reader) { - if( charFilters != null && charFilters.length > 0 ){ + if (charFilters != null && charFilters.length > 0) { CharStream cs = CharReader.get( reader ); - for (int i=0; i synonyms; public SynonymAnalyzer(Map synonyms) { Index: lucene/src/test/org/apache/lucene/search/TestTermRangeQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestTermRangeQuery.java (revision 1169607) +++ lucene/src/test/org/apache/lucene/search/TestTermRangeQuery.java (revision ) @@ -190,7 +190,7 @@ assertFalse("queries with different inclusive are not equal", query.equals(other)); } - private static class SingleCharAnalyzer extends ReusableAnalyzerBase { + private static class SingleCharAnalyzer extends Analyzer { private static class SingleCharTokenizer extends Tokenizer { char[] buffer = new char[1]; Index: modules/analysis/icu/src/test/org/apache/lucene/collation/TestICUCollationKeyFilter.java =================================================================== --- modules/analysis/icu/src/test/org/apache/lucene/collation/TestICUCollationKeyFilter.java (revision 1169607) +++ modules/analysis/icu/src/test/org/apache/lucene/collation/TestICUCollationKeyFilter.java (revision ) @@ -44,7 +44,7 @@ (collator.getCollationKey(secondRangeEndOriginal).toByteArray())); - public final class TestAnalyzer extends ReusableAnalyzerBase { + public final class TestAnalyzer extends Analyzer { private Collator _collator; TestAnalyzer(Collator collator) { Index: lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java (revision 1169607) +++ lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java (revision ) @@ -17,7 +17,6 @@ * limitations under the License. 
*/ -import org.apache.lucene.analysis.ReusableAnalyzerBase; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; @@ -346,7 +345,7 @@ } } - private static class CannedAnalyzer extends ReusableAnalyzerBase { + private static class CannedAnalyzer extends Analyzer { private final TokenAndPos[] tokens; public CannedAnalyzer(TokenAndPos[] tokens) { Index: modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java (revision 1169654) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestWordDelimiterFilter.java (revision ) @@ -213,7 +213,7 @@ final CharArraySet protWords = new CharArraySet(TEST_VERSION_CURRENT, new HashSet(Arrays.asList("NUTCH")), false); /* analyzer that uses whitespace + wdf */ - Analyzer a = new ReusableAnalyzerBase() { + Analyzer a = new Analyzer() { @Override public TokenStreamComponents createComponents(String field, Reader reader) { Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); @@ -241,7 +241,7 @@ new int[] { 1, 1, 1 }); /* analyzer that will consume tokens with large position increments */ - Analyzer a2 = new ReusableAnalyzerBase() { + Analyzer a2 = new Analyzer() { @Override public TokenStreamComponents createComponents(String field, Reader reader) { Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); @@ -274,7 +274,7 @@ new int[] { 6, 14, 19 }, new int[] { 1, 11, 1 }); - Analyzer a3 = new ReusableAnalyzerBase() { + Analyzer a3 = new Analyzer() { @Override public TokenStreamComponents createComponents(String field, Reader reader) { Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); Index: lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java (revision 1169607) +++ lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java (revision ) @@ -55,7 +55,7 @@ @BeforeClass public static void beforeClass() throws Exception { directory = newDirectory(); - Analyzer analyzer = new ReusableAnalyzerBase() { + Analyzer analyzer = new Analyzer() { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { return new TokenStreamComponents(new MockTokenizer(reader, MockTokenizer.WHITESPACE, false)); Index: modules/analysis/common/src/test/org/apache/lucene/analysis/cn/TestChineseTokenizer.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/cn/TestChineseTokenizer.java (revision 1169607) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/cn/TestChineseTokenizer.java (revision ) @@ -64,7 +64,7 @@ * Analyzer that just uses ChineseTokenizer, not ChineseFilter. * convenience to show the behavior of the tokenizer */ - private class JustChineseTokenizerAnalyzer extends ReusableAnalyzerBase { + private class JustChineseTokenizerAnalyzer extends Analyzer { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { return new TokenStreamComponents(new ChineseTokenizer(reader)); @@ -75,7 +75,7 @@ * Analyzer that just uses ChineseFilter, not ChineseTokenizer. 
* convenience to show the behavior of the filter. */ - private class JustChineseFilterAnalyzer extends ReusableAnalyzerBase { + private class JustChineseFilterAnalyzer extends Analyzer { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader); Index: modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestEnglishMinimalStemFilter.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestEnglishMinimalStemFilter.java (revision 1160117) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestEnglishMinimalStemFilter.java (revision ) @@ -24,13 +24,12 @@ import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.Tokenizer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; /** * Simple tests for {@link EnglishMinimalStemFilter} */ public class TestEnglishMinimalStemFilter extends BaseTokenStreamTestCase { - private Analyzer analyzer = new ReusableAnalyzerBase() { + private Analyzer analyzer = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Index: modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUFoldingFilter.java =================================================================== --- modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUFoldingFilter.java (revision 1169607) +++ modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/TestICUFoldingFilter.java (revision ) @@ -27,7 +27,7 @@ * Tests ICUFoldingFilter */ public class TestICUFoldingFilter extends BaseTokenStreamTestCase { - Analyzer a = new ReusableAnalyzerBase() { + Analyzer a = new Analyzer() { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader); Index: modules/analysis/common/src/test/org/apache/lucene/analysis/it/TestItalianLightStemFilter.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/it/TestItalianLightStemFilter.java (revision 1160117) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/it/TestItalianLightStemFilter.java (revision ) @@ -24,7 +24,6 @@ import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.Tokenizer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import static org.apache.lucene.analysis.VocabularyAssert.*; @@ -32,7 +31,7 @@ * Simple tests for {@link ItalianLightStemFilter} */ public class TestItalianLightStemFilter extends BaseTokenStreamTestCase { - private Analyzer analyzer = new ReusableAnalyzerBase() { + private Analyzer analyzer = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Index: lucene/src/java/org/apache/lucene/analysis/ReusableAnalyzerBase.java =================================================================== --- lucene/src/java/org/apache/lucene/analysis/ReusableAnalyzerBase.java (revision 1169607) +++ lucene/src/java/org/apache/lucene/analysis/ReusableAnalyzerBase.java (revision 1169607) @@ -1,308 +0,0 @@ -package org.apache.lucene.analysis; - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - 
* contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import org.apache.lucene.store.AlreadyClosedException; -import org.apache.lucene.util.CloseableThreadLocal; - -import java.io.IOException; -import java.io.Reader; -import java.util.HashMap; -import java.util.Map; - -/** - * An convenience subclass of Analyzer that makes it easy to implement - * {@link TokenStream} reuse. - *

- * ReusableAnalyzerBase is a simplification of Analyzer that supports easy reuse - * for the most common use-cases. Analyzers such as - * PerFieldAnalyzerWrapper that behave differently depending upon the - * field name need to subclass Analyzer directly instead. - *

- *

- * To prevent consistency problems, this class does not allow subclasses to - * extend {@link #reusableTokenStream(String, Reader)} or - * {@link #tokenStream(String, Reader)} directly. Instead, subclasses must - * implement {@link #createComponents(String, Reader)}. - *

- */ -public abstract class ReusableAnalyzerBase extends Analyzer { - - private final ReuseStrategy reuseStrategy; - - public ReusableAnalyzerBase() { - this(new GlobalReuseStrategy()); - } - - public ReusableAnalyzerBase(ReuseStrategy reuseStrategy) { - this.reuseStrategy = reuseStrategy; - } - - /** - * Creates a new {@link TokenStreamComponents} instance for this analyzer. - * - * @param fieldName - * the name of the fields content passed to the - * {@link TokenStreamComponents} sink as a reader - * @param aReader - * the reader passed to the {@link Tokenizer} constructor - * @return the {@link TokenStreamComponents} for this analyzer. - */ - protected abstract TokenStreamComponents createComponents(String fieldName, - Reader aReader); - - /** - * This method uses {@link #createComponents(String, Reader)} to obtain an - * instance of {@link TokenStreamComponents}. It returns the sink of the - * components and stores the components internally. Subsequent calls to this - * method will reuse the previously stored components if and only if the - * {@link TokenStreamComponents#reset(Reader)} method returned - * true. Otherwise a new instance of - * {@link TokenStreamComponents} is created. - * - * @param fieldName the name of the field the created TokenStream is used for - * @param reader the reader the streams source reads from - */ - @Override - public final TokenStream reusableTokenStream(final String fieldName, - final Reader reader) throws IOException { - TokenStreamComponents components = reuseStrategy.getReusableComponents(fieldName); - final Reader r = initReader(reader); - if (components == null) { - components = createComponents(fieldName, r); - reuseStrategy.setReusableComponents(fieldName, components); - } else { - components.reset(r); - } - return components.getTokenStream(); - } - - /** - * This method uses {@link #createComponents(String, Reader)} to obtain an - * instance of {@link TokenStreamComponents} and returns the sink of the - * components. Each calls to this method will create a new instance of - * {@link TokenStreamComponents}. Created {@link TokenStream} instances are - * never reused. - * - * @param fieldName the name of the field the created TokenStream is used for - * @param reader the reader the streams source reads from - */ - @Override - public final TokenStream tokenStream(final String fieldName, - final Reader reader) { - return createComponents(fieldName, initReader(reader)).getTokenStream(); - } - - /** - * Override this if you want to add a CharFilter chain. - */ - protected Reader initReader(Reader reader) { - return reader; - } - - /** - * {@inheritDoc} - */ - @Override - public void close() { - super.close(); - reuseStrategy.close(); - } - - /** - * This class encapsulates the outer components of a token stream. It provides - * access to the source ({@link Tokenizer}) and the outer end (sink), an - * instance of {@link TokenFilter} which also serves as the - * {@link TokenStream} returned by - * {@link Analyzer#tokenStream(String, Reader)} and - * {@link Analyzer#reusableTokenStream(String, Reader)}. - */ - public static class TokenStreamComponents { - protected final Tokenizer source; - protected final TokenStream sink; - - /** - * Creates a new {@link TokenStreamComponents} instance. 
- * - * @param source - * the analyzer's tokenizer - * @param result - * the analyzer's resulting token stream - */ - public TokenStreamComponents(final Tokenizer source, - final TokenStream result) { - this.source = source; - this.sink = result; - } - - /** - * Creates a new {@link TokenStreamComponents} instance. - * - * @param source - * the analyzer's tokenizer - */ - public TokenStreamComponents(final Tokenizer source) { - this.source = source; - this.sink = source; - } - - /** - * Resets the encapsulated components with the given reader. If the components - * cannot be reset, an Exception should be thrown. - * - * @param reader - * a reader to reset the source component - * @throws IOException - * if the component's reset method throws an {@link IOException} - */ - protected void reset(final Reader reader) throws IOException { - source.reset(reader); - } - - /** - * Returns the sink {@link TokenStream} - * - * @return the sink {@link TokenStream} - */ - protected TokenStream getTokenStream() { - return sink; - } - - } - - /** - * Strategy defining how TokenStreamComponents are reused per call to - * {@link ReusableAnalyzerBase#tokenStream(String, java.io.Reader)}. - */ - public static abstract class ReuseStrategy { - - private CloseableThreadLocal storedValue = new CloseableThreadLocal(); - - /** - * Gets the reusable TokenStreamComponents for the field with the given name - * - * @param fieldName Name of the field whose reusable TokenStreamComponents - * are to be retrieved - * @return Reusable TokenStreamComponents for the field, or {@code null} - * if there was no previous components for the field - */ - public abstract TokenStreamComponents getReusableComponents(String fieldName); - - /** - * Stores the given TokenStreamComponents as the reusable components for the - * field with the give name - * - * @param fieldName Name of the field whose TokenStreamComponents are being set - * @param components TokenStreamComponents which are to be reused for the field - */ - public abstract void setReusableComponents(String fieldName, TokenStreamComponents components); - - /** - * Returns the currently stored value - * - * @return Currently stored value or {@code null} if no value is stored - */ - protected final Object getStoredValue() { - try { - return storedValue.get(); - } catch (NullPointerException npe) { - if (storedValue == null) { - throw new AlreadyClosedException("this Analyzer is closed"); - } else { - throw npe; - } - } - } - - /** - * Sets the stored value - * - * @param storedValue Value to store - */ - protected final void setStoredValue(Object storedValue) { - try { - this.storedValue.set(storedValue); - } catch (NullPointerException npe) { - if (storedValue == null) { - throw new AlreadyClosedException("this Analyzer is closed"); - } else { - throw npe; - } - } - } - - /** - * Closes the ReuseStrategy, freeing any resources - */ - public void close() { - storedValue.close(); - storedValue = null; - } - } - - /** - * Implementation of {@link ReuseStrategy} that reuses the same components for - * every field. 
- */ - public final static class GlobalReuseStrategy extends ReuseStrategy { - - /** - * {@inheritDoc} - */ - public TokenStreamComponents getReusableComponents(String fieldName) { - return (TokenStreamComponents) getStoredValue(); - } - - /** - * {@inheritDoc} - */ - public void setReusableComponents(String fieldName, TokenStreamComponents components) { - setStoredValue(components); - } - } - - /** - * Implementation of {@link ReuseStrategy} that reuses components per-field by - * maintaining a Map of TokenStreamComponent per field name. - */ - public static class PerFieldReuseStrategy extends ReuseStrategy { - - /** - * {@inheritDoc} - */ - @SuppressWarnings("unchecked") - public TokenStreamComponents getReusableComponents(String fieldName) { - Map componentsPerField = (Map) getStoredValue(); - return componentsPerField != null ? componentsPerField.get(fieldName) : null; - } - - /** - * {@inheritDoc} - */ - @SuppressWarnings("unchecked") - public void setReusableComponents(String fieldName, TokenStreamComponents components) { - Map componentsPerField = (Map) getStoredValue(); - if (componentsPerField == null) { - componentsPerField = new HashMap(); - setStoredValue(componentsPerField); - } - componentsPerField.put(fieldName, components); - } - } - -} Index: modules/analysis/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java (revision 1170424) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java (revision ) @@ -16,20 +16,19 @@ * limitations under the License. */ +import org.apache.lucene.analysis.AnalyzerWrapper; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.index.Terms; import org.apache.lucene.index.MultiFields; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.core.StopFilter; import org.apache.lucene.util.CharsRef; import org.apache.lucene.util.Version; import org.apache.lucene.util.BytesRef; import java.io.IOException; -import java.io.Reader; import java.util.*; /** @@ -42,7 +41,7 @@ * this term to take 2 seconds. *

*/ -public final class QueryAutoStopWordAnalyzer extends Analyzer { +public final class QueryAutoStopWordAnalyzer extends AnalyzerWrapper { private final Analyzer delegate; private final Map> stopWordsPerField = new HashMap>(); @@ -101,7 +100,7 @@ */ public QueryAutoStopWordAnalyzer( Version matchVersion, - Analyzer delegate, + Analyzer delegate, IndexReader indexReader, float maxPercentDocs) throws IOException { this(matchVersion, delegate, indexReader, indexReader.getFieldNames(IndexReader.FieldOption.INDEXED), maxPercentDocs); @@ -168,81 +167,20 @@ } @Override - public TokenStream tokenStream(String fieldName, Reader reader) { - TokenStream result; - try { - result = delegate.reusableTokenStream(fieldName, reader); - } catch (IOException e) { - result = delegate.tokenStream(fieldName, reader); + protected Analyzer getWrappedAnalyzer(String fieldName) { + return delegate; - } + } - Set stopWords = stopWordsPerField.get(fieldName); - if (stopWords != null) { - result = new StopFilter(matchVersion, result, stopWords); - } - return result; - } - + - private class SavedStreams { - /* the underlying stream */ - TokenStream wrapped; - - /* - * when there are no stopwords for the field, refers to wrapped. - * if there stopwords, it is a StopFilter around wrapped. - */ - TokenStream withStopFilter; - } - - @SuppressWarnings("unchecked") @Override - public TokenStream reusableTokenStream(String fieldName, Reader reader) throws IOException { - /* map of SavedStreams for each field */ - Map streamMap = (Map) getPreviousTokenStream(); - if (streamMap == null) { - streamMap = new HashMap(); - setPreviousTokenStream(streamMap); - } - - SavedStreams streams = streamMap.get(fieldName); - if (streams == null) { - /* an entry for this field does not exist, create one */ - streams = new SavedStreams(); - streamMap.put(fieldName, streams); - streams.wrapped = delegate.reusableTokenStream(fieldName, reader); - - /* if there are any stopwords for the field, save the stopfilter */ + protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) { - Set stopWords = stopWordsPerField.get(fieldName); + Set stopWords = stopWordsPerField.get(fieldName); - if (stopWords != null) { - streams.withStopFilter = new StopFilter(matchVersion, streams.wrapped, stopWords); - } else { - streams.withStopFilter = streams.wrapped; + if (stopWords == null) { + return components; - } + } - } else { - /* - * an entry for this field exists, verify the wrapped stream has not - * changed. if it has not, reuse it, otherwise wrap the new stream. - */ - TokenStream result = delegate.reusableTokenStream(fieldName, reader); - if (result == streams.wrapped) { - /* the wrapped analyzer reused the stream */ - } else { - /* - * the wrapped analyzer did not. 
if there are any stopwords for the - * field, create a new StopFilter around the new stream - */ - streams.wrapped = result; - Set stopWords = stopWordsPerField.get(fieldName); - if (stopWords != null) { - streams.withStopFilter = new StopFilter(matchVersion, streams.wrapped, stopWords); - } else { - streams.withStopFilter = streams.wrapped; + StopFilter stopFilter = new StopFilter(matchVersion, components.getTokenStream(), stopWords); + return new TokenStreamComponents(components.getTokenizer(), stopFilter); - } + } - } - } - return streams.withStopFilter; - } - /** * Provides information on which stop words have been identified for a field * Index: modules/analysis/common/src/java/org/apache/lucene/analysis/util/StopwordAnalyzerBase.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/util/StopwordAnalyzerBase.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/util/StopwordAnalyzerBase.java (revision ) @@ -20,15 +20,14 @@ import java.io.IOException; import java.util.Set; -import org.apache.lucene.analysis.ReusableAnalyzerBase; -import org.apache.lucene.analysis.util.WordlistLoader; +import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.util.Version; /** * Base class for Analyzers that need to make use of stopword sets. * */ -public abstract class StopwordAnalyzerBase extends ReusableAnalyzerBase { +public abstract class StopwordAnalyzerBase extends Analyzer { /** * An immutable stopword set @@ -92,7 +91,7 @@ * if loading the stopwords throws an {@link IOException} */ protected static CharArraySet loadStopwordSet(final boolean ignoreCase, - final Class aClass, final String resource, + final Class aClass, final String resource, final String comment) throws IOException { final Set wordSet = WordlistLoader.getWordSet(aClass, resource, comment); Index: modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LimitTokenCountAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LimitTokenCountAnalyzer.java (revision 1162347) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/LimitTokenCountAnalyzer.java (revision ) @@ -18,17 +18,13 @@ */ import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.index.IndexableField; +import org.apache.lucene.analysis.AnalyzerWrapper; -import java.io.Reader; -import java.io.IOException; - /** * This Analyzer limits the number of tokens while indexing. It is * a replacement for the maximum field length setting inside {@link org.apache.lucene.index.IndexWriter}. 
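Both this class and LimitTokenCountAnalyzer in the following hunk move to the new AnalyzerWrapper base class, which reduces a delegating analyzer to two callbacks; position-increment and offset gaps are delegated automatically. A condensed sketch of the contract, with hypothetical names, assuming the 3.x StopFilter(Version, TokenStream, Set) constructor shown in the diff:

  import java.util.Set;
  import org.apache.lucene.analysis.Analyzer;
  import org.apache.lucene.analysis.AnalyzerWrapper;
  import org.apache.lucene.analysis.core.StopFilter;
  import org.apache.lucene.util.Version;

  final class StopListWrapper extends AnalyzerWrapper {
    private final Analyzer delegate;     // hypothetical: the wrapped analyzer
    private final Set<String> stopWords; // hypothetical: one shared stop set

    StopListWrapper(Analyzer delegate, Set<String> stopWords) {
      this.delegate = delegate;
      this.stopWords = stopWords;
    }

    @Override
    protected Analyzer getWrappedAnalyzer(String fieldName) {
      return delegate; // which analyzer builds the base chain for this field
    }

    @Override
    protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) {
      // keep the delegate's Tokenizer, decorate its sink
      return new TokenStreamComponents(components.getTokenizer(),
          new StopFilter(Version.LUCENE_CURRENT, components.getTokenStream(), stopWords));
    }
  }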
*/ -public final class LimitTokenCountAnalyzer extends Analyzer { +public final class LimitTokenCountAnalyzer extends AnalyzerWrapper { private final Analyzer delegate; private final int maxTokenCount; @@ -39,32 +35,19 @@ this.delegate = delegate; this.maxTokenCount = maxTokenCount; } - + @Override - public TokenStream tokenStream(String fieldName, Reader reader) { - return new LimitTokenCountFilter( - delegate.tokenStream(fieldName, reader), maxTokenCount - ); + protected Analyzer getWrappedAnalyzer(String fieldName) { + return delegate; } - + @Override - public TokenStream reusableTokenStream(String fieldName, Reader reader) throws IOException { - return new LimitTokenCountFilter( - delegate.reusableTokenStream(fieldName, reader), maxTokenCount - ); + protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) { + return new TokenStreamComponents(components.getTokenizer(), + new LimitTokenCountFilter(components.getTokenStream(), maxTokenCount)); } @Override - public int getPositionIncrementGap(String fieldName) { - return delegate.getPositionIncrementGap(fieldName); - } - - @Override - public int getOffsetGap(IndexableField field) { - return delegate.getOffsetGap(field); - } - - @Override public String toString() { return "LimitTokenCountAnalyzer(" + delegate.toString() + ", maxTokenCount=" + maxTokenCount + ")"; } Index: modules/analysis/common/src/java/org/apache/lucene/analysis/snowball/SnowballAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/snowball/SnowballAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/snowball/SnowballAnalyzer.java (revision ) @@ -24,7 +24,6 @@ import org.apache.lucene.analysis.standard.*; import org.apache.lucene.analysis.tr.TurkishLowerCaseFilter; import org.apache.lucene.analysis.util.CharArraySet; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import org.apache.lucene.util.Version; import java.io.Reader; @@ -47,7 +46,7 @@ * This analyzer will be removed in Lucene 5.0 */ @Deprecated -public final class SnowballAnalyzer extends ReusableAnalyzerBase { +public final class SnowballAnalyzer extends Analyzer { private String name; private Set stopSet; private final Version matchVersion; Index: lucene/src/java/org/apache/lucene/analysis/Analyzer.java =================================================================== --- lucene/src/java/org/apache/lucene/analysis/Analyzer.java (revision 1172227) +++ lucene/src/java/org/apache/lucene/analysis/Analyzer.java (revision ) @@ -17,6 +17,7 @@ * limitations under the License. */ +import org.apache.lucene.index.IndexableField; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.CloseableThreadLocal; @@ -26,30 +27,27 @@ import java.util.Map; /** - * An convenience subclass of Analyzer that makes it easy to implement - * {@link TokenStream} reuse. + * An Analyzer builds TokenStreams, which analyze text. It thus represents a + * policy for extracting index terms from text. *

- * ReusableAnalyzerBase is a simplification of Analyzer that supports easy reuse - * for the most common use-cases. Analyzers such as - * PerFieldAnalyzerWrapper that behave differently depending upon the - * field name need to subclass Analyzer directly instead. - *

- *

* To prevent consistency problems, this class does not allow subclasses to * extend {@link #reusableTokenStream(String, Reader)} or * {@link #tokenStream(String, Reader)} directly. Instead, subclasses must * implement {@link #createComponents(String, Reader)}. *

+ *

The {@code Analyzer}-API in Lucene is based on the decorator pattern. + * Therefore all non-abstract subclasses must be final! This is checked + * when Java assertions are enabled. */ -public abstract class ReusableAnalyzerBase extends Analyzer { +public abstract class Analyzer { private final ReuseStrategy reuseStrategy; - public ReusableAnalyzerBase() { + public Analyzer() { this(new GlobalReuseStrategy()); } - public ReusableAnalyzerBase(ReuseStrategy reuseStrategy) { + public Analyzer(ReuseStrategy reuseStrategy) { this.reuseStrategy = reuseStrategy; } @@ -67,18 +65,21 @@ Reader aReader); /** + * Creates a TokenStream that is allowed to be re-used from the previous time + * that the same thread called this method. Callers that do not need to use + * more than one TokenStream at the same time from this analyzer should use + * this method for better performance. + *
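From the caller's side, the reuse contract described in this javadoc looks roughly as follows (standard TokenStream consumer workflow; field name and text are placeholders):

  // reuse path: same thread, one stream at a time
  TokenStream ts = analyzer.reusableTokenStream("body", new StringReader("fox jumped"));
  CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
  ts.reset();
  while (ts.incrementToken()) {
    System.out.println(term.toString());
  }
  ts.end();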

* This method uses {@link #createComponents(String, Reader)} to obtain an * instance of {@link TokenStreamComponents}. It returns the sink of the * components and stores the components internally. Subsequent calls to this - * method will reuse the previously stored components if and only if the - * {@link TokenStreamComponents#reset(Reader)} method returned - * true. Otherwise a new instance of - * {@link TokenStreamComponents} is created. + * method will reuse the previously stored components after resetting them + * through {@link TokenStreamComponents#reset(Reader)}. + *

* * @param fieldName the name of the field the created TokenStream is used for * @param reader the reader the streams source reads from */ - @Override public final TokenStream reusableTokenStream(final String fieldName, final Reader reader) throws IOException { TokenStreamComponents components = reuseStrategy.getReusableComponents(fieldName); @@ -93,16 +94,19 @@ } /** + * Creates a TokenStream which tokenizes all the text in the provided + * Reader. + *

* This method uses {@link #createComponents(String, Reader)} to obtain an * instance of {@link TokenStreamComponents} and returns the sink of the * components. Each calls to this method will create a new instance of * {@link TokenStreamComponents}. Created {@link TokenStream} instances are * never reused. + *

* * @param fieldName the name of the field the created TokenStream is used for * @param reader the reader the streams source reads from */ - @Override public final TokenStream tokenStream(final String fieldName, final Reader reader) { return createComponents(fieldName, initReader(reader)).getTokenStream(); @@ -116,11 +120,43 @@ } /** - * {@inheritDoc} + * Invoked before indexing an IndexableField instance if + * terms have already been added to that field. This allows custom + * analyzers to place an automatic position increment gap between + * IndexableField instances using the same field name. The default + * position increment gap is 0. With a 0 position increment gap and + * the typical default token position increment of 1, all terms in a field, + * including across IndexableField instances, are in successive positions, allowing + * exact PhraseQuery matches, for instance, across IndexableField instance boundaries. + * + * @param fieldName IndexableField name being indexed. + * @return position increment gap, added to the next token emitted from {@link #tokenStream(String,Reader)} */ - @Override + public int getPositionIncrementGap(String fieldName) { + return 0; + } + + /** + * Just like {@link #getPositionIncrementGap}, except for + * Token offsets instead. By default this returns 1 for + * tokenized fields, as if the fields were joined + * with an extra space character, and 0 for un-tokenized + * fields. This method is only called if the field + * produced at least one token for indexing. + * + * @param field the field just indexed + * @return offset gap, added to the next token emitted from {@link #tokenStream(String,Reader)} + */ + public int getOffsetGap(IndexableField field) { + if (field.fieldType().tokenized()) { + return 1; + } else { + return 0; + } + } + + /** Frees persistent resources used by this Analyzer */ public void close() { - super.close(); reuseStrategy.close(); } @@ -179,15 +215,23 @@ * * @return the sink {@link TokenStream} */ - protected TokenStream getTokenStream() { + public TokenStream getTokenStream() { return sink; } + /** + * Returns the component's {@link Tokenizer} + * + * @return Component's {@link Tokenizer} + */ + public Tokenizer getTokenizer() { + return source; - } + } + } /** * Strategy defining how TokenStreamComponents are reused per call to - * {@link ReusableAnalyzerBase#tokenStream(String, java.io.Reader)}. + * {@link Analyzer#tokenStream(String, java.io.Reader)}.
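The new gap methods replace the delegation boilerplate deleted from LimitTokenCountAnalyzer above. Overriding getPositionIncrementGap is the usual way to keep phrase matches from crossing IndexableField boundaries; a small sketch (the gap of 100 is arbitrary):

  Analyzer a = new Analyzer() {
    @Override
    public TokenStreamComponents createComponents(String fieldName, Reader reader) {
      return new TokenStreamComponents(new MockTokenizer(reader, MockTokenizer.WHITESPACE, true));
    }

    @Override
    public int getPositionIncrementGap(String fieldName) {
      return 100; // large gap: no exact PhraseQuery match across IndexableField instances
    }
  };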
*/ public static abstract class ReuseStrategy { Index: lucene/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java =================================================================== --- lucene/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java (revision 1169607) +++ lucene/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java (revision ) @@ -476,7 +476,7 @@ assertEquals(numSpans, cnt); } - final class PayloadAnalyzer extends ReusableAnalyzerBase { + final class PayloadAnalyzer extends Analyzer { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { @@ -530,7 +530,7 @@ } } - public final class TestPayloadAnalyzer extends ReusableAnalyzerBase { + public final class TestPayloadAnalyzer extends Analyzer { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { Index: modules/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchMinimalStemFilter.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchMinimalStemFilter.java (revision 1160117) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchMinimalStemFilter.java (revision ) @@ -24,7 +24,6 @@ import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.Tokenizer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import static org.apache.lucene.analysis.VocabularyAssert.*; @@ -32,7 +31,7 @@ * Simple tests for {@link FrenchMinimalStemFilter} */ public class TestFrenchMinimalStemFilter extends BaseTokenStreamTestCase { - private Analyzer analyzer = new ReusableAnalyzerBase() { + private Analyzer analyzer = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Index: modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestWordnetSynonymParser.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestWordnetSynonymParser.java (revision 1160117) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestWordnetSynonymParser.java (revision ) @@ -25,7 +25,6 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.Tokenizer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; public class TestWordnetSynonymParser extends BaseTokenStreamTestCase { Analyzer analyzer; @@ -46,7 +45,7 @@ parser.add(new StringReader(synonymsFile)); final SynonymMap map = parser.build(); - Analyzer analyzer = new ReusableAnalyzerBase() { + Analyzer analyzer = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); Index: modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseMinimalStemFilter.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseMinimalStemFilter.java (revision 1160117) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseMinimalStemFilter.java (revision ) @@ -26,7 +26,6 @@ import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.LowerCaseFilter; import 
org.apache.lucene.analysis.standard.StandardTokenizer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import static org.apache.lucene.analysis.VocabularyAssert.*; @@ -34,7 +33,7 @@ * Simple tests for {@link PortugueseMinimalStemFilter} */ public class TestPortugueseMinimalStemFilter extends BaseTokenStreamTestCase { - private Analyzer analyzer = new ReusableAnalyzerBase() { + private Analyzer analyzer = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Index: modules/analysis/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java (revision ) @@ -117,10 +117,10 @@ /** * Creates - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * used to tokenize all the text in the provided {@link Reader}. * - * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * @return {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * built from a {@link StandardTokenizer} filtered with * {@link LowerCaseFilter}, {@link StandardFilter}, {@link StopFilter} * , and {@link BrazilianStemFilter}. Index: modules/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchLightStemFilter.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchLightStemFilter.java (revision 1160117) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/fr/TestFrenchLightStemFilter.java (revision ) @@ -24,7 +24,6 @@ import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.Tokenizer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import static org.apache.lucene.analysis.VocabularyAssert.*; @@ -32,7 +31,7 @@ * Simple tests for {@link FrenchLightStemFilter} */ public class TestFrenchLightStemFilter extends BaseTokenStreamTestCase { - private Analyzer analyzer = new ReusableAnalyzerBase() { + private Analyzer analyzer = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Index: modules/analysis/common/src/test/org/apache/lucene/analysis/hu/TestHungarianLightStemFilter.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/hu/TestHungarianLightStemFilter.java (revision 1160117) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/hu/TestHungarianLightStemFilter.java (revision ) @@ -24,7 +24,6 @@ import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.Tokenizer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import static org.apache.lucene.analysis.VocabularyAssert.*; @@ -32,7 +31,7 @@ * Simple tests for {@link HungarianLightStemFilter} */ public class TestHungarianLightStemFilter extends BaseTokenStreamTestCase { - private Analyzer analyzer = new ReusableAnalyzerBase() { + private Analyzer analyzer = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { 
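The remaining hunks below repeat the mechanical part of the migration: each anonymous ReusableAnalyzerBase becomes an anonymous Analyzer with an unchanged createComponents body. For reference, the resulting idiom (filter choice illustrative; omitting a strategy argument defaults to GlobalReuseStrategy):

  Analyzer analyzer = new Analyzer() {
    @Override
    protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
      Tokenizer source = new MockTokenizer(reader, MockTokenizer.WHITESPACE, true);
      return new TokenStreamComponents(source, new PorterStemFilter(source));
    }
  };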
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/core/WhitespaceAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/core/WhitespaceAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/core/WhitespaceAnalyzer.java (revision ) @@ -19,8 +19,8 @@ import java.io.Reader; +import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.util.CharTokenizer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import org.apache.lucene.util.Version; /** @@ -35,7 +35,7 @@ * *
**/ -public final class WhitespaceAnalyzer extends ReusableAnalyzerBase { +public final class WhitespaceAnalyzer extends Analyzer { private final Version matchVersion; Index: modules/analysis/common/src/java/org/apache/lucene/analysis/sv/SwedishAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/sv/SwedishAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/sv/SwedishAnalyzer.java (revision ) @@ -106,11 +106,11 @@ /** * Creates a - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * which tokenizes all the text in the provided {@link Reader}. * * @return A - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * built from an {@link StandardTokenizer} filtered with * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter} * , {@link KeywordMarkerFilter} if a stem exclusion set is Index: lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java (revision 1169607) +++ lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java (revision ) @@ -899,7 +899,7 @@ final Random r = random; Directory dir = newDirectory(); // note this test explicitly disables payloads - final Analyzer analyzer = new ReusableAnalyzerBase() { + final Analyzer analyzer = new Analyzer() { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { return new TokenStreamComponents(new MockTokenizer(reader, MockTokenizer.WHITESPACE, true)); Index: modules/analysis/common/src/java/org/apache/lucene/analysis/fa/PersianAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/fa/PersianAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/fa/PersianAnalyzer.java (revision ) @@ -107,10 +107,10 @@ /** * Creates - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * used to tokenize all the text in the provided {@link Reader}. 
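The rewrite repeated across these hunks is mechanical: each anonymous ReusableAnalyzerBase subclass keeps its createComponents override and only changes its supertype to Analyzer, which now owns the per-thread reuse machinery. A minimal sketch of the resulting idiom (the holder class is illustrative, not part of the patch):

import java.io.Reader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.Tokenizer;

// Hypothetical holder class, not taken from the patch.
class MigratedAnalyzerSketch {
  // Tests now subclass Analyzer directly; the base class caches and
  // resets the TokenStreamComponents built here.
  static final Analyzer ANALYZER = new Analyzer() {
    @Override
    protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
      Tokenizer source = new MockTokenizer(reader, MockTokenizer.WHITESPACE, true);
      return new TokenStreamComponents(source);
    }
  };
}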
* - * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * @return {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * built from a {@link StandardTokenizer} filtered with * {@link LowerCaseFilter}, {@link ArabicNormalizationFilter}, * {@link PersianNormalizationFilter} and Persian Stop words Index: modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymMapFilter.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymMapFilter.java (revision 1160117) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSynonymMapFilter.java (revision ) @@ -33,7 +33,6 @@ import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.tokenattributes.*; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import org.apache.lucene.util.CharsRef; import org.apache.lucene.util._TestUtil; @@ -387,7 +386,7 @@ final SynonymMap map = b.build(); final boolean ignoreCase = random.nextBoolean(); - final Analyzer analyzer = new ReusableAnalyzerBase() { + final Analyzer analyzer = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.SIMPLE, true); @@ -409,7 +408,7 @@ parser.add(new StringReader(testFile)); final SynonymMap map = parser.build(); - Analyzer analyzer = new ReusableAnalyzerBase() { + Analyzer analyzer = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, true); @@ -467,7 +466,7 @@ add("z x c v", "zxcv", keepOrig); add("x c", "xc", keepOrig); final SynonymMap map = b.build(); - Analyzer a = new ReusableAnalyzerBase() { + Analyzer a = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); @@ -507,7 +506,7 @@ add("a b", "ab", keepOrig); add("a b", "ab", keepOrig); final SynonymMap map = b.build(); - Analyzer a = new ReusableAnalyzerBase() { + Analyzer a = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); @@ -527,7 +526,7 @@ add("a b", "ab", keepOrig); add("a b", "ab", keepOrig); final SynonymMap map = b.build(); - Analyzer a = new ReusableAnalyzerBase() { + Analyzer a = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); @@ -545,7 +544,7 @@ final boolean keepOrig = false; add("zoo", "zoo", keepOrig); final SynonymMap map = b.build(); - Analyzer a = new ReusableAnalyzerBase() { + Analyzer a = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); @@ -564,7 +563,7 @@ add("zoo", "zoo", keepOrig); add("zoo", "zoo zoo", keepOrig); final SynonymMap map = b.build(); - Analyzer a = new ReusableAnalyzerBase() { + Analyzer a = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer tokenizer = 
new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); @@ -588,7 +587,7 @@ add("z x c v", "zxcv", keepOrig); add("x c", "xc", keepOrig); final SynonymMap map = b.build(); - Analyzer a = new ReusableAnalyzerBase() { + Analyzer a = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); @@ -633,7 +632,7 @@ final boolean keepOrig = true; add("zoo zoo", "zoo", keepOrig); final SynonymMap map = b.build(); - Analyzer a = new ReusableAnalyzerBase() { + Analyzer a = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); @@ -652,7 +651,7 @@ add("zoo zoo", "zoo", keepOrig); add("zoo", "zoo zoo", keepOrig); final SynonymMap map = b.build(); - Analyzer a = new ReusableAnalyzerBase() { + Analyzer a = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false); Index: lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/OffsetLimitTokenFilterTest.java =================================================================== --- lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/OffsetLimitTokenFilterTest.java (revision 1169607) +++ lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/OffsetLimitTokenFilterTest.java (revision ) @@ -49,7 +49,7 @@ assertTokenStreamContents(filter, new String[] {"short", "toolong", "evenmuchlongertext"}); - checkOneTermReuse(new ReusableAnalyzerBase() { + checkOneTermReuse(new Analyzer() { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { Index: solr/core/src/java/org/apache/solr/analysis/FSTSynonymFilterFactory.java =================================================================== --- solr/core/src/java/org/apache/solr/analysis/FSTSynonymFilterFactory.java (revision 1160117) +++ solr/core/src/java/org/apache/solr/analysis/FSTSynonymFilterFactory.java (revision ) @@ -37,7 +37,6 @@ import org.apache.lucene.analysis.synonym.SynonymMap; import org.apache.lucene.analysis.synonym.SolrSynonymParser; import org.apache.lucene.analysis.synonym.WordnetSynonymParser; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import org.apache.lucene.util.Version; import org.apache.solr.common.ResourceLoader; import org.apache.solr.common.SolrException; @@ -70,7 +69,7 @@ final TokenizerFactory factory = tf == null ? null : loadTokenizerFactory(loader, tf, args); - Analyzer analyzer = new ReusableAnalyzerBase() { + Analyzer analyzer = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer tokenizer = factory == null ? 
new WhitespaceTokenizer(Version.LUCENE_31, reader) : factory.create(reader); Index: modules/analysis/common/src/test/org/apache/lucene/analysis/id/TestIndonesianStemmer.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/id/TestIndonesianStemmer.java (revision 1160117) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/id/TestIndonesianStemmer.java (revision ) @@ -24,14 +24,13 @@ import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.KeywordTokenizer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; /** * Tests {@link IndonesianStemmer} */ public class TestIndonesianStemmer extends BaseTokenStreamTestCase { /* full stemming, no stopwords */ - Analyzer a = new ReusableAnalyzerBase() { + Analyzer a = new Analyzer() { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer tokenizer = new KeywordTokenizer(reader); @@ -112,7 +111,7 @@ } /* inflectional-only stemming */ - Analyzer b = new ReusableAnalyzerBase() { + Analyzer b = new Analyzer() { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer tokenizer = new KeywordTokenizer(reader); Index: modules/analysis/common/src/java/org/apache/lucene/analysis/de/GermanAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/de/GermanAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/de/GermanAnalyzer.java (revision ) @@ -158,10 +158,10 @@ /** * Creates - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * used to tokenize all the text in the provided {@link Reader}. 
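The analyzer javadoc hunks all describe the same two-part return value: a TokenStreamComponents holds the Tokenizer (so reuse can reset it with a new Reader) plus the outermost filter that consumers read from. A hedged sketch of the shape such a createComponents body takes; matchVersion and stopwords stand in for fields of the enclosing analyzer and the chain is illustrative, not copied from GermanAnalyzer:

import java.io.Reader;
import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.core.StopFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.util.Version;

// Illustrative analyzer showing the two-argument TokenStreamComponents form.
final class ChainSketchAnalyzer extends Analyzer {
  private final Version matchVersion;
  private final Set<?> stopwords;

  ChainSketchAnalyzer(Version matchVersion, Set<?> stopwords) {
    this.matchVersion = matchVersion;
    this.stopwords = stopwords;
  }

  @Override
  protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
    final Tokenizer source = new StandardTokenizer(matchVersion, reader);
    TokenStream result = new StandardFilter(matchVersion, source);
    result = new LowerCaseFilter(matchVersion, result);
    result = new StopFilter(matchVersion, result, stopwords);
    // first argument is what gets reset() on reuse, second is what callers consume
    return new TokenStreamComponents(source, result);
  }
}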
* - * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * @return {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * built from a {@link StandardTokenizer} filtered with * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter} * , {@link KeywordMarkerFilter} if a stem exclusion set is Index: modules/queryparser/src/test/org/apache/lucene/queryparser/analyzing/TestAnalyzingQueryParser.java =================================================================== --- modules/queryparser/src/test/org/apache/lucene/queryparser/analyzing/TestAnalyzingQueryParser.java (revision 1169607) +++ modules/queryparser/src/test/org/apache/lucene/queryparser/analyzing/TestAnalyzingQueryParser.java (revision ) @@ -134,7 +134,7 @@ } } -final class ASCIIAnalyzer extends ReusableAnalyzerBase { +final class ASCIIAnalyzer extends Analyzer { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { Index: modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.java (revision 1169607) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.java (revision ) @@ -27,7 +27,6 @@ import java.util.regex.Pattern; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.StopAnalyzer; @@ -67,7 +66,7 @@ * @deprecated (4.0) use the pattern-based analysis in the analysis/pattern package instead. */ @Deprecated -public final class PatternAnalyzer extends ReusableAnalyzerBase { +public final class PatternAnalyzer extends Analyzer { /** "\\W+"; Divides text at non-letters (NOT Character.isLetter(c)) */ public static final Pattern NON_WORD_PATTERN = Pattern.compile("\\W+"); Index: lucene/src/java/org/apache/lucene/analysis/Analyzer.java.old =================================================================== --- lucene/src/java/org/apache/lucene/analysis/Analyzer.java.old (revision ) +++ lucene/src/java/org/apache/lucene/analysis/Analyzer.java.old (revision ) @@ -0,0 +1,155 @@ +package org.apache.lucene.analysis; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import java.io.Reader; +import java.io.IOException; +import java.io.Closeable; +import java.lang.reflect.Modifier; + +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.util.CloseableThreadLocal; +import org.apache.lucene.store.AlreadyClosedException; + +/** An Analyzer builds TokenStreams, which analyze text. It thus represents a + * policy for extracting index terms from text. + * + * Typical implementations first build a Tokenizer, which breaks the stream of + * characters from the Reader into raw Tokens. One or more TokenFilters may + * then be applied to the output of the Tokenizer. + * + * The {@code Analyzer}-API in Lucene is based on the decorator pattern. + * Therefore all non-abstract subclasses must be final or their {@link #tokenStream} + * and {@link #reusableTokenStream} implementations must be final! This is checked + * when Java assertions are enabled. + */ +public abstract class Analyzer implements Closeable { + + protected Analyzer() { + super(); + assert assertFinal(); + } + + private boolean assertFinal() { + try { + final Class clazz = getClass(); + if (!clazz.desiredAssertionStatus()) + return true; + assert clazz.isAnonymousClass() || + (clazz.getModifiers() & (Modifier.FINAL | Modifier.PRIVATE)) != 0 || + ( + Modifier.isFinal(clazz.getMethod("tokenStream", String.class, Reader.class).getModifiers()) && + Modifier.isFinal(clazz.getMethod("reusableTokenStream", String.class, Reader.class).getModifiers()) + ) : + "Analyzer implementation classes or at least their tokenStream() and reusableTokenStream() implementations must be final"; + return true; + } catch (NoSuchMethodException nsme) { + return false; + } + } + + /** Creates a TokenStream which tokenizes all the text in the provided + * Reader. Must be able to handle null field name for + * backward compatibility. + */ + public abstract TokenStream tokenStream(String fieldName, Reader reader); + + /** Creates a TokenStream that is allowed to be re-used + * from the previous time that the same thread called + * this method. Callers that do not need to use more + * than one TokenStream at the same time from this + * analyzer should use this method for better + * performance. + */ + public TokenStream reusableTokenStream(String fieldName, Reader reader) throws IOException { + return tokenStream(fieldName, reader); + } + + private CloseableThreadLocal tokenStreams = new CloseableThreadLocal(); + + /** Used by Analyzers that implement reusableTokenStream + * to retrieve previously saved TokenStreams for re-use + * by the same thread. */ + protected Object getPreviousTokenStream() { + try { + return tokenStreams.get(); + } catch (NullPointerException npe) { + if (tokenStreams == null) { + throw new AlreadyClosedException("this Analyzer is closed"); + } else { + throw npe; + } + } + } + + /** Used by Analyzers that implement reusableTokenStream + * to save a TokenStream for later re-use by the same + * thread. */ + protected void setPreviousTokenStream(Object obj) { + try { + tokenStreams.set(obj); + } catch (NullPointerException npe) { + if (tokenStreams == null) { + throw new AlreadyClosedException("this Analyzer is closed"); + } else { + throw npe; + } + } + } + + /** + * Invoked before indexing an IndexableField instance if + * terms have already been added to that field. This allows custom + * analyzers to place an automatic position increment gap between + * IndexableField instances using the same field name. The default + * position increment gap is 0. With a 0 position increment gap and + * the typical default token position increment of 1, all terms in a field, + * including across IndexableField instances, are in successive positions, allowing + * exact PhraseQuery matches, for instance, across IndexableField instance boundaries. + * + * @param fieldName IndexableField name being indexed. + * @return position increment gap, added to the next token emitted from {@link #tokenStream(String,Reader)} + */ + public int getPositionIncrementGap(String fieldName) { + return 0; + } + + /** + * Just like {@link #getPositionIncrementGap}, except for + * Token offsets instead. By default this returns 1 for + * tokenized fields, as if the fields were joined + * with an extra space character, and 0 for un-tokenized + * fields. This method is only called if the field + * produced at least one token for indexing. + * + * @param field the field just indexed + * @return offset gap, added to the next token emitted from {@link #tokenStream(String,Reader)} + */ + public int getOffsetGap(IndexableField field) { + if (field.fieldType().tokenized()) { + return 1; + } else { + return 0; + } + } + + /** Frees persistent resources used by this Analyzer */ + public void close() { + tokenStreams.close(); + tokenStreams = null; + } +}
Index: lucene/src/test/org/apache/lucene/index/TestPayloads.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestPayloads.java (revision 1169654) +++ lucene/src/test/org/apache/lucene/index/TestPayloads.java (revision ) @@ -405,7 +405,7 @@ /** * This Analyzer uses an WhitespaceTokenizer and PayloadFilter. */ - private static class PayloadAnalyzer extends ReusableAnalyzerBase { + private static class PayloadAnalyzer extends Analyzer { Map fieldToData = new HashMap(); public PayloadAnalyzer() { Index: modules/analysis/common/src/java/org/apache/lucene/analysis/lv/LatvianAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/lv/LatvianAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/lv/LatvianAnalyzer.java (revision ) @@ -104,11 +104,11 @@ /** * Creates a - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * which tokenizes all the text in the provided {@link Reader}.
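The getPositionIncrementGap/getOffsetGap javadoc preserved above is the hook analyzers use to keep phrase queries from matching across the boundary between two values of the same field. A hedged sketch of such an override; the gap size and tokenizer choice are arbitrary, not from the patch:

import java.io.Reader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockTokenizer;

// Hypothetical example: a 100-position hole between successive values
// of a multi-valued field defeats cross-boundary PhraseQuery matches.
final class GapSketchAnalyzer extends Analyzer {
  @Override
  protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
    return new TokenStreamComponents(new MockTokenizer(reader, MockTokenizer.WHITESPACE, true));
  }

  @Override
  public int getPositionIncrementGap(String fieldName) {
    return 100; // the default is 0, i.e. values run together positionally
  }
}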
* * @return A - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * built from an {@link StandardTokenizer} filtered with * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter} * , {@link KeywordMarkerFilter} if a stem exclusion set is Index: solr/core/src/java/org/apache/solr/schema/BoolField.java =================================================================== --- solr/core/src/java/org/apache/solr/schema/BoolField.java (revision 1162347) +++ solr/core/src/java/org/apache/solr/schema/BoolField.java (revision ) @@ -30,7 +30,6 @@ import org.apache.lucene.util.mutable.MutableValue; import org.apache.lucene.util.mutable.MutableValueBool; import org.apache.solr.search.QParser; -import org.apache.solr.search.function.*; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; @@ -70,7 +69,7 @@ protected final static Analyzer boolAnalyzer = new SolrAnalyzer() { @Override - public TokenStreamInfo getStream(String fieldName, Reader reader) { + public TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer tokenizer = new Tokenizer(reader) { final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); boolean done = false; @@ -95,7 +94,7 @@ } }; - return new TokenStreamInfo(tokenizer, tokenizer); + return new TokenStreamComponents(tokenizer); } }; Index: solr/core/src/java/org/apache/solr/analysis/SolrAnalyzer.java =================================================================== --- solr/core/src/java/org/apache/solr/analysis/SolrAnalyzer.java (revision 1144761) +++ solr/core/src/java/org/apache/solr/analysis/SolrAnalyzer.java (revision ) @@ -26,10 +26,10 @@ * */ public abstract class SolrAnalyzer extends Analyzer { - int posIncGap=0; + int posIncGap = 0; - + public void setPositionIncrementGap(int gap) { - posIncGap=gap; + posIncGap = gap; } @Override @@ -38,43 +38,13 @@ } /** wrap the reader in a CharStream, if appropriate */ + @Deprecated - public Reader charStream(Reader reader){ + public Reader charStream(Reader reader) { return reader; } @Override - public TokenStream tokenStream(String fieldName, Reader reader) { - return getStream(fieldName, reader).getTokenStream(); + protected Reader initReader(Reader reader) { + return charStream(reader); } - - public static class TokenStreamInfo { - private final Tokenizer tokenizer; - private final TokenStream tokenStream; - public TokenStreamInfo(Tokenizer tokenizer, TokenStream tokenStream) { - this.tokenizer = tokenizer; - this.tokenStream = tokenStream; - } +} - public Tokenizer getTokenizer() { return tokenizer; } - public TokenStream getTokenStream() { return tokenStream; } - } - - - public abstract TokenStreamInfo getStream(String fieldName, Reader reader); - - @Override - public TokenStream reusableTokenStream(String fieldName, Reader reader) throws IOException { - // if (true) return tokenStream(fieldName, reader); - TokenStreamInfo tsi = (TokenStreamInfo)getPreviousTokenStream(); - if (tsi != null) { - tsi.getTokenizer().reset(charStream(reader)); - // the consumer will currently call reset() on the TokenStream to hit all the filters. - // this isn't necessarily guaranteed by the APIs... 
but is currently done - // by lucene indexing in DocInverterPerField, and in the QueryParser - return tsi.getTokenStream(); - } else { - tsi = getStream(fieldName, reader); - setPreviousTokenStream(tsi); - return tsi.getTokenStream(); - } - } -} Index: modules/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishLightStemFilter.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishLightStemFilter.java (revision 1160117) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/sv/TestSwedishLightStemFilter.java (revision ) @@ -24,7 +24,6 @@ import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.Tokenizer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import static org.apache.lucene.analysis.VocabularyAssert.*; @@ -32,7 +31,7 @@ * Simple tests for {@link SwedishLightStemFilter} */ public class TestSwedishLightStemFilter extends BaseTokenStreamTestCase { - private Analyzer analyzer = new ReusableAnalyzerBase() { + private Analyzer analyzer = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Index: modules/analysis/common/src/java/org/apache/lucene/analysis/fi/FinnishAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/fi/FinnishAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/fi/FinnishAnalyzer.java (revision ) @@ -106,11 +106,11 @@ /** * Creates a - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * which tokenizes all the text in the provided {@link Reader}. * * @return A - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * built from an {@link StandardTokenizer} filtered with * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter} * , {@link KeywordMarkerFilter} if a stem exclusion set is Index: solr/webapp/web/admin/analysis.jsp =================================================================== --- solr/webapp/web/admin/analysis.jsp (revision 1144761) +++ solr/webapp/web/admin/analysis.jsp (revision ) @@ -204,7 +204,7 @@ } } - TokenStream tstream = tfac.create(tchain.charStream(new StringReader(val))); + TokenStream tstream = tfac.create(tchain.initReader(new StringReader(val))); List tokens = getTokens(tstream); if (verbose) { writeHeader(out, tfac.getClass(), tfac.getArgs()); Index: modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiAnalyzerQPHelper.java =================================================================== --- modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiAnalyzerQPHelper.java (revision 1169607) +++ modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestMultiAnalyzerQPHelper.java (revision ) @@ -143,7 +143,7 @@ * Expands "multi" to "multi" and "multi2", both at the same position, and * expands "triplemulti" to "triplemulti", "multi3", and "multi2". 
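The SolrAnalyzer and analysis.jsp hunks above retire the deprecated charStream() in favor of the new Analyzer.initReader() hook, which runs before the Tokenizer sees any characters. A sketch of a direct override; the BufferedReader is just a stand-in for a real CharFilter chain, not something the patch prescribes:

import java.io.BufferedReader;
import java.io.Reader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockTokenizer;

// Illustrative only: whatever Reader initReader() returns is what the
// Tokenizer is built around, so this is where character-level wrapping belongs.
final class InitReaderSketchAnalyzer extends Analyzer {
  @Override
  protected Reader initReader(Reader reader) {
    return new BufferedReader(reader); // stand-in for a CharFilter
  }

  @Override
  protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
    return new TokenStreamComponents(new MockTokenizer(reader, MockTokenizer.WHITESPACE, true));
  }
}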
*/ - private class MultiAnalyzer extends ReusableAnalyzerBase { + private class MultiAnalyzer extends Analyzer { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { @@ -210,7 +210,7 @@ * Analyzes "the quick brown" as: quick(incr=2) brown(incr=1). Does not work * correctly for input other than "the quick brown ...". */ - private class PosIncrementAnalyzer extends ReusableAnalyzerBase { + private class PosIncrementAnalyzer extends Analyzer { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { Index: modules/analysis/common/src/java/org/apache/lucene/analysis/core/StopAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/core/StopAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/core/StopAnalyzer.java (revision ) @@ -95,10 +95,10 @@ /** * Creates - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * used to tokenize all the text in the provided {@link Reader}. * - * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * @return {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * built from a {@link LowerCaseTokenizer} filtered with * {@link StopFilter} */ Index: solr/core/src/test/org/apache/solr/schema/IndexSchemaRuntimeFieldTest.java =================================================================== --- solr/core/src/test/org/apache/solr/schema/IndexSchemaRuntimeFieldTest.java (revision ) +++ solr/core/src/test/org/apache/solr/schema/IndexSchemaRuntimeFieldTest.java (revision ) @@ -0,0 +1,72 @@ +package org.apache.solr.schema; + +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import org.apache.solr.SolrTestCaseJ4; +import org.apache.solr.client.solrj.SolrQuery; +import org.apache.solr.core.SolrCore; +import org.apache.solr.request.LocalSolrQueryRequest; +import org.apache.solr.request.SolrQueryRequest; +import org.junit.BeforeClass; +import org.junit.Test; + +public class IndexSchemaRuntimeFieldTest extends SolrTestCaseJ4 { + + @BeforeClass + public static void beforeClass() throws Exception { + initCore("solrconfig.xml","schema.xml"); + } + + @Test + public void testRuntimeFieldCreation() { + // any field manipulation needs to happen when you know the core will not + // be accepting any requests. Typically this is done within the inform() + // method. 
Since this is a single-threaded test, we can change the fields + // willy-nilly + + SolrCore core = h.getCore(); + IndexSchema schema = core.getSchema(); + final String fieldName = "runtimefield"; + SchemaField sf = new SchemaField( fieldName, schema.getFieldTypes().get( "string" ) ); + schema.getFields().put( fieldName, sf ); + + // also register a new copy field (from our new field) + schema.registerCopyField( fieldName, "dynamic_runtime" ); + schema.refreshAnalyzers(); + + assertU(adoc("id", "10", "title", "test", fieldName, "aaa")); + assertU(commit()); + + SolrQuery query = new SolrQuery( fieldName+":aaa" ); + query.set( "indent", "true" ); + SolrQueryRequest req = new LocalSolrQueryRequest( core, query ); + + assertQ("Make sure they got in", req + ,"//*[@numFound='1']" + ,"//result/doc[1]/int[@name='id'][.='10']" + ); + + // Check to see if our copy field made it out safely + query.setQuery( "dynamic_runtime:aaa" ); + assertQ("Make sure they got in", req + ,"//*[@numFound='1']" + ,"//result/doc[1]/int[@name='id'][.='10']" + ); + clearIndex(); + } +} Index: modules/analysis/common/src/java/org/apache/lucene/collation/CollationKeyAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/collation/CollationKeyAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/collation/CollationKeyAnalyzer.java (revision ) @@ -18,8 +18,8 @@ */ +import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.core.KeywordTokenizer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import org.apache.lucene.util.IndexableBinaryStringTools; // javadoc @link import org.apache.lucene.util.Version; @@ -82,7 +82,7 @@ * versions will encode the bytes with {@link IndexableBinaryStringTools}.
* */ -public final class CollationKeyAnalyzer extends ReusableAnalyzerBase { +public final class CollationKeyAnalyzer extends Analyzer { private final Collator collator; private final CollationAttributeFactory factory; private final Version matchVersion; Index: modules/analysis/common/src/test/org/apache/lucene/analysis/fi/TestFinnishLightStemFilter.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/fi/TestFinnishLightStemFilter.java (revision 1160117) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/fi/TestFinnishLightStemFilter.java (revision ) @@ -24,7 +24,6 @@ import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.Tokenizer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import static org.apache.lucene.analysis.VocabularyAssert.*; @@ -32,7 +31,7 @@ * Simple tests for {@link FinnishLightStemFilter} */ public class TestFinnishLightStemFilter extends BaseTokenStreamTestCase { - private Analyzer analyzer = new ReusableAnalyzerBase() { + private Analyzer analyzer = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Index: lucene/src/test/org/apache/lucene/index/TestTermdocPerf.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestTermdocPerf.java (revision 1169607) +++ lucene/src/test/org/apache/lucene/index/TestTermdocPerf.java (revision ) @@ -22,7 +22,6 @@ import java.util.Random; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.document.Document; @@ -77,7 +76,7 @@ void addDocs(final Random random, Directory dir, final int ndocs, String field, final String val, final int maxTF, final float percentDocs) throws IOException { final RepeatingTokenStream ts = new RepeatingTokenStream(val, random, percentDocs, maxTF); - Analyzer analyzer = new ReusableAnalyzerBase() { + Analyzer analyzer = new Analyzer() { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { return new TokenStreamComponents(ts); Index: modules/analysis/common/src/java/org/apache/lucene/analysis/ro/RomanianAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/ro/RomanianAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/ro/RomanianAnalyzer.java (revision ) @@ -110,11 +110,11 @@ /** * Creates a - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * which tokenizes all the text in the provided {@link Reader}. 
* * @return A - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * built from an {@link StandardTokenizer} filtered with * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter} * , {@link KeywordMarkerFilter} if a stem exclusion set is Index: modules/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/SmartChineseAnalyzer.java =================================================================== --- modules/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/SmartChineseAnalyzer.java (revision 1169607) +++ modules/analysis/smartcn/src/java/org/apache/lucene/analysis/cn/smart/SmartChineseAnalyzer.java (revision ) @@ -25,7 +25,6 @@ import java.util.Set; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import org.apache.lucene.analysis.en.PorterStemFilter; import org.apache.lucene.analysis.util.WordlistLoader; import org.apache.lucene.analysis.TokenStream; @@ -55,7 +54,7 @@ *
* @lucene.experimental */ -public final class SmartChineseAnalyzer extends ReusableAnalyzerBase { +public final class SmartChineseAnalyzer extends Analyzer { private final Set stopWords; Index: lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java =================================================================== --- lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java (revision 1169607) +++ lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java (revision ) @@ -48,7 +48,7 @@ public class TokenSourcesTest extends LuceneTestCase { private static final String FIELD = "text"; - private static final class OverlapAnalyzer extends ReusableAnalyzerBase { + private static final class OverlapAnalyzer extends Analyzer { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { Index: solr/core/src/test/org/apache/solr/schema/IndexSchemaTest.java =================================================================== --- solr/core/src/test/org/apache/solr/schema/IndexSchemaTest.java (revision 1173776) +++ solr/core/src/test/org/apache/solr/schema/IndexSchemaTest.java (revision ) @@ -17,27 +17,26 @@ package org.apache.solr.schema; -import java.util.HashMap; -import java.util.Map; - +import org.apache.lucene.search.similarities.SimilarityProvider; import org.apache.solr.SolrTestCaseJ4; -import org.apache.solr.client.solrj.SolrQuery; import org.apache.solr.common.params.CommonParams; import org.apache.solr.common.params.MapSolrParams; import org.apache.solr.core.SolrCore; import org.apache.solr.request.LocalSolrQueryRequest; import org.apache.solr.request.SolrQueryRequest; import org.apache.solr.search.similarities.MockConfigurableSimilarityProvider; -import org.apache.lucene.search.similarities.SimilarityProvider; import org.junit.BeforeClass; import org.junit.Test; +import java.util.HashMap; +import java.util.Map; + public class IndexSchemaTest extends SolrTestCaseJ4 { @BeforeClass public static void beforeClass() throws Exception { initCore("solrconfig.xml","schema.xml"); - } + } /** * This test assumes the schema includes: @@ -45,22 +44,22 @@ * */ @Test - public void testDynamicCopy() + public void testDynamicCopy() { SolrCore core = h.getCore(); assertU(adoc("id", "10", "title", "test", "aaa_dynamic", "aaa")); assertU(commit()); - + Map args = new HashMap(); args.put( CommonParams.Q, "title:test" ); args.put( "indent", "true" ); SolrQueryRequest req = new LocalSolrQueryRequest( core, new MapSolrParams( args) ); - + assertQ("Make sure they got in", req ,"//*[@numFound='1']" ,"//result/doc[1]/int[@name='id'][.='10']" ); - + args = new HashMap(); args.put( CommonParams.Q, "aaa_dynamic:aaa" ); args.put( "indent", "true" ); @@ -80,46 +79,15 @@ ); clearIndex(); } - + @Test - public void testRuntimeFieldCreation() - { - // any field manipulation needs to happen when you know the core will not - // be accepting any requests. Typically this is done within the inform() - // method. 
Since this is a single threaded test, we can change the fields - // willi-nilly - + public void testSimilarityProviderFactory() { SolrCore core = h.getCore(); - IndexSchema schema = core.getSchema(); - final String fieldName = "runtimefield"; - SchemaField sf = new SchemaField( fieldName, schema.getFieldTypes().get( "string" ) ); - schema.getFields().put( fieldName, sf ); - - // also register a new copy field (from our new field) - schema.registerCopyField( fieldName, "dynamic_runtime" ); - schema.refreshAnalyzers(); - - assertU(adoc("id", "10", "title", "test", fieldName, "aaa")); - assertU(commit()); - - SolrQuery query = new SolrQuery( fieldName+":aaa" ); - query.set( "indent", "true" ); - SolrQueryRequest req = new LocalSolrQueryRequest( core, query ); - - assertQ("Make sure they got in", req - ,"//*[@numFound='1']" - ,"//result/doc[1]/int[@name='id'][.='10']" - ); - - // Check to see if our copy field made it out safely - query.setQuery( "dynamic_runtime:aaa" ); - assertQ("Make sure they got in", req - ,"//*[@numFound='1']" - ,"//result/doc[1]/int[@name='id'][.='10']" - ); - clearIndex(); + SimilarityProvider similarityProvider = core.getSchema().getSimilarityProvider(); + assertTrue("wrong class", similarityProvider instanceof MockConfigurableSimilarityProvider); + assertEquals("is there an echo?", ((MockConfigurableSimilarityProvider)similarityProvider).getPassthrough()); } - + @Test public void testIsDynamicField() throws Exception { SolrCore core = h.getCore(); @@ -134,6 +102,5 @@ SolrCore core = h.getCore(); IndexSchema schema = core.getSchema(); assertFalse(schema.getField("id").multiValued()); - } } Index: modules/analysis/common/src/java/org/apache/lucene/analysis/eu/BasqueAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/eu/BasqueAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/eu/BasqueAnalyzer.java (revision ) @@ -105,11 +105,11 @@ /** * Creates a - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * which tokenizes all the text in the provided {@link Reader}. 
* * @return A - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * built from an {@link StandardTokenizer} filtered with * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter} * , {@link KeywordMarkerFilter} if a stem exclusion set is Index: lucene/src/test/org/apache/lucene/index/TestIndexWriter.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestIndexWriter.java (revision 1169607) +++ lucene/src/test/org/apache/lucene/index/TestIndexWriter.java (revision ) @@ -1706,7 +1706,7 @@ dir.close(); } - static final class StringSplitAnalyzer extends ReusableAnalyzerBase { + static final class StringSplitAnalyzer extends Analyzer { @Override public TokenStreamComponents createComponents(String fieldName, Reader reader) { return new TokenStreamComponents(new StringSplitTokenizer(reader)); Index: solr/core/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java =================================================================== --- solr/core/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java (revision 1144761) +++ solr/core/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java (revision ) @@ -113,7 +113,7 @@ } } - TokenStream tokenStream = tfac.create(tokenizerChain.charStream(new StringReader(value))); + TokenStream tokenStream = tfac.create(tokenizerChain.initReader(new StringReader(value))); List tokens = analyzeTokenStream(tokenStream); namedList.add(tokenStream.getClass().getName(), convertTokensToNamedLists(tokens, context)); @@ -197,7 +197,7 @@ /** * Converts the list of Tokens to a list of NamedLists representing the tokens. * - * @param tokens Tokens to convert + * @param tokenList Tokens to convert * @param context The analysis context * * @return List of NamedLists containing the relevant information taken from the tokens Index: lucene/src/test/org/apache/lucene/search/payloads/PayloadHelper.java =================================================================== --- lucene/src/test/org/apache/lucene/search/payloads/PayloadHelper.java (revision 1169607) +++ lucene/src/test/org/apache/lucene/search/payloads/PayloadHelper.java (revision ) @@ -55,7 +55,7 @@ public IndexReader reader; - public final class PayloadAnalyzer extends ReusableAnalyzerBase { + public final class PayloadAnalyzer extends Analyzer { public PayloadAnalyzer() { super(new PerFieldReuseStrategy()); Index: modules/analysis/common/src/java/org/apache/lucene/analysis/core/KeywordAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/core/KeywordAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/core/KeywordAnalyzer.java (revision ) @@ -19,13 +19,13 @@ import java.io.Reader; -import org.apache.lucene.analysis.ReusableAnalyzerBase; +import org.apache.lucene.analysis.Analyzer; /** * "Tokenizes" the entire stream as a single token. This is useful * for data like zip codes, ids, and some product names. 
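PayloadHelper's PayloadAnalyzer above passes a PerFieldReuseStrategy to the Analyzer constructor, opting out of the default one-cached-stream-per-thread behavior because its components differ per field. A minimal sketch of the same opt-in; the class name and tokenizer are illustrative:

import java.io.Reader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.Tokenizer;

// Illustrative: one cached TokenStreamComponents per (thread, field)
// instead of one per thread, for analyzers whose chain depends on the
// field name.
final class PerFieldSketchAnalyzer extends Analyzer {
  PerFieldSketchAnalyzer() {
    super(new PerFieldReuseStrategy());
  }

  @Override
  protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
    Tokenizer source = new MockTokenizer(reader, MockTokenizer.WHITESPACE, true);
    return new TokenStreamComponents(source);
  }
}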
*/ -public final class KeywordAnalyzer extends ReusableAnalyzerBase { +public final class KeywordAnalyzer extends Analyzer { public KeywordAnalyzer() { } Index: modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java =================================================================== --- modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java (revision 1160117) +++ modules/analysis/icu/src/test/org/apache/lucene/analysis/icu/segmentation/TestICUTokenizer.java (revision ) @@ -22,7 +22,6 @@ import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.icu.ICUNormalizer2Filter; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import java.io.IOException; import java.io.Reader; @@ -61,7 +60,7 @@ assertTokenStreamContents(tokenizer, expected); } - private Analyzer a = new ReusableAnalyzerBase() { + private Analyzer a = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Index: modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSolrSynonymParser.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSolrSynonymParser.java (revision 1160117) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/synonym/TestSolrSynonymParser.java (revision ) @@ -27,7 +27,6 @@ import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.en.EnglishAnalyzer; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import org.junit.Test; /** @@ -48,7 +47,7 @@ parser.add(new StringReader(testFile)); final SynonymMap map = parser.build(); - Analyzer analyzer = new ReusableAnalyzerBase() { + Analyzer analyzer = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.WHITESPACE, true); @@ -121,7 +120,7 @@ SolrSynonymParser parser = new SolrSynonymParser(true, true, new MockAnalyzer(random, MockTokenizer.KEYWORD, false)); parser.add(new StringReader(testFile)); final SynonymMap map = parser.build(); - Analyzer analyzer = new ReusableAnalyzerBase() { + Analyzer analyzer = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Tokenizer tokenizer = new MockTokenizer(reader, MockTokenizer.KEYWORD, false); Index: modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestPorterStemFilter.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestPorterStemFilter.java (revision 1160117) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/en/TestPorterStemFilter.java (revision ) @@ -24,7 +24,6 @@ import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.miscellaneous.KeywordMarkerFilter; import org.apache.lucene.analysis.util.CharArraySet; -import org.apache.lucene.analysis.ReusableAnalyzerBase; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; @@ -36,7 +35,7 @@ * Test the PorterStemFilter with Martin Porter's test data. 
*/ public class TestPorterStemFilter extends BaseTokenStreamTestCase { - Analyzer a = new ReusableAnalyzerBase() { + Analyzer a = new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName, Reader reader) { Index: lucene/src/java/org/apache/lucene/analysis/AnalyzerWrapper.java =================================================================== --- lucene/src/java/org/apache/lucene/analysis/AnalyzerWrapper.java (revision ) +++ lucene/src/java/org/apache/lucene/analysis/AnalyzerWrapper.java (revision ) @@ -0,0 +1,89 @@ +package org.apache.lucene.analysis; + +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import org.apache.lucene.index.IndexableField; + +import java.io.Reader; + +/** + * Extension to {@link Analyzer} suitable for Analyzers which wrap + * other Analyzers. + *
+ * {@link #getWrappedAnalyzer(String)} allows the Analyzer + * to wrap multiple Analyzers which are selected on a per field basis. + *
+ * {@link #wrapComponents(String, Analyzer.TokenStreamComponents)} allows the + * TokenStreamComponents of the wrapped Analyzer to then be wrapped + * (such as adding a new {@link TokenFilter}) to form new TokenStreamComponents. + */ +public abstract class AnalyzerWrapper extends Analyzer { + + /** + * Creates a new AnalyzerWrapper. Since the {@link Analyzer.ReuseStrategy} of + * the wrapped Analyzers is unknown, {@link Analyzer.PerFieldReuseStrategy} is assumed. + */ + protected AnalyzerWrapper() { + super(new PerFieldReuseStrategy()); + } + + /** + * Retrieves the wrapped Analyzer appropriate for analyzing the field with + * the given name. + * + * @param fieldName Name of the field which is to be analyzed + * @return Analyzer for the field with the given name. Assumed to be non-null + */ + protected abstract Analyzer getWrappedAnalyzer(String fieldName); + + /** + * Wraps / alters the given TokenStreamComponents, taken from the wrapped + * Analyzer, to form new components. It is through this method that new + * TokenFilters can be added by AnalyzerWrappers. + * + * @param fieldName Name of the field which is to be analyzed + * @param components TokenStreamComponents taken from the wrapped Analyzer + * @return Wrapped / altered TokenStreamComponents. + */ + protected abstract TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components); + + /** + * {@inheritDoc} + */ + @Override + protected final TokenStreamComponents createComponents(String fieldName, Reader aReader) { + return wrapComponents(fieldName, getWrappedAnalyzer(fieldName).createComponents(fieldName, aReader)); + } + + /** + * {@inheritDoc} + */ + @Override + public final int getPositionIncrementGap(String fieldName) { + return getWrappedAnalyzer(fieldName).getPositionIncrementGap(fieldName); + } + + /** + * {@inheritDoc} + */ + @Override + public final int getOffsetGap(IndexableField field) { + return getWrappedAnalyzer(field.name()).getOffsetGap(field); + } +} Index: modules/analysis/common/src/java/org/apache/lucene/analysis/ru/RussianAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/ru/RussianAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/ru/RussianAnalyzer.java (revision ) @@ -139,10 +139,10 @@ /** * Creates - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * used to tokenize all the text in the provided {@link Reader}.
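A concrete AnalyzerWrapper is then just the two hooks shown in the new class above. This hypothetical subclass, not part of the patch, appends one TokenFilter to whatever the delegate built; it assumes the TokenStreamComponents accessors getTokenizer()/getTokenStream() are visible to wrappers:

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.AnalyzerWrapper;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.util.Version;

// Hypothetical wrapper: same delegate for every field, plus one filter.
public final class LowerCasingWrapper extends AnalyzerWrapper {
  private final Version matchVersion;
  private final Analyzer delegate;

  public LowerCasingWrapper(Version matchVersion, Analyzer delegate) {
    this.matchVersion = matchVersion;
    this.delegate = delegate;
  }

  @Override
  protected Analyzer getWrappedAnalyzer(String fieldName) {
    return delegate; // could instead select an analyzer per field
  }

  @Override
  protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) {
    // keep the delegate's Tokenizer, decorate its outermost stream
    TokenStream sink = new LowerCaseFilter(matchVersion, components.getTokenStream());
    return new TokenStreamComponents(components.getTokenizer(), sink);
  }
}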
* - * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * @return {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * built from a {@link StandardTokenizer} filtered with * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter} * , {@link KeywordMarkerFilter} if a stem exclusion set is Index: modules/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/pt/PortugueseAnalyzer.java (revision ) @@ -106,11 +106,11 @@ /** * Creates a - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * which tokenizes all the text in the provided {@link Reader}. * * @return A - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * built from an {@link StandardTokenizer} filtered with * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter} * , {@link KeywordMarkerFilter} if a stem exclusion set is Index: modules/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/no/NorwegianAnalyzer.java (revision ) @@ -106,11 +106,11 @@ /** * Creates a - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * which tokenizes all the text in the provided {@link Reader}. * * @return A - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * built from an {@link StandardTokenizer} filtered with * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter} * , {@link KeywordMarkerFilter} if a stem exclusion set is Index: modules/analysis/common/src/java/org/apache/lucene/analysis/it/ItalianAnalyzer.java =================================================================== --- modules/analysis/common/src/java/org/apache/lucene/analysis/it/ItalianAnalyzer.java (revision 1160117) +++ modules/analysis/common/src/java/org/apache/lucene/analysis/it/ItalianAnalyzer.java (revision ) @@ -123,11 +123,11 @@ /** * Creates a - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * which tokenizes all the text in the provided {@link Reader}. 
* * @return A - * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents} + * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} * built from an {@link StandardTokenizer} filtered with * {@link StandardFilter}, {@link ElisionFilter}, {@link LowerCaseFilter}, {@link StopFilter} * , {@link KeywordMarkerFilter} if a stem exclusion set is Index: modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/precedence/TestPrecedenceQueryParser.java =================================================================== --- modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/precedence/TestPrecedenceQueryParser.java (revision 1169607) +++ modules/queryparser/src/test/org/apache/lucene/queryparser/flexible/precedence/TestPrecedenceQueryParser.java (revision ) @@ -112,7 +112,7 @@ } } - public static final class QPTestAnalyzer extends ReusableAnalyzerBase { + public static final class QPTestAnalyzer extends Analyzer { /** Filters MockTokenizer with StopFilter. */ @Override