Index: solr/core/src/test/org/apache/solr/analysis/TestReversedWildcardFilterFactory.java
===================================================================
--- solr/core/src/test/org/apache/solr/analysis/TestReversedWildcardFilterFactory.java	(revision 1144761)
+++ solr/core/src/test/org/apache/solr/analysis/TestReversedWildcardFilterFactory.java	(revision )
@@ -86,7 +86,7 @@
     String text = "one two three si\uD834\uDD1Ex";
 
     // field one
-    TokenStream input = a.tokenStream("one", new StringReader(text));
+    TokenStream input = a.reusableTokenStream("one", new StringReader(text));
     assertTokenStreamContents(input,
         new String[] { "\u0001eno", "one", "\u0001owt", "two",
             "\u0001eerht", "three", "\u0001x\uD834\uDD1Eis", "si\uD834\uDD1Ex" },
@@ -95,7 +95,7 @@
         new int[] { 1, 0, 1, 0, 1, 0, 1, 0 }
     );
     // field two
-    input = a.tokenStream("two", new StringReader(text));
+    input = a.reusableTokenStream("two", new StringReader(text));
     assertTokenStreamContents(input,
         new String[] { "\u0001eno", "\u0001owt",
             "\u0001eerht", "\u0001x\uD834\uDD1Eis" },
@@ -104,7 +104,7 @@
         new int[] { 1, 1, 1, 1 }
     );
     // field three
-    input = a.tokenStream("three", new StringReader(text));
+    input = a.reusableTokenStream("three", new StringReader(text));
     assertTokenStreamContents(input,
         new String[] { "one", "two", "three", "si\uD834\uDD1Ex" },
         new int[] { 0, 4, 8, 14 },
Index: lucene/src/test/org/apache/lucene/index/TestLongPostings.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestLongPostings.java	(revision 1162347)
+++ lucene/src/test/org/apache/lucene/index/TestLongPostings.java	(revision )
@@ -47,7 +47,7 @@
       if (other != null && s.equals(other)) {
         continue;
       }
-      final TokenStream ts = a.tokenStream("foo", new StringReader(s));
+      final TokenStream ts = a.reusableTokenStream("foo", new StringReader(s));
       final TermToBytesRefAttribute termAtt = ts.getAttribute(TermToBytesRefAttribute.class);
       final BytesRef termBytes = termAtt.getBytesRef();
       int count = 0;
Index: modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java
===================================================================
--- modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java	(revision 1162347)
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java	(revision )
@@ -98,8 +98,9 @@
 
   // LUCENE-1441
   public void testOffsets() throws Exception {
-    TokenStream stream = new KeywordAnalyzer().tokenStream("field", new StringReader("abcd"));
+    TokenStream stream = new KeywordAnalyzer().reusableTokenStream("field", new StringReader("abcd"));
     OffsetAttribute offsetAtt = stream.addAttribute(OffsetAttribute.class);
+    stream.reset();
     assertTrue(stream.incrementToken());
     assertEquals(0, offsetAtt.startOffset());
     assertEquals(4, offsetAtt.endOffset());
Index: modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopAnalyzer.java
===================================================================
--- modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopAnalyzer.java	(revision 1040463)
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestStopAnalyzer.java	(revision )
@@ -48,9 +48,10 @@
   public void testDefaults() throws IOException {
     assertTrue(stop != null);
     StringReader reader = new StringReader("This is a test of the english stop analyzer");
-    TokenStream stream = stop.tokenStream("test", reader);
+    TokenStream stream = stop.reusableTokenStream("test", reader);
     assertTrue(stream != null);
     CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
+    stream.reset();
 
     while (stream.incrementToken()) {
       assertFalse(inValidTokens.contains(termAtt.toString()));
@@ -64,7 +65,7 @@
     stopWordsSet.add("analyzer");
     StopAnalyzer newStop = new StopAnalyzer(Version.LUCENE_40, stopWordsSet);
     StringReader reader = new StringReader("This is a good test of the english stop analyzer");
-    TokenStream stream = newStop.tokenStream("test", reader);
+    TokenStream stream = newStop.reusableTokenStream("test", reader);
     assertNotNull(stream);
     CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
 
@@ -82,7 +83,7 @@
     StopAnalyzer newStop = new StopAnalyzer(TEST_VERSION_CURRENT, stopWordsSet);
     StringReader reader = new StringReader("This is a good test of the english stop analyzer with positions");
     int expectedIncr[] = { 1, 1, 1, 3, 1, 1, 1, 2, 1};
-    TokenStream stream = newStop.tokenStream("test", reader);
+    TokenStream stream = newStop.reusableTokenStream("test", reader);
     assertNotNull(stream);
     int i = 0;
     CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
Index: lucene/src/test-framework/org/apache/lucene/analysis/BaseTokenStreamTestCase.java
===================================================================
--- lucene/src/test-framework/org/apache/lucene/analysis/BaseTokenStreamTestCase.java	(revision 1145158)
+++ lucene/src/test-framework/org/apache/lucene/analysis/BaseTokenStreamTestCase.java	(revision )
@@ -183,7 +183,7 @@
   }
 
   public static void assertAnalyzesTo(Analyzer a, String input, String[] output, int startOffsets[], int endOffsets[], String types[], int posIncrements[]) throws IOException {
-    assertTokenStreamContents(a.tokenStream("dummy", new StringReader(input)), output, startOffsets, endOffsets, types, posIncrements, input.length());
+    assertTokenStreamContents(a.reusableTokenStream("dummy", new StringReader(input)), output, startOffsets, endOffsets, types, posIncrements, input.length());
   }
 
   public static void assertAnalyzesTo(Analyzer a, String input, String[] output) throws IOException {
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.java	(revision 1175297)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.java	(revision )
@@ -322,7 +322,8 @@
    * as one might think - kudos to the Sun regex developers.
   */
  private static final class PatternTokenizer extends Tokenizer {
-
+
+    private final Pattern pattern;
    private String str;
    private final boolean toLowerCase;
    private Matcher matcher;
@@ -332,6 +333,7 @@
    private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
 
    public PatternTokenizer(String str, Pattern pattern, boolean toLowerCase) {
+      this.pattern = pattern;
      this.str = str;
      this.matcher = pattern.matcher(str);
      this.toLowerCase = toLowerCase;
@@ -375,6 +377,7 @@
    public void reset(Reader input) throws IOException {
      super.reset(input);
      this.str = PatternAnalyzer.toString(input);
+      this.matcher = pattern.matcher(this.str);
    }
 
    @Override
Index: modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/PatternAnalyzerTest.java
===================================================================
--- modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/PatternAnalyzerTest.java	(revision 1169607)
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/PatternAnalyzerTest.java	(revision )
@@ -124,12 +124,12 @@
     assertAnalyzesTo(analyzer, document, expected);
 
     // analysis with a "FastStringReader"
-    TokenStream ts = analyzer.tokenStream("dummy",
+    TokenStream ts = analyzer.reusableTokenStream("dummy",
         new PatternAnalyzer.FastStringReader(document));
     assertTokenStreamContents(ts, expected);
 
     // analysis of a String, uses PatternAnalyzer.tokenStream(String, String)
-    TokenStream ts2 = analyzer.tokenStream("dummy", new StringReader(document));
+    TokenStream ts2 = analyzer.reusableTokenStream("dummy", new StringReader(document));
     assertTokenStreamContents(ts2, expected);
   }
 }
Index: modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPerFieldAnalzyerWrapper.java
===================================================================
--- modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPerFieldAnalzyerWrapper.java	(revision 1170942)
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPerFieldAnalzyerWrapper.java	(revision )
@@ -36,18 +36,21 @@
     PerFieldAnalyzerWrapper analyzer =
       new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT), analyzerPerField);
 
-    TokenStream tokenStream = analyzer.tokenStream("field",
+    TokenStream tokenStream = analyzer.reusableTokenStream("field",
         new StringReader(text));
     CharTermAttribute termAtt = tokenStream.getAttribute(CharTermAttribute.class);
+    tokenStream.reset();
 
     assertTrue(tokenStream.incrementToken());
     assertEquals("WhitespaceAnalyzer does not lowercase",
                  "Qwerty",
                  termAtt.toString());
 
-    tokenStream = analyzer.tokenStream("special",
+    tokenStream = analyzer.reusableTokenStream("special",
         new StringReader(text));
     termAtt = tokenStream.getAttribute(CharTermAttribute.class);
+    tokenStream.reset();
+
     assertTrue(tokenStream.incrementToken());
     assertEquals("SimpleAnalyzer lowercases",
                  "qwerty",
Index: lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
===================================================================
--- lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java	(revision 1175297)
+++ lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java	(revision )
@@ -100,6 +100,7 @@
       TokenStream stream = TokenSources.getAnyTokenStream(searcher
          .getIndexReader(), hits.scoreDocs[i].doc, FIELD_NAME, doc, analyzer);
+      stream.reset();
 
       Fragmenter fragmenter = new SimpleSpanFragmenter(scorer);
 
@@ -155,7 +156,8 @@
   */
  private static String highlightField(Query query, String fieldName, String text)
      throws IOException, InvalidTokenOffsetsException {
-    TokenStream tokenStream = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true).tokenStream(fieldName, new StringReader(text));
+    TokenStream tokenStream = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)
+        .reusableTokenStream(fieldName, new StringReader(text));
    // Assuming "<B>", "</B>" used to highlight
    SimpleHTMLFormatter formatter = new SimpleHTMLFormatter();
    QueryScorer scorer = new QueryScorer(query, fieldName, FIELD_NAME);
@@ -176,8 +178,9 @@
    for (int i = 0; i < hits.totalHits; i++) {
      String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
 
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME,
+      TokenStream tokenStream = analyzer.reusableTokenStream(FIELD_NAME,
          new StringReader(text));
+      tokenStream.reset();
 
      highlighter.setTextFragmenter(new SimpleFragmenter(40));
 
      String result = highlighter.getBestFragments(tokenStream, text, maxNumFragmentsRequired,
@@ -255,7 +258,8 @@
    for (int i = 0; i < hits.totalHits; i++) {
      String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.reusableTokenStream(FIELD_NAME, new StringReader(text));
+      tokenStream.reset();
 
      highlighter.setTextFragmenter(new SimpleFragmenter(40));
 
@@ -284,7 +288,8 @@
    for (int i = 0; i < hits.totalHits; i++) {
      String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.reusableTokenStream(FIELD_NAME, new StringReader(text));
+      tokenStream.reset();
 
      highlighter.setTextFragmenter(new SimpleFragmenter(40));
 
@@ -313,7 +318,8 @@
    for (int i = 0; i < hits.totalHits; i++) {
      String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.reusableTokenStream(FIELD_NAME, new StringReader(text));
+      tokenStream.reset();
 
      highlighter.setTextFragmenter(new SimpleFragmenter(40));
 
@@ -338,7 +344,8 @@
    for (int i = 0; i < hits.totalHits; i++) {
      String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.reusableTokenStream(FIELD_NAME, new StringReader(text));
+      tokenStream.reset();
 
      highlighter.setTextFragmenter(new SimpleFragmenter(40));
 
@@ -362,7 +369,8 @@
    for (int i = 0; i < hits.totalHits; i++) {
      String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.reusableTokenStream(FIELD_NAME, new StringReader(text));
+      tokenStream.reset();
 
      highlighter.setTextFragmenter(new SimpleFragmenter(40));
 
@@ -387,7 +395,8 @@
    for (int i = 0; i < hits.totalHits; i++) {
      String text = searcher.doc(hits.scoreDocs[i].doc).get(NUMERIC_FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.reusableTokenStream(FIELD_NAME, new StringReader(text));
+      tokenStream.reset();
 
      highlighter.setTextFragmenter(new SimpleFragmenter(40));
 
@@ -415,7 +424,8 @@
    for (int i = 0; i < hits.totalHits; i++) {
      String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.reusableTokenStream(FIELD_NAME, new StringReader(text));
+      tokenStream.reset();
 
      String result = highlighter.getBestFragments(tokenStream, text, maxNumFragmentsRequired,
          "...");
@@ -437,7 +447,8 @@
    for (int i = 0; i < hits.totalHits; i++) {
      String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.reusableTokenStream(FIELD_NAME, new StringReader(text));
+      tokenStream.reset();
 
      QueryScorer scorer = new QueryScorer(query, FIELD_NAME);
      Highlighter highlighter = new Highlighter(this, scorer);
@@ -467,7 +478,8 @@
    for (int i = 0; i < hits.totalHits; i++) {
      String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.reusableTokenStream(FIELD_NAME, new StringReader(text));
+      tokenStream.reset();
 
      highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer, 5));
 
@@ -490,7 +502,8 @@
    for (int i = 0; i < hits.totalHits; i++) {
      String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.reusableTokenStream(FIELD_NAME, new StringReader(text));
+      tokenStream.reset();
 
      highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer, 20));
 
@@ -521,7 +534,8 @@
    for (int i = 0; i < hits.totalHits; i++) {
      String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME,new StringReader(text));
+      TokenStream tokenStream = analyzer.reusableTokenStream(FIELD_NAME,new StringReader(text));
+      tokenStream.reset();
 
      highlighter.setTextFragmenter(new SimpleFragmenter(40));
 
@@ -592,7 +606,8 @@
    int maxNumFragmentsRequired = 2;
    for (int i = 0; i < hits.totalHits; i++) {
      String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.reusableTokenStream(FIELD_NAME, new StringReader(text));
+      tokenStream.reset();
 
      String result = highlighter.getBestFragments(tokenStream, text, maxNumFragmentsRequired,
          "...");
@@ -767,7 +782,8 @@
    QueryScorer scorer;
    TokenStream tokenStream;
 
-    tokenStream = analyzer.tokenStream(HighlighterTest.FIELD_NAME, new StringReader(text));
+    tokenStream = analyzer.reusableTokenStream(HighlighterTest.FIELD_NAME, new StringReader(text));
+    tokenStream.reset();
 
    scorer = new QueryScorer(query, HighlighterTest.FIELD_NAME);
 
@@ -795,7 +811,8 @@
    QueryScorer scorer;
    TokenStream tokenStream;
 
-    tokenStream = analyzer.tokenStream(HighlighterTest.FIELD_NAME, new StringReader(text));
+    tokenStream = analyzer.reusableTokenStream(HighlighterTest.FIELD_NAME, new StringReader(text));
+    tokenStream.reset();
 
    scorer = new QueryScorer(query, null);
 
@@ -823,7 +840,8 @@
    QueryScorer scorer;
    TokenStream tokenStream;
 
-    tokenStream = analyzer.tokenStream(HighlighterTest.FIELD_NAME, new StringReader(text));
+    tokenStream = analyzer.reusableTokenStream(HighlighterTest.FIELD_NAME, new StringReader(text));
+    tokenStream.reset();
 
    scorer = new QueryScorer(query, "random_field", HighlighterTest.FIELD_NAME);
 
@@ -996,7 +1014,8 @@
      numHighlights = 0;
      for (int i = 0; i < hits.totalHits; i++) {
        String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-        TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+        TokenStream tokenStream = analyzer.reusableTokenStream(FIELD_NAME, new StringReader(text));
+        tokenStream.reset();
        Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream,
            HighlighterTest.this);
@@ -1010,7 +1029,8 @@
      numHighlights = 0;
      for (int i = 0; i < hits.totalHits; i++) {
        String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-        TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+        TokenStream tokenStream = analyzer.reusableTokenStream(FIELD_NAME, new StringReader(text));
+        tokenStream.reset();
        Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream,
            HighlighterTest.this);
        highlighter.getBestFragment(analyzer, FIELD_NAME, text);
@@ -1022,7 +1042,8 @@
      for (int i = 0; i < hits.totalHits; i++) {
        String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
 
-        TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+        TokenStream tokenStream = analyzer.reusableTokenStream(FIELD_NAME, new StringReader(text));
+        tokenStream.reset();
        Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream,
            HighlighterTest.this);
        highlighter.getBestFragments(analyzer, FIELD_NAME, text, 10);
@@ -1059,7 +1080,8 @@
      Highlighter highlighter = getHighlighter(wTerms, HighlighterTest.this);// new
                                                                             // Highlighter(new
                                                                             // QueryTermScorer(wTerms));
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(texts[0]));
+      TokenStream tokenStream = analyzer.reusableTokenStream(FIELD_NAME, new StringReader(texts[0]));
+      tokenStream.reset();
      highlighter.setTextFragmenter(new SimpleFragmenter(2));
 
      String result = highlighter.getBestFragment(tokenStream, texts[0]).trim();
@@ -1068,7 +1090,8 @@
      // readjust weights
      wTerms[1].setWeight(50f);
-      tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(texts[0]));
+      tokenStream = analyzer.reusableTokenStream(FIELD_NAME, new StringReader(texts[0]));
+      tokenStream.reset();
      highlighter = getHighlighter(wTerms, HighlighterTest.this);
      highlighter.setTextFragmenter(new SimpleFragmenter(2));
 
@@ -1101,12 +1124,14 @@
      query.add(new TermQuery(new Term("bookid", "soccer")), Occur.SHOULD);
      query.add(new TermQuery(new Term("bookid", "footie")), Occur.SHOULD);
 
-      TokenStream tokenStream = analyzer.tokenStream(null, new StringReader(s));
+      TokenStream tokenStream = analyzer.reusableTokenStream(null, new StringReader(s));
+      tokenStream.reset();
      Highlighter highlighter = getHighlighter(query, null, tokenStream, HighlighterTest.this);
 
      // Get 3 best fragments and seperate with a "..."
-      tokenStream = analyzer.tokenStream(null, new StringReader(s));
+      tokenStream = analyzer.reusableTokenStream(null, new StringReader(s));
+      tokenStream.reset();
      String result = highlighter.getBestFragments(tokenStream, s, 3, "...");
 
      String expectedResult = "football-<B>soccer</B> in the euro 2004 <B>footie</B> competition";
@@ -1131,7 +1156,8 @@
    for (int i = 0; i < hits.totalHits; i++) {
      String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.reusableTokenStream(FIELD_NAME, new StringReader(text));
+      tokenStream.reset();
      Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream,
          HighlighterTest.this);
      String result = highlighter.getBestFragment(tokenStream, text);
@@ -1154,7 +1180,8 @@
    for (int i = 0; i < hits.totalHits; i++) {
      String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.reusableTokenStream(FIELD_NAME, new StringReader(text));
+      tokenStream.reset();
      Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream,
          HighlighterTest.this);// new Highlighter(this, new
@@ -1162,7 +1189,8 @@
      highlighter.setTextFragmenter(new SimpleFragmenter(20));
      String stringResults[] = highlighter.getBestFragments(tokenStream, text, 10);
 
-      tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+      tokenStream = analyzer.reusableTokenStream(FIELD_NAME, new StringReader(text));
+      tokenStream.reset();
      TextFragment fragmentResults[] = highlighter.getBestTextFragments(tokenStream, text,
          true, 10);
 
@@ -1192,7 +1220,8 @@
    public void run() throws Exception {
      numHighlights = 0;
      doSearching(new TermQuery(new Term(FIELD_NAME, "meat")));
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(texts[0]));
+      TokenStream tokenStream = analyzer.reusableTokenStream(FIELD_NAME, new StringReader(texts[0]));
+      tokenStream.reset();
      Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream,
          HighlighterTest.this);// new Highlighter(this, new
      // QueryTermScorer(query));
@@ -1230,8 +1259,9 @@
      sb.append("stoppedtoken");
    }
    SimpleHTMLFormatter fm = new SimpleHTMLFormatter();
-    Highlighter hg = getHighlighter(query, "data", analyzer.tokenStream(
-        "data", new StringReader(sb.toString())), fm);// new Highlighter(fm,
+    TokenStream tokenStream = analyzer.reusableTokenStream("data", new StringReader(sb.toString()));
+    tokenStream.reset();
+    Highlighter hg = getHighlighter(query, "data", tokenStream, fm);// new Highlighter(fm,
    // new
    // QueryTermScorer(query));
    hg.setTextFragmenter(new NullFragmenter());
@@ -1266,7 +1296,10 @@
    String text = "this is a text with searchterm in it";
    SimpleHTMLFormatter fm = new SimpleHTMLFormatter();
-    Highlighter hg = getHighlighter(query, "text", new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopWords, true).tokenStream("text", new StringReader(text)), fm);
+    TokenStream tokenStream = new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopWords, true)
+        .reusableTokenStream("text", new StringReader(text));
+    tokenStream.reset();
+    Highlighter hg = getHighlighter(query, "text", tokenStream, fm);
    hg.setTextFragmenter(new NullFragmenter());
    hg.setMaxDocCharsToAnalyze(36);
    String match = hg.getBestFragment(new MockAnalyzer(random, MockTokenizer.SIMPLE, true, stopWords, true), "text", text);
@@ -1308,7 +1341,8 @@
    for (int i = 0; i < hits.totalHits; i++) {
      String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.reusableTokenStream(FIELD_NAME, new StringReader(text));
+      tokenStream.reset();
      Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream,
          HighlighterTest.this, false);
 
      highlighter.setTextFragmenter(new SimpleFragmenter(40));
@@ -1337,7 +1371,8 @@
    doSearching(new TermQuery(new Term(FIELD_NAME, "aninvalidquerywhichshouldyieldnoresults")));
 
    for (String text : texts) {
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.reusableTokenStream(FIELD_NAME, new StringReader(text));
+      tokenStream.reset();
      Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream,
          HighlighterTest.this);
      String result = highlighter.getBestFragment(tokenStream, text);
@@ -1377,7 +1412,8 @@
      }
    });
    highlighter.setTextFragmenter(new SimpleFragmenter(2000));
-    TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(rawDocContent));
+    TokenStream tokenStream = analyzer.reusableTokenStream(FIELD_NAME, new StringReader(rawDocContent));
+    tokenStream.reset();
 
    String encodedSnippet = highlighter.getBestFragments(tokenStream, rawDocContent, 1, "");
    // An ugly bit of XML creation:
@@ -1477,7 +1513,7 @@
      @Override
      public boolean incrementToken() throws IOException {
        if(iter.hasNext()) {
-          Token token = iter.next();
+          Token token = iter.next();
          clearAttributes();
          termAtt.setEmpty().append(token);
          posIncrAtt.setPositionIncrement(token.getPositionIncrement());
@@ -1486,7 +1522,12 @@
        }
        return false;
      }
-
+
+      @Override
+      public void reset() throws IOException {
+        super.reset();
+        iter = lst.iterator();
+      }
    };
  }
 
@@ -1532,6 +1573,12 @@
        }
        return false;
      }
+
+      @Override
+      public void reset() throws IOException {
+        super.reset();
+        iter = lst.iterator();
+      }
    };
  }
 
@@ -1717,7 +1764,8 @@
      final int expectedHighlights) throws Exception {
    for (int i = 0; i < hits.totalHits; i++) {
      String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME);
-      TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
+      TokenStream tokenStream = analyzer.reusableTokenStream(FIELD_NAME, new StringReader(text));
+      tokenStream.reset();
      QueryScorer scorer = new QueryScorer(query, FIELD_NAME);
      Highlighter highlighter = new Highlighter(this, scorer);
 
@@ -1952,7 +2000,8 @@
    int maxNumFragmentsRequired = 2;
    String fragmentSeparator = "...";
    Scorer scorer = null;
-    TokenStream tokenStream = analyzer.tokenStream(HighlighterTest.FIELD_NAME, new StringReader(text));
+    TokenStream tokenStream = analyzer.reusableTokenStream(HighlighterTest.FIELD_NAME, new StringReader(text));
+    tokenStream.reset();
    if (mode == QUERY) {
      scorer = new QueryScorer(query);
    } else if (mode == QUERY_TERM) {
Index: modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java
===================================================================
--- modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java	(revision 1162347)
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java	(revision )
@@ -23,6 +23,7 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.document.Document;
@@ -38,12 +39,11 @@
  public void testLimitTokenCountAnalyzer() throws IOException {
    Analyzer a = new LimitTokenCountAnalyzer(new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 2);
    // dont use assertAnalyzesTo here, as the end offset is not the end of the string!
-    assertTokenStreamContents(a.tokenStream("dummy", new StringReader("1 2 3 4 5")), new String[] { "1", "2" }, new int[] { 0, 3 }, new int[] { 1, 4 }, 4);
+    assertTokenStreamContents(a.reusableTokenStream("dummy", new StringReader("1 2 3 4 5")), new String[] { "1", "2" }, new int[] { 0, 3 }, new int[] { 1, 4 }, 4);
    assertTokenStreamContents(a.reusableTokenStream("dummy", new StringReader("1 2 3 4 5")), new String[] { "1", "2" }, new int[] { 0, 2 }, new int[] { 1, 3 }, 3);
 
    a = new LimitTokenCountAnalyzer(new StandardAnalyzer(TEST_VERSION_CURRENT), 2);
    // dont use assertAnalyzesTo here, as the end offset is not the end of the string!
-    assertTokenStreamContents(a.tokenStream("dummy", new StringReader("1 2 3 4 5")), new String[] { "1", "2" }, new int[] { 0, 2 }, new int[] { 1, 3 }, 3);
    assertTokenStreamContents(a.reusableTokenStream("dummy", new StringReader("1 2 3 4 5")), new String[] { "1", "2" }, new int[] { 0, 2 }, new int[] { 1, 3 }, 3);
  }
Index: modules/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java
===================================================================
--- modules/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java	(revision 1170424)
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java	(revision )
@@ -133,7 +133,7 @@
    QueryAutoStopWordAnalyzer a = new QueryAutoStopWordAnalyzer(
        TEST_VERSION_CURRENT,
        new MockAnalyzer(random, MockTokenizer.WHITESPACE, false), reader, 10);
-    TokenStream ts = a.tokenStream("repetitiveField", new StringReader("this boring"));
+    TokenStream ts = a.reusableTokenStream("repetitiveField", new StringReader("this boring"));
    assertTokenStreamContents(ts, new String[] { "this" });
  }
 }
Index: modules/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java
===================================================================
--- modules/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java	(revision 1162347)
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java	(revision )
@@ -87,7 +87,9 @@
    Analyzer analyzer = new MockAnalyzer(random, MockTokenizer.WHITESPACE, false);
    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
    Document doc = new Document();
-    TeeSinkTokenFilter tee = new TeeSinkTokenFilter(analyzer.tokenStream("field", new StringReader("abcd ")));
+    TokenStream tokenStream = analyzer.reusableTokenStream("field", new StringReader("abcd "));
+    tokenStream.reset();
+    TeeSinkTokenFilter tee = new TeeSinkTokenFilter(tokenStream);
    TokenStream sink = tee.newSinkTokenStream();
    FieldType ft = new FieldType(TextField.TYPE_UNSTORED);
    ft.setStoreTermVectors(true);
Index: modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
===================================================================
--- modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java	(revision 1143415)
+++ modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java	(revision )
@@ -958,8 +958,8 @@
 
  private void assertEqualCollation(Analyzer a1, Analyzer a2, String text)
      throws Exception {
-    TokenStream ts1 = a1.tokenStream("bogus", new StringReader(text));
-    TokenStream ts2 = a2.tokenStream("bogus", new StringReader(text));
+    TokenStream ts1 = a1.reusableTokenStream("bogus", new StringReader(text));
+    TokenStream ts2 = a2.reusableTokenStream("bogus", new StringReader(text));
    ts1.reset();
    ts2.reset();
    TermToBytesRefAttribute termAtt1 = ts1.addAttribute(TermToBytesRefAttribute.class);
@@ -1007,7 +1007,7 @@
 
    // Default analyzer, maxShingleSize, and outputUnigrams
    Benchmark benchmark = execBenchmark(getShingleConfig(""));
-    benchmark.getRunData().getAnalyzer().tokenStream
+    benchmark.getRunData().getAnalyzer().reusableTokenStream
        ("bogus", new StringReader(text)).close();
    assertEqualShingle(benchmark.getRunData().getAnalyzer(), text,
        new String[] {"one", "one two", "two", "two three",
Index: solr/core/src/test/org/apache/solr/highlight/HighlighterTest.java
===================================================================
--- solr/core/src/test/org/apache/solr/highlight/HighlighterTest.java	(revision 1144761)
+++ solr/core/src/test/org/apache/solr/highlight/HighlighterTest.java	(revision )
@@ -155,8 +155,11 @@
  public void testTermOffsetsTokenStream() throws Exception {
    String[] multivalued = { "a b c d", "e f g", "h", "i j k l m n" };
    Analyzer a1 = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
+    TokenStream tokenStream = a1.reusableTokenStream("", new StringReader("a b c d e f g h i j k l m n"));
+    tokenStream.reset();
+
    TermOffsetsTokenStream tots = new TermOffsetsTokenStream(
-      a1.tokenStream( "", new StringReader( "a b c d e f g h i j k l m n" ) ) );
+      tokenStream);
    for( String v : multivalued ){
      TokenStream ts1 = tots.getMultiValuedTokenStream( v.length() );
      Analyzer a2 = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
Index: lucene/src/test/org/apache/lucene/index/TestTermVectorsWriter.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestTermVectorsWriter.java	(revision 1167668)
+++ lucene/src/test/org/apache/lucene/index/TestTermVectorsWriter.java	(revision )
@@ -136,7 +136,7 @@
    Analyzer analyzer = new MockAnalyzer(random);
    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer));
    Document doc = new Document();
-    TokenStream stream = analyzer.tokenStream("field", new StringReader("abcd "));
+    TokenStream stream = analyzer.reusableTokenStream("field", new StringReader("abcd "));
    stream.reset(); // TODO: wierd to reset before wrapping with CachingTokenFilter... correct?
    stream = new CachingTokenFilter(stream);
    FieldType customType = new FieldType(TextField.TYPE_UNSTORED);
Index: modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java
===================================================================
--- modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java	(revision 1170942)
+++ modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java	(revision )
@@ -88,7 +88,7 @@
 
    PhraseQuery q = new PhraseQuery();
 
-    TokenStream ts = analyzer.tokenStream("content", new StringReader("this sentence"));
+    TokenStream ts = analyzer.reusableTokenStream("content", new StringReader("this sentence"));
    int j = -1;
 
    PositionIncrementAttribute posIncrAtt = ts.addAttribute(PositionIncrementAttribute.class);
@@ -117,7 +117,7 @@
 
    BooleanQuery q = new BooleanQuery();
 
-    TokenStream ts = analyzer.tokenStream("content", new StringReader("test sentence"));
+    TokenStream ts = analyzer.reusableTokenStream("content", new StringReader("test sentence"));
    CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);