Index: solr/src/test/org/apache/solr/analysis/TestBufferedTokenStream.java
===================================================================
--- solr/src/test/org/apache/solr/analysis/TestBufferedTokenStream.java (revision 948225)
+++ solr/src/test/org/apache/solr/analysis/TestBufferedTokenStream.java (working copy)
@@ -35,9 +35,9 @@
public static class AB_Q_Stream extends BufferedTokenStream {
public AB_Q_Stream(TokenStream input) {super(input);}
protected Token process(Token t) throws IOException {
- if ("A".equals(new String(t.termBuffer(), 0, t.termLength()))) {
+ if ("A".equals(new String(t.buffer(), 0, t.length()))) {
Token t2 = read();
- if (t2!=null && "B".equals(new String(t2.termBuffer(), 0, t2.termLength()))) t.setTermBuffer("Q");
+ if (t2!=null && "B".equals(new String(t2.buffer(), 0, t2.length()))) t.setEmpty().append("Q");
if (t2!=null) pushBack(t2);
}
return t;
@@ -48,8 +48,8 @@
public static class AB_AAB_Stream extends BufferedTokenStream {
public AB_AAB_Stream(TokenStream input) {super(input);}
protected Token process(Token t) throws IOException {
- if ("A".equals(new String(t.termBuffer(), 0, t.termLength())) &&
- "B".equals(new String(peek(1).termBuffer(), 0, peek(1).termLength())))
+ if ("A".equals(new String(t.buffer(), 0, t.length())) &&
+ "B".equals(new String(peek(1).buffer(), 0, peek(1).length())))
write((Token)t.clone());
return t;
}
Index: solr/src/test/org/apache/solr/analysis/TestRemoveDuplicatesTokenFilterFactory.java
===================================================================
--- solr/src/test/org/apache/solr/analysis/TestRemoveDuplicatesTokenFilterFactory.java (revision 948225)
+++ solr/src/test/org/apache/solr/analysis/TestRemoveDuplicatesTokenFilterFactory.java (working copy)
@@ -52,7 +52,7 @@
if (toks.hasNext()) {
clearAttributes();
Token tok = toks.next();
- termAtt.setEmpty().append(tok.term());
+ termAtt.setEmpty().append(tok);
offsetAtt.setOffset(tok.startOffset(), tok.endOffset());
posIncAtt.setPositionIncrement(tok.getPositionIncrement());
return true;
Index: solr/src/test/org/apache/solr/analysis/TestSynonymMap.java
===================================================================
--- solr/src/test/org/apache/solr/analysis/TestSynonymMap.java (revision 948225)
+++ solr/src/test/org/apache/solr/analysis/TestSynonymMap.java (working copy)
@@ -262,7 +262,7 @@
Token[] tokens = ((SynonymMap)map.submap.get( src )).synonyms;
boolean inc = false;
for( Token token : tokens ){
- if( exp.equals( new String(token.termBuffer(), 0, token.termLength()) ) )
+ if( exp.equals( new String(token.buffer(), 0, token.length()) ) )
inc = true;
}
assertTrue( inc );
Index: solr/src/test/org/apache/solr/spelling/SimpleQueryConverter.java
===================================================================
--- solr/src/test/org/apache/solr/spelling/SimpleQueryConverter.java (revision 948225)
+++ solr/src/test/org/apache/solr/spelling/SimpleQueryConverter.java (working copy)
@@ -54,7 +54,7 @@
ts.reset();
while (ts.incrementToken()){
Token tok = new Token();
- tok.setTermBuffer(termAtt.buffer(), 0, termAtt.length());
+ tok.copyBuffer(termAtt.buffer(), 0, termAtt.length());
tok.setOffset(offsetAtt.startOffset(), offsetAtt.endOffset());
tok.setFlags(flagsAtt.getFlags());
tok.setPayload(payloadAtt.getPayload());
Index: solr/src/test/org/apache/solr/spelling/SpellingQueryConverterTest.java
===================================================================
--- solr/src/test/org/apache/solr/spelling/SpellingQueryConverterTest.java (revision 948225)
+++ solr/src/test/org/apache/solr/spelling/SpellingQueryConverterTest.java (working copy)
@@ -88,7 +88,7 @@
for (Token token : tokens) {
int start = token.startOffset();
int end = token.endOffset();
- if (!s.substring(start, end).equals(token.term())) return false;
+ if (!s.substring(start, end).equals(token.toString())) return false;
}
return true;
}
Index: solr/src/java/org/apache/solr/analysis/BufferedTokenStream.java
===================================================================
--- solr/src/java/org/apache/solr/analysis/BufferedTokenStream.java (revision 948225)
+++ solr/src/java/org/apache/solr/analysis/BufferedTokenStream.java (working copy)
@@ -150,7 +150,7 @@
return null;
} else {
Token token = new Token();
- token.setTermBuffer(termAtt.buffer(), 0, termAtt.length());
+ token.copyBuffer(termAtt.buffer(), 0, termAtt.length());
token.setOffset(offsetAtt.startOffset(), offsetAtt.endOffset());
token.setType(typeAtt.type());
token.setFlags(flagsAtt.getFlags());
@@ -163,7 +163,7 @@
/** old api emulation for back compat */
private boolean writeToken(Token token) throws IOException {
clearAttributes();
- termAtt.copyBuffer(token.termBuffer(), 0, token.termLength());
+ termAtt.copyBuffer(token.buffer(), 0, token.length());
offsetAtt.setOffset(token.startOffset(), token.endOffset());
typeAtt.setType(token.type());
flagsAtt.setFlags(token.getFlags());
Index: solr/src/java/org/apache/solr/handler/component/SpellCheckComponent.java
===================================================================
--- solr/src/java/org/apache/solr/handler/component/SpellCheckComponent.java (revision 948225)
+++ solr/src/java/org/apache/solr/handler/component/SpellCheckComponent.java (working copy)
@@ -337,10 +337,7 @@
// create token
SpellCheckResponse.Suggestion suggestion = origVsSuggestion.get(original);
- Token token = new Token();
- token.setTermBuffer(original);
- token.setStartOffset(suggestion.getStartOffset());
- token.setEndOffset(suggestion.getEndOffset());
+ Token token = new Token(original, suggestion.getStartOffset(), suggestion.getEndOffset());
// get top 'count' suggestions out of 'sugQueue.size()' candidates
SuggestWord[] suggestions = new SuggestWord[Math.min(count, sugQueue.size())];
@@ -382,7 +379,7 @@
while (ts.incrementToken()){
Token token = new Token();
- token.setTermBuffer(termAtt.buffer(), 0, termAtt.length());
+ token.copyBuffer(termAtt.buffer(), 0, termAtt.length());
token.setOffset(offsetAtt.startOffset(), offsetAtt.endOffset());
token.setType(typeAtt.type());
token.setFlags(flagsAtt.getFlags());
@@ -461,7 +458,7 @@
if (hasFreqInfo) {
isCorrectlySpelled = isCorrectlySpelled && spellingResult.getTokenFrequency(inputToken) > 0;
}
- result.add(new String(inputToken.termBuffer(), 0, inputToken.termLength()), suggestionList);
+ result.add(new String(inputToken.buffer(), 0, inputToken.length()), suggestionList);
}
}
if (hasFreqInfo) {
Index: solr/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java
===================================================================
--- solr/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java (revision 948225)
+++ solr/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java (working copy)
@@ -163,12 +163,12 @@
while (tokenStream.incrementToken()) {
Token token = new Token();
if (termAtt != null) {
- token.setTermBuffer(termAtt.toString());
+ token.setEmpty().append(termAtt);
}
if (bytesAtt != null) {
bytesAtt.toBytesRef(bytes);
// TODO: This is incorrect when numeric fields change in later lucene versions. It should use BytesRef directly!
- token.setTermBuffer(bytes.utf8ToString());
+ token.setEmpty().append(bytes.utf8ToString());
}
token.setOffset(offsetAtt.startOffset(), offsetAtt.endOffset());
token.setType(typeAtt.type());
@@ -208,10 +208,10 @@
     for (Token token : tokens) {
       NamedList
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/bg/BulgarianStemFilter.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/bg/BulgarianStemFilter.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/bg/BulgarianStemFilter.java (working copy)
 */
public final class BulgarianStemFilter extends TokenFilter {
- private final BulgarianStemmer stemmer;
- private final TermAttribute termAtt;
- private final KeywordAttribute keywordAttr;
+ private final BulgarianStemmer stemmer = new BulgarianStemmer();
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+ private final KeywordAttribute keywordAttr = addAttribute(KeywordAttribute.class);
public BulgarianStemFilter(final TokenStream input) {
super(input);
- stemmer = new BulgarianStemmer();
- termAtt = addAttribute(TermAttribute.class);
- keywordAttr = addAttribute(KeywordAttribute.class);
}
@Override
public boolean incrementToken() throws IOException {
if (input.incrementToken()) {
if(!keywordAttr.isKeyword()) {
- final int newlen = stemmer.stem(termAtt.termBuffer(), termAtt.termLength());
- termAtt.setTermLength(newlen);
+ final int newlen = stemmer.stem(termAtt.buffer(), termAtt.length());
+ termAtt.setLength(newlen);
}
return true;
} else {
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/de/GermanStemFilter.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/de/GermanStemFilter.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/de/GermanStemFilter.java (working copy)
@@ -24,7 +24,7 @@
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.KeywordAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
/**
* A {@link TokenFilter} that stems German words.
@@ -45,11 +45,11 @@
/**
* The actual token in the input stream.
*/
- private GermanStemmer stemmer = null;
+ private GermanStemmer stemmer = new GermanStemmer();
private Set<?> exclusionSet = null;
- private final TermAttribute termAtt;
- private final KeywordAttribute keywordAttr;
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+ private final KeywordAttribute keywordAttr = addAttribute(KeywordAttribute.class);
/**
* Creates a {@link GermanStemFilter} instance
@@ -58,9 +58,6 @@
public GermanStemFilter( TokenStream in )
{
super(in);
- stemmer = new GermanStemmer();
- termAtt = addAttribute(TermAttribute.class);
- keywordAttr = addAttribute(KeywordAttribute.class);
}
/**
@@ -80,13 +77,13 @@
@Override
public boolean incrementToken() throws IOException {
if (input.incrementToken()) {
- String term = termAtt.term();
+ String term = termAtt.toString();
// Check the exclusion table.
if (!keywordAttr.isKeyword() && (exclusionSet == null || !exclusionSet.contains(term))) {
String s = stemmer.stem(term);
// If not stemmed, don't waste the time adjusting the token.
if ((s != null) && !s.equals(term))
- termAtt.setTermBuffer(s);
+ termAtt.setEmpty().append(s);
}
return true;
} else {
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/hi/HindiNormalizationFilter.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/hi/HindiNormalizationFilter.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/hi/HindiNormalizationFilter.java (working copy)
@@ -23,7 +23,7 @@
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.KeywordAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
/**
* A {@link TokenFilter} that applies {@link HindiNormalizer} to normalize the
@@ -39,7 +39,7 @@
public final class HindiNormalizationFilter extends TokenFilter {
private final HindiNormalizer normalizer = new HindiNormalizer();
- private final TermAttribute termAtt = addAttribute(TermAttribute.class);
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
private final KeywordAttribute keywordAtt = addAttribute(KeywordAttribute.class);
public HindiNormalizationFilter(TokenStream input) {
@@ -50,8 +50,8 @@
public boolean incrementToken() throws IOException {
if (input.incrementToken()) {
if (!keywordAtt.isKeyword())
- termAtt.setTermLength(normalizer.normalize(termAtt.termBuffer(),
- termAtt.termLength()));
+ termAtt.setLength(normalizer.normalize(termAtt.buffer(),
+ termAtt.length()));
return true;
}
return false;
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/hi/HindiStemFilter.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/hi/HindiStemFilter.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/hi/HindiStemFilter.java (working copy)
@@ -22,13 +22,13 @@
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.KeywordAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
/**
* A {@link TokenFilter} that applies {@link HindiStemmer} to stem Hindi words.
*/
public final class HindiStemFilter extends TokenFilter {
- private final TermAttribute termAtt = addAttribute(TermAttribute.class);
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
private final KeywordAttribute keywordAtt = addAttribute(KeywordAttribute.class);
private final HindiStemmer stemmer = new HindiStemmer();
@@ -40,7 +40,7 @@
public boolean incrementToken() throws IOException {
if (input.incrementToken()) {
if (!keywordAtt.isKeyword())
- termAtt.setTermLength(stemmer.stem(termAtt.termBuffer(), termAtt.termLength()));
+ termAtt.setLength(stemmer.stem(termAtt.buffer(), termAtt.length()));
return true;
} else {
return false;
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/SingleTokenTokenStream.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/SingleTokenTokenStream.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/SingleTokenTokenStream.java (working copy)
@@ -22,7 +22,7 @@
import org.apache.lucene.util.AttributeImpl;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
/**
* A {@link TokenStream} containing a single token.
@@ -41,7 +41,7 @@
assert token != null;
this.singleToken = (Token) token.clone();
- tokenAtt = (AttributeImpl) addAttribute(TermAttribute.class);
+ tokenAtt = (AttributeImpl) addAttribute(CharTermAttribute.class);
assert (tokenAtt instanceof Token);
}
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/StemmerOverrideFilter.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/StemmerOverrideFilter.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/StemmerOverrideFilter.java (working copy)
@@ -23,7 +23,7 @@
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.KeywordAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.util.CharArrayMap;
import org.apache.lucene.util.Version;
@@ -34,7 +34,7 @@
public final class StemmerOverrideFilter extends TokenFilter {
private final CharArrayMap<String> dictionary;
- private final TermAttribute termAtt = addAttribute(TermAttribute.class);
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
private final KeywordAttribute keywordAtt = addAttribute(KeywordAttribute.class);
/**
@@ -56,9 +56,9 @@
public boolean incrementToken() throws IOException {
if (input.incrementToken()) {
if (!keywordAtt.isKeyword()) { // don't muck with already-keyworded terms
- String stem = dictionary.get(termAtt.termBuffer(), 0, termAtt.termLength());
+ String stem = dictionary.get(termAtt.buffer(), 0, termAtt.length());
if (stem != null) {
- termAtt.setTermBuffer(stem);
+ termAtt.setEmpty().append(stem);
keywordAtt.setKeyword(true);
}
}
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.java (working copy)
@@ -30,8 +30,8 @@
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.StopAnalyzer;
import org.apache.lucene.analysis.core.StopFilter;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.util.Version;
@@ -332,8 +332,8 @@
private Matcher matcher;
private int pos = 0;
private static final Locale locale = Locale.getDefault();
- private TermAttribute termAtt = addAttribute(TermAttribute.class);
- private OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+ private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
public PatternTokenizer(String str, Pattern pattern, boolean toLowerCase) {
this.str = str;
@@ -360,7 +360,7 @@
if (start != end) { // non-empty match (header/trailer)
String text = str.substring(start, end);
if (toLowerCase) text = text.toLowerCase(locale);
- termAtt.setTermBuffer(text);
+ termAtt.setEmpty().append(text);
offsetAtt.setOffset(start, end);
return true;
}
@@ -392,8 +392,8 @@
private final boolean toLowerCase;
private final Set<?> stopWords;
private static final Locale locale = Locale.getDefault();
- private TermAttribute termAtt = addAttribute(TermAttribute.class);
- private OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+ private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
public FastStringTokenizer(String str, boolean isLetter, boolean toLowerCase, Set<?> stopWords) {
this.str = str;
@@ -446,7 +446,7 @@
{
return false;
}
- termAtt.setTermBuffer(text);
+ termAtt.setEmpty().append(text);
offsetAtt.setOffset(start, i);
return true;
}
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PrefixAwareTokenFilter.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PrefixAwareTokenFilter.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/miscellaneous/PrefixAwareTokenFilter.java (working copy)
@@ -23,7 +23,7 @@
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.index.Payload;
@@ -44,14 +44,14 @@
private TokenStream prefix;
private TokenStream suffix;
- private TermAttribute termAtt;
+ private CharTermAttribute termAtt;
private PositionIncrementAttribute posIncrAtt;
private PayloadAttribute payloadAtt;
private OffsetAttribute offsetAtt;
private TypeAttribute typeAtt;
private FlagsAttribute flagsAtt;
- private TermAttribute p_termAtt;
+ private CharTermAttribute p_termAtt;
private PositionIncrementAttribute p_posIncrAtt;
private PayloadAttribute p_payloadAtt;
private OffsetAttribute p_offsetAtt;
@@ -64,14 +64,14 @@
this.prefix = prefix;
prefixExhausted = false;
- termAtt = addAttribute(TermAttribute.class);
+ termAtt = addAttribute(CharTermAttribute.class);
posIncrAtt = addAttribute(PositionIncrementAttribute.class);
payloadAtt = addAttribute(PayloadAttribute.class);
offsetAtt = addAttribute(OffsetAttribute.class);
typeAtt = addAttribute(TypeAttribute.class);
flagsAtt = addAttribute(FlagsAttribute.class);
- p_termAtt = prefix.addAttribute(TermAttribute.class);
+ p_termAtt = prefix.addAttribute(CharTermAttribute.class);
p_posIncrAtt = prefix.addAttribute(PositionIncrementAttribute.class);
p_payloadAtt = prefix.addAttribute(PayloadAttribute.class);
p_offsetAtt = prefix.addAttribute(OffsetAttribute.class);
@@ -115,7 +115,7 @@
private void setCurrentToken(Token token) {
if (token == null) return;
clearAttributes();
- termAtt.setTermBuffer(token.termBuffer(), 0, token.termLength());
+ termAtt.copyBuffer(token.buffer(), 0, token.length());
posIncrAtt.setPositionIncrement(token.getPositionIncrement());
flagsAtt.setFlags(token.getFlags());
offsetAtt.setOffset(token.startOffset(), token.endOffset());
@@ -125,7 +125,7 @@
private Token getNextPrefixInputToken(Token token) throws IOException {
if (!prefix.incrementToken()) return null;
- token.setTermBuffer(p_termAtt.termBuffer(), 0, p_termAtt.termLength());
+ token.copyBuffer(p_termAtt.buffer(), 0, p_termAtt.length());
token.setPositionIncrement(p_posIncrAtt.getPositionIncrement());
token.setFlags(p_flagsAtt.getFlags());
token.setOffset(p_offsetAtt.startOffset(), p_offsetAtt.endOffset());
@@ -136,7 +136,7 @@
private Token getNextSuffixInputToken(Token token) throws IOException {
if (!suffix.incrementToken()) return null;
- token.setTermBuffer(termAtt.termBuffer(), 0, termAtt.termLength());
+ token.copyBuffer(termAtt.buffer(), 0, termAtt.length());
token.setPositionIncrement(posIncrAtt.getPositionIncrement());
token.setFlags(flagsAtt.getFlags());
token.setOffset(offsetAtt.startOffset(), offsetAtt.endOffset());
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/br/BrazilianStemFilter.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/br/BrazilianStemFilter.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/br/BrazilianStemFilter.java (working copy)
@@ -24,7 +24,7 @@
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.KeywordAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
/**
* A {@link TokenFilter} that applies {@link BrazilianStemmer}.
@@ -41,10 +41,10 @@
/**
* {@link BrazilianStemmer} in use by this filter.
*/
- private BrazilianStemmer stemmer = null;
+ private BrazilianStemmer stemmer = new BrazilianStemmer();
private Set<?> exclusions = null;
- private final TermAttribute termAtt;
- private final KeywordAttribute keywordAttr;
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+ private final KeywordAttribute keywordAttr = addAttribute(KeywordAttribute.class);
/**
* Creates a new BrazilianStemFilter
@@ -53,9 +53,6 @@
*/
public BrazilianStemFilter(TokenStream in) {
super(in);
- stemmer = new BrazilianStemmer();
- termAtt = addAttribute(TermAttribute.class);
- keywordAttr = addAttribute(KeywordAttribute.class);
}
/**
@@ -74,13 +71,13 @@
@Override
public boolean incrementToken() throws IOException {
if (input.incrementToken()) {
- final String term = termAtt.term();
+ final String term = termAtt.toString();
// Check the exclusion table.
if (!keywordAttr.isKeyword() && (exclusions == null || !exclusions.contains(term))) {
final String s = stemmer.stem(term);
// If not stemmed, don't waste the time adjusting the token.
if ((s != null) && !s.equals(term))
- termAtt.setTermBuffer(s);
+ termAtt.setEmpty().append(s);
}
return true;
} else {
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenizer.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenizer.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenizer.java (working copy)
@@ -18,8 +18,8 @@
*/
import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.AttributeSource;
import java.io.IOException;
@@ -39,8 +39,8 @@
private String inStr;
private boolean started = false;
- private TermAttribute termAtt;
- private OffsetAttribute offsetAtt;
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+ private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
/**
* Creates NGramTokenizer with given min and max n-grams.
@@ -94,9 +94,6 @@
}
this.minGram = minGram;
this.maxGram = maxGram;
-
- this.termAtt = addAttribute(TermAttribute.class);
- this.offsetAtt = addAttribute(OffsetAttribute.class);
}
/** Returns the next token in the stream, or null at EOS. */
@@ -123,7 +120,7 @@
int oldPos = pos;
pos++;
- termAtt.setTermBuffer(inStr, oldPos, gramSize);
+ termAtt.setEmpty().append(inStr, oldPos, oldPos+gramSize);
offsetAtt.setOffset(correctOffset(oldPos), correctOffset(oldPos+gramSize));
return true;
}
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java (working copy)
@@ -20,7 +20,7 @@
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import java.io.IOException;
@@ -72,8 +72,8 @@
private int curGramSize;
private int tokStart;
- private final TermAttribute termAtt;
- private final OffsetAttribute offsetAtt;
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+ private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
/**
* Creates EdgeNGramTokenFilter that can generate n-grams in the sizes of the given range
@@ -101,8 +101,6 @@
this.minGram = minGram;
this.maxGram = maxGram;
this.side = side;
- this.termAtt = addAttribute(TermAttribute.class);
- this.offsetAtt = addAttribute(OffsetAttribute.class);
}
/**
@@ -124,8 +122,8 @@
if (!input.incrementToken()) {
return false;
} else {
- curTermBuffer = termAtt.termBuffer().clone();
- curTermLength = termAtt.termLength();
+ curTermBuffer = termAtt.buffer().clone();
+ curTermLength = termAtt.length();
curGramSize = minGram;
tokStart = offsetAtt.startOffset();
}
@@ -138,7 +136,7 @@
int end = start + curGramSize;
clearAttributes();
offsetAtt.setOffset(tokStart + start, tokStart + end);
- termAtt.setTermBuffer(curTermBuffer, start, curGramSize);
+ termAtt.copyBuffer(curTermBuffer, start, curGramSize);
curGramSize++;
return true;
}
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenizer.java (working copy)
@@ -18,8 +18,8 @@
*/
import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.AttributeSource;
import java.io.IOException;
@@ -37,8 +37,8 @@
public static final int DEFAULT_MAX_GRAM_SIZE = 1;
public static final int DEFAULT_MIN_GRAM_SIZE = 1;
- private TermAttribute termAtt;
- private OffsetAttribute offsetAtt;
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+ private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
/** Specifies which side of the input the n-gram should be generated from */
public static enum Side {
@@ -173,10 +173,6 @@
this.minGram = minGram;
this.maxGram = maxGram;
this.side = side;
-
- this.termAtt = addAttribute(TermAttribute.class);
- this.offsetAtt = addAttribute(OffsetAttribute.class);
-
}
/** Returns the next token in the stream, or null at EOS. */
@@ -206,7 +202,7 @@
// grab gramSize chars from front or back
int start = side == Side.FRONT ? 0 : inLen - gramSize;
int end = start + gramSize;
- termAtt.setTermBuffer(inStr, start, gramSize);
+ termAtt.setEmpty().append(inStr, start, end);
offsetAtt.setOffset(correctOffset(start), correctOffset(end));
gramSize++;
return true;
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenFilter.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenFilter.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenFilter.java (working copy)
@@ -22,7 +22,7 @@
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
/**
* Tokenizes the input into n-grams of the given size(s).
@@ -39,8 +39,8 @@
private int curPos;
private int tokStart;
- private TermAttribute termAtt;
- private OffsetAttribute offsetAtt;
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+ private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
/**
* Creates NGramTokenFilter with given min and max n-grams.
@@ -58,9 +58,6 @@
}
this.minGram = minGram;
this.maxGram = maxGram;
-
- this.termAtt = addAttribute(TermAttribute.class);
- this.offsetAtt = addAttribute(OffsetAttribute.class);
}
/**
@@ -79,8 +76,8 @@
if (!input.incrementToken()) {
return false;
} else {
- curTermBuffer = termAtt.termBuffer().clone();
- curTermLength = termAtt.termLength();
+ curTermBuffer = termAtt.buffer().clone();
+ curTermLength = termAtt.length();
curGramSize = minGram;
curPos = 0;
tokStart = offsetAtt.startOffset();
@@ -89,7 +86,7 @@
while (curGramSize <= maxGram) {
while (curPos+curGramSize <= curTermLength) { // while there is input
clearAttributes();
- termAtt.setTermBuffer(curTermBuffer, curPos, curGramSize);
+ termAtt.copyBuffer(curTermBuffer, curPos, curGramSize);
offsetAtt.setOffset(tokStart + curPos, tokStart + curPos + curGramSize);
curPos++;
return true;
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchStemFilter.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchStemFilter.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/fr/FrenchStemFilter.java (working copy)
@@ -22,7 +22,7 @@
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.tokenattributes.KeywordAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import java.io.IOException;
import java.util.HashSet;
@@ -51,17 +51,14 @@
/**
* The actual token in the input stream.
*/
- private FrenchStemmer stemmer = null;
+ private FrenchStemmer stemmer = new FrenchStemmer();
private Set<?> exclusions = null;
- private final TermAttribute termAtt;
- private final KeywordAttribute keywordAttr;
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+ private final KeywordAttribute keywordAttr = addAttribute(KeywordAttribute.class);
public FrenchStemFilter( TokenStream in ) {
- super(in);
- stemmer = new FrenchStemmer();
- termAtt = addAttribute(TermAttribute.class);
- keywordAttr = addAttribute(KeywordAttribute.class);
+ super(in);
}
/**
@@ -82,14 +79,14 @@
@Override
public boolean incrementToken() throws IOException {
if (input.incrementToken()) {
- String term = termAtt.term();
+ String term = termAtt.toString();
// Check the exclusion table
if ( !keywordAttr.isKeyword() && (exclusions == null || !exclusions.contains( term )) ) {
String s = stemmer.stem( term );
// If not stemmed, don't waste the time adjusting the token.
if ((s != null) && !s.equals( term ) )
- termAtt.setTermBuffer(s);
+ termAtt.setEmpty().append(s);
}
return true;
} else {
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/fr/ElisionFilter.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/fr/ElisionFilter.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/fr/ElisionFilter.java (working copy)
@@ -23,7 +23,7 @@
import org.apache.lucene.analysis.standard.StandardTokenizer; // for javadocs
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.TokenFilter;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.util.Version;
@@ -37,7 +37,7 @@
*/
public final class ElisionFilter extends TokenFilter {
private CharArraySet articles = CharArraySet.EMPTY_SET;
- private final TermAttribute termAtt;
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
private static final CharArraySet DEFAULT_ARTICLES = CharArraySet.unmodifiableSet(
new CharArraySet(Version.LUCENE_CURRENT, Arrays.asList(
"l", "m", "t", "qu", "n", "s", "j"), true));
@@ -100,7 +100,6 @@
super(input);
this.articles = CharArraySet.unmodifiableSet(
new CharArraySet(matchVersion, articles, true));
- termAtt = addAttribute(TermAttribute.class);
}
/**
@@ -115,13 +114,13 @@
}
/**
- * Increments the {@link TokenStream} with a {@link TermAttribute} without elisioned start
+ * Increments the {@link TokenStream} with a {@link CharTermAttribute} without elisioned start
*/
@Override
public final boolean incrementToken() throws IOException {
if (input.incrementToken()) {
- char[] termBuffer = termAtt.termBuffer();
- int termLength = termAtt.termLength();
+ char[] termBuffer = termAtt.buffer();
+ int termLength = termAtt.length();
int minPoz = Integer.MAX_VALUE;
for (int i = 0; i < apostrophes.length; i++) {
@@ -137,8 +136,8 @@
// An apostrophe has been found. If the prefix is an article strip it off.
if (minPoz != Integer.MAX_VALUE
- && articles.contains(termAtt.termBuffer(), 0, minPoz)) {
- termAtt.setTermBuffer(termAtt.termBuffer(), minPoz + 1, termAtt.termLength() - (minPoz + 1));
+ && articles.contains(termAtt.buffer(), 0, minPoz)) {
+ termAtt.copyBuffer(termAtt.buffer(), minPoz + 1, termAtt.length() - (minPoz + 1));
}
return true;
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/nl/DutchStemFilter.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/nl/DutchStemFilter.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/nl/DutchStemFilter.java (working copy)
@@ -28,7 +28,7 @@
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.tokenattributes.KeywordAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
/**
* A {@link TokenFilter} that stems Dutch words.
@@ -52,17 +52,14 @@
/**
* The actual token in the input stream.
*/
- private DutchStemmer stemmer = null;
+ private DutchStemmer stemmer = new DutchStemmer();
private Set<?> exclusions = null;
- private final TermAttribute termAtt;
- private final KeywordAttribute keywordAttr;
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+ private final KeywordAttribute keywordAttr = addAttribute(KeywordAttribute.class);
public DutchStemFilter(TokenStream _in) {
super(_in);
- stemmer = new DutchStemmer();
- termAtt = addAttribute(TermAttribute.class);
- keywordAttr = addAttribute(KeywordAttribute.class);
}
/**
@@ -99,14 +96,14 @@
@Override
public boolean incrementToken() throws IOException {
if (input.incrementToken()) {
- final String term = termAtt.term();
+ final String term = termAtt.toString();
// Check the exclusion table.
if (!keywordAttr.isKeyword() && (exclusions == null || !exclusions.contains(term))) {
final String s = stemmer.stem(term);
// If not stemmed, don't waste the time adjusting the token.
if ((s != null) && !s.equals(term))
- termAtt.setTermBuffer(s);
+ termAtt.setEmpty().append(s);
}
return true;
} else {
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/reverse/ReverseStringFilter.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/reverse/ReverseStringFilter.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/reverse/ReverseStringFilter.java (working copy)
@@ -19,7 +19,7 @@
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.util.Version;
import java.io.IOException;
@@ -42,7 +42,7 @@
*/
public final class ReverseStringFilter extends TokenFilter {
- private TermAttribute termAtt;
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
private final char marker;
private final Version matchVersion;
private static final char NOMARKER = '\uFFFF';
@@ -131,20 +131,19 @@
super(in);
this.matchVersion = matchVersion;
this.marker = marker;
- termAtt = addAttribute(TermAttribute.class);
}
@Override
public boolean incrementToken() throws IOException {
if (input.incrementToken()) {
- int len = termAtt.termLength();
+ int len = termAtt.length();
if (marker != NOMARKER) {
len++;
- termAtt.resizeTermBuffer(len);
- termAtt.termBuffer()[len - 1] = marker;
+ termAtt.resizeBuffer(len);
+ termAtt.buffer()[len - 1] = marker;
}
- reverse( matchVersion, termAtt.termBuffer(), 0, len );
- termAtt.setTermLength(len);
+ reverse( matchVersion, termAtt.buffer(), 0, len );
+ termAtt.setLength(len);
return true;
} else {
return false;
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/payloads/DelimitedPayloadTokenFilter.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/payloads/DelimitedPayloadTokenFilter.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/payloads/DelimitedPayloadTokenFilter.java (working copy)
@@ -21,7 +21,7 @@
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
/**
@@ -39,15 +39,13 @@
public final class DelimitedPayloadTokenFilter extends TokenFilter {
public static final char DEFAULT_DELIMITER = '|';
private final char delimiter;
- private final TermAttribute termAtt;
- private final PayloadAttribute payAtt;
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+ private final PayloadAttribute payAtt = addAttribute(PayloadAttribute.class);
private final PayloadEncoder encoder;
public DelimitedPayloadTokenFilter(TokenStream input, char delimiter, PayloadEncoder encoder) {
super(input);
- termAtt = addAttribute(TermAttribute.class);
- payAtt = addAttribute(PayloadAttribute.class);
this.delimiter = delimiter;
this.encoder = encoder;
}
@@ -55,12 +53,12 @@
@Override
public boolean incrementToken() throws IOException {
if (input.incrementToken()) {
- final char[] buffer = termAtt.termBuffer();
- final int length = termAtt.termLength();
+ final char[] buffer = termAtt.buffer();
+ final int length = termAtt.length();
for (int i = 0; i < length; i++) {
if (buffer[i] == delimiter) {
payAtt.setPayload(encoder.encode(buffer, i + 1, (length - (i + 1))));
- termAtt.setTermLength(i); // simply set a new length
+ termAtt.setLength(i); // simply set a new length
return true;
}
}
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilter.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilter.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilter.java (working copy)
@@ -35,16 +35,14 @@
private String typeMatch;
private Payload thePayload;
- private PayloadAttribute payloadAtt;
- private TypeAttribute typeAtt;
+ private final PayloadAttribute payloadAtt = addAttribute(PayloadAttribute.class);
+ private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
public NumericPayloadTokenFilter(TokenStream input, float payload, String typeMatch) {
super(input);
//Need to encode the payload
thePayload = new Payload(PayloadHelper.encodeFloat(payload));
this.typeMatch = typeMatch;
- payloadAtt = addAttribute(PayloadAttribute.class);
- typeAtt = addAttribute(TypeAttribute.class);
}
@Override
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilter.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilter.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilter.java (working copy)
@@ -33,13 +33,11 @@
*
**/
public class TypeAsPayloadTokenFilter extends TokenFilter {
- private PayloadAttribute payloadAtt;
- private TypeAttribute typeAtt;
+ private final PayloadAttribute payloadAtt = addAttribute(PayloadAttribute.class);
+ private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
public TypeAsPayloadTokenFilter(TokenStream input) {
super(input);
- payloadAtt = addAttribute(PayloadAttribute.class);
- typeAtt = addAttribute(TypeAttribute.class);
}
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/payloads/TokenOffsetPayloadTokenFilter.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/payloads/TokenOffsetPayloadTokenFilter.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/payloads/TokenOffsetPayloadTokenFilter.java (working copy)
@@ -33,13 +33,11 @@
*
**/
public class TokenOffsetPayloadTokenFilter extends TokenFilter {
- protected OffsetAttribute offsetAtt;
- protected PayloadAttribute payAtt;
+ private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
+ private final PayloadAttribute payAtt = addAttribute(PayloadAttribute.class);
public TokenOffsetPayloadTokenFilter(TokenStream input) {
super(input);
- offsetAtt = addAttribute(OffsetAttribute.class);
- payAtt = addAttribute(PayloadAttribute.class);
}
@Override
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/snowball/SnowballFilter.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/snowball/SnowballFilter.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/snowball/SnowballFilter.java (working copy)
@@ -23,7 +23,7 @@
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.tokenattributes.KeywordAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tr.TurkishLowerCaseFilter; // javadoc @link
import org.tartarus.snowball.SnowballProgram;
@@ -42,7 +42,7 @@
private final SnowballProgram stemmer;
- private final TermAttribute termAtt = addAttribute(TermAttribute.class);
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
private final KeywordAttribute keywordAttr = addAttribute(KeywordAttribute.class);
public SnowballFilter(TokenStream input, SnowballProgram stemmer) {
@@ -75,16 +75,16 @@
public final boolean incrementToken() throws IOException {
if (input.incrementToken()) {
if (!keywordAttr.isKeyword()) {
- char termBuffer[] = termAtt.termBuffer();
- final int length = termAtt.termLength();
+ char termBuffer[] = termAtt.buffer();
+ final int length = termAtt.length();
stemmer.setCurrent(termBuffer, length);
stemmer.stem();
final char finalTerm[] = stemmer.getCurrentBuffer();
final int newLength = stemmer.getCurrentBufferLength();
if (finalTerm != termBuffer)
- termAtt.setTermBuffer(finalTerm, 0, newLength);
+ termAtt.copyBuffer(finalTerm, 0, newLength);
else
- termAtt.setTermLength(newLength);
+ termAtt.setLength(newLength);
}
return true;
} else {
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/tr/TurkishLowerCaseFilter.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/tr/TurkishLowerCaseFilter.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/tr/TurkishLowerCaseFilter.java (working copy)
@@ -21,7 +21,7 @@
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
/**
* Normalizes Turkish token text to lower case.
@@ -37,7 +37,7 @@
private static final int LATIN_SMALL_LETTER_I = '\u0069';
private static final int LATIN_SMALL_LETTER_DOTLESS_I = '\u0131';
private static final int COMBINING_DOT_ABOVE = '\u0307';
- private final TermAttribute termAtt;
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
/**
* Create a new TurkishLowerCaseFilter, that normalizes Turkish token text
@@ -47,7 +47,6 @@
*/
public TurkishLowerCaseFilter(TokenStream in) {
super(in);
- termAtt = addAttribute(TermAttribute.class);
}
@Override
@@ -55,8 +54,8 @@
boolean iOrAfter = false;
if (input.incrementToken()) {
- final char[] buffer = termAtt.termBuffer();
- int length = termAtt.termLength();
+ final char[] buffer = termAtt.buffer();
+ int length = termAtt.length();
for (int i = 0; i < length;) {
final int ch = Character.codePointAt(buffer, i);
@@ -88,7 +87,7 @@
i += Character.toChars(Character.toLowerCase(ch), buffer, i);
}
- termAtt.setTermLength(length);
+ termAtt.setLength(length);
return true;
} else
return false;
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/ru/RussianLowerCaseFilter.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/ru/RussianLowerCaseFilter.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/ru/RussianLowerCaseFilter.java (working copy)
@@ -22,7 +22,7 @@
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.LowerCaseFilter;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
/**
* Normalizes token text to lower case.
@@ -32,20 +32,19 @@
@Deprecated
public final class RussianLowerCaseFilter extends TokenFilter
{
- private TermAttribute termAtt;
+ private CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
public RussianLowerCaseFilter(TokenStream in)
{
super(in);
- termAtt = addAttribute(TermAttribute.class);
}
@Override
public final boolean incrementToken() throws IOException
{
if (input.incrementToken()) {
- char[] chArray = termAtt.termBuffer();
- int chLen = termAtt.termLength();
+ char[] chArray = termAtt.buffer();
+ int chLen = termAtt.length();
for (int i = 0; i < chLen; i++)
{
chArray[i] = Character.toLowerCase(chArray[i]);
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/ru/RussianStemFilter.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/ru/RussianStemFilter.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/ru/RussianStemFilter.java (working copy)
@@ -22,7 +22,7 @@
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.KeywordAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.ru.RussianStemmer;//javadoc @link
import org.apache.lucene.analysis.snowball.SnowballFilter; // javadoc @link
@@ -51,17 +51,14 @@
/**
* The actual token in the input stream.
*/
- private RussianStemmer stemmer = null;
+ private RussianStemmer stemmer = new RussianStemmer();
- private final TermAttribute termAtt;
- private final KeywordAttribute keywordAttr;
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+ private final KeywordAttribute keywordAttr = addAttribute(KeywordAttribute.class);
public RussianStemFilter(TokenStream in)
{
super(in);
- stemmer = new RussianStemmer();
- termAtt = addAttribute(TermAttribute.class);
- keywordAttr = addAttribute(KeywordAttribute.class);
}
/**
* Returns the next token in the stream, or null at EOS
@@ -71,10 +68,10 @@
{
if (input.incrementToken()) {
if(!keywordAttr.isKeyword()) {
- final String term = termAtt.term();
+ final String term = termAtt.toString();
final String s = stemmer.stem(term);
if (s != null && !s.equals(term))
- termAtt.setTermBuffer(s);
+ termAtt.setEmpty().append(s);
}
return true;
} else {
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/sinks/DateRecognizerSinkFilter.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/sinks/DateRecognizerSinkFilter.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/sinks/DateRecognizerSinkFilter.java (working copy)
@@ -21,7 +21,7 @@
import java.text.ParseException;
import java.util.Date;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.util.AttributeSource;
/**
@@ -34,7 +34,7 @@
public static final String DATE_TYPE = "date";
protected DateFormat dateFormat;
- protected TermAttribute termAtt;
+ protected CharTermAttribute termAtt;
/**
* Uses {@link java.text.SimpleDateFormat#getDateInstance()} as the {@link java.text.DateFormat} object.
@@ -50,10 +50,10 @@
@Override
public boolean accept(AttributeSource source) {
if (termAtt == null) {
- termAtt = source.addAttribute(TermAttribute.class);
+ termAtt = source.addAttribute(CharTermAttribute.class);
}
try {
- Date date = dateFormat.parse(termAtt.term());//We don't care about the date, just that we can parse it as a date
+ Date date = dateFormat.parse(termAtt.toString());//We don't care about the date, just that we can parse it as a date
if (date != null) {
return true;
}
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilter.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilter.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilter.java (working copy)
@@ -137,10 +137,10 @@
*/
private boolean isOutputHere = false;
- private final CharTermAttribute termAtt;
- private final OffsetAttribute offsetAtt;
- private final PositionIncrementAttribute posIncrAtt;
- private final TypeAttribute typeAtt;
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+ private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
+ private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
+ private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
/**
@@ -155,10 +155,6 @@
super(input);
setMaxShingleSize(maxShingleSize);
setMinShingleSize(minShingleSize);
- this.termAtt = addAttribute(CharTermAttribute.class);
- this.offsetAtt = addAttribute(OffsetAttribute.class);
- this.posIncrAtt = addAttribute(PositionIncrementAttribute.class);
- this.typeAtt = addAttribute(TypeAttribute.class);
}
/**
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleMatrixFilter.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleMatrixFilter.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/shingle/ShingleMatrixFilter.java (working copy)
@@ -31,11 +31,11 @@
import org.apache.lucene.analysis.miscellaneous.EmptyTokenStream;
import org.apache.lucene.analysis.payloads.PayloadHelper;
import org.apache.lucene.analysis.shingle.ShingleMatrixFilter.Matrix.Column.Row;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.index.Payload;
@@ -193,14 +193,14 @@
private TokenStream input;
- private TermAttribute termAtt;
+ private CharTermAttribute termAtt;
private PositionIncrementAttribute posIncrAtt;
private PayloadAttribute payloadAtt;
private OffsetAttribute offsetAtt;
private TypeAttribute typeAtt;
private FlagsAttribute flagsAtt;
- private TermAttribute in_termAtt;
+ private CharTermAttribute in_termAtt;
private PositionIncrementAttribute in_posIncrAtt;
private PayloadAttribute in_payloadAtt;
private OffsetAttribute in_offsetAtt;
@@ -229,7 +229,7 @@
this.ignoringSinglePrefixOrSuffixShingle = ignoringSinglePrefixOrSuffixShingle;
this.settingsCodec = settingsCodec;
- termAtt = addAttribute(TermAttribute.class);
+ termAtt = addAttribute(CharTermAttribute.class);
posIncrAtt = addAttribute(PositionIncrementAttribute.class);
payloadAtt = addAttribute(PayloadAttribute.class);
offsetAtt = addAttribute(OffsetAttribute.class);
@@ -239,7 +239,7 @@
// set the input to be an empty token stream, we already have the data.
this.input = new EmptyTokenStream();
- in_termAtt = input.addAttribute(TermAttribute.class);
+ in_termAtt = input.addAttribute(CharTermAttribute.class);
in_posIncrAtt = input.addAttribute(PositionIncrementAttribute.class);
in_payloadAtt = input.addAttribute(PayloadAttribute.class);
in_offsetAtt = input.addAttribute(OffsetAttribute.class);
@@ -311,14 +311,14 @@
this.spacerCharacter = spacerCharacter;
this.ignoringSinglePrefixOrSuffixShingle = ignoringSinglePrefixOrSuffixShingle;
this.settingsCodec = settingsCodec;
- termAtt = addAttribute(TermAttribute.class);
+ termAtt = addAttribute(CharTermAttribute.class);
posIncrAtt = addAttribute(PositionIncrementAttribute.class);
payloadAtt = addAttribute(PayloadAttribute.class);
offsetAtt = addAttribute(OffsetAttribute.class);
typeAtt = addAttribute(TypeAttribute.class);
flagsAtt = addAttribute(FlagsAttribute.class);
- in_termAtt = input.addAttribute(TermAttribute.class);
+ in_termAtt = input.addAttribute(CharTermAttribute.class);
in_posIncrAtt = input.addAttribute(PositionIncrementAttribute.class);
in_payloadAtt = input.addAttribute(PayloadAttribute.class);
in_offsetAtt = input.addAttribute(OffsetAttribute.class);
@@ -377,7 +377,7 @@
if (token == null) return false;
clearAttributes();
- termAtt.setTermBuffer(token.termBuffer(), 0, token.termLength());
+ termAtt.copyBuffer(token.buffer(), 0, token.length());
posIncrAtt.setPositionIncrement(token.getPositionIncrement());
flagsAtt.setFlags(token.getFlags());
offsetAtt.setOffset(token.startOffset(), token.endOffset());
@@ -388,7 +388,7 @@
private Token getNextInputToken(Token token) throws IOException {
if (!input.incrementToken()) return null;
- token.setTermBuffer(in_termAtt.termBuffer(), 0, in_termAtt.termLength());
+ token.copyBuffer(in_termAtt.buffer(), 0, in_termAtt.length());
token.setPositionIncrement(in_posIncrAtt.getPositionIncrement());
token.setFlags(in_flagsAtt.getFlags());
token.setOffset(in_offsetAtt.startOffset(), in_offsetAtt.endOffset());
@@ -399,7 +399,7 @@
private Token getNextToken(Token token) throws IOException {
if (!this.incrementToken()) return null;
- token.setTermBuffer(termAtt.termBuffer(), 0, termAtt.termLength());
+ token.copyBuffer(termAtt.buffer(), 0, termAtt.length());
token.setPositionIncrement(posIncrAtt.getPositionIncrement());
token.setFlags(flagsAtt.getFlags());
token.setOffset(offsetAtt.startOffset(), offsetAtt.endOffset());
@@ -441,7 +441,7 @@
for (int i = 0; i < currentShingleLength; i++) {
Token shingleToken = currentPermuationTokens.get(i + currentPermutationTokensStartOffset);
- termLength += shingleToken.termLength();
+ termLength += shingleToken.length();
shingle.add(shingleToken);
}
if (spacerCharacter != null) {
@@ -459,9 +459,9 @@
if (spacerCharacter != null && sb.length() > 0) {
sb.append(spacerCharacter);
}
- sb.append(shingleToken.termBuffer(), 0, shingleToken.termLength());
+ sb.append(shingleToken.buffer(), 0, shingleToken.length());
}
- reusableToken.setTermBuffer(sb.toString());
+ reusableToken.setEmpty().append(sb);
updateToken(reusableToken, shingle, currentPermutationTokensStartOffset, currentPermutationRows, currentPermuationTokens);
return reusableToken;
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/cn/ChineseFilter.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/cn/ChineseFilter.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/cn/ChineseFilter.java (working copy)
@@ -23,7 +23,7 @@
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.StopFilter;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.util.Version;
@@ -61,21 +61,20 @@
private CharArraySet stopTable;
- private TermAttribute termAtt;
+ private CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
public ChineseFilter(TokenStream in) {
super(in);
stopTable = new CharArraySet(Version.LUCENE_CURRENT, Arrays.asList(STOP_WORDS), false);
- termAtt = addAttribute(TermAttribute.class);
}
@Override
public boolean incrementToken() throws IOException {
while (input.incrementToken()) {
- char text[] = termAtt.termBuffer();
- int termLength = termAtt.termLength();
+ char text[] = termAtt.buffer();
+ int termLength = termAtt.length();
// why not key off token type here assuming ChineseTokenizer comes first?
if (!stopTable.contains(text, 0, termLength)) {
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/cn/ChineseTokenizer.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/cn/ChineseTokenizer.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/cn/ChineseTokenizer.java (working copy)
@@ -23,8 +23,8 @@
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.AttributeSource;
@@ -62,24 +62,16 @@
public ChineseTokenizer(Reader in) {
super(in);
- init();
}
public ChineseTokenizer(AttributeSource source, Reader in) {
super(source, in);
- init();
}
public ChineseTokenizer(AttributeFactory factory, Reader in) {
super(factory, in);
- init();
}
-
- private void init() {
- termAtt = addAttribute(TermAttribute.class);
- offsetAtt = addAttribute(OffsetAttribute.class);
- }
-
+
private int offset = 0, bufferIndex=0, dataLen=0;
private final static int MAX_WORD_LEN = 255;
private final static int IO_BUFFER_SIZE = 1024;
@@ -90,8 +82,8 @@
private int length;
private int start;
- private TermAttribute termAtt;
- private OffsetAttribute offsetAtt;
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+ private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
private final void push(char c) {
@@ -105,7 +97,7 @@
if (length>0) {
//System.out.println(new String(buffer, 0,
//length));
- termAtt.setTermBuffer(buffer, 0, length);
+ termAtt.copyBuffer(buffer, 0, length);
offsetAtt.setOffset(correctOffset(start), correctOffset(start+length));
return true;
}
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicStemFilter.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicStemFilter.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicStemFilter.java (working copy)
@@ -23,7 +23,7 @@
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.KeywordAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
/**
* A {@link TokenFilter} that applies {@link ArabicStemmer} to stem Arabic words..
@@ -35,24 +35,20 @@
* @see KeywordMarkerFilter */
public final class ArabicStemFilter extends TokenFilter {
-
- private final ArabicStemmer stemmer;
- private final TermAttribute termAtt;
- private final KeywordAttribute keywordAttr;
+ private final ArabicStemmer stemmer = new ArabicStemmer();
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+ private final KeywordAttribute keywordAttr = addAttribute(KeywordAttribute.class);
public ArabicStemFilter(TokenStream input) {
super(input);
- stemmer = new ArabicStemmer();
- termAtt = addAttribute(TermAttribute.class);
- keywordAttr = addAttribute(KeywordAttribute.class);
}
@Override
public boolean incrementToken() throws IOException {
if (input.incrementToken()) {
if(!keywordAttr.isKeyword()) {
- final int newlen = stemmer.stem(termAtt.termBuffer(), termAtt.termLength());
- termAtt.setTermLength(newlen);
+ final int newlen = stemmer.stem(termAtt.buffer(), termAtt.length());
+ termAtt.setLength(newlen);
}
return true;
} else {
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicNormalizationFilter.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicNormalizationFilter.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/ar/ArabicNormalizationFilter.java (working copy)
@@ -21,7 +21,7 @@
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
/**
* A {@link TokenFilter} that applies {@link ArabicNormalizer} to normalize the orthography.
@@ -29,21 +29,18 @@
*/
public final class ArabicNormalizationFilter extends TokenFilter {
-
- private final ArabicNormalizer normalizer;
- private final TermAttribute termAtt;
+ private final ArabicNormalizer normalizer = new ArabicNormalizer();
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
public ArabicNormalizationFilter(TokenStream input) {
super(input);
- normalizer = new ArabicNormalizer();
- termAtt = addAttribute(TermAttribute.class);
}
@Override
public boolean incrementToken() throws IOException {
if (input.incrementToken()) {
- int newlen = normalizer.normalize(termAtt.termBuffer(), termAtt.termLength());
- termAtt.setTermLength(newlen);
+ int newlen = normalizer.normalize(termAtt.buffer(), termAtt.length());
+ termAtt.setLength(newlen);
return true;
}
return false;
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/position/PositionFilter.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/position/PositionFilter.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/position/PositionFilter.java (working copy)
@@ -35,7 +35,7 @@
/** The first token must have non-zero positionIncrement **/
private boolean firstTokenPositioned = false;
- private PositionIncrementAttribute posIncrAtt;
+ private PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
/**
* Constructs a PositionFilter that assigns a position increment of zero to
@@ -45,7 +45,6 @@
*/
public PositionFilter(final TokenStream input) {
super(input);
- posIncrAtt = addAttribute(PositionIncrementAttribute.class);
}
/**
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/in/IndicNormalizationFilter.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/in/IndicNormalizationFilter.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/in/IndicNormalizationFilter.java (working copy)
@@ -21,14 +21,14 @@
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
/**
* A {@link TokenFilter} that applies {@link IndicNormalizer} to normalize text
* in Indian Languages.
*/
public final class IndicNormalizationFilter extends TokenFilter {
- private final TermAttribute termAtt = addAttribute(TermAttribute.class);
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
private final IndicNormalizer normalizer = new IndicNormalizer();
public IndicNormalizationFilter(TokenStream input) {
@@ -38,7 +38,7 @@
@Override
public boolean incrementToken() throws IOException {
if (input.incrementToken()) {
- termAtt.setTermLength(normalizer.normalize(termAtt.termBuffer(), termAtt.termLength()));
+ termAtt.setLength(normalizer.normalize(termAtt.buffer(), termAtt.length()));
return true;
} else {
return false;
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizer.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizer.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizer.java (working copy)
@@ -18,10 +18,10 @@
package org.apache.lucene.analysis.wikipedia;
import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.util.AttributeSource;
@@ -116,11 +116,11 @@
private Set untokenizedTypes = Collections.emptySet();
private Iterator tokens = null;
- private OffsetAttribute offsetAtt;
- private TypeAttribute typeAtt;
- private PositionIncrementAttribute posIncrAtt;
- private TermAttribute termAtt;
- private FlagsAttribute flagsAtt;
+ private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
+ private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
+ private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+ private final FlagsAttribute flagsAtt = addAttribute(FlagsAttribute.class);
/**
* Creates a new instance of the {@link WikipediaTokenizer}. Attaches the
@@ -176,12 +176,7 @@
private void init(int tokenOutput, Set untokenizedTypes) {
this.tokenOutput = tokenOutput;
- this.untokenizedTypes = untokenizedTypes;
- this.offsetAtt = addAttribute(OffsetAttribute.class);
- this.typeAtt = addAttribute(TypeAttribute.class);
- this.posIncrAtt = addAttribute(PositionIncrementAttribute.class);
- this.termAtt = addAttribute(TermAttribute.class);
- this.flagsAtt = addAttribute(FlagsAttribute.class);
+ this.untokenizedTypes = untokenizedTypes;
}
/*
@@ -245,8 +240,9 @@
lastPos = currPos + numAdded;
}
//trim the buffer
+ // TODO: this is inefficient
String s = buffer.toString().trim();
- termAtt.setTermBuffer(s.toCharArray(), 0, s.length());
+ termAtt.setEmpty().append(s);
offsetAtt.setOffset(correctOffset(theStart), correctOffset(theStart + s.length()));
flagsAtt.setFlags(UNTOKENIZED_TOKEN_FLAG);
//The way the loop is written, we will have proceeded to the next token. We need to pushback the scanner to lastPos
@@ -283,8 +279,9 @@
lastPos = currPos + numAdded;
}
//trim the buffer
+ // TODO: this is inefficient
String s = buffer.toString().trim();
- termAtt.setTermBuffer(s.toCharArray(), 0, s.length());
+ termAtt.setEmpty().append(s);
offsetAtt.setOffset(correctOffset(theStart), correctOffset(theStart + s.length()));
flagsAtt.setFlags(UNTOKENIZED_TOKEN_FLAG);
//The way the loop is written, we will have proceeded to the next token. We need to pushback the scanner to lastPos
@@ -298,7 +295,7 @@
private void setupToken() {
scanner.getText(termAtt);
final int start = scanner.yychar();
- offsetAtt.setOffset(correctOffset(start), correctOffset(start + termAtt.termLength()));
+ offsetAtt.setOffset(correctOffset(start), correctOffset(start + termAtt.length()));
}
/*
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java (working copy)
@@ -1,4 +1,4 @@
-/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 17.05.10 14:51 */
+/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 5/31/10 3:11 PM */
package org.apache.lucene.analysis.wikipedia;
@@ -19,14 +19,14 @@
* limitations under the License.
*/
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
/**
* This class is a scanner generated by
* JFlex 1.5.0-SNAPSHOT
- * on 17.05.10 14:51 from the specification file
- * C:/Users/Uwe Schindler/Projects/lucene/newtrunk/modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.jflex
+ * on 5/31/10 3:11 PM from the specification file
+ * C:/Users/rmuir/workspace/solrcene/modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.jflex
*/
class WikipediaTokenizerImpl {
@@ -37,16 +37,16 @@
private static final int ZZ_BUFFERSIZE = 16384;
/** lexical states */
- public static final int CATEGORY_STATE = 2;
+ public static final int THREE_SINGLE_QUOTES_STATE = 10;
+ public static final int EXTERNAL_LINK_STATE = 6;
public static final int DOUBLE_EQUALS_STATE = 14;
- public static final int EXTERNAL_LINK_STATE = 6;
public static final int INTERNAL_LINK_STATE = 4;
public static final int DOUBLE_BRACE_STATE = 16;
+ public static final int CATEGORY_STATE = 2;
+ public static final int YYINITIAL = 0;
+ public static final int STRING = 18;
public static final int FIVE_SINGLE_QUOTES_STATE = 12;
- public static final int STRING = 18;
public static final int TWO_SINGLE_QUOTES_STATE = 8;
- public static final int YYINITIAL = 0;
- public static final int THREE_SINGLE_QUOTES_STATE = 10;
/**
* ZZ_LEXSTATE[l] is the state in the DFA for the lexical state l
@@ -487,8 +487,8 @@
/**
* Fills Lucene token with the current token text.
*/
-final void getText(TermAttribute t) {
- t.setTermBuffer(zzBuffer, zzStartRead, zzMarkedPos-zzStartRead);
+final void getText(CharTermAttribute t) {
+ t.copyBuffer(zzBuffer, zzStartRead, zzMarkedPos-zzStartRead);
}
final int setText(StringBuilder buffer){
@@ -803,184 +803,184 @@
zzMarkedPos = zzMarkedPosL;
switch (zzAction < 0 ? zzAction : ZZ_ACTION[zzAction]) {
- case 25:
- { numWikiTokensSeen = 0; positionInc = 1; currentTokType = CITATION; yybegin(DOUBLE_BRACE_STATE);
+ case 16:
+ { currentTokType = HEADING; yybegin(DOUBLE_EQUALS_STATE); numWikiTokensSeen++; return currentTokType;
}
case 46: break;
- case 30:
- { numBalanced = 0;currentTokType = ALPHANUM; yybegin(YYINITIAL);/*end italics*/
+ case 39:
+ { positionInc = 1; return ACRONYM;
}
case 47: break;
- case 41:
- { numBalanced = 0;currentTokType = ALPHANUM; yybegin(YYINITIAL);/*end bold italics*/
+ case 8:
+ { /* ignore */
}
case 48: break;
- case 14:
- { yybegin(STRING); numWikiTokensSeen++; return currentTokType;
+ case 20:
+ { numBalanced = 0; numWikiTokensSeen = 0; currentTokType = EXTERNAL_LINK;yybegin(EXTERNAL_LINK_STATE);
}
case 49: break;
- case 23:
- { numWikiTokensSeen = 0; positionInc = 1; yybegin(DOUBLE_EQUALS_STATE);
+ case 35:
+ { positionInc = 1; return COMPANY;
}
case 50: break;
- case 34:
- { positionInc = 1; return NUM;
+ case 4:
+ { numWikiTokensSeen = 0; positionInc = 1; currentTokType = EXTERNAL_LINK_URL; yybegin(EXTERNAL_LINK_STATE);
}
case 51: break;
- case 18:
- { /* ignore STRING */
+ case 25:
+ { numWikiTokensSeen = 0; positionInc = 1; currentTokType = CITATION; yybegin(DOUBLE_BRACE_STATE);
}
case 52: break;
- case 12:
- { currentTokType = ITALICS; numWikiTokensSeen++; yybegin(STRING); return currentTokType;/*italics*/
+ case 43:
+ { numWikiTokensSeen = 0; positionInc = 1; currentTokType = CATEGORY; yybegin(CATEGORY_STATE);
}
case 53: break;
- case 37:
- { numBalanced = 0;currentTokType = ALPHANUM;yybegin(YYINITIAL);/*end bold*/
+ case 22:
+ { numWikiTokensSeen = 0; positionInc = 1; if (numBalanced == 0){numBalanced++;yybegin(TWO_SINGLE_QUOTES_STATE);} else{numBalanced = 0;}
}
case 54: break;
- case 31:
- { numBalanced = 0; numWikiTokensSeen = 0; currentTokType = INTERNAL_LINK;yybegin(INTERNAL_LINK_STATE);
+ case 34:
+ { positionInc = 1; return NUM;
}
case 55: break;
- case 10:
- { numLinkToks = 0; positionInc = 0; yybegin(YYINITIAL);
+ case 32:
+ { positionInc = 1; return APOSTROPHE;
}
case 56: break;
- case 38:
- { numBalanced = 0;currentTokType = ALPHANUM; yybegin(YYINITIAL);/*end sub header*/
+ case 23:
+ { numWikiTokensSeen = 0; positionInc = 1; yybegin(DOUBLE_EQUALS_STATE);
}
case 57: break;
- case 19:
- { yybegin(STRING); numWikiTokensSeen++; return currentTokType;/* STRING ALPHANUM*/
+ case 21:
+ { yybegin(STRING); return currentTokType;/*pipe*/
}
case 58: break;
- case 11:
- { currentTokType = BOLD; yybegin(THREE_SINGLE_QUOTES_STATE);
+ case 2:
+ { positionInc = 1; return ALPHANUM;
}
case 59: break;
- case 1:
- { numWikiTokensSeen = 0; positionInc = 1;
+ case 29:
+ { currentTokType = INTERNAL_LINK; numWikiTokensSeen = 0; yybegin(INTERNAL_LINK_STATE);
}
case 60: break;
- case 33:
- { positionInc = 1; return HOST;
+ case 17:
+ { yybegin(DOUBLE_BRACE_STATE); numWikiTokensSeen = 0; return currentTokType;
}
case 61: break;
- case 3:
- { positionInc = 1; return CJ;
+ case 44:
+ { currentTokType = CATEGORY; numWikiTokensSeen = 0; yybegin(CATEGORY_STATE);
}
case 62: break;
- case 17:
- { yybegin(DOUBLE_BRACE_STATE); numWikiTokensSeen = 0; return currentTokType;
+ case 26:
+ { yybegin(YYINITIAL);
}
case 63: break;
- case 32:
- { positionInc = 1; return APOSTROPHE;
+ case 3:
+ { positionInc = 1; return CJ;
}
case 64: break;
- case 8:
- { /* ignore */
+ case 38:
+ { numBalanced = 0;currentTokType = ALPHANUM; yybegin(YYINITIAL);/*end sub header*/
}
case 65: break;
- case 4:
- { numWikiTokensSeen = 0; positionInc = 1; currentTokType = EXTERNAL_LINK_URL; yybegin(EXTERNAL_LINK_STATE);
+ case 15:
+ { currentTokType = SUB_HEADING; numWikiTokensSeen = 0; yybegin(STRING);
}
case 66: break;
- case 2:
- { positionInc = 1; return ALPHANUM;
+ case 30:
+ { numBalanced = 0;currentTokType = ALPHANUM; yybegin(YYINITIAL);/*end italics*/
}
case 67: break;
- case 26:
- { yybegin(YYINITIAL);
+ case 6:
+ { yybegin(CATEGORY_STATE); numWikiTokensSeen++; return currentTokType;
}
case 68: break;
- case 43:
- { numWikiTokensSeen = 0; positionInc = 1; currentTokType = CATEGORY; yybegin(CATEGORY_STATE);
+ case 5:
+ { positionInc = 1;
}
case 69: break;
- case 36:
- { currentTokType = BOLD_ITALICS; yybegin(FIVE_SINGLE_QUOTES_STATE);
+ case 19:
+ { yybegin(STRING); numWikiTokensSeen++; return currentTokType;/* STRING ALPHANUM*/
}
case 70: break;
- case 13:
- { currentTokType = EXTERNAL_LINK; numWikiTokensSeen = 0; yybegin(EXTERNAL_LINK_STATE);
+ case 42:
+ { positionInc = 1; numWikiTokensSeen++; yybegin(EXTERNAL_LINK_STATE); return currentTokType;
}
case 71: break;
- case 24:
- { numWikiTokensSeen = 0; positionInc = 1; currentTokType = INTERNAL_LINK; yybegin(INTERNAL_LINK_STATE);
- }
- case 72: break;
case 27:
{ numLinkToks = 0; yybegin(YYINITIAL);
}
+ case 72: break;
+ case 11:
+ { currentTokType = BOLD; yybegin(THREE_SINGLE_QUOTES_STATE);
+ }
case 73: break;
- case 15:
- { currentTokType = SUB_HEADING; numWikiTokensSeen = 0; yybegin(STRING);
+ case 13:
+ { currentTokType = EXTERNAL_LINK; numWikiTokensSeen = 0; yybegin(EXTERNAL_LINK_STATE);
}
case 74: break;
- case 28:
- { currentTokType = INTERNAL_LINK; numWikiTokensSeen = 0; yybegin(INTERNAL_LINK_STATE);
+ case 14:
+ { yybegin(STRING); numWikiTokensSeen++; return currentTokType;
}
case 75: break;
- case 39:
- { positionInc = 1; return ACRONYM;
+ case 45:
+ { numBalanced = 0; numWikiTokensSeen = 0; currentTokType = CATEGORY;yybegin(CATEGORY_STATE);
}
case 76: break;
- case 29:
- { currentTokType = INTERNAL_LINK; numWikiTokensSeen = 0; yybegin(INTERNAL_LINK_STATE);
+ case 28:
+ { currentTokType = INTERNAL_LINK; numWikiTokensSeen = 0; yybegin(INTERNAL_LINK_STATE);
}
case 77: break;
- case 7:
- { yybegin(INTERNAL_LINK_STATE); numWikiTokensSeen++; return currentTokType;
+ case 37:
+ { numBalanced = 0;currentTokType = ALPHANUM;yybegin(YYINITIAL);/*end bold*/
}
case 78: break;
- case 16:
- { currentTokType = HEADING; yybegin(DOUBLE_EQUALS_STATE); numWikiTokensSeen++; return currentTokType;
+ case 9:
+ { if (numLinkToks == 0){positionInc = 0;} else{positionInc = 1;} numWikiTokensSeen++; currentTokType = EXTERNAL_LINK; yybegin(EXTERNAL_LINK_STATE); numLinkToks++; return currentTokType;
}
case 79: break;
- case 20:
- { numBalanced = 0; numWikiTokensSeen = 0; currentTokType = EXTERNAL_LINK;yybegin(EXTERNAL_LINK_STATE);
+ case 7:
+ { yybegin(INTERNAL_LINK_STATE); numWikiTokensSeen++; return currentTokType;
}
case 80: break;
- case 35:
- { positionInc = 1; return COMPANY;
+ case 24:
+ { numWikiTokensSeen = 0; positionInc = 1; currentTokType = INTERNAL_LINK; yybegin(INTERNAL_LINK_STATE);
}
case 81: break;
case 40:
{ positionInc = 1; return EMAIL;
}
case 82: break;
- case 42:
- { positionInc = 1; numWikiTokensSeen++; yybegin(EXTERNAL_LINK_STATE); return currentTokType;
+ case 1:
+ { numWikiTokensSeen = 0; positionInc = 1;
}
case 83: break;
- case 6:
- { yybegin(CATEGORY_STATE); numWikiTokensSeen++; return currentTokType;
+ case 18:
+ { /* ignore STRING */
}
case 84: break;
- case 44:
- { currentTokType = CATEGORY; numWikiTokensSeen = 0; yybegin(CATEGORY_STATE);
+ case 36:
+ { currentTokType = BOLD_ITALICS; yybegin(FIVE_SINGLE_QUOTES_STATE);
}
case 85: break;
- case 5:
- { positionInc = 1;
+ case 33:
+ { positionInc = 1; return HOST;
}
case 86: break;
- case 9:
- { if (numLinkToks == 0){positionInc = 0;} else{positionInc = 1;} numWikiTokensSeen++; currentTokType = EXTERNAL_LINK; yybegin(EXTERNAL_LINK_STATE); numLinkToks++; return currentTokType;
+ case 31:
+ { numBalanced = 0; numWikiTokensSeen = 0; currentTokType = INTERNAL_LINK;yybegin(INTERNAL_LINK_STATE);
}
case 87: break;
- case 45:
- { numBalanced = 0; numWikiTokensSeen = 0; currentTokType = CATEGORY;yybegin(CATEGORY_STATE);
+ case 41:
+ { numBalanced = 0;currentTokType = ALPHANUM; yybegin(YYINITIAL);/*end bold italics*/
}
case 88: break;
- case 22:
- { numWikiTokensSeen = 0; positionInc = 1; if (numBalanced == 0){numBalanced++;yybegin(TWO_SINGLE_QUOTES_STATE);} else{numBalanced = 0;}
+ case 12:
+ { currentTokType = ITALICS; numWikiTokensSeen++; yybegin(STRING); return currentTokType;/*italics*/
}
case 89: break;
- case 21:
- { yybegin(STRING); return currentTokType;/*pipe*/
+ case 10:
+ { numLinkToks = 0; positionInc = 0; yybegin(YYINITIAL);
}
case 90: break;
default:
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.jflex
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.jflex (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.jflex (working copy)
@@ -17,7 +17,7 @@
* limitations under the License.
*/
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
%%
@@ -81,8 +81,8 @@
/**
* Fills Lucene token with the current token text.
*/
-final void getText(TermAttribute t) {
- t.setTermBuffer(zzBuffer, zzStartRead, zzMarkedPos-zzStartRead);
+final void getText(CharTermAttribute t) {
+ t.copyBuffer(zzBuffer, zzStartRead, zzMarkedPos-zzStartRead);
}
final int setText(StringBuilder buffer){
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKTokenizer.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKTokenizer.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/cjk/CJKTokenizer.java (working copy)
@@ -22,7 +22,7 @@
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.util.AttributeSource;
@@ -98,9 +98,9 @@
*/
private boolean preIsTokened = false;
- private TermAttribute termAtt;
- private OffsetAttribute offsetAtt;
- private TypeAttribute typeAtt;
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+ private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
+ private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
//~ Constructors -----------------------------------------------------------
@@ -111,25 +111,16 @@
*/
public CJKTokenizer(Reader in) {
super(in);
- init();
}
public CJKTokenizer(AttributeSource source, Reader in) {
super(source, in);
- init();
}
public CJKTokenizer(AttributeFactory factory, Reader in) {
super(factory, in);
- init();
}
- private void init() {
- termAtt = addAttribute(TermAttribute.class);
- offsetAtt = addAttribute(OffsetAttribute.class);
- typeAtt = addAttribute(TypeAttribute.class);
- }
-
//~ Methods ----------------------------------------------------------------
/**
@@ -287,7 +278,7 @@
}
if (length > 0) {
- termAtt.setTermBuffer(buffer, 0, length);
+ termAtt.copyBuffer(buffer, 0, length);
offsetAtt.setOffset(correctOffset(start), correctOffset(start+length));
typeAtt.setType(TOKEN_TYPE_NAMES[tokenType]);
return true;
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/cz/CzechStemFilter.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/cz/CzechStemFilter.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/cz/CzechStemFilter.java (working copy)
@@ -6,7 +6,7 @@
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.KeywordAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
@@ -37,23 +37,20 @@
* @see KeywordMarkerFilter
*/
public final class CzechStemFilter extends TokenFilter {
- private final CzechStemmer stemmer;
- private final TermAttribute termAtt;
- private final KeywordAttribute keywordAttr;
+ private final CzechStemmer stemmer = new CzechStemmer();
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+ private final KeywordAttribute keywordAttr = addAttribute(KeywordAttribute.class);
public CzechStemFilter(TokenStream input) {
super(input);
- stemmer = new CzechStemmer();
- termAtt = addAttribute(TermAttribute.class);
- keywordAttr = addAttribute(KeywordAttribute.class);
}
@Override
public boolean incrementToken() throws IOException {
if (input.incrementToken()) {
if(!keywordAttr.isKeyword()) {
- final int newlen = stemmer.stem(termAtt.termBuffer(), termAtt.termLength());
- termAtt.setTermLength(newlen);
+ final int newlen = stemmer.stem(termAtt.buffer(), termAtt.length());
+ termAtt.setLength(newlen);
}
return true;
} else {
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymFilter.java (working copy)
@@ -133,7 +133,7 @@
OffsetAttribute lastOffsetAtt = lastTok.addAttribute(OffsetAttribute.class);
newOffsetAtt.setOffset(newOffsetAtt.startOffset(), lastOffsetAtt.endOffset());
- newTermAtt.copyBuffer(repTok.termBuffer(), 0, repTok.termLength());
+ newTermAtt.copyBuffer(repTok.buffer(), 0, repTok.length());
repPos += repTok.getPositionIncrement();
if (i==0) repPos=origPos; // make position of first token equal to original
Index: modules/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java
===================================================================
--- modules/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java (revision 948225)
+++ modules/analysis/common/src/java/org/apache/lucene/analysis/synonym/SynonymMap.java (working copy)
@@ -103,8 +103,7 @@
List ret = new ArrayList(strings.size());
for (String str : strings) {
//Token newTok = new Token(str,0,0,"SYNONYM");
- Token newTok = new Token(0,0,"SYNONYM");
- newTok.setTermBuffer(str.toCharArray(), 0, str.length());
+ Token newTok = new Token(str, 0,0,"SYNONYM");
ret.add(newTok);
}
return ret;
@@ -137,7 +136,7 @@
while(tok1!=null || tok2!=null) {
while (tok1 != null && (pos1 <= pos2 || tok2==null)) {
Token tok = new Token(tok1.startOffset(), tok1.endOffset(), tok1.type());
- tok.setTermBuffer(tok1.termBuffer(), 0, tok1.termLength());
+ tok.copyBuffer(tok1.buffer(), 0, tok1.length());
tok.setPositionIncrement(pos1-pos);
result.add(tok);
pos=pos1;
@@ -146,7 +145,7 @@
}
while (tok2 != null && (pos2 <= pos1 || tok1==null)) {
Token tok = new Token(tok2.startOffset(), tok2.endOffset(), tok2.type());
- tok.setTermBuffer(tok2.termBuffer(), 0, tok2.termLength());
+ tok.copyBuffer(tok2.buffer(), 0, tok2.length());
tok.setPositionIncrement(pos2-pos);
result.add(tok);
pos=pos2;
Index: lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynLookup.java
===================================================================
--- lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynLookup.java (revision 948225)
+++ lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynLookup.java (working copy)
@@ -28,7 +28,7 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
@@ -125,10 +125,10 @@
// [1] Parse query into separate words so that when we expand we can avoid dups
TokenStream ts = a.tokenStream( field, new StringReader( query));
- TermAttribute termAtt = ts.addAttribute(TermAttribute.class);
+ CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
while (ts.incrementToken()) {
- String word = termAtt.term();
+ String word = termAtt.toString();
if ( already.add( word))
top.add( word);
}
Index: lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynonymTokenFilter.java
===================================================================
--- lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynonymTokenFilter.java (revision 948225)
+++ lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynonymTokenFilter.java (working copy)
@@ -21,8 +21,8 @@
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.util.AttributeSource;
@@ -45,9 +45,9 @@
private AttributeSource.State current = null;
private int todo = 0;
- private TermAttribute termAtt;
- private TypeAttribute typeAtt;
- private PositionIncrementAttribute posIncrAtt;
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+ private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
+ private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
/**
* Creates an instance for the given underlying stream and synonym table.
@@ -71,10 +71,6 @@
this.synonyms = synonyms;
this.maxSynonyms = maxSynonyms;
-
- this.termAtt = addAttribute(TermAttribute.class);
- this.typeAtt = addAttribute(TypeAttribute.class);
- this.posIncrAtt = addAttribute(PositionIncrementAttribute.class);
}
/** Returns the next token in the stream, or null at EOS. */
@@ -89,7 +85,7 @@
if (!input.incrementToken()) return false; // EOS; iterator exhausted
- stack = synonyms.getSynonyms(termAtt.term()); // push onto stack
+ stack = synonyms.getSynonyms(termAtt.toString()); // push onto stack
if (stack.length > maxSynonyms) randomize(stack);
index = 0;
current = captureState();
@@ -110,7 +106,7 @@
*/
protected boolean createToken(String synonym, AttributeSource.State current) {
restoreState(current);
- termAtt.setTermBuffer(synonym);
+ termAtt.setEmpty().append(synonym);
typeAtt.setType(SYNONYM_TOKEN_TYPE);
posIncrAtt.setPositionIncrement(0);
return true;
Index: lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java
===================================================================
--- lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java (revision 948225)
+++ lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java (working copy)
@@ -29,7 +29,7 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
@@ -117,10 +117,10 @@
// [1] Parse query into separate words so that when we expand we can avoid dups
TokenStream ts = a.tokenStream( field, new StringReader( query));
- TermAttribute termAtt = ts.addAttribute(TermAttribute.class);
+ CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
while (ts.incrementToken()) {
- String word = termAtt.term();
+ String word = termAtt.toString();
if ( already.add( word))
top.add( word);
}
Index: lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java
===================================================================
--- lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java (revision 948225)
+++ lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java (working copy)
@@ -37,8 +37,8 @@
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.document.DateField;
import org.apache.lucene.document.DateTools;
@@ -96,8 +96,8 @@
public static Analyzer qpAnalyzer = new QPTestAnalyzer();
public static final class QPTestFilter extends TokenFilter {
- TermAttribute termAtt;
- OffsetAttribute offsetAtt;
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+ private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
/**
* Filter which discards the token 'stop' and which expands the token
@@ -105,8 +105,6 @@
*/
public QPTestFilter(TokenStream in) {
super(in);
- termAtt = addAttribute(TermAttribute.class);
- offsetAtt = addAttribute(OffsetAttribute.class);
}
boolean inPhrase = false;
@@ -117,19 +115,19 @@
if (inPhrase) {
inPhrase = false;
clearAttributes();
- termAtt.setTermBuffer("phrase2");
+ termAtt.setEmpty().append("phrase2");
offsetAtt.setOffset(savedStart, savedEnd);
return true;
} else
while (input.incrementToken()) {
- if (termAtt.term().equals("phrase")) {
+ if (termAtt.toString().equals("phrase")) {
inPhrase = true;
savedStart = offsetAtt.startOffset();
savedEnd = offsetAtt.endOffset();
- termAtt.setTermBuffer("phrase1");
+ termAtt.setEmpty().append("phrase1");
offsetAtt.setOffset(savedStart, savedEnd);
return true;
- } else if (!termAtt.term().equals("stop"))
+ } else if (!termAtt.toString().equals("stop"))
return true;
}
return false;
@@ -1158,7 +1156,7 @@
private class CannedTokenStream extends TokenStream {
private int upto = 0;
final PositionIncrementAttribute posIncr = addAttribute(PositionIncrementAttribute.class);
- final TermAttribute term = addAttribute(TermAttribute.class);
+ final CharTermAttribute term = addAttribute(CharTermAttribute.class);
@Override
public boolean incrementToken() {
clearAttributes();
@@ -1167,16 +1165,16 @@
}
if (upto == 0) {
posIncr.setPositionIncrement(1);
- term.setTermBuffer("a");
+ term.setEmpty().append("a");
} else if (upto == 1) {
posIncr.setPositionIncrement(1);
- term.setTermBuffer("b");
+ term.setEmpty().append("b");
} else if (upto == 2) {
posIncr.setPositionIncrement(0);
- term.setTermBuffer("c");
+ term.setEmpty().append("c");
} else {
posIncr.setPositionIncrement(0);
- term.setTermBuffer("d");
+ term.setEmpty().append("d");
}
upto++;
return true;
Index: lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiAnalyzerQPHelper.java
===================================================================
--- lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiAnalyzerQPHelper.java (revision 948225)
+++ lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiAnalyzerQPHelper.java (working copy)
@@ -23,9 +23,9 @@
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.queryParser.core.QueryNodeException;
import org.apache.lucene.queryParser.standard.config.DefaultOperatorAttribute.Operator;
@@ -163,24 +163,19 @@
private int prevStartOffset;
private int prevEndOffset;
- TermAttribute termAtt;
- PositionIncrementAttribute posIncrAtt;
- OffsetAttribute offsetAtt;
- TypeAttribute typeAtt;
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+ private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
+ private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
+ private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
public TestFilter(TokenStream in) {
super(in);
- termAtt = addAttribute(TermAttribute.class);
- posIncrAtt = addAttribute(PositionIncrementAttribute.class);
- offsetAtt = addAttribute(OffsetAttribute.class);
- typeAtt = addAttribute(TypeAttribute.class);
-
}
@Override
public final boolean incrementToken() throws java.io.IOException {
if (multiToken > 0) {
- termAtt.setTermBuffer("multi" + (multiToken + 1));
+ termAtt.setEmpty().append("multi" + (multiToken + 1));
offsetAtt.setOffset(prevStartOffset, prevEndOffset);
typeAtt.setType(prevType);
posIncrAtt.setPositionIncrement(0);
@@ -194,7 +189,7 @@
prevType = typeAtt.type();
prevStartOffset = offsetAtt.startOffset();
prevEndOffset = offsetAtt.endOffset();
- String text = termAtt.term();
+ String text = termAtt.toString();
if (text.equals("triplemulti")) {
multiToken = 2;
return true;
@@ -228,21 +223,19 @@
private class TestPosIncrementFilter extends TokenFilter {
- TermAttribute termAtt;
- PositionIncrementAttribute posIncrAtt;
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+ private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
public TestPosIncrementFilter(TokenStream in) {
super(in);
- termAtt = addAttribute(TermAttribute.class);
- posIncrAtt = addAttribute(PositionIncrementAttribute.class);
}
@Override
public final boolean incrementToken() throws java.io.IOException {
while (input.incrementToken()) {
- if (termAtt.term().equals("the")) {
+ if (termAtt.toString().equals("the")) {
// stopword, do nothing
- } else if (termAtt.term().equals("quick")) {
+ } else if (termAtt.toString().equals("quick")) {
posIncrAtt.setPositionIncrement(2);
return true;
} else {
Index: lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiAnalyzerWrapper.java
===================================================================
--- lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiAnalyzerWrapper.java (revision 948225)
+++ lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiAnalyzerWrapper.java (working copy)
@@ -23,9 +23,9 @@
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.util.LuceneTestCase;
@@ -157,24 +157,19 @@
private int prevStartOffset;
private int prevEndOffset;
- TermAttribute termAtt;
- PositionIncrementAttribute posIncrAtt;
- OffsetAttribute offsetAtt;
- TypeAttribute typeAtt;
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+ private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
+ private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
+ private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
public TestFilter(TokenStream in) {
super(in);
- termAtt = addAttribute(TermAttribute.class);
- posIncrAtt = addAttribute(PositionIncrementAttribute.class);
- offsetAtt = addAttribute(OffsetAttribute.class);
- typeAtt = addAttribute(TypeAttribute.class);
-
}
@Override
public final boolean incrementToken() throws java.io.IOException {
if (multiToken > 0) {
- termAtt.setTermBuffer("multi" + (multiToken + 1));
+ termAtt.setEmpty().append("multi" + (multiToken + 1));
offsetAtt.setOffset(prevStartOffset, prevEndOffset);
typeAtt.setType(prevType);
posIncrAtt.setPositionIncrement(0);
@@ -188,7 +183,7 @@
prevType = typeAtt.type();
prevStartOffset = offsetAtt.startOffset();
prevEndOffset = offsetAtt.endOffset();
- String text = termAtt.term();
+ String text = termAtt.toString();
if (text.equals("triplemulti")) {
multiToken = 2;
return true;
@@ -222,21 +217,19 @@
private class TestPosIncrementFilter extends TokenFilter {
- TermAttribute termAtt;
- PositionIncrementAttribute posIncrAtt;
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+ private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
public TestPosIncrementFilter(TokenStream in) {
super(in);
- termAtt = addAttribute(TermAttribute.class);
- posIncrAtt = addAttribute(PositionIncrementAttribute.class);
}
@Override
public final boolean incrementToken() throws java.io.IOException {
while (input.incrementToken()) {
- if (termAtt.term().equals("the")) {
+ if (termAtt.toString().equals("the")) {
// stopword, do nothing
- } else if (termAtt.term().equals("quick")) {
+ } else if (termAtt.toString().equals("quick")) {
posIncrAtt.setPositionIncrement(2);
return true;
} else {
Index: lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQueryParserWrapper.java
===================================================================
--- lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQueryParserWrapper.java (revision 948225)
+++ lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQueryParserWrapper.java (working copy)
@@ -36,7 +36,7 @@
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.document.DateField;
import org.apache.lucene.document.DateTools;
import org.apache.lucene.document.Document;
@@ -93,8 +93,8 @@
public static Analyzer qpAnalyzer = new QPTestAnalyzer();
public static final class QPTestFilter extends TokenFilter {
- TermAttribute termAtt;
- OffsetAttribute offsetAtt;
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+ private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
/**
* Filter which discards the token 'stop' and which expands the token
@@ -102,8 +102,6 @@
*/
public QPTestFilter(TokenStream in) {
super(in);
- termAtt = addAttribute(TermAttribute.class);
- offsetAtt = addAttribute(OffsetAttribute.class);
}
boolean inPhrase = false;
@@ -114,19 +112,19 @@
if (inPhrase) {
inPhrase = false;
clearAttributes();
- termAtt.setTermBuffer("phrase2");
+ termAtt.setEmpty().append("phrase2");
offsetAtt.setOffset(savedStart, savedEnd);
return true;
} else
while (input.incrementToken()) {
- if (termAtt.term().equals("phrase")) {
+ if (termAtt.toString().equals("phrase")) {
inPhrase = true;
savedStart = offsetAtt.startOffset();
savedEnd = offsetAtt.endOffset();
- termAtt.setTermBuffer("phrase1");
+ termAtt.setEmpty().append("phrase1");
offsetAtt.setOffset(savedStart, savedEnd);
return true;
- } else if (!termAtt.term().equals("stop"))
+ } else if (!termAtt.toString().equals("stop"))
return true;
}
return false;
Index: lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java
===================================================================
--- lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java (revision 948225)
+++ lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java (working copy)
@@ -23,8 +23,8 @@
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.document.DateTools;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.FuzzyQuery;
@@ -68,7 +68,7 @@
boolean inPhrase = false;
int savedStart = 0, savedEnd = 0;
- TermAttribute termAtt = addAttribute(TermAttribute.class);
+ CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
@Override
@@ -76,19 +76,19 @@
clearAttributes();
if (inPhrase) {
inPhrase = false;
- termAtt.setTermBuffer("phrase2");
+ termAtt.setEmpty().append("phrase2");
offsetAtt.setOffset(savedStart, savedEnd);
return true;
} else
while(input.incrementToken())
- if (termAtt.term().equals("phrase")) {
+ if (termAtt.toString().equals("phrase")) {
inPhrase = true;
savedStart = offsetAtt.startOffset();
savedEnd = offsetAtt.endOffset();
- termAtt.setTermBuffer("phrase1");
+ termAtt.setEmpty().append("phrase1");
offsetAtt.setOffset(savedStart, savedEnd);
return true;
- } else if (!termAtt.term().equals("stop"))
+ } else if (!termAtt.toString().equals("stop"))
return true;
return false;
}
Index: lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/AnalyzerQueryNodeProcessor.java
===================================================================
--- lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/AnalyzerQueryNodeProcessor.java (revision 948225)
+++ lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/AnalyzerQueryNodeProcessor.java (working copy)
@@ -26,8 +26,8 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CachingTokenFilter;
import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.queryParser.core.QueryNodeException;
import org.apache.lucene.queryParser.core.config.QueryConfigHandler;
import org.apache.lucene.queryParser.core.nodes.FieldQueryNode;
@@ -162,11 +162,11 @@
// ignore
}
- if (!buffer.hasAttribute(TermAttribute.class)) {
+ if (!buffer.hasAttribute(CharTermAttribute.class)) {
return new NoTokenFoundQueryNode();
}
- TermAttribute termAtt = buffer.getAttribute(TermAttribute.class);
+ CharTermAttribute termAtt = buffer.getAttribute(CharTermAttribute.class);
if (numTokens == 0) {
return new NoTokenFoundQueryNode();
@@ -177,7 +177,7 @@
boolean hasNext;
hasNext = buffer.incrementToken();
assert hasNext == true;
- term = termAtt.term();
+ term = termAtt.toString();
} catch (IOException e) {
// safe to ignore, because we know the number of tokens
@@ -197,7 +197,7 @@
try {
boolean hasNext = buffer.incrementToken();
assert hasNext == true;
- term = termAtt.term();
+ term = termAtt.toString();
} catch (IOException e) {
// safe to ignore, because we know the number of tokens
@@ -224,7 +224,7 @@
try {
boolean hasNext = buffer.incrementToken();
assert hasNext == true;
- term = termAtt.term();
+ term = termAtt.toString();
if (posIncrAtt != null) {
positionIncrement = posIncrAtt.getPositionIncrement();
}
@@ -290,7 +290,7 @@
try {
boolean hasNext = buffer.incrementToken();
assert hasNext == true;
- term = termAtt.term();
+ term = termAtt.toString();
if (posIncrAtt != null) {
positionIncrement = posIncrAtt.getPositionIncrement();
Index: lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/analyzing/AnalyzingQueryParser.java
===================================================================
--- lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/analyzing/AnalyzingQueryParser.java (revision 948225)
+++ lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/analyzing/AnalyzingQueryParser.java (working copy)
@@ -24,7 +24,7 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.Version;
@@ -107,7 +107,7 @@
// get Analyzer from superclass and tokenize the term
TokenStream source = getAnalyzer().tokenStream(field, new StringReader(termStr));
- TermAttribute termAtt = source.addAttribute(TermAttribute.class);
+ CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class);
int countTokens = 0;
while (true) {
@@ -116,7 +116,7 @@
} catch (IOException e) {
break;
}
- String term = termAtt.term();
+ String term = termAtt.toString();
if (!"".equals(term)) {
try {
tlist.set(countTokens++, term);
@@ -190,7 +190,7 @@
// get Analyzer from superclass and tokenize the term
TokenStream source = getAnalyzer().tokenStream(field, new StringReader(termStr));
List tlist = new ArrayList();
- TermAttribute termAtt = source.addAttribute(TermAttribute.class);
+ CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class);
while (true) {
try {
@@ -198,7 +198,7 @@
} catch (IOException e) {
break;
}
- tlist.add(termAtt.term());
+ tlist.add(termAtt.toString());
}
try {
@@ -237,13 +237,13 @@
throws ParseException {
// get Analyzer from superclass and tokenize the term
TokenStream source = getAnalyzer().tokenStream(field, new StringReader(termStr));
- TermAttribute termAtt = source.addAttribute(TermAttribute.class);
+ CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class);
String nextToken = null;
boolean multipleTokens = false;
try {
if (source.incrementToken()) {
- nextToken = termAtt.term();
+ nextToken = termAtt.toString();
}
multipleTokens = source.incrementToken();
} catch (IOException e) {
@@ -273,13 +273,13 @@
throws ParseException {
// get Analyzer from superclass and tokenize the terms
TokenStream source = getAnalyzer().tokenStream(field, new StringReader(part1));
- TermAttribute termAtt = source.addAttribute(TermAttribute.class);
+ CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class);
boolean multipleTokens = false;
// part1
try {
if (source.incrementToken()) {
- part1 = termAtt.term();
+ part1 = termAtt.toString();
}
multipleTokens = source.incrementToken();
} catch (IOException e) {
@@ -297,11 +297,11 @@
// part2
source = getAnalyzer().tokenStream(field, new StringReader(part2));
- termAtt = source.addAttribute(TermAttribute.class);
+ termAtt = source.addAttribute(CharTermAttribute.class);
try {
if (source.incrementToken()) {
- part2 = termAtt.term();
+ part2 = termAtt.toString();
}
multipleTokens = source.incrementToken();
} catch (IOException e) {
Index: lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParser.java
===================================================================
--- lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParser.java (revision 948225)
+++ lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParser.java (working copy)
@@ -307,7 +307,7 @@
List list = new ArrayList();
int positionCount = 0;
boolean severalTokensAtSamePosition = false;
- TermAttribute termAtt = source.addAttribute(TermAttribute.class);
+ CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class);
PositionIncrementAttribute posincrAtt = source.addAttribute(PositionIncrementAttribute.class);
try {
@@ -328,7 +328,7 @@
return null;
else if (list.size() == 1) {
source.restoreState(list.get(0));
- return new TermQuery(new Term(field, termAtt.term()));
+ return new TermQuery(new Term(field, termAtt.toString()));
} else {
if (severalTokensAtSamePosition) {
if (positionCount == 1) {
@@ -337,7 +337,7 @@
for (int i = 0; i < list.size(); i++) {
source.restoreState(list.get(i));
TermQuery currentQuery = new TermQuery(
- new Term(field, termAtt.term()));
+ new Term(field, termAtt.toString()));
q.add(currentQuery, BooleanClause.Occur.SHOULD);
}
return q;
@@ -352,7 +352,7 @@
mpq.add(multiTerms.toArray(new Term[0]));
multiTerms.clear();
}
- multiTerms.add(new Term(field, termAtt.term()));
+ multiTerms.add(new Term(field, termAtt.toString()));
}
mpq.add(multiTerms.toArray(new Term[0]));
return mpq;
@@ -363,7 +363,7 @@
q.setSlop(phraseSlop);
for (int i = 0; i < list.size(); i++) {
source.restoreState(list.get(i));
- q.add(new Term(field, termAtt.term()));
+ q.add(new Term(field, termAtt.toString()));
}
return q;
}
Index: lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParser.jj
===================================================================
--- lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParser.jj (revision 948225)
+++ lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParser.jj (working copy)
@@ -331,7 +331,7 @@
List list = new ArrayList();
int positionCount = 0;
boolean severalTokensAtSamePosition = false;
- TermAttribute termAtt = source.addAttribute(TermAttribute.class);
+ CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class);
PositionIncrementAttribute posincrAtt = source.addAttribute(PositionIncrementAttribute.class);
try {
@@ -352,7 +352,7 @@
return null;
else if (list.size() == 1) {
source.restoreState(list.get(0));
- return new TermQuery(new Term(field, termAtt.term()));
+ return new TermQuery(new Term(field, termAtt.toString()));
} else {
if (severalTokensAtSamePosition) {
if (positionCount == 1) {
@@ -361,7 +361,7 @@
for (int i = 0; i < list.size(); i++) {
source.restoreState(list.get(i));
TermQuery currentQuery = new TermQuery(
- new Term(field, termAtt.term()));
+ new Term(field, termAtt.toString()));
q.add(currentQuery, BooleanClause.Occur.SHOULD);
}
return q;
@@ -376,7 +376,7 @@
mpq.add(multiTerms.toArray(new Term[0]));
multiTerms.clear();
}
- multiTerms.add(new Term(field, termAtt.term()));
+ multiTerms.add(new Term(field, termAtt.toString()));
}
mpq.add(multiTerms.toArray(new Term[0]));
return mpq;
@@ -387,7 +387,7 @@
q.setSlop(phraseSlop);
for (int i = 0; i < list.size(); i++) {
source.restoreState(list.get(i));
- q.add(new Term(field, termAtt.term()));
+ q.add(new Term(field, termAtt.toString()));
}
return q;
}
Index: lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java
===================================================================
--- lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java (revision 948225)
+++ lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java (working copy)
@@ -25,7 +25,7 @@
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
@@ -278,7 +278,7 @@
tokens.add(t);
tokens.add(createToken("fin", 7, 9));
TokenStream ts = new TokenStream(Token.TOKEN_ATTRIBUTE_FACTORY) {
- final AttributeImpl reusableToken = (AttributeImpl) addAttribute(TermAttribute.class);
+ final AttributeImpl reusableToken = (AttributeImpl) addAttribute(CharTermAttribute.class);
Iterator it = tokens.iterator();
@Override
@@ -601,16 +601,12 @@
private static Token createToken(String term, int start, int offset)
{
- Token token = new Token(start, offset);
- token.setTermBuffer(term);
- return token;
+ return new Token(term, start, offset);
}
private static Token createToken(String term, int start, int offset, String type)
{
- Token token = new Token(start, offset, type);
- token.setTermBuffer(term);
- return token;
+ return new Token(term, start, offset, type);
}
Index: lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java
===================================================================
--- lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java (revision 948225)
+++ lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java (working copy)
@@ -561,7 +561,7 @@
// untokenized
String fieldVal = field.stringValue();
Token token = new Token(0, fieldVal.length(), "untokenized");
- token.setTermBuffer(fieldVal);
+ token.setEmpty().append(fieldVal);
tokens.add(token);
fieldSetting.fieldLength++;
}
@@ -596,10 +596,10 @@
for (Token token : eField_Tokens.getValue()) {
- TermDocumentInformationFactory termDocumentInformationFactory = termDocumentInformationFactoryByTermText.get(token.term());
+ TermDocumentInformationFactory termDocumentInformationFactory = termDocumentInformationFactoryByTermText.get(token.toString());
if (termDocumentInformationFactory == null) {
termDocumentInformationFactory = new TermDocumentInformationFactory();
- termDocumentInformationFactoryByTermText.put(token.term(), termDocumentInformationFactory);
+ termDocumentInformationFactoryByTermText.put(token.toString(), termDocumentInformationFactory);
}
//termDocumentInformationFactory.termFrequency++;
Index: lucene/contrib/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
===================================================================
--- lucene/contrib/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java (revision 948225)
+++ lucene/contrib/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java (working copy)
@@ -26,9 +26,10 @@
import java.util.Locale;
import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.benchmark.BenchmarkTestCase;
import org.apache.lucene.benchmark.byTask.feeds.DocMaker;
import org.apache.lucene.benchmark.byTask.feeds.ReutersQueryMaker;
@@ -918,11 +919,11 @@
TokenStream ts2 = a2.tokenStream("bogus", new StringReader(text));
ts1.reset();
ts2.reset();
- TermAttribute termAtt1 = ts1.addAttribute(TermAttribute.class);
- TermAttribute termAtt2 = ts2.addAttribute(TermAttribute.class);
+ CharTermAttribute termAtt1 = ts1.addAttribute(CharTermAttribute.class);
+ CharTermAttribute termAtt2 = ts2.addAttribute(CharTermAttribute.class);
assertTrue(ts1.incrementToken());
assertTrue(ts2.incrementToken());
- assertEquals(termAtt1.term(), termAtt2.term());
+ assertEquals(termAtt1.toString(), termAtt2.toString());
assertFalse(ts1.incrementToken());
assertFalse(ts2.incrementToken());
ts1.close();
@@ -994,21 +995,7 @@
private void assertEqualShingle
(Analyzer analyzer, String text, String[] expected) throws Exception {
- TokenStream stream = analyzer.tokenStream("bogus", new StringReader(text));
- stream.reset();
- TermAttribute termAtt = stream.addAttribute(TermAttribute.class);
- int termNum = 0;
- while (stream.incrementToken()) {
- assertTrue("Extra output term(s), starting with '"
- + new String(termAtt.termBuffer(), 0, termAtt.termLength()) + "'",
- termNum < expected.length);
- assertEquals("Mismatch in output term # " + termNum + " - ",
- expected[termNum],
- new String(termAtt.termBuffer(), 0, termAtt.termLength()));
- ++termNum;
- }
- assertEquals("Too few output terms", expected.length, termNum);
- stream.close();
+ BaseTokenStreamTestCase.assertAnalyzesTo(analyzer, text, expected);
}
private String[] getShingleConfig(String params) {
Index: lucene/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/TermsFilterBuilder.java
===================================================================
--- lucene/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/TermsFilterBuilder.java (revision 948225)
+++ lucene/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/TermsFilterBuilder.java (working copy)
@@ -5,7 +5,7 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.TermsFilter;
@@ -57,7 +57,7 @@
String text = DOMUtils.getNonBlankTextOrFail(e);
String fieldName = DOMUtils.getAttributeWithInheritanceOrFail(e, "fieldName");
TokenStream ts = analyzer.tokenStream(fieldName, new StringReader(text));
- TermAttribute termAtt = ts.addAttribute(TermAttribute.class);
+ CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
try
{
@@ -65,11 +65,11 @@
while (ts.incrementToken()) {
if (term == null)
{
- term = new Term(fieldName, termAtt.term());
+ term = new Term(fieldName, termAtt.toString());
} else
{
// create from previous to save fieldName.intern overhead
- term = term.createTerm(termAtt.term());
+ term = term.createTerm(termAtt.toString());
}
tf.addTerm(term);
}
Index: lucene/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/LikeThisQueryBuilder.java
===================================================================
--- lucene/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/LikeThisQueryBuilder.java (revision 948225)
+++ lucene/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/LikeThisQueryBuilder.java (working copy)
@@ -10,7 +10,7 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.search.similar.MoreLikeThisQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.xmlparser.DOMUtils;
@@ -77,11 +77,11 @@
for (int i = 0; i < fields.length; i++)
{
TokenStream ts = analyzer.tokenStream(fields[i],new StringReader(stopWords));
- TermAttribute termAtt = ts.addAttribute(TermAttribute.class);
+ CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
try
{
while(ts.incrementToken()) {
- stopWordsSet.add(termAtt.term());
+ stopWordsSet.add(termAtt.toString());
}
}
catch(IOException ioe)
Index: lucene/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/SpanOrTermsBuilder.java
===================================================================
--- lucene/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/SpanOrTermsBuilder.java (revision 948225)
+++ lucene/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/SpanOrTermsBuilder.java (working copy)
@@ -6,7 +6,7 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.spans.SpanOrQuery;
import org.apache.lucene.search.spans.SpanQuery;
@@ -56,10 +56,10 @@
{
ArrayList clausesList=new ArrayList();
TokenStream ts=analyzer.tokenStream(fieldName,new StringReader(value));
- TermAttribute termAtt = ts.addAttribute(TermAttribute.class);
+ CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
while (ts.incrementToken()) {
- SpanTermQuery stq=new SpanTermQuery(new Term(fieldName, termAtt.term()));
+ SpanTermQuery stq=new SpanTermQuery(new Term(fieldName, termAtt.toString()));
clausesList.add(stq);
}
SpanOrQuery soq=new SpanOrQuery(clausesList.toArray(new SpanQuery[clausesList.size()]));
Index: lucene/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/TermsQueryBuilder.java
===================================================================
--- lucene/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/TermsQueryBuilder.java (revision 948225)
+++ lucene/contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/TermsQueryBuilder.java (working copy)
@@ -5,7 +5,7 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
@@ -57,16 +57,16 @@
TokenStream ts = analyzer.tokenStream(fieldName, new StringReader(text));
try
{
- TermAttribute termAtt = ts.addAttribute(TermAttribute.class);
+ CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
Term term = null;
while (ts.incrementToken()) {
if (term == null)
{
- term = new Term(fieldName, termAtt.term());
+ term = new Term(fieldName, termAtt.toString());
} else
{
// create from previous to save fieldName.intern overhead
- term = term.createTerm(termAtt.term());
+ term = term.createTerm(termAtt.toString());
}
bq.add(new BooleanClause(new TermQuery(term),BooleanClause.Occur.SHOULD));
}
Index: lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java
===================================================================
--- lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java (revision 948225)
+++ lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java (working copy)
@@ -25,7 +25,7 @@
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Index;
@@ -296,16 +296,11 @@
private int i = -1;
- private TermAttribute termAttribute;
+ private final CharTermAttribute termAttribute = addAttribute(CharTermAttribute.class);
+ private final OffsetAttribute offsetAttribute = addAttribute(OffsetAttribute.class);
+ private final PositionIncrementAttribute positionIncrementAttribute = addAttribute(PositionIncrementAttribute.class);
- private OffsetAttribute offsetAttribute;
-
- private PositionIncrementAttribute positionIncrementAttribute;
-
public TokenStreamSparse() {
- termAttribute = addAttribute(TermAttribute.class);
- offsetAttribute = addAttribute(OffsetAttribute.class);
- positionIncrementAttribute = addAttribute(PositionIncrementAttribute.class);
reset();
}
@@ -316,8 +311,7 @@
return false;
}
clearAttributes();
- termAttribute.setTermBuffer(this.tokens[i].term(), 0, this.tokens[i]
- .term().length());
+ termAttribute.setEmpty().append(this.tokens[i]);
offsetAttribute.setOffset(this.tokens[i].startOffset(), this.tokens[i]
.endOffset());
positionIncrementAttribute.setPositionIncrement(this.tokens[i]
@@ -342,16 +336,11 @@
private int i = -1;
- private TermAttribute termAttribute;
+ private final CharTermAttribute termAttribute = addAttribute(CharTermAttribute.class);
+ private final OffsetAttribute offsetAttribute = addAttribute(OffsetAttribute.class);
+ private final PositionIncrementAttribute positionIncrementAttribute = addAttribute(PositionIncrementAttribute.class);
- private OffsetAttribute offsetAttribute;
-
- private PositionIncrementAttribute positionIncrementAttribute;
-
public TokenStreamConcurrent() {
- termAttribute = addAttribute(TermAttribute.class);
- offsetAttribute = addAttribute(OffsetAttribute.class);
- positionIncrementAttribute = addAttribute(PositionIncrementAttribute.class);
reset();
}
@@ -362,8 +351,7 @@
return false;
}
clearAttributes();
- termAttribute.setTermBuffer(this.tokens[i].term(), 0, this.tokens[i]
- .term().length());
+ termAttribute.setEmpty().append(this.tokens[i]);
offsetAttribute.setOffset(this.tokens[i].startOffset(), this.tokens[i]
.endOffset());
positionIncrementAttribute.setPositionIncrement(this.tokens[i]
Index: lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
===================================================================
--- lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (revision 948225)
+++ lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (working copy)
@@ -41,7 +41,7 @@
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericField;
@@ -1424,13 +1424,10 @@
return new TokenStream() {
Iterator iter;
List lst;
- private TermAttribute termAtt;
- private PositionIncrementAttribute posIncrAtt;
- private OffsetAttribute offsetAtt;
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+ private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
+ private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
{
- termAtt = addAttribute(TermAttribute.class);
- posIncrAtt = addAttribute(PositionIncrementAttribute.class);
- offsetAtt = addAttribute(OffsetAttribute.class);
lst = new ArrayList();
Token t;
t = createToken("hi", 0, 2);
@@ -1456,7 +1453,7 @@
if(iter.hasNext()) {
Token token = iter.next();
clearAttributes();
- termAtt.setTermBuffer(token.term());
+ termAtt.setEmpty().append(token);
posIncrAtt.setPositionIncrement(token.getPositionIncrement());
offsetAtt.setOffset(token.startOffset(), token.endOffset());
return true;
@@ -1473,13 +1470,10 @@
return new TokenStream() {
Iterator iter;
List lst;
- private TermAttribute termAtt;
- private PositionIncrementAttribute posIncrAtt;
- private OffsetAttribute offsetAtt;
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+ private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
+ private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
{
- termAtt = addAttribute(TermAttribute.class);
- posIncrAtt = addAttribute(PositionIncrementAttribute.class);
- offsetAtt = addAttribute(OffsetAttribute.class);
lst = new ArrayList();
Token t;
t = createToken("hispeed", 0, 8);
@@ -1505,7 +1499,7 @@
if(iter.hasNext()) {
Token token = iter.next();
clearAttributes();
- termAtt.setTermBuffer(token.term());
+ termAtt.setEmpty().append(token);
posIncrAtt.setPositionIncrement(token.getPositionIncrement());
offsetAtt.setOffset(token.startOffset(), token.endOffset());
return true;
@@ -1762,9 +1756,7 @@
private static Token createToken(String term, int start, int offset)
{
- Token token = new Token(start, offset);
- token.setTermBuffer(term);
- return token;
+ return new Token(term, start, offset);
}
}
@@ -1795,7 +1787,7 @@
@Override
public TokenStream tokenStream(String arg0, Reader arg1) {
Tokenizer stream = new MockTokenizer(arg1, MockTokenizer.SIMPLE, true);
- stream.addAttribute(TermAttribute.class);
+ stream.addAttribute(CharTermAttribute.class);
stream.addAttribute(PositionIncrementAttribute.class);
stream.addAttribute(OffsetAttribute.class);
return new SynonymTokenizer(stream, synonyms);
@@ -1811,21 +1803,21 @@
private Token currentRealToken = null;
private Map synonyms;
StringTokenizer st = null;
- private TermAttribute realTermAtt;
+ private CharTermAttribute realTermAtt;
private PositionIncrementAttribute realPosIncrAtt;
private OffsetAttribute realOffsetAtt;
- private TermAttribute termAtt;
+ private CharTermAttribute termAtt;
private PositionIncrementAttribute posIncrAtt;
private OffsetAttribute offsetAtt;
public SynonymTokenizer(TokenStream realStream, Map synonyms) {
this.realStream = realStream;
this.synonyms = synonyms;
- realTermAtt = realStream.addAttribute(TermAttribute.class);
+ realTermAtt = realStream.addAttribute(CharTermAttribute.class);
realPosIncrAtt = realStream.addAttribute(PositionIncrementAttribute.class);
realOffsetAtt = realStream.addAttribute(OffsetAttribute.class);
- termAtt = addAttribute(TermAttribute.class);
+ termAtt = addAttribute(CharTermAttribute.class);
posIncrAtt = addAttribute(PositionIncrementAttribute.class);
offsetAtt = addAttribute(OffsetAttribute.class);
}
@@ -1840,25 +1832,25 @@
}
//Token nextRealToken = new Token(, offsetAtt.startOffset(), offsetAtt.endOffset());
clearAttributes();
- termAtt.setTermBuffer(realTermAtt.term());
+ termAtt.copyBuffer(realTermAtt.buffer(), 0, realTermAtt.length());
offsetAtt.setOffset(realOffsetAtt.startOffset(), realOffsetAtt.endOffset());
posIncrAtt.setPositionIncrement(realPosIncrAtt.getPositionIncrement());
- String expansions = synonyms.get(realTermAtt.term());
+ String expansions = synonyms.get(realTermAtt.toString());
if (expansions == null) {
return true;
}
st = new StringTokenizer(expansions, ",");
if (st.hasMoreTokens()) {
currentRealToken = new Token(realOffsetAtt.startOffset(), realOffsetAtt.endOffset());
- currentRealToken.setTermBuffer(realTermAtt.term());
+ currentRealToken.copyBuffer(realTermAtt.buffer(), 0, realTermAtt.length());
}
return true;
} else {
String tok = st.nextToken();
clearAttributes();
- termAtt.setTermBuffer(tok);
+ termAtt.setEmpty().append(tok);
offsetAtt.setOffset(currentRealToken.startOffset(), currentRealToken.endOffset());
posIncrAtt.setPositionIncrement(0);
if (!st.hasMoreTokens()) {
Index: lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java
===================================================================
--- lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java (revision 948225)
+++ lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java (working copy)
@@ -26,8 +26,8 @@
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Index;
@@ -221,14 +221,14 @@
ch = 0;
}
- TermAttribute termAtt = addAttribute(TermAttribute.class);
+ CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
@Override
public boolean incrementToken() throws IOException {
if( !getNextPartialSnippet() )
return false;
clearAttributes();
- termAtt.setTermBuffer(snippet, startTerm, lenTerm);
+ termAtt.setEmpty().append(snippet, startTerm, startTerm + lenTerm);
offsetAtt.setOffset(correctOffset(startOffset), correctOffset(startOffset + lenTerm));
return true;
}
Index: lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/IndexTimeSynonymTest.java
===================================================================
--- lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/IndexTimeSynonymTest.java (revision 948225)
+++ lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/IndexTimeSynonymTest.java (working copy)
@@ -25,7 +25,7 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.util.AttributeImpl;
@@ -301,7 +301,7 @@
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
TokenStream ts = new TokenStream(Token.TOKEN_ATTRIBUTE_FACTORY) {
- final AttributeImpl reusableToken = (AttributeImpl) addAttribute(TermAttribute.class);
+ final AttributeImpl reusableToken = (AttributeImpl) addAttribute(CharTermAttribute.class);
int p = 0;
@Override
Index: lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermScorer.java
===================================================================
--- lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermScorer.java (revision 948225)
+++ lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermScorer.java (working copy)
@@ -21,7 +21,7 @@
import java.util.HashSet;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Query;
@@ -41,7 +41,7 @@
float maxTermWeight = 0;
private HashMap termsToFind;
- private TermAttribute termAtt;
+ private CharTermAttribute termAtt;
/**
*
@@ -95,7 +95,7 @@
* @see org.apache.lucene.search.highlight.Scorer#init(org.apache.lucene.analysis.TokenStream)
*/
public TokenStream init(TokenStream tokenStream) {
- termAtt = tokenStream.addAttribute(TermAttribute.class);
+ termAtt = tokenStream.addAttribute(CharTermAttribute.class);
return null;
}
@@ -118,7 +118,7 @@
* @see org.apache.lucene.search.highlight.Scorer#getTokenScore()
*/
public float getTokenScore() {
- String termText = termAtt.term();
+ String termText = termAtt.toString();
WeightedTerm queryTerm = termsToFind.get(termText);
if (queryTerm == null) {
Index: lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermPositionVector.java
===================================================================
--- lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermPositionVector.java (revision 948225)
+++ lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenStreamFromTermPositionVector.java (working copy)
@@ -25,9 +25,9 @@
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.index.TermPositionVector;
import org.apache.lucene.index.TermVectorOffsetInfo;
@@ -37,7 +37,7 @@
private Iterator tokensAtCurrentPosition;
- private TermAttribute termAttribute;
+ private CharTermAttribute termAttribute;
private PositionIncrementAttribute positionIncrementAttribute;
@@ -51,7 +51,7 @@
*/
public TokenStreamFromTermPositionVector(
final TermPositionVector termPositionVector) {
- termAttribute = addAttribute(TermAttribute.class);
+ termAttribute = addAttribute(CharTermAttribute.class);
positionIncrementAttribute = addAttribute(PositionIncrementAttribute.class);
offsetAttribute = addAttribute(OffsetAttribute.class);
final String[] terms = termPositionVector.getTerms();
@@ -65,7 +65,7 @@
offsets[j].getStartOffset(), offsets[j].getEndOffset());
} else {
token = new Token();
- token.setTermBuffer(terms[i]);
+ token.setEmpty().append(terms[i]);
}
// Yes - this is the position, not the increment! This is for
// sorting. This value
@@ -100,7 +100,7 @@
if (this.tokensAtCurrentPosition.hasNext()) {
final Token next = this.tokensAtCurrentPosition.next();
clearAttributes();
- termAttribute.setTermBuffer(next.term());
+ termAttribute.setEmpty().append(next);
positionIncrementAttribute.setPositionIncrement(next
.getPositionIncrement());
offsetAttribute.setOffset(next.startOffset(), next.endOffset());
Index: lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/QueryScorer.java
===================================================================
--- lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/QueryScorer.java (revision 948225)
+++ lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/QueryScorer.java (working copy)
@@ -25,8 +25,8 @@
import org.apache.lucene.analysis.CachingTokenFilter;
import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.search.Query;
@@ -46,7 +46,7 @@
private float maxTermWeight;
private int position = -1;
private String defaultField;
- private TermAttribute termAtt;
+ private CharTermAttribute termAtt;
private PositionIncrementAttribute posIncAtt;
private boolean expandMultiTermQuery = true;
private Query query;
@@ -145,7 +145,7 @@
*/
public float getTokenScore() {
position += posIncAtt.getPositionIncrement();
- String termText = termAtt.term();
+ String termText = termAtt.toString();
WeightedSpanTerm weightedSpanTerm;
@@ -175,7 +175,7 @@
*/
public TokenStream init(TokenStream tokenStream) throws IOException {
position = -1;
- termAtt = tokenStream.addAttribute(TermAttribute.class);
+ termAtt = tokenStream.addAttribute(CharTermAttribute.class);
posIncAtt = tokenStream.addAttribute(PositionIncrementAttribute.class);
if(!skipInitExtractor) {
if(fieldWeightedSpanTerms != null) {
Index: lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java
===================================================================
--- lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java (revision 948225)
+++ lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java (working copy)
@@ -29,8 +29,8 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.TermFreqVector;
@@ -153,13 +153,13 @@
int currentToken = 0;
- TermAttribute termAtt;
+ CharTermAttribute termAtt;
OffsetAttribute offsetAtt;
StoredTokenStream(Token tokens[]) {
this.tokens = tokens;
- termAtt = addAttribute(TermAttribute.class);
+ termAtt = addAttribute(CharTermAttribute.class);
offsetAtt = addAttribute(OffsetAttribute.class);
}
@@ -170,7 +170,7 @@
}
Token token = tokens[currentToken++];
clearAttributes();
- termAtt.setTermBuffer(token.term());
+ termAtt.setEmpty().append(token);
offsetAtt.setOffset(token.startOffset(), token.endOffset());
return true;
}
@@ -204,9 +204,8 @@
unsortedTokens = new ArrayList();
}
for (int tp = 0; tp < offsets.length; tp++) {
- Token token = new Token(offsets[tp].getStartOffset(), offsets[tp]
+ Token token = new Token(terms[t], offsets[tp].getStartOffset(), offsets[tp]
.getEndOffset());
- token.setTermBuffer(terms[t]);
unsortedTokens.add(token);
}
} else {
Index: lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/SimpleSpanFragmenter.java
===================================================================
--- lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/SimpleSpanFragmenter.java (revision 948225)
+++ lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/SimpleSpanFragmenter.java (working copy)
@@ -20,9 +20,9 @@
import java.util.List;
import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.search.spans.Spans;
@@ -38,7 +38,7 @@
private QueryScorer queryScorer;
private int waitForPos = -1;
private int textSize;
- private TermAttribute termAtt;
+ private CharTermAttribute termAtt;
private PositionIncrementAttribute posIncAtt;
private OffsetAttribute offsetAtt;
@@ -70,7 +70,7 @@
return false;
}
- WeightedSpanTerm wSpanTerm = queryScorer.getWeightedSpanTerm(termAtt.term());
+ WeightedSpanTerm wSpanTerm = queryScorer.getWeightedSpanTerm(termAtt.toString());
if (wSpanTerm != null) {
List positionSpans = wSpanTerm.getPositionSpans();
@@ -101,7 +101,7 @@
position = -1;
currentNumFrags = 1;
textSize = originalText.length();
- termAtt = tokenStream.addAttribute(TermAttribute.class);
+ termAtt = tokenStream.addAttribute(CharTermAttribute.class);
posIncAtt = tokenStream.addAttribute(PositionIncrementAttribute.class);
offsetAtt = tokenStream.addAttribute(OffsetAttribute.class);
}
Index: lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenGroup.java
===================================================================
--- lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenGroup.java (revision 948225)
+++ lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenGroup.java (working copy)
@@ -19,8 +19,8 @@
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
/**
* One, or several overlapping tokens, along with the score(s) and the scope of
@@ -38,11 +38,11 @@
int matchStartOffset, matchEndOffset;
private OffsetAttribute offsetAtt;
- private TermAttribute termAtt;
+ private CharTermAttribute termAtt;
public TokenGroup(TokenStream tokenStream) {
offsetAtt = tokenStream.addAttribute(OffsetAttribute.class);
- termAtt = tokenStream.addAttribute(TermAttribute.class);
+ termAtt = tokenStream.addAttribute(CharTermAttribute.class);
}
void addToken(float score) {
@@ -68,7 +68,7 @@
}
}
Token token = new Token(termStartOffset, termEndOffset);
- token.setTermBuffer(termAtt.term());
+ token.setEmpty().append(termAtt);
tokens[numTokens] = token;
scores[numTokens] = score;
numTokens++;
Index: lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java
===================================================================
--- lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java (revision 948225)
+++ lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/Highlighter.java (working copy)
@@ -23,9 +23,9 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.PriorityQueue;
/**
@@ -191,7 +191,7 @@
ArrayList docFrags = new ArrayList();
StringBuilder newText=new StringBuilder();
- TermAttribute termAtt = tokenStream.addAttribute(TermAttribute.class);
+ CharTermAttribute termAtt = tokenStream.addAttribute(CharTermAttribute.class);
OffsetAttribute offsetAtt = tokenStream.addAttribute(OffsetAttribute.class);
tokenStream.addAttribute(PositionIncrementAttribute.class);
tokenStream.reset();
@@ -225,7 +225,7 @@
(offsetAtt.startOffset()>text.length())
)
{
- throw new InvalidTokenOffsetsException("Token "+ termAtt.term()
+ throw new InvalidTokenOffsetsException("Token "+ termAtt.toString()
+" exceeds length of provided text sized "+text.length());
}
if((tokenGroup.numTokens>0)&&(tokenGroup.isDistinct()))
Index: lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
===================================================================
--- lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java (revision 948225)
+++ lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java (working copy)
@@ -30,9 +30,11 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.index.IndexReader;
@@ -51,6 +53,7 @@
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.store.RAMDirectory; // for javadocs
+import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Constants; // for javadocs
/**
@@ -276,8 +279,8 @@
return new TokenStream() {
private Iterator iter = keywords.iterator();
private int start = 0;
- private TermAttribute termAtt = addAttribute(TermAttribute.class);
- private OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
+ private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
+ private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
@Override
public boolean incrementToken() {
@@ -289,8 +292,8 @@
String term = obj.toString();
clearAttributes();
- termAtt.setTermBuffer(term);
- offsetAtt.setOffset(start, start+termAtt.termLength());
+ termAtt.setEmpty().append(term);
+ offsetAtt.setOffset(start, start+termAtt.length());
start += term.length() + 1; // separate words by 1 (blank) character
return true;
}
@@ -340,13 +343,15 @@
int numOverlapTokens = 0;
int pos = -1;
- TermAttribute termAtt = stream.addAttribute(TermAttribute.class);
+ TermToBytesRefAttribute termAtt = stream.addAttribute(TermToBytesRefAttribute.class);
PositionIncrementAttribute posIncrAttribute = stream.addAttribute(PositionIncrementAttribute.class);
OffsetAttribute offsetAtt = stream.addAttribute(OffsetAttribute.class);
-
+ BytesRef ref = new BytesRef(10);
stream.reset();
while (stream.incrementToken()) {
- String term = termAtt.term();
+ termAtt.toBytesRef(ref);
+ // TODO: support non-UTF8 strings (like numerics) here
+ String term = ref.utf8ToString();
if (term.length() == 0) continue; // nothing to do
// if (DEBUG) System.err.println("token='" + term + "'");
numTokens++;
Index: lucene/contrib/lucli/src/java/lucli/LuceneMethods.java
===================================================================
--- lucene/contrib/lucli/src/java/lucli/LuceneMethods.java (revision 948225)
+++ lucene/contrib/lucli/src/java/lucli/LuceneMethods.java (working copy)
@@ -36,8 +36,8 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.IndexReader;
@@ -303,14 +303,14 @@
int position = 0;
// Tokenize field and add to postingTable
TokenStream stream = analyzer.tokenStream(fieldName, reader);
- TermAttribute termAtt = stream.addAttribute(TermAttribute.class);
+ CharTermAttribute termAtt = stream.addAttribute(CharTermAttribute.class);
PositionIncrementAttribute posIncrAtt = stream.addAttribute(PositionIncrementAttribute.class);
try {
while (stream.incrementToken()) {
position += (posIncrAtt.getPositionIncrement() - 1);
position++;
- String name = termAtt.term();
+ String name = termAtt.toString();
Integer Count = tokenMap.get(name);
if (Count == null) { // not in there yet
tokenMap.put(name, Integer.valueOf(1)); //first one
Index: lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java
===================================================================
--- lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java (revision 948225)
+++ lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java (working copy)
@@ -32,7 +32,7 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
@@ -884,10 +884,10 @@
TokenStream ts = analyzer.tokenStream(fieldName, r);
int tokenCount=0;
// for every token
- TermAttribute termAtt = ts.addAttribute(TermAttribute.class);
+ CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
while (ts.incrementToken()) {
- String word = termAtt.term();
+ String word = termAtt.toString();
tokenCount++;
if(tokenCount>maxNumTokensParsed)
{
Index: lucene/contrib/queries/src/java/org/apache/lucene/search/similar/SimilarityQueries.java
===================================================================
--- lucene/contrib/queries/src/java/org/apache/lucene/search/similar/SimilarityQueries.java (revision 948225)
+++ lucene/contrib/queries/src/java/org/apache/lucene/search/similar/SimilarityQueries.java (working copy)
@@ -22,7 +22,7 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
@@ -86,12 +86,12 @@
throws IOException
{
TokenStream ts = a.tokenStream( field, new StringReader( body));
- TermAttribute termAtt = ts.addAttribute(TermAttribute.class);
+ CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
BooleanQuery tmp = new BooleanQuery();
Set already = new HashSet(); // ignore dups
while (ts.incrementToken()) {
- String word = termAtt.term();
+ String word = termAtt.toString();
// ignore opt stop words
if ( stop != null &&
stop.contains( word)) continue;
Index: lucene/contrib/queries/src/java/org/apache/lucene/search/FuzzyLikeThisQuery.java
===================================================================
--- lucene/contrib/queries/src/java/org/apache/lucene/search/FuzzyLikeThisQuery.java (revision 948225)
+++ lucene/contrib/queries/src/java/org/apache/lucene/search/FuzzyLikeThisQuery.java (working copy)
@@ -26,7 +26,7 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.util.BytesRef;
@@ -185,14 +185,14 @@
{
if(f.queryString==null) return;
TokenStream ts=analyzer.tokenStream(f.fieldName,new StringReader(f.queryString));
- TermAttribute termAtt = ts.addAttribute(TermAttribute.class);
+ CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
int corpusNumDocs=reader.numDocs();
Term internSavingTemplateTerm =new Term(f.fieldName); //optimization to avoid constructing new Term() objects
HashSet processedTerms=new HashSet();
while (ts.incrementToken())
{
- String term = termAtt.term();
+ String term = termAtt.toString();
if(!processedTerms.contains(term))
{
processedTerms.add(term);