Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/ar/ArabicAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/ar/ArabicAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/ar/ArabicAnalyzer.java (working copy)
@@ -25,7 +25,6 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseFilter;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.KeywordMarkerTokenFilter;
import org.apache.lucene.analysis.StopFilter;
@@ -162,14 +161,16 @@
this(matchVersion, WordlistLoader.getWordSet( stopwords, STOPWORDS_COMMENT));
}
-
/**
- * Creates {@link TokenStreamComponents} used to tokenize all the text in the provided {@link Reader}.
- *
- * @return {@link TokenStreamComponents} built from an {@link ArabicLetterTokenizer} filtered with
- * {@link LowerCaseFilter}, {@link StopFilter}, {@link ArabicNormalizationFilter},
- * {@link KeywordMarkerTokenFilter} if a stem exclusion set is provided
- * and {@link ArabicStemFilter}.
+ * Creates
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * used to tokenize all the text in the provided {@link Reader}.
+ *
+ * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from an {@link ArabicLetterTokenizer} filtered with
+ * {@link LowerCaseFilter}, {@link StopFilter},
+ * {@link ArabicNormalizationFilter}, {@link KeywordMarkerTokenFilter}
+ * if a stem exclusion set is provided and {@link ArabicStemFilter}.
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/bg/BulgarianAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/bg/BulgarianAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/bg/BulgarianAnalyzer.java (working copy)
@@ -24,7 +24,6 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseFilter;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.KeywordMarkerTokenFilter;
import org.apache.lucene.analysis.StopFilter;
@@ -117,15 +116,18 @@
super(matchVersion, stopwords);
this.stemExclusionSet = CharArraySet.unmodifiableSet(CharArraySet.copy(
matchVersion, stemExclusionSet));
}
-
+
/**
- * Creates a {@link TokenStreamComponents} which tokenizes all the text in the provided
- * {@link Reader}.
+ * Creates a
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * which tokenizes all the text in the provided {@link Reader}.
*
- * @return A {@link TokenStreamComponents} built from an {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link LowerCaseFilter},
- * {@link StopFilter}, {@link KeywordMarkerTokenFilter} if a stem
- * exclusion set is provided and {@link BulgarianStemFilter}.
+ * @return A
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter},
+ * {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided and {@link BulgarianStemFilter}.
*/
@Override
public TokenStreamComponents createComponents(String fieldName, Reader reader) {
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java (working copy)
@@ -29,7 +29,6 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.LowerCaseFilter;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.KeywordMarkerTokenFilter;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.StopwordAnalyzerBase;
@@ -191,12 +190,16 @@
excltable = WordlistLoader.getWordSet( exclusionlist );
setPreviousTokenStream(null); // force a new stemmer to be created
}
+
/**
- * Creates {@link TokenStreamComponents} used to tokenize all the text in the provided {@link Reader}.
- *
- * @return {@link TokenStreamComponents} built from a {@link StandardTokenizer} filtered with
- * {@link LowerCaseFilter}, {@link StandardFilter}, {@link StopFilter}, and
- * {@link BrazilianStemFilter}.
+ * Creates
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * used to tokenize all the text in the provided {@link Reader}.
+ *
+ * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link LowerCaseFilter}, {@link StandardFilter}, {@link StopFilter},
+ * and {@link BrazilianStemFilter}.
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/cjk/CJKAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/cjk/CJKAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/cjk/CJKAnalyzer.java (working copy)
@@ -19,7 +19,6 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CharArraySet;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.StopwordAnalyzerBase;
import org.apache.lucene.analysis.Tokenizer;
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/cjk/CJKTokenizer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/cjk/CJKTokenizer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/cjk/CJKTokenizer.java (working copy)
@@ -197,7 +197,7 @@
|| (ub == Character.UnicodeBlock.HALFWIDTH_AND_FULLWIDTH_FORMS)
) {
if (ub == Character.UnicodeBlock.HALFWIDTH_AND_FULLWIDTH_FORMS) {
- int i = (int) c;
+ int i = c;
if (i >= 65281 && i <= 65374) {
// convert certain HALFWIDTH_AND_FULLWIDTH_FORMS to BASIC_LATIN
i = i - 65248;
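The change above drops a cast that was always redundant: a char widens to int implicitly. For reference, the surrounding branch folds fullwidth forms (U+FF01 through U+FF5E, code points 65281 through 65374) onto Basic Latin by subtracting the fixed offset 65248 (0xFEE0). A minimal standalone sketch of that arithmetic, with hypothetical names rather than Lucene API:

  public class FullwidthFoldDemo {
    static char foldFullwidth(char c) {
      int i = c; // widening conversion, no (int) cast needed
      if (i >= 65281 && i <= 65374) {
        i = i - 65248; // e.g. fullwidth 'A' (65313) becomes ASCII 'A' (65)
      }
      return (char) i;
    }

    public static void main(String[] args) {
      System.out.println(foldFullwidth('\uFF21')); // prints A
      System.out.println(foldFullwidth('\uFF15')); // prints 5
    }
  }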
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/cn/ChineseAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/cn/ChineseAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/cn/ChineseAnalyzer.java (working copy)
@@ -20,7 +20,6 @@
import java.io.Reader;
import org.apache.lucene.analysis.ReusableAnalyzerBase;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.standard.StandardAnalyzer; // javadoc @link
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.Tokenizer;
@@ -35,11 +34,13 @@
public final class ChineseAnalyzer extends ReusableAnalyzerBase {
/**
- * Creates {@link TokenStreamComponents} used to tokenize all the text in the
- * provided {@link Reader}.
+ * Creates
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * used to tokenize all the text in the provided {@link Reader}.
*
- * @return {@link TokenStreamComponents} built from a
- * {@link ChineseTokenizer} filtered with {@link ChineseFilter}
+ * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link ChineseTokenizer} filtered with
+ * {@link ChineseFilter}
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/HyphenationCompoundWordTokenFilter.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/HyphenationCompoundWordTokenFilter.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/HyphenationCompoundWordTokenFilter.java (working copy)
@@ -113,7 +113,7 @@
* strings.
*/
public HyphenationCompoundWordTokenFilter(Version matchVersion, TokenStream input,
- HyphenationTree hyphenator, Set dictionary) {
+ HyphenationTree hyphenator, Set<?> dictionary) {
this(input, hyphenator, dictionary, DEFAULT_MIN_WORD_SIZE,
DEFAULT_MIN_SUBWORD_SIZE, DEFAULT_MAX_SUBWORD_SIZE, false);
}
@@ -145,7 +145,7 @@
* Add only the longest matching subword to the stream
*/
public HyphenationCompoundWordTokenFilter(Version matchVersion, TokenStream input,
- HyphenationTree hyphenator, Set dictionary, int minWordSize,
+ HyphenationTree hyphenator, Set<?> dictionary, int minWordSize,
int minSubwordSize, int maxSubwordSize, boolean onlyLongestMatch) {
super(matchVersion, input, dictionary, minWordSize, minSubwordSize, maxSubwordSize,
onlyLongestMatch);
@@ -201,7 +201,7 @@
*/
@Deprecated
public HyphenationCompoundWordTokenFilter(TokenStream input,
- HyphenationTree hyphenator, Set dictionary) {
+ HyphenationTree hyphenator, Set<?> dictionary) {
this(Version.LUCENE_30, input, hyphenator, dictionary, DEFAULT_MIN_WORD_SIZE,
DEFAULT_MIN_SUBWORD_SIZE, DEFAULT_MAX_SUBWORD_SIZE, false);
}
@@ -223,7 +223,7 @@
*/
@Deprecated
public HyphenationCompoundWordTokenFilter(TokenStream input,
- HyphenationTree hyphenator, Set dictionary, int minWordSize,
+ HyphenationTree hyphenator, Set<?> dictionary, int minWordSize,
int minSubwordSize, int maxSubwordSize, boolean onlyLongestMatch) {
super(Version.LUCENE_30, input, dictionary, minWordSize, minSubwordSize, maxSubwordSize,
onlyLongestMatch);
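With the raw Set parameters generified to Set<?>, callers can hand in a Set<String> dictionary without casts or warnings. A hedged usage sketch, assuming a 3.1-era trunk classpath; getHyphenationTree is this class's own static loader, but the pattern file name, dictionary contents, and tokenizer wiring below are illustrative only:

  import java.io.StringReader;
  import java.util.HashSet;
  import java.util.Set;
  import org.apache.lucene.analysis.TokenStream;
  import org.apache.lucene.analysis.WhitespaceTokenizer;
  import org.apache.lucene.analysis.compound.HyphenationCompoundWordTokenFilter;
  import org.apache.lucene.analysis.compound.hyphenation.HyphenationTree;
  import org.apache.lucene.util.Version;

  public class CompoundDemo {
    public static TokenStream build(String text) throws Exception {
      HyphenationTree hyphenator =
          HyphenationCompoundWordTokenFilter.getHyphenationTree("de_hyph.xml");
      Set<String> dictionary = new HashSet<String>(); // matches Set<?> directly
      dictionary.add("fuss");
      dictionary.add("ball");
      return new HyphenationCompoundWordTokenFilter(Version.LUCENE_31,
          new WhitespaceTokenizer(Version.LUCENE_31, new StringReader(text)),
          hyphenator, dictionary);
    }
  }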
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/CharVector.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/CharVector.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/CharVector.java (working copy)
@@ -83,7 +83,7 @@
@Override
public Object clone() {
- CharVector cv = new CharVector((char[]) array.clone(), blockSize);
+ CharVector cv = new CharVector(array.clone(), blockSize);
cv.n = this.n;
return cv;
}
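The removed (char[]) cast is dead weight: since Java 5, covariant return types let array.clone() return char[] directly. The same reasoning covers the termAtt.termBuffer().clone() casts dropped from the n-gram filters and the TernaryTree clones further down. A minimal demonstration, not Lucene code:

  public class CloneDemo {
    public static void main(String[] args) {
      char[] original = {'a', 'b', 'c'};
      char[] copy = original.clone(); // no cast required since Java 5
      System.out.println(copy != original);               // true: distinct array
      System.out.println(new String(copy).equals("abc")); // true: same contents
    }
  }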
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/Hyphenation.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/Hyphenation.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/Hyphenation.java (working copy)
@@ -27,11 +27,6 @@
private int[] hyphenPoints;
/**
- * number of hyphenation points in word
- */
- private int len;
-
- /**
* rawWord as made of alternating strings and {@link Hyphen Hyphen} instances
*/
Hyphenation(int[] points) {
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/HyphenationTree.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/HyphenationTree.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/HyphenationTree.java (working copy)
@@ -44,7 +44,7 @@
/**
* This map stores hyphenation exceptions
*/
- protected HashMap stoplist;
+ protected HashMap<String,ArrayList<Object>> stoplist;
/**
* This map stores the character classes
@@ -57,7 +57,7 @@
private transient TernaryTree ivalues;
public HyphenationTree() {
- stoplist = new HashMap(23); // usually a small table
+ stoplist = new HashMap<String,ArrayList<Object>>(23); // usually a small table
classmap = new TernaryTree();
vspace = new ByteVector();
vspace.alloc(1); // this reserves index 0, which we don't use
@@ -363,7 +363,7 @@
if (stoplist.containsKey(sw)) {
// assume only simple hyphens (Hyphen.pre="-", Hyphen.post = Hyphen.no =
// null)
- ArrayList hw = stoplist.get(sw);
+ ArrayList<Object> hw = stoplist.get(sw);
int j = 0;
for (i = 0; i < hw.size(); i++) {
Object o = hw.get(i);
@@ -443,7 +443,7 @@
* @param hyphenatedword a vector of alternating strings and
* {@link Hyphen hyphen} objects.
*/
- public void addException(String word, ArrayList hyphenatedword) {
+ public void addException(String word, ArrayList<Object> hyphenatedword) {
stoplist.put(word, hyphenatedword);
}
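Generifying stoplist to HashMap<String,ArrayList<Object>> encodes the shape the javadoc above describes: each exception entry alternates String fragments with Hyphen markers. A hedged sketch of consuming one such entry, mirroring the containsKey branch earlier in this file (class and method names are illustrative):

  import java.util.ArrayList;

  public class ExceptionListDemo {
    // String items are literal fragments; anything else stands in for a
    // simple Hyphen marker (pre = "-", no = post = null), as the comment
    // in the hunk above assumes.
    static String flatten(ArrayList<Object> hyphenatedWord) {
      StringBuilder sb = new StringBuilder();
      for (Object o : hyphenatedWord) {
        if (o instanceof String) {
          sb.append((String) o);
        } else {
          sb.append('-');
        }
      }
      return sb.toString();
    }

    public static void main(String[] args) {
      ArrayList<Object> entry = new ArrayList<Object>();
      entry.add("lu");
      entry.add(new Object()); // placeholder for a Hyphen instance
      entry.add("cene");
      System.out.println(flatten(entry)); // prints lu-cene
    }
  }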
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/PatternConsumer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/PatternConsumer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/PatternConsumer.java (working copy)
@@ -42,7 +42,7 @@
* his own hyphenation. A hyphenatedword is a vector of alternating String's
* and {@link Hyphen Hyphen} instances
*/
- void addException(String word, ArrayList hyphenatedword);
+ void addException(String word, ArrayList<Object> hyphenatedword);
/**
* Add hyphenation patterns.
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/PatternParser.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/PatternParser.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/PatternParser.java (working copy)
@@ -51,7 +51,7 @@
StringBuilder token;
- ArrayList exception;
+ ArrayList<Object> exception;
char hyphenChar;
@@ -199,8 +199,8 @@
return pat.toString();
}
- protected ArrayList normalizeException(ArrayList ex) {
- ArrayList res = new ArrayList();
+ protected ArrayList<Object> normalizeException(ArrayList<?> ex) {
+ ArrayList<Object> res = new ArrayList<Object>();
for (int i = 0; i < ex.size(); i++) {
Object item = ex.get(i);
if (item instanceof String) {
@@ -230,7 +230,7 @@
return res;
}
- protected String getExceptionWord(ArrayList ex) {
+ protected String getExceptionWord(ArrayList<?> ex) {
StringBuilder res = new StringBuilder();
for (int i = 0; i < ex.size(); i++) {
Object item = ex.get(i);
@@ -291,7 +291,7 @@
currElement = ELEM_PATTERNS;
} else if (local.equals("exceptions")) {
currElement = ELEM_EXCEPTIONS;
- exception = new ArrayList();
+ exception = new ArrayList<Object>();
} else if (local.equals("hyphen")) {
if (token.length() > 0) {
exception.add(token.toString());
@@ -308,6 +308,7 @@
* java.lang.String, java.lang.String)
*/
@Override
+ @SuppressWarnings("unchecked")
public void endElement(String uri, String local, String raw) {
if (token.length() > 0) {
@@ -319,7 +320,7 @@
case ELEM_EXCEPTIONS:
exception.add(word);
exception = normalizeException(exception);
- consumer.addException(getExceptionWord(exception),
+ consumer.addException(getExceptionWord(exception),
(ArrayList) exception.clone());
break;
case ELEM_PATTERNS:
@@ -344,6 +345,7 @@
/**
* @see org.xml.sax.ContentHandler#characters(char[], int, int)
*/
+ @SuppressWarnings("unchecked")
@Override
public void characters(char ch[], int start, int length) {
StringBuffer chars = new StringBuffer(length);
@@ -428,7 +430,7 @@
System.out.println("class: " + c);
}
- public void addException(String w, ArrayList e) {
+ public void addException(String w, ArrayList<Object> e) {
System.out.println("exception: " + w + " : " + e.toString());
}
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/TernaryTree.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/TernaryTree.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/compound/hyphenation/TernaryTree.java (working copy)
@@ -249,7 +249,7 @@
public static int strcmp(String str, char[] a, int start) {
int i, d, len = str.length();
for (i = 0; i < len; i++) {
- d = (int) str.charAt(i) - a[start + i];
+ d = str.charAt(i) - a[start + i];
if (d != 0) {
return d;
}
@@ -258,7 +258,7 @@
}
}
if (a[start + i] != 0) {
- return (int) -a[start + i];
+ return -a[start + i];
}
return 0;
@@ -351,10 +351,10 @@
@Override
public Object clone() {
TernaryTree t = new TernaryTree();
- t.lo = (char[]) this.lo.clone();
- t.hi = (char[]) this.hi.clone();
- t.eq = (char[]) this.eq.clone();
- t.sc = (char[]) this.sc.clone();
+ t.lo = this.lo.clone();
+ t.hi = this.hi.clone();
+ t.eq = this.eq.clone();
+ t.sc = this.sc.clone();
t.kv = (CharVector) this.kv.clone();
t.root = this.root;
t.freenode = this.freenode;
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/cz/CzechAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/cz/CzechAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/cz/CzechAnalyzer.java (working copy)
@@ -18,7 +18,6 @@
*/
import org.apache.lucene.analysis.ReusableAnalyzerBase;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.KeywordMarkerTokenFilter;
@@ -216,16 +215,20 @@
stoptable = Collections.emptySet();
}
}
+
/**
- * Creates {@link TokenStreamComponents} used to tokenize all the text in the provided
- * {@link Reader}.
+ * Creates
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * used to tokenize all the text in the provided {@link Reader}.
*
- * @return {@link TokenStreamComponents} built from a {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link LowerCaseFilter},
- * {@link StopFilter}, and {@link CzechStemFilter} (only if version is
- * >= LUCENE_31). If a version is >= LUCENE_31 and a stem exclusion set
- * is provided via {@link #CzechAnalyzer(Version, Set, Set)} a
- * {@link KeywordMarkerTokenFilter} is added before {@link CzechStemFilter}.
+ * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter},
+ * and {@link CzechStemFilter} (only if version is >= LUCENE_31). If
+ * a version is >= LUCENE_31 and a stem exclusion set is provided via
+ * {@link #CzechAnalyzer(Version, Set, Set)} a
+ * {@link KeywordMarkerTokenFilter} is added before
+ * {@link CzechStemFilter}.
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/da/DanishAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/da/DanishAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/da/DanishAnalyzer.java (working copy)
@@ -30,7 +30,6 @@
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WordlistLoader;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -106,13 +105,16 @@
}
/**
- * Creates a {@link TokenStreamComponents} which tokenizes all the text in the provided
- * {@link Reader}.
+ * Creates a
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * which tokenizes all the text in the provided {@link Reader}.
*
- * @return A {@link TokenStreamComponents} built from an {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link LowerCaseFilter},
- * {@link StopFilter}, {@link KeywordMarkerTokenFilter} if a stem
- * exclusion set is provided and {@link SnowballFilter}.
+ * @return A
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter},
+ * {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided and {@link SnowballFilter}.
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/de/GermanAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/de/GermanAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/de/GermanAnalyzer.java (working copy)
@@ -29,7 +29,6 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.LowerCaseFilter;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.KeywordMarkerTokenFilter;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.StopwordAnalyzerBase;
@@ -222,16 +221,17 @@
exclusionSet = WordlistLoader.getWordSet(exclusionlist);
setPreviousTokenStream(null); // force a new stemmer to be created
}
-
+
/**
- * Creates {@link TokenStreamComponents} used to tokenize all the text in the
- * provided {@link Reader}.
+ * Creates
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * used to tokenize all the text in the provided {@link Reader}.
*
- * @return {@link TokenStreamComponents} built from a
- * {@link StandardTokenizer} filtered with {@link StandardFilter},
- * {@link LowerCaseFilter}, {@link StopFilter},
- * {@link KeywordMarkerTokenFilter} if a stem exclusion set is provided, and
- * {@link SnowballFilter}
+ * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter},
+ * {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided, and {@link SnowballFilter}
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/el/GreekAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/el/GreekAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/el/GreekAnalyzer.java (working copy)
@@ -19,7 +19,6 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CharArraySet;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.StopwordAnalyzerBase;
import org.apache.lucene.analysis.TokenStream;
@@ -120,15 +119,17 @@
{
this(matchVersion, stopwords.keySet());
}
-
- /**
- * Creates {@link TokenStreamComponents} used to tokenize all the text in the
- * provided {@link Reader}.
- *
- * @return {@link TokenStreamComponents} built from a
- * {@link StandardTokenizer} filtered with
- * {@link GreekLowerCaseFilter}, {@link StandardFilter} and {@link StopFilter}
- */
+
+ /**
+ * Creates
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * used to tokenize all the text in the provided {@link Reader}.
+ *
+ * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link GreekLowerCaseFilter}, {@link StandardFilter} and
+ * {@link StopFilter}
+ */
@Override
protected TokenStreamComponents createComponents(String fieldName,
Reader reader) {
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/en/EnglishAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/en/EnglishAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/en/EnglishAnalyzer.java (working copy)
@@ -29,7 +29,6 @@
import org.apache.lucene.analysis.StopwordAnalyzerBase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -90,13 +89,16 @@
}
/**
- * Creates a {@link TokenStreamComponents} which tokenizes all the text in the provided
- * {@link Reader}.
+ * Creates a
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * which tokenizes all the text in the provided {@link Reader}.
*
- * @return A {@link TokenStreamComponents} built from an {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link LowerCaseFilter},
- * {@link StopFilter}, {@link KeywordMarkerTokenFilter} if a stem
- * exclusion set is provided and {@link PorterStemFilter}.
+ * @return A
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter},
+ * {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided and {@link PorterStemFilter}.
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/es/SpanishAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/es/SpanishAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/es/SpanishAnalyzer.java (working copy)
@@ -30,7 +30,6 @@
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WordlistLoader;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -106,13 +105,16 @@
}
/**
- * Creates a {@link TokenStreamComponents} which tokenizes all the text in the provided
- * {@link Reader}.
+ * Creates a
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * which tokenizes all the text in the provided {@link Reader}.
*
- * @return A {@link TokenStreamComponents} built from an {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link LowerCaseFilter},
- * {@link StopFilter}, {@link KeywordMarkerTokenFilter} if a stem
- * exclusion set is provided and {@link SnowballFilter}.
+ * @return A
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter},
+ * {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided and {@link SnowballFilter}.
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/fa/PersianAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/fa/PersianAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/fa/PersianAnalyzer.java (working copy)
@@ -25,7 +25,6 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseFilter;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.StopwordAnalyzerBase;
import org.apache.lucene.analysis.TokenStream;
@@ -136,12 +135,13 @@
}
/**
- * Creates {@link TokenStreamComponents} used to tokenize all the text in the provided
- * {@link Reader}.
+ * Creates
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * used to tokenize all the text in the provided {@link Reader}.
*
- * @return {@link TokenStreamComponents} built from a {@link ArabicLetterTokenizer}
- * filtered with {@link LowerCaseFilter},
- * {@link ArabicNormalizationFilter},
+ * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from an {@link ArabicLetterTokenizer} filtered with
+ * {@link LowerCaseFilter}, {@link ArabicNormalizationFilter},
* {@link PersianNormalizationFilter} and Persian Stop words
*/
@Override
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/fi/FinnishAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/fi/FinnishAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/fi/FinnishAnalyzer.java (working copy)
@@ -30,7 +30,6 @@
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WordlistLoader;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -106,13 +105,16 @@
}
/**
- * Creates a {@link TokenStreamComponents} which tokenizes all the text in the provided
- * {@link Reader}.
+ * Creates a
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * which tokenizes all the text in the provided {@link Reader}.
*
- * @return A {@link TokenStreamComponents} built from an {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link LowerCaseFilter},
- * {@link StopFilter}, {@link KeywordMarkerTokenFilter} if a stem
- * exclusion set is provided and {@link SnowballFilter}.
+ * @return A
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter},
+ * {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided and {@link SnowballFilter}.
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/fr/FrenchAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/fr/FrenchAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/fr/FrenchAnalyzer.java (working copy)
@@ -20,7 +20,6 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.LowerCaseFilter;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.KeywordMarkerTokenFilter;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.StopwordAnalyzerBase;
@@ -225,14 +224,16 @@
}
/**
- * Creates {@link TokenStreamComponents} used to tokenize all the text in the provided
- * {@link Reader}.
- *
- * @return {@link TokenStreamComponents} built from a {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link ElisionFilter},
+ * Creates
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * used to tokenize all the text in the provided {@link Reader}.
+ *
+ * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link ElisionFilter},
* {@link LowerCaseFilter}, {@link StopFilter},
- * {@link KeywordMarkerTokenFilter} if a stem exclusion set is provided,
- * and {@link SnowballFilter}
+ * {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided, and {@link SnowballFilter}
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/hi/HindiAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/hi/HindiAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/hi/HindiAnalyzer.java (working copy)
@@ -22,7 +22,6 @@
import java.util.Set;
import org.apache.lucene.analysis.LowerCaseFilter;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.KeywordMarkerTokenFilter;
import org.apache.lucene.analysis.StopFilter;
@@ -106,15 +105,16 @@
}
/**
- * Creates {@link TokenStreamComponents} used to tokenize all the text in the provided
- * {@link Reader}.
+ * Creates
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * used to tokenize all the text in the provided {@link Reader}.
*
- * @return {@link TokenStreamComponents} built from a {@link IndicTokenizer}
- * filtered with {@link LowerCaseFilter},
- * {@link IndicNormalizationFilter},
- * {@link HindiNormalizationFilter},
- * {@link KeywordMarkerTokenFilter} if a stem exclusion set is provided,
- * {@link HindiStemFilter}, and Hindi Stop words
+ * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from an {@link IndicTokenizer} filtered with
+ * {@link LowerCaseFilter}, {@link IndicNormalizationFilter},
+ * {@link HindiNormalizationFilter}, {@link KeywordMarkerTokenFilter}
+ * if a stem exclusion set is provided, {@link HindiStemFilter}, and
+ * Hindi Stop words
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/hu/HungarianAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/hu/HungarianAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/hu/HungarianAnalyzer.java (working copy)
@@ -30,7 +30,6 @@
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WordlistLoader;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -106,13 +105,16 @@
}
/**
- * Creates a {@link TokenStreamComponents} which tokenizes all the text in the provided
- * {@link Reader}.
+ * Creates a
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * which tokenizes all the text in the provided {@link Reader}.
*
- * @return A {@link TokenStreamComponents} built from an {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link LowerCaseFilter},
- * {@link StopFilter}, {@link KeywordMarkerTokenFilter} if a stem
- * exclusion set is provided and {@link SnowballFilter}.
+ * @return A
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter},
+ * {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided and {@link SnowballFilter}.
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/it/ItalianAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/it/ItalianAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/it/ItalianAnalyzer.java (working copy)
@@ -30,7 +30,6 @@
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WordlistLoader;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -106,13 +105,16 @@
}
/**
- * Creates a {@link TokenStreamComponents} which tokenizes all the text in the provided
- * {@link Reader}.
+ * Creates a
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * which tokenizes all the text in the provided {@link Reader}.
*
- * @return A {@link TokenStreamComponents} built from an {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link LowerCaseFilter},
- * {@link StopFilter}, {@link KeywordMarkerTokenFilter} if a stem
- * exclusion set is provided and {@link SnowballFilter}.
+ * @return A
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter},
+ * {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided and {@link SnowballFilter}.
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/miscellaneous/PatternAnalyzer.java (working copy)
@@ -311,7 +311,7 @@
return new String(output, 0, len);
} finally {
- if (input != null) input.close();
+ input.close();
}
}
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.java (working copy)
@@ -124,7 +124,7 @@
if (!input.incrementToken()) {
return false;
} else {
- curTermBuffer = (char[]) termAtt.termBuffer().clone();
+ curTermBuffer = termAtt.termBuffer().clone();
curTermLength = termAtt.termLength();
curGramSize = minGram;
tokStart = offsetAtt.startOffset();
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenFilter.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenFilter.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/ngram/NGramTokenFilter.java (working copy)
@@ -79,7 +79,7 @@
if (!input.incrementToken()) {
return false;
} else {
- curTermBuffer = (char[]) termAtt.termBuffer().clone();
+ curTermBuffer = termAtt.termBuffer().clone();
curTermLength = termAtt.termLength();
curGramSize = minGram;
curPos = 0;
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/no/NorwegianAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/no/NorwegianAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/no/NorwegianAnalyzer.java (working copy)
@@ -30,7 +30,6 @@
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WordlistLoader;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -106,13 +105,16 @@
}
/**
- * Creates a {@link TokenStreamComponents} which tokenizes all the text in the provided
- * {@link Reader}.
+ * Creates a
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * which tokenizes all the text in the provided {@link Reader}.
*
- * @return A {@link TokenStreamComponents} built from an {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link LowerCaseFilter},
- * {@link StopFilter}, {@link KeywordMarkerTokenFilter} if a stem
- * exclusion set is provided and {@link SnowballFilter}.
+ * @return A
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter},
+ * {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided and {@link SnowballFilter}.
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/pt/PortugueseAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/pt/PortugueseAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/pt/PortugueseAnalyzer.java (working copy)
@@ -30,7 +30,6 @@
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WordlistLoader;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -106,13 +105,16 @@
}
/**
- * Creates a {@link TokenStreamComponents} which tokenizes all the text in the provided
- * {@link Reader}.
+ * Creates a
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * which tokenizes all the text in the provided {@link Reader}.
*
- * @return A {@link TokenStreamComponents} built from an {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link LowerCaseFilter},
- * {@link StopFilter}, {@link KeywordMarkerTokenFilter} if a stem
- * exclusion set is provided and {@link SnowballFilter}.
+ * @return A
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter},
+ * {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided and {@link SnowballFilter}.
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java (working copy)
@@ -192,7 +192,7 @@
* if there are stopwords, it is a StopFilter around wrapped.
*/
TokenStream withStopFilter;
- };
+ }
@Override
public TokenStream reusableTokenStream(String fieldName, Reader reader)
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/ro/RomanianAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/ro/RomanianAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/ro/RomanianAnalyzer.java (working copy)
@@ -29,7 +29,6 @@
import org.apache.lucene.analysis.StopwordAnalyzerBase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -110,13 +109,16 @@
}
/**
- * Creates a {@link TokenStreamComponents} which tokenizes all the text in the provided
- * {@link Reader}.
+ * Creates a
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * which tokenizes all the text in the provided {@link Reader}.
*
- * @return A {@link TokenStreamComponents} built from an {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link LowerCaseFilter},
- * {@link StopFilter}, {@link KeywordMarkerTokenFilter} if a stem
- * exclusion set is provided and {@link SnowballFilter}.
+ * @return A
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter},
+ * {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided and {@link SnowballFilter}.
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/ru/RussianAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/ru/RussianAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/ru/RussianAnalyzer.java (working copy)
@@ -26,7 +26,6 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.LowerCaseFilter;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -160,16 +159,17 @@
this(matchVersion, stopwords.keySet());
}
- /**
- * Creates {@link TokenStreamComponents} used to tokenize all the text in the
- * provided {@link Reader}.
- *
- * @return {@link TokenStreamComponents} built from a
- * {@link StandardTokenizer} filtered with {@link StandardFilter},
- * {@link LowerCaseFilter}, {@link StopFilter},
- * {@link KeywordMarkerTokenFilter} if a stem exclusion set is provided,
- * and {@link SnowballFilter}
- */
+ /**
+ * Creates
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * used to tokenize all the text in the provided {@link Reader}.
+ *
+ * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter},
+ * {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided, and {@link SnowballFilter}
+ */
@Override
protected TokenStreamComponents createComponents(String fieldName,
Reader reader) {
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/ru/RussianStemmer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/ru/RussianStemmer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/ru/RussianStemmer.java (working copy)
@@ -26,7 +26,7 @@
class RussianStemmer
{
// positions of RV, R1 and R2 respectively
- private int RV, R1, R2;
+ private int RV, /*R1,*/ R2;
// letters (currently unused letters are commented out)
private final static char A = '\u0430';
@@ -263,11 +263,7 @@
if (!findAndRemoveEnding(stemmingZone, adjectiveEndings))
return false;
// if adjective ending was found, try for participle ending.
- // variable r is unused, we are just interested in the side effect of
- // findAndRemoveEnding():
- boolean r =
- findAndRemoveEnding(stemmingZone, participleEndings1, participle1Predessors)
- ||
+ if (!findAndRemoveEnding(stemmingZone, participleEndings1, participle1Predessors))
findAndRemoveEnding(stemmingZone, participleEndings2);
return true;
}
@@ -391,7 +387,7 @@
private void markPositions(String word)
{
RV = 0;
- R1 = 0;
+// R1 = 0;
R2 = 0;
int i = 0;
// find RV
@@ -409,7 +405,7 @@
}
if (word.length() - 1 < ++i)
return; // R1 zone is empty
- R1 = i;
+// R1 = i;
// find R2
while (word.length() > i && !isVowel(word.charAt(i)))
{
@@ -532,13 +528,9 @@
if (!perfectiveGerund(stemmingZone))
{
reflexive(stemmingZone);
- // variable r is unused, we are just interested in the flow that gets
- // created by logical expression: apply adjectival(); if that fails,
- // apply verb() etc
- boolean r =
- adjectival(stemmingZone)
- || verb(stemmingZone)
- || noun(stemmingZone);
+ if (!adjectival(stemmingZone))
+ if (!verb(stemmingZone))
+ noun(stemmingZone);
}
// Step 2
removeI(stemmingZone);
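Both hunks in this file replace the idiom of assigning an unused boolean just for its side effects with explicit control flow. The rewrite preserves behavior because || short-circuits: the second call runs only when the first returns false. A standalone illustration with hypothetical names:

  public class ShortCircuitDemo {
    static boolean a() { System.out.println("a"); return false; }
    static boolean b() { System.out.println("b"); return true; }

    public static void main(String[] args) {
      boolean r = a() || b(); // old style: r is never read, only the calls matter
      if (!a())               // new style: same call sequence, no dead variable
        b();
    }
  }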
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilter.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilter.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/shingle/ShingleFilter.java (working copy)
@@ -391,8 +391,8 @@
}
/**
- * {@see #advance()}
* @return the current value.
+ * @see #advance()
*/
public int getValue() {
return value;
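The {@see #advance()} form deleted above is not a valid javadoc construct: @see is a block tag that stands on its own line, while inline cross-references use {@link}. A compilable illustration of the corrected pattern (hypothetical class, mirroring the method in this hunk):

  public class SeeTagDemo {
    private int value;

    void advance() { value++; }

    /**
     * @return the current value.
     * @see #advance()
     */
    public int getValue() { return value; }
  }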
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/shingle/ShingleMatrixFilter.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/shingle/ShingleMatrixFilter.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/shingle/ShingleMatrixFilter.java (working copy)
@@ -30,7 +30,6 @@
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.EmptyTokenStream;
import org.apache.lucene.analysis.payloads.PayloadHelper;
-import org.apache.lucene.analysis.shingle.ShingleMatrixFilter.Matrix.Column;
import org.apache.lucene.analysis.shingle.ShingleMatrixFilter.Matrix.Column.Row;
import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/sinks/DateRecognizerSinkFilter.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/sinks/DateRecognizerSinkFilter.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/sinks/DateRecognizerSinkFilter.java (working copy)
@@ -19,7 +19,6 @@
import java.text.DateFormat;
import java.text.ParseException;
-import java.text.SimpleDateFormat;
import java.util.Date;
import org.apache.lucene.analysis.TeeSinkTokenFilter.SinkFilter;
@@ -42,7 +41,7 @@
* Uses {@link java.text.SimpleDateFormat#getDateInstance()} as the {@link java.text.DateFormat} object.
*/
public DateRecognizerSinkFilter() {
- this(SimpleDateFormat.getDateInstance());
+ this(DateFormat.getDateInstance());
}
public DateRecognizerSinkFilter(DateFormat dateFormat) {
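getDateInstance() is declared static on DateFormat; invoking it through the SimpleDateFormat subclass reference only obscured where it lives, so the change is behavior-neutral and lets the now-unused import go. A minimal sketch (hypothetical class name):

  import java.text.DateFormat;
  import java.util.Date;

  public class DateInstanceDemo {
    public static void main(String[] args) {
      DateFormat df = DateFormat.getDateInstance(); // declared on DateFormat
      System.out.println(df.format(new Date()));    // locale-default date style
    }
  }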
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/sv/SwedishAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/sv/SwedishAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/sv/SwedishAnalyzer.java (working copy)
@@ -30,7 +30,6 @@
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WordlistLoader;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -106,13 +105,16 @@
}
/**
- * Creates a {@link TokenStreamComponents} which tokenizes all the text in the provided
- * {@link Reader}.
+ * Creates a
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * which tokenizes all the text in the provided {@link Reader}.
*
- * @return A {@link TokenStreamComponents} built from an {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link LowerCaseFilter},
- * {@link StopFilter}, {@link KeywordMarkerTokenFilter} if a stem
- * exclusion set is provided and {@link SnowballFilter}.
+ * @return A
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter},
+ * {@link KeywordMarkerTokenFilter} if a stem exclusion set is
+ * provided and {@link SnowballFilter}.
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/th/ThaiAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/th/ThaiAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/th/ThaiAnalyzer.java (working copy)
@@ -19,7 +19,6 @@
import java.io.Reader;
import org.apache.lucene.analysis.ReusableAnalyzerBase;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.StopAnalyzer;
import org.apache.lucene.analysis.StopFilter;
@@ -45,12 +44,14 @@
}
/**
- * Creates {@link TokenStreamComponents} used to tokenize all the text in the
- * provided {@link Reader}.
+ * Creates
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * used to tokenize all the text in the provided {@link Reader}.
*
- * @return {@link TokenStreamComponents} built from a
- * {@link StandardTokenizer} filtered with {@link StandardFilter},
- * {@link ThaiWordFilter}, and {@link StopFilter}
+ * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link ThaiWordFilter}, and
+ * {@link StopFilter}
*/
@Override
protected TokenStreamComponents createComponents(String fieldName,
Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/tr/TurkishAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/java/org/apache/lucene/analysis/tr/TurkishAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/tr/TurkishAnalyzer.java (working copy)
@@ -28,7 +28,6 @@
import org.apache.lucene.analysis.StopwordAnalyzerBase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -109,11 +108,14 @@
}
/**
- * Creates a {@link TokenStreamComponents} which tokenizes all the text in the provided
- * {@link Reader}.
+ * Creates a
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * which tokenizes all the text in the provided {@link Reader}.
*
- * @return A {@link TokenStreamComponents} built from an {@link StandardTokenizer}
- * filtered with {@link StandardFilter}, {@link TurkishLowerCaseFilter},
+ * @return A
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ *         built from a {@link StandardTokenizer} filtered with
+ * {@link StandardFilter}, {@link TurkishLowerCaseFilter},
* {@link StopFilter}, {@link KeywordMarkerTokenFilter} if a stem
* exclusion set is provided and {@link SnowballFilter}.
*/
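The notable substitution in the Turkish chain is TurkishLowerCaseFilter in place of the plain LowerCaseFilter. The reason is the dotted/dotless i distinction: under Turkish casing rules, 'I' lowercases to dotless 'ı' (U+0131) and 'İ' to 'i', which locale-insensitive lowercasing gets wrong. A self-contained JDK demonstration, independent of Lucene:

    import java.util.Locale;

    public class TurkishCasing {
      public static void main(String[] args) {
        Locale tr = new Locale("tr");
        System.out.println("I".toLowerCase(tr)); // prints "ı" (U+0131), not "i"
        System.out.println("İ".toLowerCase(tr)); // prints "i"
      }
    }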
Index: contrib/analyzers/common/src/java/org/tartarus/snowball/Among.java
===================================================================
--- contrib/analyzers/common/src/java/org/tartarus/snowball/Among.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/tartarus/snowball/Among.java (working copy)
@@ -60,4 +60,4 @@
public final Method method; /* method to use if substring matches */
public final SnowballProgram methodobject; /* object to invoke method on */
-};
+}
Index: contrib/analyzers/common/src/java/org/tartarus/snowball/SnowballProgram.java
===================================================================
--- contrib/analyzers/common/src/java/org/tartarus/snowball/SnowballProgram.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/tartarus/snowball/SnowballProgram.java (working copy)
@@ -270,7 +270,7 @@
@Deprecated
protected boolean eq_v(StringBuilder s)
{
- return eq_s(s.length(), (CharSequence)s);
+ return eq_s(s.length(), s);
}
protected boolean eq_v_b(CharSequence s)
@@ -280,7 +280,7 @@
/** @deprecated for binary back compat. Will be removed in Lucene 4.0 */
@Deprecated
protected boolean eq_v_b(StringBuilder s)
- { return eq_s_b(s.length(), (CharSequence)s);
+ { return eq_s_b(s.length(), s);
}
protected int find_among(Among v[], int v_size)
@@ -562,5 +562,5 @@
}
*/
-};
+}
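The two cast removals above are pure noise reduction: StringBuilder already implements CharSequence, so the up-cast was a no-op, and (assuming no competing StringBuilder overload of eq_s/eq_s_b is in scope) overload resolution is unchanged. A plain-Java illustration:

    public class UpCast {
      static String describe(CharSequence s) { return "CharSequence of length " + s.length(); }

      public static void main(String[] args) {
        StringBuilder sb = new StringBuilder("stem");
        // Identical behavior: a StringBuilder is-a CharSequence, the cast adds nothing.
        System.out.println(describe(sb));
        System.out.println(describe((CharSequence) sb));
      }
    }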
Index: contrib/analyzers/common/src/java/org/tartarus/snowball/TestApp.java
===================================================================
--- contrib/analyzers/common/src/java/org/tartarus/snowball/TestApp.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/tartarus/snowball/TestApp.java (working copy)
@@ -54,7 +54,7 @@
return;
}
- Class stemClass = Class.forName("org.tartarus.snowball.ext." +
+      Class<?> stemClass = Class.forName("org.tartarus.snowball.ext." +
args[0] + "Stemmer");
SnowballProgram stemmer = (SnowballProgram) stemClass.newInstance();
Method stemMethod = stemClass.getMethod("stem", new Class[0]);
@@ -89,7 +89,7 @@
int character;
while ((character = reader.read()) != -1) {
char ch = (char) character;
- if (Character.isWhitespace((char) ch)) {
+ if (Character.isWhitespace(ch)) {
if (input.length() > 0) {
stemmer.setCurrent(input.toString());
for (int i = repeat; i != 0; i--) {
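The Class<?> hunk is the usual raw-type-to-wildcard generification, and the (char) cast was redundant since ch is already a char. The reflective load-and-stem sequence those lines live in works as sketched below; the hard-coded language name and sample word are illustrative:

    import java.lang.reflect.Method;
    import org.tartarus.snowball.SnowballProgram;

    public class StemOnce {
      public static void main(String[] args) throws Exception {
        // A wildcard suffices: all we need from the loaded class is a SnowballProgram.
        Class<?> stemClass = Class.forName("org.tartarus.snowball.ext.EnglishStemmer");
        SnowballProgram stemmer = (SnowballProgram) stemClass.newInstance();
        Method stemMethod = stemClass.getMethod("stem", new Class[0]);

        stemmer.setCurrent("running");            // word in
        stemMethod.invoke(stemmer, (Object[]) null);
        System.out.println(stemmer.getCurrent()); // expected: "run"
      }
    }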
Index: contrib/analyzers/common/src/java/org/tartarus/snowball/ext/DanishStemmer.java
===================================================================
--- contrib/analyzers/common/src/java/org/tartarus/snowball/ext/DanishStemmer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/tartarus/snowball/ext/DanishStemmer.java (working copy)
@@ -7,6 +7,7 @@
/**
* Generated class implementing code defined by a snowball script.
*/
+@SuppressWarnings("unused")
public class DanishStemmer extends SnowballProgram {
private Among a_0[] = {
@@ -252,14 +253,14 @@
// [, line 66
ket = cursor;
// literal, line 66
- if (!(eq_s_b(2, "st")))
+ if (!(eq_s_b(2, (CharSequence) "st")))
{
break lab0;
}
// ], line 66
bra = cursor;
// literal, line 66
- if (!(eq_s_b(2, "ig")))
+ if (!(eq_s_b(2, (CharSequence) "ig")))
{
break lab0;
}
@@ -312,7 +313,7 @@
case 2:
// (, line 72
// <-, line 72
- slice_from("l\u00F8s");
+ slice_from((CharSequence) "l\u00F8s");
break;
}
return true;
@@ -347,7 +348,7 @@
S_ch = slice_to(S_ch);
limit_backward = v_2;
// name ch, line 77
- if (!(eq_v_b(S_ch)))
+ if (!(eq_v_b((CharSequence) S_ch)))
{
return false;
}
@@ -356,6 +357,7 @@
return true;
}
+ @Override
public boolean stem() {
int v_1;
int v_2;
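The casts added throughout the generated stemmers, here and in every *Stemmer.java that follows, run in the opposite direction from the SnowballProgram cleanup, and they are overload steering rather than style: when a non-deprecated CharSequence overload sits next to a deprecated String or StringBuilder one kept for binary back-compat (see the @deprecated notes in SnowballProgram above), a bare literal binds to the more specific deprecated overload and trips a deprecation warning, while the (CharSequence) cast forces the supported variant. A self-contained illustration of the resolution rule; the overload pair here is assumed from those deprecation notes:

    public class OverloadSteering {
      static String f(String s)       { return "String overload (deprecated path)"; }
      static String f(CharSequence s) { return "CharSequence overload"; }

      public static void main(String[] args) {
        System.out.println(f("ig"));                // String is more specific, so it wins
        System.out.println(f((CharSequence) "ig")); // widening the argument picks the supported one
      }
    }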
Index: contrib/analyzers/common/src/java/org/tartarus/snowball/ext/DutchStemmer.java
===================================================================
--- contrib/analyzers/common/src/java/org/tartarus/snowball/ext/DutchStemmer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/tartarus/snowball/ext/DutchStemmer.java (working copy)
@@ -7,6 +7,7 @@
/**
* Generated class implementing code defined by a snowball script.
*/
+@SuppressWarnings("unused")
public class DutchStemmer extends SnowballProgram {
private Among a_0[] = {
@@ -109,27 +110,27 @@
case 1:
// (, line 45
// <-, line 45
- slice_from("a");
+ slice_from((CharSequence) "a");
break;
case 2:
// (, line 47
// <-, line 47
- slice_from("e");
+ slice_from((CharSequence) "e");
break;
case 3:
// (, line 49
// <-, line 49
- slice_from("i");
+ slice_from((CharSequence) "i");
break;
case 4:
// (, line 51
// <-, line 51
- slice_from("o");
+ slice_from((CharSequence) "o");
break;
case 5:
// (, line 53
// <-, line 53
- slice_from("u");
+ slice_from((CharSequence) "u");
break;
case 6:
// (, line 54
@@ -154,7 +155,7 @@
// [, line 57
bra = cursor;
// literal, line 57
- if (!(eq_s(1, "y")))
+ if (!(eq_s(1, (CharSequence) "y")))
{
cursor = v_3;
break lab2;
@@ -162,7 +163,7 @@
// ], line 57
ket = cursor;
// <-, line 57
- slice_from("Y");
+ slice_from((CharSequence) "Y");
} while (false);
// repeat, line 58
replab3: while(true)
@@ -187,7 +188,7 @@
lab8: do {
// (, line 59
// literal, line 59
- if (!(eq_s(1, "i")))
+ if (!(eq_s(1, (CharSequence) "i")))
{
break lab8;
}
@@ -198,20 +199,20 @@
break lab8;
}
// <-, line 59
- slice_from("I");
+ slice_from((CharSequence) "I");
break lab7;
} while (false);
cursor = v_6;
// (, line 60
// literal, line 60
- if (!(eq_s(1, "y")))
+ if (!(eq_s(1, (CharSequence) "y")))
{
break lab6;
}
// ], line 60
ket = cursor;
// <-, line 60
- slice_from("Y");
+ slice_from((CharSequence) "Y");
} while (false);
cursor = v_5;
break golab5;
@@ -340,12 +341,12 @@
case 1:
// (, line 78
// <-, line 78
- slice_from("y");
+ slice_from((CharSequence) "y");
break;
case 2:
// (, line 79
// <-, line 79
- slice_from("i");
+ slice_from((CharSequence) "i");
break;
case 3:
// (, line 80
@@ -415,7 +416,7 @@
// [, line 96
ket = cursor;
// literal, line 96
- if (!(eq_s_b(1, "e")))
+ if (!(eq_s_b(1, (CharSequence) "e")))
{
return false;
}
@@ -466,7 +467,7 @@
v_2 = limit - cursor;
lab0: do {
// literal, line 102
- if (!(eq_s_b(3, "gem")))
+ if (!(eq_s_b(3, (CharSequence) "gem")))
{
break lab0;
}
@@ -522,7 +523,7 @@
break lab0;
}
// <-, line 110
- slice_from("heid");
+ slice_from((CharSequence) "heid");
break;
case 2:
// (, line 113
@@ -566,7 +567,7 @@
// [, line 122
ket = cursor;
// literal, line 122
- if (!(eq_s_b(4, "heid")))
+ if (!(eq_s_b(4, (CharSequence) "heid")))
{
break lab2;
}
@@ -582,7 +583,7 @@
v_4 = limit - cursor;
lab3: do {
// literal, line 122
- if (!(eq_s_b(1, "c")))
+ if (!(eq_s_b(1, (CharSequence) "c")))
{
break lab3;
}
@@ -595,7 +596,7 @@
// [, line 123
ket = cursor;
// literal, line 123
- if (!(eq_s_b(2, "en")))
+ if (!(eq_s_b(2, (CharSequence) "en")))
{
break lab2;
}
@@ -642,7 +643,7 @@
// [, line 130
ket = cursor;
// literal, line 130
- if (!(eq_s_b(2, "ig")))
+ if (!(eq_s_b(2, (CharSequence) "ig")))
{
break lab6;
}
@@ -658,7 +659,7 @@
v_7 = limit - cursor;
lab7: do {
// literal, line 130
- if (!(eq_s_b(1, "e")))
+ if (!(eq_s_b(1, (CharSequence) "e")))
{
break lab7;
}
@@ -690,7 +691,7 @@
v_8 = limit - cursor;
lab8: do {
// literal, line 133
- if (!(eq_s_b(1, "e")))
+ if (!(eq_s_b(1, (CharSequence) "e")))
{
break lab8;
}
@@ -782,6 +783,7 @@
return true;
}
+ @Override
public boolean stem() {
int v_1;
int v_2;
Index: contrib/analyzers/common/src/java/org/tartarus/snowball/ext/EnglishStemmer.java
===================================================================
--- contrib/analyzers/common/src/java/org/tartarus/snowball/ext/EnglishStemmer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/tartarus/snowball/ext/EnglishStemmer.java (working copy)
@@ -7,6 +7,7 @@
/**
* Generated class implementing code defined by a snowball script.
*/
+@SuppressWarnings("unused")
public class EnglishStemmer extends SnowballProgram {
private Among a_0[] = {
@@ -185,7 +186,7 @@
// [, line 27
bra = cursor;
// literal, line 27
- if (!(eq_s(1, "'")))
+ if (!(eq_s(1, (CharSequence) "'")))
{
break lab0;
}
@@ -202,14 +203,14 @@
// [, line 28
bra = cursor;
// literal, line 28
- if (!(eq_s(1, "y")))
+ if (!(eq_s(1, (CharSequence) "y")))
{
break lab1;
}
// ], line 28
ket = cursor;
// <-, line 28
- slice_from("Y");
+ slice_from((CharSequence) "Y");
// set Y_found, line 28
B_Y_found = true;
} while (false);
@@ -236,7 +237,7 @@
// [, line 29
bra = cursor;
// literal, line 29
- if (!(eq_s(1, "y")))
+ if (!(eq_s(1, (CharSequence) "y")))
{
break lab6;
}
@@ -253,7 +254,7 @@
cursor++;
}
// <-, line 29
- slice_from("Y");
+ slice_from((CharSequence) "Y");
// set Y_found, line 29
B_Y_found = true;
continue replab3;
@@ -467,7 +468,7 @@
case 1:
// (, line 66
// <-, line 66
- slice_from("ss");
+ slice_from((CharSequence) "ss");
break;
case 2:
// (, line 68
@@ -486,12 +487,12 @@
cursor = c;
}
// <-, line 68
- slice_from("i");
+ slice_from((CharSequence) "i");
break lab1;
} while (false);
cursor = limit - v_2;
// <-, line 68
- slice_from("ie");
+ slice_from((CharSequence) "ie");
} while (false);
break;
case 3:
@@ -552,7 +553,7 @@
return false;
}
// <-, line 77
- slice_from("ee");
+ slice_from((CharSequence) "ee");
break;
case 2:
// (, line 79
@@ -594,7 +595,7 @@
// <+, line 83
{
int c = cursor;
- insert(cursor, cursor, "e");
+ insert(cursor, cursor, (CharSequence) "e");
cursor = c;
}
break;
@@ -631,7 +632,7 @@
// <+, line 87
{
int c = cursor;
- insert(cursor, cursor, "e");
+ insert(cursor, cursor, (CharSequence) "e");
cursor = c;
}
break;
@@ -652,7 +653,7 @@
v_1 = limit - cursor;
lab1: do {
// literal, line 94
- if (!(eq_s_b(1, "y")))
+ if (!(eq_s_b(1, (CharSequence) "y")))
{
break lab1;
}
@@ -660,7 +661,7 @@
} while (false);
cursor = limit - v_1;
// literal, line 94
- if (!(eq_s_b(1, "Y")))
+ if (!(eq_s_b(1, (CharSequence) "Y")))
{
return false;
}
@@ -685,7 +686,7 @@
cursor = limit - v_2;
}
// <-, line 96
- slice_from("i");
+ slice_from((CharSequence) "i");
return true;
}
@@ -713,82 +714,82 @@
case 1:
// (, line 101
// <-, line 101
- slice_from("tion");
+ slice_from((CharSequence) "tion");
break;
case 2:
// (, line 102
// <-, line 102
- slice_from("ence");
+ slice_from((CharSequence) "ence");
break;
case 3:
// (, line 103
// <-, line 103
- slice_from("ance");
+ slice_from((CharSequence) "ance");
break;
case 4:
// (, line 104
// <-, line 104
- slice_from("able");
+ slice_from((CharSequence) "able");
break;
case 5:
// (, line 105
// <-, line 105
- slice_from("ent");
+ slice_from((CharSequence) "ent");
break;
case 6:
// (, line 107
// <-, line 107
- slice_from("ize");
+ slice_from((CharSequence) "ize");
break;
case 7:
// (, line 109
// <-, line 109
- slice_from("ate");
+ slice_from((CharSequence) "ate");
break;
case 8:
// (, line 111
// <-, line 111
- slice_from("al");
+ slice_from((CharSequence) "al");
break;
case 9:
// (, line 112
// <-, line 112
- slice_from("ful");
+ slice_from((CharSequence) "ful");
break;
case 10:
// (, line 114
// <-, line 114
- slice_from("ous");
+ slice_from((CharSequence) "ous");
break;
case 11:
// (, line 116
// <-, line 116
- slice_from("ive");
+ slice_from((CharSequence) "ive");
break;
case 12:
// (, line 118
// <-, line 118
- slice_from("ble");
+ slice_from((CharSequence) "ble");
break;
case 13:
// (, line 119
// literal, line 119
- if (!(eq_s_b(1, "l")))
+ if (!(eq_s_b(1, (CharSequence) "l")))
{
return false;
}
// <-, line 119
- slice_from("og");
+ slice_from((CharSequence) "og");
break;
case 14:
// (, line 120
// <-, line 120
- slice_from("ful");
+ slice_from((CharSequence) "ful");
break;
case 15:
// (, line 121
// <-, line 121
- slice_from("less");
+ slice_from((CharSequence) "less");
break;
case 16:
// (, line 122
@@ -827,22 +828,22 @@
case 1:
// (, line 128
// <-, line 128
- slice_from("tion");
+ slice_from((CharSequence) "tion");
break;
case 2:
// (, line 129
// <-, line 129
- slice_from("ate");
+ slice_from((CharSequence) "ate");
break;
case 3:
// (, line 130
// <-, line 130
- slice_from("al");
+ slice_from((CharSequence) "al");
break;
case 4:
// (, line 132
// <-, line 132
- slice_from("ic");
+ slice_from((CharSequence) "ic");
break;
case 5:
// (, line 134
@@ -897,7 +898,7 @@
v_1 = limit - cursor;
lab1: do {
// literal, line 145
- if (!(eq_s_b(1, "s")))
+ if (!(eq_s_b(1, (CharSequence) "s")))
{
break lab1;
}
@@ -905,7 +906,7 @@
} while (false);
cursor = limit - v_1;
// literal, line 145
- if (!(eq_s_b(1, "t")))
+ if (!(eq_s_b(1, (CharSequence) "t")))
{
return false;
}
@@ -980,7 +981,7 @@
return false;
}
// literal, line 152
- if (!(eq_s_b(1, "l")))
+ if (!(eq_s_b(1, (CharSequence) "l")))
{
return false;
}
@@ -1034,57 +1035,57 @@
case 1:
// (, line 174
// <-, line 174
- slice_from("ski");
+ slice_from((CharSequence) "ski");
break;
case 2:
// (, line 175
// <-, line 175
- slice_from("sky");
+ slice_from((CharSequence) "sky");
break;
case 3:
// (, line 176
// <-, line 176
- slice_from("die");
+ slice_from((CharSequence) "die");
break;
case 4:
// (, line 177
// <-, line 177
- slice_from("lie");
+ slice_from((CharSequence) "lie");
break;
case 5:
// (, line 178
// <-, line 178
- slice_from("tie");
+ slice_from((CharSequence) "tie");
break;
case 6:
// (, line 182
// <-, line 182
- slice_from("idl");
+ slice_from((CharSequence) "idl");
break;
case 7:
// (, line 183
// <-, line 183
- slice_from("gentl");
+ slice_from((CharSequence) "gentl");
break;
case 8:
// (, line 184
// <-, line 184
- slice_from("ugli");
+ slice_from((CharSequence) "ugli");
break;
case 9:
// (, line 185
// <-, line 185
- slice_from("earli");
+ slice_from((CharSequence) "earli");
break;
case 10:
// (, line 186
// <-, line 186
- slice_from("onli");
+ slice_from((CharSequence) "onli");
break;
case 11:
// (, line 187
// <-, line 187
- slice_from("singl");
+ slice_from((CharSequence) "singl");
break;
}
return true;
@@ -1114,7 +1115,7 @@
// [, line 203
bra = cursor;
// literal, line 203
- if (!(eq_s(1, "Y")))
+ if (!(eq_s(1, (CharSequence) "Y")))
{
break lab3;
}
@@ -1131,7 +1132,7 @@
cursor++;
}
// <-, line 203
- slice_from("y");
+ slice_from((CharSequence) "y");
continue replab0;
} while (false);
cursor = v_1;
@@ -1140,6 +1141,7 @@
return true;
}
+ @Override
public boolean stem() {
int v_1;
int v_2;
Index: contrib/analyzers/common/src/java/org/tartarus/snowball/ext/FinnishStemmer.java
===================================================================
--- contrib/analyzers/common/src/java/org/tartarus/snowball/ext/FinnishStemmer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/tartarus/snowball/ext/FinnishStemmer.java (working copy)
@@ -7,6 +7,7 @@
/**
* Generated class implementing code defined by a snowball script.
*/
+@SuppressWarnings("unused")
public class FinnishStemmer extends SnowballProgram {
private Among a_0[] = {
@@ -332,7 +333,7 @@
v_3 = limit - cursor;
lab0: do {
// literal, line 72
- if (!(eq_s_b(1, "k")))
+ if (!(eq_s_b(1, (CharSequence) "k")))
{
break lab0;
}
@@ -350,14 +351,14 @@
// [, line 74
ket = cursor;
// literal, line 74
- if (!(eq_s_b(3, "kse")))
+ if (!(eq_s_b(3, (CharSequence) "kse")))
{
return false;
}
// ], line 74
bra = cursor;
// <-, line 74
- slice_from("ksi");
+ slice_from((CharSequence) "ksi");
break;
case 3:
// (, line 78
@@ -410,7 +411,7 @@
private boolean r_VI() {
// (, line 93
// literal, line 93
- if (!(eq_s_b(1, "i")))
+ if (!(eq_s_b(1, (CharSequence) "i")))
{
return false;
}
@@ -459,7 +460,7 @@
case 1:
// (, line 98
// literal, line 98
- if (!(eq_s_b(1, "a")))
+ if (!(eq_s_b(1, (CharSequence) "a")))
{
return false;
}
@@ -467,7 +468,7 @@
case 2:
// (, line 99
// literal, line 99
- if (!(eq_s_b(1, "e")))
+ if (!(eq_s_b(1, (CharSequence) "e")))
{
return false;
}
@@ -475,7 +476,7 @@
case 3:
// (, line 100
// literal, line 100
- if (!(eq_s_b(1, "i")))
+ if (!(eq_s_b(1, (CharSequence) "i")))
{
return false;
}
@@ -483,7 +484,7 @@
case 4:
// (, line 101
// literal, line 101
- if (!(eq_s_b(1, "o")))
+ if (!(eq_s_b(1, (CharSequence) "o")))
{
return false;
}
@@ -491,7 +492,7 @@
case 5:
// (, line 102
// literal, line 102
- if (!(eq_s_b(1, "\u00E4")))
+ if (!(eq_s_b(1, (CharSequence) "\u00E4")))
{
return false;
}
@@ -499,7 +500,7 @@
case 6:
// (, line 103
// literal, line 103
- if (!(eq_s_b(1, "\u00F6")))
+ if (!(eq_s_b(1, (CharSequence) "\u00F6")))
{
return false;
}
@@ -525,7 +526,7 @@
} while (false);
cursor = limit - v_5;
// literal, line 112
- if (!(eq_s_b(2, "ie")))
+ if (!(eq_s_b(2, (CharSequence) "ie")))
{
cursor = limit - v_3;
break lab0;
@@ -557,7 +558,7 @@
case 9:
// (, line 121
// literal, line 121
- if (!(eq_s_b(1, "e")))
+ if (!(eq_s_b(1, (CharSequence) "e")))
{
return false;
}
@@ -610,7 +611,7 @@
v_3 = limit - cursor;
lab0: do {
// literal, line 146
- if (!(eq_s_b(2, "po")))
+ if (!(eq_s_b(2, (CharSequence) "po")))
{
break lab0;
}
@@ -681,7 +682,7 @@
// [, line 162
ket = cursor;
// literal, line 162
- if (!(eq_s_b(1, "t")))
+ if (!(eq_s_b(1, (CharSequence) "t")))
{
limit_backward = v_2;
return false;
@@ -733,7 +734,7 @@
v_6 = limit - cursor;
lab0: do {
// literal, line 167
- if (!(eq_s_b(2, "po")))
+ if (!(eq_s_b(2, (CharSequence) "po")))
{
break lab0;
}
@@ -825,7 +826,7 @@
// [, line 176
ket = cursor;
// literal, line 176
- if (!(eq_s_b(1, "j")))
+ if (!(eq_s_b(1, (CharSequence) "j")))
{
break lab2;
}
@@ -836,7 +837,7 @@
v_7 = limit - cursor;
lab4: do {
// literal, line 176
- if (!(eq_s_b(1, "o")))
+ if (!(eq_s_b(1, (CharSequence) "o")))
{
break lab4;
}
@@ -844,7 +845,7 @@
} while (false);
cursor = limit - v_7;
// literal, line 176
- if (!(eq_s_b(1, "u")))
+ if (!(eq_s_b(1, (CharSequence) "u")))
{
break lab2;
}
@@ -860,14 +861,14 @@
// [, line 177
ket = cursor;
// literal, line 177
- if (!(eq_s_b(1, "o")))
+ if (!(eq_s_b(1, (CharSequence) "o")))
{
break lab5;
}
// ], line 177
bra = cursor;
// literal, line 177
- if (!(eq_s_b(1, "j")))
+ if (!(eq_s_b(1, (CharSequence) "j")))
{
break lab5;
}
@@ -908,7 +909,7 @@
// -> x, line 179
S_x = slice_to(S_x);
// name x, line 179
- if (!(eq_v_b(S_x)))
+ if (!(eq_v_b((CharSequence) S_x)))
{
return false;
}
@@ -917,6 +918,7 @@
return true;
}
+ @Override
public boolean stem() {
int v_1;
int v_2;
Index: contrib/analyzers/common/src/java/org/tartarus/snowball/ext/FrenchStemmer.java
===================================================================
--- contrib/analyzers/common/src/java/org/tartarus/snowball/ext/FrenchStemmer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/tartarus/snowball/ext/FrenchStemmer.java (working copy)
@@ -7,6 +7,7 @@
/**
* Generated class implementing code defined by a snowball script.
*/
+@SuppressWarnings("unused")
public class FrenchStemmer extends SnowballProgram {
private Among a_0[] = {
@@ -228,7 +229,7 @@
lab7: do {
// (, line 40
// literal, line 40
- if (!(eq_s(1, "u")))
+ if (!(eq_s(1, (CharSequence) "u")))
{
break lab7;
}
@@ -239,14 +240,14 @@
break lab7;
}
// <-, line 40
- slice_from("U");
+ slice_from((CharSequence) "U");
break lab6;
} while (false);
cursor = v_4;
lab8: do {
// (, line 41
// literal, line 41
- if (!(eq_s(1, "i")))
+ if (!(eq_s(1, (CharSequence) "i")))
{
break lab8;
}
@@ -257,20 +258,20 @@
break lab8;
}
// <-, line 41
- slice_from("I");
+ slice_from((CharSequence) "I");
break lab6;
} while (false);
cursor = v_4;
// (, line 42
// literal, line 42
- if (!(eq_s(1, "y")))
+ if (!(eq_s(1, (CharSequence) "y")))
{
break lab5;
}
// ], line 42
ket = cursor;
// <-, line 42
- slice_from("Y");
+ slice_from((CharSequence) "Y");
} while (false);
break lab4;
} while (false);
@@ -280,7 +281,7 @@
// [, line 45
bra = cursor;
// literal, line 45
- if (!(eq_s(1, "y")))
+ if (!(eq_s(1, (CharSequence) "y")))
{
break lab9;
}
@@ -291,27 +292,27 @@
break lab9;
}
// <-, line 45
- slice_from("Y");
+ slice_from((CharSequence) "Y");
break lab4;
} while (false);
cursor = v_3;
// (, line 47
// literal, line 47
- if (!(eq_s(1, "q")))
+ if (!(eq_s(1, (CharSequence) "q")))
{
break lab3;
}
// [, line 47
bra = cursor;
// literal, line 47
- if (!(eq_s(1, "u")))
+ if (!(eq_s(1, (CharSequence) "u")))
{
break lab3;
}
// ], line 47
ket = cursor;
// <-, line 47
- slice_from("U");
+ slice_from((CharSequence) "U");
} while (false);
cursor = v_2;
break golab2;
@@ -504,17 +505,17 @@
case 1:
// (, line 78
// <-, line 78
- slice_from("i");
+ slice_from((CharSequence) "i");
break;
case 2:
// (, line 79
// <-, line 79
- slice_from("u");
+ slice_from((CharSequence) "u");
break;
case 3:
// (, line 80
// <-, line 80
- slice_from("y");
+ slice_from((CharSequence) "y");
break;
case 4:
// (, line 81
@@ -611,7 +612,7 @@
// [, line 100
ket = cursor;
// literal, line 100
- if (!(eq_s_b(2, "ic")))
+ if (!(eq_s_b(2, (CharSequence) "ic")))
{
cursor = limit - v_1;
break lab0;
@@ -634,7 +635,7 @@
} while (false);
cursor = limit - v_2;
// <-, line 100
- slice_from("iqU");
+ slice_from((CharSequence) "iqU");
} while (false);
} while (false);
break;
@@ -646,7 +647,7 @@
return false;
}
// <-, line 104
- slice_from("log");
+ slice_from((CharSequence) "log");
break;
case 4:
// (, line 107
@@ -656,7 +657,7 @@
return false;
}
// <-, line 107
- slice_from("u");
+ slice_from((CharSequence) "u");
break;
case 5:
// (, line 110
@@ -666,7 +667,7 @@
return false;
}
// <-, line 110
- slice_from("ent");
+ slice_from((CharSequence) "ent");
break;
case 6:
// (, line 113
@@ -709,7 +710,7 @@
// [, line 117
ket = cursor;
// literal, line 117
- if (!(eq_s_b(2, "at")))
+ if (!(eq_s_b(2, (CharSequence) "at")))
{
cursor = limit - v_3;
break lab3;
@@ -750,7 +751,7 @@
break lab3;
}
// <-, line 118
- slice_from("eux");
+ slice_from((CharSequence) "eux");
} while (false);
break;
case 3:
@@ -773,7 +774,7 @@
break lab3;
}
// <-, line 122
- slice_from("i");
+ slice_from((CharSequence) "i");
break;
}
} while (false);
@@ -824,7 +825,7 @@
} while (false);
cursor = limit - v_6;
// <-, line 132
- slice_from("abl");
+ slice_from((CharSequence) "abl");
} while (false);
break;
case 2:
@@ -845,7 +846,7 @@
} while (false);
cursor = limit - v_7;
// <-, line 133
- slice_from("iqU");
+ slice_from((CharSequence) "iqU");
} while (false);
break;
case 3:
@@ -878,7 +879,7 @@
// [, line 142
ket = cursor;
// literal, line 142
- if (!(eq_s_b(2, "at")))
+ if (!(eq_s_b(2, (CharSequence) "at")))
{
cursor = limit - v_8;
break lab11;
@@ -896,7 +897,7 @@
// [, line 142
ket = cursor;
// literal, line 142
- if (!(eq_s_b(2, "ic")))
+ if (!(eq_s_b(2, (CharSequence) "ic")))
{
cursor = limit - v_8;
break lab11;
@@ -919,14 +920,14 @@
} while (false);
cursor = limit - v_9;
// <-, line 142
- slice_from("iqU");
+ slice_from((CharSequence) "iqU");
} while (false);
} while (false);
break;
case 9:
// (, line 144
// <-, line 144
- slice_from("eau");
+ slice_from((CharSequence) "eau");
break;
case 10:
// (, line 145
@@ -936,7 +937,7 @@
return false;
}
// <-, line 145
- slice_from("al");
+ slice_from((CharSequence) "al");
break;
case 11:
// (, line 147
@@ -962,7 +963,7 @@
return false;
}
// <-, line 147
- slice_from("eux");
+ slice_from((CharSequence) "eux");
} while (false);
break;
case 12:
@@ -989,7 +990,7 @@
// fail, line 155
// (, line 155
// <-, line 155
- slice_from("ant");
+ slice_from((CharSequence) "ant");
return false;
case 14:
// (, line 156
@@ -1001,7 +1002,7 @@
// fail, line 156
// (, line 156
// <-, line 156
- slice_from("ent");
+ slice_from((CharSequence) "ent");
return false;
case 15:
// (, line 158
@@ -1132,7 +1133,7 @@
// [, line 191
ket = cursor;
// literal, line 191
- if (!(eq_s_b(1, "e")))
+ if (!(eq_s_b(1, (CharSequence) "e")))
{
cursor = limit - v_3;
break lab0;
@@ -1163,7 +1164,7 @@
// [, line 199
ket = cursor;
// literal, line 199
- if (!(eq_s_b(1, "s")))
+ if (!(eq_s_b(1, (CharSequence) "s")))
{
cursor = limit - v_1;
break lab0;
@@ -1221,7 +1222,7 @@
v_5 = limit - cursor;
lab2: do {
// literal, line 202
- if (!(eq_s_b(1, "s")))
+ if (!(eq_s_b(1, (CharSequence) "s")))
{
break lab2;
}
@@ -1229,7 +1230,7 @@
} while (false);
cursor = limit - v_5;
// literal, line 202
- if (!(eq_s_b(1, "t")))
+ if (!(eq_s_b(1, (CharSequence) "t")))
{
limit_backward = v_4;
return false;
@@ -1241,7 +1242,7 @@
case 2:
// (, line 204
// <-, line 204
- slice_from("i");
+ slice_from((CharSequence) "i");
break;
case 3:
// (, line 205
@@ -1251,7 +1252,7 @@
case 4:
// (, line 206
// literal, line 206
- if (!(eq_s_b(2, "gu")))
+ if (!(eq_s_b(2, (CharSequence) "gu")))
{
limit_backward = v_4;
return false;
@@ -1321,7 +1322,7 @@
v_3 = limit - cursor;
lab3: do {
// literal, line 217
- if (!(eq_s_b(1, "\u00E9")))
+ if (!(eq_s_b(1, (CharSequence) "\u00E9")))
{
break lab3;
}
@@ -1329,7 +1330,7 @@
} while (false);
cursor = limit - v_3;
// literal, line 217
- if (!(eq_s_b(1, "\u00E8")))
+ if (!(eq_s_b(1, (CharSequence) "\u00E8")))
{
return false;
}
@@ -1337,10 +1338,11 @@
// ], line 217
bra = cursor;
// <-, line 217
- slice_from("e");
+ slice_from((CharSequence) "e");
return true;
}
+ @Override
public boolean stem() {
int v_1;
int v_2;
@@ -1429,20 +1431,20 @@
lab10: do {
// (, line 234
// literal, line 234
- if (!(eq_s_b(1, "Y")))
+ if (!(eq_s_b(1, (CharSequence) "Y")))
{
break lab10;
}
// ], line 234
bra = cursor;
// <-, line 234
- slice_from("i");
+ slice_from((CharSequence) "i");
break lab9;
} while (false);
cursor = limit - v_8;
// (, line 235
// literal, line 235
- if (!(eq_s_b(1, "\u00E7")))
+ if (!(eq_s_b(1, (CharSequence) "\u00E7")))
{
cursor = limit - v_7;
break lab8;
@@ -1450,7 +1452,7 @@
// ], line 235
bra = cursor;
// <-, line 235
- slice_from("c");
+ slice_from((CharSequence) "c");
} while (false);
} while (false);
break lab3;
Index: contrib/analyzers/common/src/java/org/tartarus/snowball/ext/German2Stemmer.java
===================================================================
--- contrib/analyzers/common/src/java/org/tartarus/snowball/ext/German2Stemmer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/tartarus/snowball/ext/German2Stemmer.java (working copy)
@@ -7,6 +7,7 @@
/**
* Generated class implementing code defined by a snowball script.
*/
+@SuppressWarnings("unused")
public class German2Stemmer extends SnowballProgram {
private Among a_0[] = {
@@ -110,7 +111,7 @@
lab5: do {
// (, line 31
// literal, line 31
- if (!(eq_s(1, "u")))
+ if (!(eq_s(1, (CharSequence) "u")))
{
break lab5;
}
@@ -121,13 +122,13 @@
break lab5;
}
// <-, line 31
- slice_from("U");
+ slice_from((CharSequence) "U");
break lab4;
} while (false);
cursor = v_4;
// (, line 32
// literal, line 32
- if (!(eq_s(1, "y")))
+ if (!(eq_s(1, (CharSequence) "y")))
{
break lab3;
}
@@ -138,7 +139,7 @@
break lab3;
}
// <-, line 32
- slice_from("Y");
+ slice_from((CharSequence) "Y");
} while (false);
cursor = v_3;
break golab2;
@@ -178,22 +179,22 @@
case 1:
// (, line 37
// <-, line 37
- slice_from("ss");
+ slice_from((CharSequence) "ss");
break;
case 2:
// (, line 38
// <-, line 38
- slice_from("\u00E4");
+ slice_from((CharSequence) "\u00E4");
break;
case 3:
// (, line 39
// <-, line 39
- slice_from("\u00F6");
+ slice_from((CharSequence) "\u00F6");
break;
case 4:
// (, line 40
// <-, line 40
- slice_from("\u00FC");
+ slice_from((CharSequence) "\u00FC");
break;
case 5:
// (, line 41
@@ -350,27 +351,27 @@
case 1:
// (, line 64
// <-, line 64
- slice_from("y");
+ slice_from((CharSequence) "y");
break;
case 2:
// (, line 65
// <-, line 65
- slice_from("u");
+ slice_from((CharSequence) "u");
break;
case 3:
// (, line 66
// <-, line 66
- slice_from("a");
+ slice_from((CharSequence) "a");
break;
case 4:
// (, line 67
// <-, line 67
- slice_from("o");
+ slice_from((CharSequence) "o");
break;
case 5:
// (, line 68
// <-, line 68
- slice_from("u");
+ slice_from((CharSequence) "u");
break;
case 6:
// (, line 69
@@ -538,7 +539,7 @@
// [, line 104
ket = cursor;
// literal, line 104
- if (!(eq_s_b(2, "ig")))
+ if (!(eq_s_b(2, (CharSequence) "ig")))
{
cursor = limit - v_4;
break lab3;
@@ -550,7 +551,7 @@
v_5 = limit - cursor;
lab4: do {
// literal, line 104
- if (!(eq_s_b(1, "e")))
+ if (!(eq_s_b(1, (CharSequence) "e")))
{
break lab4;
}
@@ -576,7 +577,7 @@
v_6 = limit - cursor;
lab5: do {
// literal, line 107
- if (!(eq_s_b(1, "e")))
+ if (!(eq_s_b(1, (CharSequence) "e")))
{
break lab5;
}
@@ -602,7 +603,7 @@
v_8 = limit - cursor;
lab8: do {
// literal, line 112
- if (!(eq_s_b(2, "er")))
+ if (!(eq_s_b(2, (CharSequence) "er")))
{
break lab8;
}
@@ -610,7 +611,7 @@
} while (false);
cursor = limit - v_8;
// literal, line 112
- if (!(eq_s_b(2, "en")))
+ if (!(eq_s_b(2, (CharSequence) "en")))
{
cursor = limit - v_7;
break lab6;
@@ -671,6 +672,7 @@
return true;
}
+ @Override
public boolean stem() {
int v_1;
int v_2;
Index: contrib/analyzers/common/src/java/org/tartarus/snowball/ext/GermanStemmer.java
===================================================================
--- contrib/analyzers/common/src/java/org/tartarus/snowball/ext/GermanStemmer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/tartarus/snowball/ext/GermanStemmer.java (working copy)
@@ -7,6 +7,7 @@
/**
* Generated class implementing code defined by a snowball script.
*/
+@SuppressWarnings("unused")
public class GermanStemmer extends SnowballProgram {
private Among a_0[] = {
@@ -92,14 +93,14 @@
// [, line 32
bra = cursor;
// literal, line 32
- if (!(eq_s(1, "\u00DF")))
+ if (!(eq_s(1, (CharSequence) "\u00DF")))
{
break lab3;
}
// ], line 32
ket = cursor;
// <-, line 32
- slice_from("ss");
+ slice_from((CharSequence) "ss");
break lab2;
} while (false);
cursor = v_3;
@@ -139,7 +140,7 @@
lab9: do {
// (, line 37
// literal, line 37
- if (!(eq_s(1, "u")))
+ if (!(eq_s(1, (CharSequence) "u")))
{
break lab9;
}
@@ -150,13 +151,13 @@
break lab9;
}
// <-, line 37
- slice_from("U");
+ slice_from((CharSequence) "U");
break lab8;
} while (false);
cursor = v_6;
// (, line 38
// literal, line 38
- if (!(eq_s(1, "y")))
+ if (!(eq_s(1, (CharSequence) "y")))
{
break lab7;
}
@@ -167,7 +168,7 @@
break lab7;
}
// <-, line 38
- slice_from("Y");
+ slice_from((CharSequence) "Y");
} while (false);
cursor = v_5;
break golab6;
@@ -312,27 +313,27 @@
case 1:
// (, line 58
// <-, line 58
- slice_from("y");
+ slice_from((CharSequence) "y");
break;
case 2:
// (, line 59
// <-, line 59
- slice_from("u");
+ slice_from((CharSequence) "u");
break;
case 3:
// (, line 60
// <-, line 60
- slice_from("a");
+ slice_from((CharSequence) "a");
break;
case 4:
// (, line 61
// <-, line 61
- slice_from("o");
+ slice_from((CharSequence) "o");
break;
case 5:
// (, line 62
// <-, line 62
- slice_from("u");
+ slice_from((CharSequence) "u");
break;
case 6:
// (, line 63
@@ -500,7 +501,7 @@
// [, line 98
ket = cursor;
// literal, line 98
- if (!(eq_s_b(2, "ig")))
+ if (!(eq_s_b(2, (CharSequence) "ig")))
{
cursor = limit - v_4;
break lab3;
@@ -512,7 +513,7 @@
v_5 = limit - cursor;
lab4: do {
// literal, line 98
- if (!(eq_s_b(1, "e")))
+ if (!(eq_s_b(1, (CharSequence) "e")))
{
break lab4;
}
@@ -538,7 +539,7 @@
v_6 = limit - cursor;
lab5: do {
// literal, line 101
- if (!(eq_s_b(1, "e")))
+ if (!(eq_s_b(1, (CharSequence) "e")))
{
break lab5;
}
@@ -564,7 +565,7 @@
v_8 = limit - cursor;
lab8: do {
// literal, line 106
- if (!(eq_s_b(2, "er")))
+ if (!(eq_s_b(2, (CharSequence) "er")))
{
break lab8;
}
@@ -572,7 +573,7 @@
} while (false);
cursor = limit - v_8;
// literal, line 106
- if (!(eq_s_b(2, "en")))
+ if (!(eq_s_b(2, (CharSequence) "en")))
{
cursor = limit - v_7;
break lab6;
@@ -633,6 +634,7 @@
return true;
}
+ @Override
public boolean stem() {
int v_1;
int v_2;
Index: contrib/analyzers/common/src/java/org/tartarus/snowball/ext/HungarianStemmer.java
===================================================================
--- contrib/analyzers/common/src/java/org/tartarus/snowball/ext/HungarianStemmer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/tartarus/snowball/ext/HungarianStemmer.java (working copy)
@@ -7,6 +7,7 @@
/**
* Generated class implementing code defined by a snowball script.
*/
+@SuppressWarnings("unused")
public class HungarianStemmer extends SnowballProgram {
private Among a_0[] = {
@@ -353,12 +354,12 @@
case 1:
// (, line 62
// <-, line 62
- slice_from("a");
+ slice_from((CharSequence) "a");
break;
case 2:
// (, line 63
// <-, line 63
- slice_from("e");
+ slice_from((CharSequence) "e");
break;
}
return true;
@@ -502,17 +503,17 @@
case 1:
// (, line 117
// <-, line 117
- slice_from("e");
+ slice_from((CharSequence) "e");
break;
case 2:
// (, line 118
// <-, line 118
- slice_from("a");
+ slice_from((CharSequence) "a");
break;
case 3:
// (, line 119
// <-, line 119
- slice_from("a");
+ slice_from((CharSequence) "a");
break;
}
return true;
@@ -552,12 +553,12 @@
case 3:
// (, line 127
// <-, line 127
- slice_from("a");
+ slice_from((CharSequence) "a");
break;
case 4:
// (, line 128
// <-, line 128
- slice_from("e");
+ slice_from((CharSequence) "e");
break;
}
return true;
@@ -635,12 +636,12 @@
case 1:
// (, line 143
// <-, line 143
- slice_from("a");
+ slice_from((CharSequence) "a");
break;
case 2:
// (, line 144
// <-, line 144
- slice_from("e");
+ slice_from((CharSequence) "e");
break;
case 3:
// (, line 145
@@ -700,12 +701,12 @@
case 2:
// (, line 156
// <-, line 156
- slice_from("e");
+ slice_from((CharSequence) "e");
break;
case 3:
// (, line 157
// <-, line 157
- slice_from("a");
+ slice_from((CharSequence) "a");
break;
case 4:
// (, line 158
@@ -715,12 +716,12 @@
case 5:
// (, line 159
// <-, line 159
- slice_from("e");
+ slice_from((CharSequence) "e");
break;
case 6:
// (, line 160
// <-, line 160
- slice_from("a");
+ slice_from((CharSequence) "a");
break;
case 7:
// (, line 161
@@ -730,7 +731,7 @@
case 8:
// (, line 162
// <-, line 162
- slice_from("e");
+ slice_from((CharSequence) "e");
break;
case 9:
// (, line 163
@@ -770,12 +771,12 @@
case 2:
// (, line 170
// <-, line 170
- slice_from("a");
+ slice_from((CharSequence) "a");
break;
case 3:
// (, line 171
// <-, line 171
- slice_from("e");
+ slice_from((CharSequence) "e");
break;
case 4:
// (, line 172
@@ -785,12 +786,12 @@
case 5:
// (, line 173
// <-, line 173
- slice_from("a");
+ slice_from((CharSequence) "a");
break;
case 6:
// (, line 174
// <-, line 174
- slice_from("e");
+ slice_from((CharSequence) "e");
break;
case 7:
// (, line 175
@@ -810,12 +811,12 @@
case 10:
// (, line 178
// <-, line 178
- slice_from("a");
+ slice_from((CharSequence) "a");
break;
case 11:
// (, line 179
// <-, line 179
- slice_from("e");
+ slice_from((CharSequence) "e");
break;
case 12:
// (, line 180
@@ -830,12 +831,12 @@
case 14:
// (, line 182
// <-, line 182
- slice_from("a");
+ slice_from((CharSequence) "a");
break;
case 15:
// (, line 183
// <-, line 183
- slice_from("e");
+ slice_from((CharSequence) "e");
break;
case 16:
// (, line 184
@@ -855,12 +856,12 @@
case 19:
// (, line 187
// <-, line 187
- slice_from("a");
+ slice_from((CharSequence) "a");
break;
case 20:
// (, line 188
// <-, line 188
- slice_from("e");
+ slice_from((CharSequence) "e");
break;
}
return true;
@@ -895,12 +896,12 @@
case 2:
// (, line 195
// <-, line 195
- slice_from("a");
+ slice_from((CharSequence) "a");
break;
case 3:
// (, line 196
// <-, line 196
- slice_from("e");
+ slice_from((CharSequence) "e");
break;
case 4:
// (, line 197
@@ -920,12 +921,12 @@
case 7:
// (, line 200
// <-, line 200
- slice_from("a");
+ slice_from((CharSequence) "a");
break;
case 8:
// (, line 201
// <-, line 201
- slice_from("e");
+ slice_from((CharSequence) "e");
break;
case 9:
// (, line 202
@@ -945,12 +946,12 @@
case 12:
// (, line 205
// <-, line 205
- slice_from("a");
+ slice_from((CharSequence) "a");
break;
case 13:
// (, line 206
// <-, line 206
- slice_from("e");
+ slice_from((CharSequence) "e");
break;
case 14:
// (, line 207
@@ -975,12 +976,12 @@
case 18:
// (, line 211
// <-, line 211
- slice_from("a");
+ slice_from((CharSequence) "a");
break;
case 19:
// (, line 212
// <-, line 212
- slice_from("e");
+ slice_from((CharSequence) "e");
break;
case 20:
// (, line 214
@@ -995,12 +996,12 @@
case 22:
// (, line 216
// <-, line 216
- slice_from("a");
+ slice_from((CharSequence) "a");
break;
case 23:
// (, line 217
// <-, line 217
- slice_from("e");
+ slice_from((CharSequence) "e");
break;
case 24:
// (, line 218
@@ -1020,12 +1021,12 @@
case 27:
// (, line 221
// <-, line 221
- slice_from("a");
+ slice_from((CharSequence) "a");
break;
case 28:
// (, line 222
// <-, line 222
- slice_from("e");
+ slice_from((CharSequence) "e");
break;
case 29:
// (, line 223
@@ -1036,6 +1037,7 @@
return true;
}
+ @Override
public boolean stem() {
int v_1;
int v_2;
Index: contrib/analyzers/common/src/java/org/tartarus/snowball/ext/ItalianStemmer.java
===================================================================
--- contrib/analyzers/common/src/java/org/tartarus/snowball/ext/ItalianStemmer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/tartarus/snowball/ext/ItalianStemmer.java (working copy)
@@ -7,6 +7,7 @@
/**
* Generated class implementing code defined by a snowball script.
*/
+@SuppressWarnings("unused")
public class ItalianStemmer extends SnowballProgram {
private Among a_0[] = {
@@ -279,32 +280,32 @@
case 1:
// (, line 37
// <-, line 37
- slice_from("\u00E0");
+ slice_from((CharSequence) "\u00E0");
break;
case 2:
// (, line 38
// <-, line 38
- slice_from("\u00E8");
+ slice_from((CharSequence) "\u00E8");
break;
case 3:
// (, line 39
// <-, line 39
- slice_from("\u00EC");
+ slice_from((CharSequence) "\u00EC");
break;
case 4:
// (, line 40
// <-, line 40
- slice_from("\u00F2");
+ slice_from((CharSequence) "\u00F2");
break;
case 5:
// (, line 41
// <-, line 41
- slice_from("\u00F9");
+ slice_from((CharSequence) "\u00F9");
break;
case 6:
// (, line 42
// <-, line 42
- slice_from("qU");
+ slice_from((CharSequence) "qU");
break;
case 7:
// (, line 43
@@ -345,7 +346,7 @@
lab7: do {
// (, line 47
// literal, line 47
- if (!(eq_s(1, "u")))
+ if (!(eq_s(1, (CharSequence) "u")))
{
break lab7;
}
@@ -356,13 +357,13 @@
break lab7;
}
// <-, line 47
- slice_from("U");
+ slice_from((CharSequence) "U");
break lab6;
} while (false);
cursor = v_5;
// (, line 48
// literal, line 48
- if (!(eq_s(1, "i")))
+ if (!(eq_s(1, (CharSequence) "i")))
{
break lab5;
}
@@ -373,7 +374,7 @@
break lab5;
}
// <-, line 48
- slice_from("I");
+ slice_from((CharSequence) "I");
} while (false);
cursor = v_4;
break golab4;
@@ -621,12 +622,12 @@
case 1:
// (, line 73
// <-, line 73
- slice_from("i");
+ slice_from((CharSequence) "i");
break;
case 2:
// (, line 74
// <-, line 74
- slice_from("u");
+ slice_from((CharSequence) "u");
break;
case 3:
// (, line 75
@@ -705,7 +706,7 @@
case 2:
// (, line 99
// <-, line 99
- slice_from("e");
+ slice_from((CharSequence) "e");
break;
}
return true;
@@ -757,7 +758,7 @@
// [, line 114
ket = cursor;
// literal, line 114
- if (!(eq_s_b(2, "ic")))
+ if (!(eq_s_b(2, (CharSequence) "ic")))
{
cursor = limit - v_1;
break lab0;
@@ -782,7 +783,7 @@
return false;
}
// <-, line 117
- slice_from("log");
+ slice_from((CharSequence) "log");
break;
case 4:
// (, line 119
@@ -792,7 +793,7 @@
return false;
}
// <-, line 119
- slice_from("u");
+ slice_from((CharSequence) "u");
break;
case 5:
// (, line 121
@@ -802,7 +803,7 @@
return false;
}
// <-, line 121
- slice_from("ente");
+ slice_from((CharSequence) "ente");
break;
case 6:
// (, line 123
@@ -855,7 +856,7 @@
// [, line 128
ket = cursor;
// literal, line 128
- if (!(eq_s_b(2, "at")))
+ if (!(eq_s_b(2, (CharSequence) "at")))
{
cursor = limit - v_2;
break lab1;
@@ -932,7 +933,7 @@
// [, line 143
ket = cursor;
// literal, line 143
- if (!(eq_s_b(2, "at")))
+ if (!(eq_s_b(2, (CharSequence) "at")))
{
cursor = limit - v_4;
break lab3;
@@ -950,7 +951,7 @@
// [, line 143
ket = cursor;
// literal, line 143
- if (!(eq_s_b(2, "ic")))
+ if (!(eq_s_b(2, (CharSequence) "ic")))
{
cursor = limit - v_4;
break lab3;
@@ -1040,7 +1041,7 @@
// [, line 173
ket = cursor;
// literal, line 173
- if (!(eq_s_b(1, "i")))
+ if (!(eq_s_b(1, (CharSequence) "i")))
{
cursor = limit - v_1;
break lab0;
@@ -1063,7 +1064,7 @@
// [, line 176
ket = cursor;
// literal, line 176
- if (!(eq_s_b(1, "h")))
+ if (!(eq_s_b(1, (CharSequence) "h")))
{
cursor = limit - v_2;
break lab1;
@@ -1087,6 +1088,7 @@
return true;
}
+ @Override
public boolean stem() {
int v_1;
int v_2;
Index: contrib/analyzers/common/src/java/org/tartarus/snowball/ext/KpStemmer.java
===================================================================
--- contrib/analyzers/common/src/java/org/tartarus/snowball/ext/KpStemmer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/tartarus/snowball/ext/KpStemmer.java (working copy)
@@ -7,6 +7,7 @@
/**
* Generated class implementing code defined by a snowball script.
*/
+@SuppressWarnings("unused")
public class KpStemmer extends SnowballProgram {
private Among a_0[] = {
@@ -178,7 +179,7 @@
} while (false);
cursor = limit - v_2;
// literal, line 35
- if (!(eq_s_b(2, "ij")))
+ if (!(eq_s_b(2, (CharSequence) "ij")))
{
return false;
}
@@ -211,7 +212,7 @@
} while (false);
cursor = limit - v_2;
// literal, line 36
- if (!(eq_s_b(2, "ij")))
+ if (!(eq_s_b(2, (CharSequence) "ij")))
{
return false;
}
@@ -231,7 +232,7 @@
v_2 = limit - cursor;
lab0: do {
// literal, line 37
- if (!(eq_s_b(2, "ij")))
+ if (!(eq_s_b(2, (CharSequence) "ij")))
{
break lab0;
}
@@ -303,7 +304,7 @@
cursor = limit - v_2;
// (, line 41
// literal, line 41
- if (!(eq_s_b(1, "e")))
+ if (!(eq_s_b(1, (CharSequence) "e")))
{
break lab0;
}
@@ -371,7 +372,7 @@
// <+ ch, line 44
{
int c = cursor;
- insert(cursor, cursor, S_ch);
+ insert(cursor, cursor, (CharSequence) S_ch);
cursor = c;
}
} while (false);
@@ -418,7 +419,7 @@
lab0: do {
// (, line 52
// literal, line 52
- if (!(eq_s_b(1, "t")))
+ if (!(eq_s_b(1, (CharSequence) "t")))
{
break lab0;
}
@@ -447,7 +448,7 @@
return false;
}
// <-, line 53
- slice_from("ie");
+ slice_from((CharSequence) "ie");
break;
case 4:
// (, line 55
@@ -457,7 +458,7 @@
lab2: do {
// (, line 55
// literal, line 55
- if (!(eq_s_b(2, "ar")))
+ if (!(eq_s_b(2, (CharSequence) "ar")))
{
break lab2;
}
@@ -486,7 +487,7 @@
lab3: do {
// (, line 56
// literal, line 56
- if (!(eq_s_b(2, "er")))
+ if (!(eq_s_b(2, (CharSequence) "er")))
{
break lab3;
}
@@ -519,7 +520,7 @@
return false;
}
// <-, line 57
- slice_from("e");
+ slice_from((CharSequence) "e");
} while (false);
break;
case 5:
@@ -535,7 +536,7 @@
return false;
}
// <-, line 59
- slice_from("au");
+ slice_from((CharSequence) "au");
break;
case 6:
// (, line 60
@@ -545,7 +546,7 @@
lab5: do {
// (, line 60
// literal, line 60
- if (!(eq_s_b(3, "hed")))
+ if (!(eq_s_b(3, (CharSequence) "hed")))
{
break lab5;
}
@@ -557,14 +558,14 @@
// ], line 60
bra = cursor;
// <-, line 60
- slice_from("heid");
+ slice_from((CharSequence) "heid");
break lab4;
} while (false);
cursor = limit - v_3;
lab6: do {
// (, line 61
// literal, line 61
- if (!(eq_s_b(2, "nd")))
+ if (!(eq_s_b(2, (CharSequence) "nd")))
{
break lab6;
}
@@ -576,7 +577,7 @@
lab7: do {
// (, line 62
// literal, line 62
- if (!(eq_s_b(1, "d")))
+ if (!(eq_s_b(1, (CharSequence) "d")))
{
break lab7;
}
@@ -604,7 +605,7 @@
v_4 = limit - cursor;
lab10: do {
// literal, line 63
- if (!(eq_s_b(1, "i")))
+ if (!(eq_s_b(1, (CharSequence) "i")))
{
break lab10;
}
@@ -612,7 +613,7 @@
} while (false);
cursor = limit - v_4;
// literal, line 63
- if (!(eq_s_b(1, "j")))
+ if (!(eq_s_b(1, (CharSequence) "j")))
{
break lab8;
}
@@ -650,7 +651,7 @@
case 7:
// (, line 65
// <-, line 65
- slice_from("nd");
+ slice_from((CharSequence) "nd");
break;
}
return true;
@@ -682,7 +683,7 @@
lab1: do {
// (, line 72
// literal, line 72
- if (!(eq_s_b(2, "'t")))
+ if (!(eq_s_b(2, (CharSequence) "'t")))
{
break lab1;
}
@@ -696,7 +697,7 @@
lab2: do {
// (, line 73
// literal, line 73
- if (!(eq_s_b(2, "et")))
+ if (!(eq_s_b(2, (CharSequence) "et")))
{
break lab2;
}
@@ -720,21 +721,21 @@
lab3: do {
// (, line 74
// literal, line 74
- if (!(eq_s_b(3, "rnt")))
+ if (!(eq_s_b(3, (CharSequence) "rnt")))
{
break lab3;
}
// ], line 74
bra = cursor;
// <-, line 74
- slice_from("rn");
+ slice_from((CharSequence) "rn");
break lab0;
} while (false);
cursor = limit - v_1;
lab4: do {
// (, line 75
// literal, line 75
- if (!(eq_s_b(1, "t")))
+ if (!(eq_s_b(1, (CharSequence) "t")))
{
break lab4;
}
@@ -758,35 +759,35 @@
lab5: do {
// (, line 76
// literal, line 76
- if (!(eq_s_b(3, "ink")))
+ if (!(eq_s_b(3, (CharSequence) "ink")))
{
break lab5;
}
// ], line 76
bra = cursor;
// <-, line 76
- slice_from("ing");
+ slice_from((CharSequence) "ing");
break lab0;
} while (false);
cursor = limit - v_1;
lab6: do {
// (, line 77
// literal, line 77
- if (!(eq_s_b(2, "mp")))
+ if (!(eq_s_b(2, (CharSequence) "mp")))
{
break lab6;
}
// ], line 77
bra = cursor;
// <-, line 77
- slice_from("m");
+ slice_from((CharSequence) "m");
break lab0;
} while (false);
cursor = limit - v_1;
lab7: do {
// (, line 78
// literal, line 78
- if (!(eq_s_b(1, "'")))
+ if (!(eq_s_b(1, (CharSequence) "'")))
{
break lab7;
}
@@ -827,7 +828,7 @@
return false;
}
// <-, line 80
- slice_from("g");
+ slice_from((CharSequence) "g");
break;
case 3:
// (, line 81
@@ -837,7 +838,7 @@
return false;
}
// <-, line 81
- slice_from("lijk");
+ slice_from((CharSequence) "lijk");
break;
case 4:
// (, line 82
@@ -847,7 +848,7 @@
return false;
}
// <-, line 82
- slice_from("isch");
+ slice_from((CharSequence) "isch");
break;
case 5:
// (, line 83
@@ -872,7 +873,7 @@
return false;
}
// <-, line 84
- slice_from("t");
+ slice_from((CharSequence) "t");
break;
case 7:
// (, line 85
@@ -882,7 +883,7 @@
return false;
}
// <-, line 85
- slice_from("s");
+ slice_from((CharSequence) "s");
break;
case 8:
// (, line 86
@@ -892,7 +893,7 @@
return false;
}
// <-, line 86
- slice_from("r");
+ slice_from((CharSequence) "r");
break;
case 9:
// (, line 87
@@ -904,7 +905,7 @@
// delete, line 87
slice_del();
// attach, line 87
- insert(cursor, cursor, "l");
+ insert(cursor, cursor, (CharSequence) "l");
// call lengthen_V, line 87
if (!r_lengthen_V())
{
@@ -926,7 +927,7 @@
// delete, line 88
slice_del();
// attach, line 88
- insert(cursor, cursor, "en");
+ insert(cursor, cursor, (CharSequence) "en");
// call lengthen_V, line 88
if (!r_lengthen_V())
{
@@ -946,7 +947,7 @@
return false;
}
// <-, line 89
- slice_from("ief");
+ slice_from((CharSequence) "ief");
break;
}
return true;
@@ -977,7 +978,7 @@
return false;
}
// <-, line 96
- slice_from("eer");
+ slice_from((CharSequence) "eer");
break;
case 2:
// (, line 97
@@ -1007,7 +1008,7 @@
case 4:
// (, line 101
// <-, line 101
- slice_from("r");
+ slice_from((CharSequence) "r");
break;
case 5:
// (, line 104
@@ -1037,7 +1038,7 @@
return false;
}
// <-, line 105
- slice_from("aar");
+ slice_from((CharSequence) "aar");
break;
case 7:
// (, line 106
@@ -1049,7 +1050,7 @@
// delete, line 106
slice_del();
// attach, line 106
- insert(cursor, cursor, "f");
+ insert(cursor, cursor, (CharSequence) "f");
// call lengthen_V, line 106
if (!r_lengthen_V())
{
@@ -1066,7 +1067,7 @@
// delete, line 107
slice_del();
// attach, line 107
- insert(cursor, cursor, "g");
+ insert(cursor, cursor, (CharSequence) "g");
// call lengthen_V, line 107
if (!r_lengthen_V())
{
@@ -1086,7 +1087,7 @@
return false;
}
// <-, line 108
- slice_from("t");
+ slice_from((CharSequence) "t");
break;
case 10:
// (, line 109
@@ -1101,7 +1102,7 @@
return false;
}
// <-, line 109
- slice_from("d");
+ slice_from((CharSequence) "d");
break;
}
return true;
@@ -1138,7 +1139,7 @@
break lab1;
}
// <-, line 116
- slice_from("ie");
+ slice_from((CharSequence) "ie");
break;
case 2:
// (, line 117
@@ -1148,7 +1149,7 @@
break lab1;
}
// <-, line 117
- slice_from("eer");
+ slice_from((CharSequence) "eer");
break;
case 3:
// (, line 118
@@ -1173,7 +1174,7 @@
break lab1;
}
// <-, line 119
- slice_from("n");
+ slice_from((CharSequence) "n");
break;
case 5:
// (, line 120
@@ -1188,7 +1189,7 @@
break lab1;
}
// <-, line 120
- slice_from("l");
+ slice_from((CharSequence) "l");
break;
case 6:
// (, line 121
@@ -1203,7 +1204,7 @@
break lab1;
}
// <-, line 121
- slice_from("r");
+ slice_from((CharSequence) "r");
break;
case 7:
// (, line 122
@@ -1213,7 +1214,7 @@
break lab1;
}
// <-, line 122
- slice_from("teer");
+ slice_from((CharSequence) "teer");
break;
case 8:
// (, line 124
@@ -1223,7 +1224,7 @@
break lab1;
}
// <-, line 124
- slice_from("lijk");
+ slice_from((CharSequence) "lijk");
break;
case 9:
// (, line 127
@@ -1319,17 +1320,17 @@
case 1:
// (, line 146
// <-, line 146
- slice_from("k");
+ slice_from((CharSequence) "k");
break;
case 2:
// (, line 147
// <-, line 147
- slice_from("f");
+ slice_from((CharSequence) "f");
break;
case 3:
// (, line 148
// <-, line 148
- slice_from("p");
+ slice_from((CharSequence) "p");
break;
}
return true;
@@ -1355,112 +1356,112 @@
case 1:
// (, line 155
// <-, line 155
- slice_from("b");
+ slice_from((CharSequence) "b");
break;
case 2:
// (, line 156
// <-, line 156
- slice_from("c");
+ slice_from((CharSequence) "c");
break;
case 3:
// (, line 157
// <-, line 157
- slice_from("d");
+ slice_from((CharSequence) "d");
break;
case 4:
// (, line 158
// <-, line 158
- slice_from("f");
+ slice_from((CharSequence) "f");
break;
case 5:
// (, line 159
// <-, line 159
- slice_from("g");
+ slice_from((CharSequence) "g");
break;
case 6:
// (, line 160
// <-, line 160
- slice_from("h");
+ slice_from((CharSequence) "h");
break;
case 7:
// (, line 161
// <-, line 161
- slice_from("j");
+ slice_from((CharSequence) "j");
break;
case 8:
// (, line 162
// <-, line 162
- slice_from("k");
+ slice_from((CharSequence) "k");
break;
case 9:
// (, line 163
// <-, line 163
- slice_from("l");
+ slice_from((CharSequence) "l");
break;
case 10:
// (, line 164
// <-, line 164
- slice_from("m");
+ slice_from((CharSequence) "m");
break;
case 11:
// (, line 165
// <-, line 165
- slice_from("n");
+ slice_from((CharSequence) "n");
break;
case 12:
// (, line 166
// <-, line 166
- slice_from("p");
+ slice_from((CharSequence) "p");
break;
case 13:
// (, line 167
// <-, line 167
- slice_from("q");
+ slice_from((CharSequence) "q");
break;
case 14:
// (, line 168
// <-, line 168
- slice_from("r");
+ slice_from((CharSequence) "r");
break;
case 15:
// (, line 169
// <-, line 169
- slice_from("s");
+ slice_from((CharSequence) "s");
break;
case 16:
// (, line 170
// <-, line 170
- slice_from("t");
+ slice_from((CharSequence) "t");
break;
case 17:
// (, line 171
// <-, line 171
- slice_from("v");
+ slice_from((CharSequence) "v");
break;
case 18:
// (, line 172
// <-, line 172
- slice_from("w");
+ slice_from((CharSequence) "w");
break;
case 19:
// (, line 173
// <-, line 173
- slice_from("x");
+ slice_from((CharSequence) "x");
break;
case 20:
// (, line 174
// <-, line 174
- slice_from("z");
+ slice_from((CharSequence) "z");
break;
case 21:
// (, line 175
// <-, line 175
- slice_from("f");
+ slice_from((CharSequence) "f");
break;
case 22:
// (, line 176
// <-, line 176
- slice_from("s");
+ slice_from((CharSequence) "s");
break;
}
return true;
@@ -1503,7 +1504,7 @@
lab0: do {
// (, line 183
// literal, line 183
- if (!(eq_s_b(1, "n")))
+ if (!(eq_s_b(1, (CharSequence) "n")))
{
break lab0;
}
@@ -1527,7 +1528,7 @@
lab1: do {
// (, line 184
// literal, line 184
- if (!(eq_s_b(1, "h")))
+ if (!(eq_s_b(1, (CharSequence) "h")))
{
break lab1;
}
@@ -1555,7 +1556,7 @@
// [, line 190
bra = cursor;
// literal, line 190
- if (!(eq_s(2, "ge")))
+ if (!(eq_s(2, (CharSequence) "ge")))
{
return false;
}
@@ -1638,7 +1639,7 @@
// [, line 197
bra = cursor;
// literal, line 197
- if (!(eq_s(2, "ge")))
+ if (!(eq_s(2, (CharSequence) "ge")))
{
break lab1;
}
@@ -1760,7 +1761,7 @@
v_6 = cursor;
lab7: do {
// literal, line 209
- if (!(eq_s(2, "ij")))
+ if (!(eq_s(2, (CharSequence) "ij")))
{
break lab7;
}
@@ -1815,7 +1816,7 @@
v_10 = cursor;
lab13: do {
// literal, line 210
- if (!(eq_s(2, "ij")))
+ if (!(eq_s(2, (CharSequence) "ij")))
{
break lab13;
}
@@ -1849,6 +1850,7 @@
return true;
}
+ @Override
public boolean stem() {
int v_1;
int v_2;
@@ -1881,14 +1883,14 @@
// [, line 218
bra = cursor;
// literal, line 218
- if (!(eq_s(1, "y")))
+ if (!(eq_s(1, (CharSequence) "y")))
{
break lab0;
}
// ], line 218
ket = cursor;
// <-, line 218
- slice_from("Y");
+ slice_from((CharSequence) "Y");
// set Y_found, line 218
B_Y_found = true;
} while (false);
@@ -1915,7 +1917,7 @@
// [, line 219
bra = cursor;
// literal, line 219
- if (!(eq_s(1, "y")))
+ if (!(eq_s(1, (CharSequence) "y")))
{
break lab5;
}
@@ -1932,7 +1934,7 @@
cursor++;
}
// <-, line 219
- slice_from("Y");
+ slice_from((CharSequence) "Y");
// set Y_found, line 219
B_Y_found = true;
continue replab2;
@@ -2149,7 +2151,7 @@
// [, line 243
bra = cursor;
// literal, line 243
- if (!(eq_s(1, "Y")))
+ if (!(eq_s(1, (CharSequence) "Y")))
{
break lab22;
}
@@ -2166,7 +2168,7 @@
cursor++;
}
// <-, line 243
- slice_from("y");
+ slice_from((CharSequence) "y");
continue replab19;
} while (false);
cursor = v_19;
Index: contrib/analyzers/common/src/java/org/tartarus/snowball/ext/LovinsStemmer.java
===================================================================
--- contrib/analyzers/common/src/java/org/tartarus/snowball/ext/LovinsStemmer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/tartarus/snowball/ext/LovinsStemmer.java (working copy)
@@ -7,6 +7,7 @@
/**
* Generated class implementing code defined by a snowball script.
*/
+@SuppressWarnings("unused")
public class LovinsStemmer extends SnowballProgram {
private Among a_0[] = {
@@ -450,7 +451,7 @@
v_2 = limit - cursor;
lab0: do {
// literal, line 25
- if (!(eq_s_b(1, "e")))
+ if (!(eq_s_b(1, (CharSequence) "e")))
{
break lab0;
}
@@ -482,7 +483,7 @@
v_2 = limit - cursor;
lab0: do {
// literal, line 26
- if (!(eq_s_b(1, "e")))
+ if (!(eq_s_b(1, (CharSequence) "e")))
{
break lab0;
}
@@ -509,7 +510,7 @@
}
cursor = limit - v_1;
// literal, line 27
- if (!(eq_s_b(1, "f")))
+ if (!(eq_s_b(1, (CharSequence) "f")))
{
return false;
}
@@ -537,7 +538,7 @@
v_2 = limit - cursor;
lab1: do {
// literal, line 28
- if (!(eq_s_b(1, "t")))
+ if (!(eq_s_b(1, (CharSequence) "t")))
{
break lab1;
}
@@ -545,7 +546,7 @@
} while (false);
cursor = limit - v_2;
// literal, line 28
- if (!(eq_s_b(2, "ll")))
+ if (!(eq_s_b(2, (CharSequence) "ll")))
{
return false;
}
@@ -575,7 +576,7 @@
v_2 = limit - cursor;
lab0: do {
// literal, line 29
- if (!(eq_s_b(1, "o")))
+ if (!(eq_s_b(1, (CharSequence) "o")))
{
break lab0;
}
@@ -588,7 +589,7 @@
v_3 = limit - cursor;
lab1: do {
// literal, line 29
- if (!(eq_s_b(1, "e")))
+ if (!(eq_s_b(1, (CharSequence) "e")))
{
break lab1;
}
@@ -621,7 +622,7 @@
v_2 = limit - cursor;
lab0: do {
// literal, line 30
- if (!(eq_s_b(1, "a")))
+ if (!(eq_s_b(1, (CharSequence) "a")))
{
break lab0;
}
@@ -634,7 +635,7 @@
v_3 = limit - cursor;
lab1: do {
// literal, line 30
- if (!(eq_s_b(1, "e")))
+ if (!(eq_s_b(1, (CharSequence) "e")))
{
break lab1;
}
@@ -666,7 +667,7 @@
v_2 = limit - cursor;
lab1: do {
// literal, line 31
- if (!(eq_s_b(1, "l")))
+ if (!(eq_s_b(1, (CharSequence) "l")))
{
break lab1;
}
@@ -675,7 +676,7 @@
cursor = limit - v_2;
lab2: do {
// literal, line 31
- if (!(eq_s_b(1, "i")))
+ if (!(eq_s_b(1, (CharSequence) "i")))
{
break lab2;
}
@@ -684,7 +685,7 @@
cursor = limit - v_2;
// (, line 31
// literal, line 31
- if (!(eq_s_b(1, "e")))
+ if (!(eq_s_b(1, (CharSequence) "e")))
{
return false;
}
@@ -695,7 +696,7 @@
}
cursor--;
// literal, line 31
- if (!(eq_s_b(1, "u")))
+ if (!(eq_s_b(1, (CharSequence) "u")))
{
return false;
}
@@ -727,7 +728,7 @@
v_2 = limit - cursor;
lab0: do {
// literal, line 32
- if (!(eq_s_b(1, "u")))
+ if (!(eq_s_b(1, (CharSequence) "u")))
{
break lab0;
}
@@ -740,7 +741,7 @@
v_3 = limit - cursor;
lab1: do {
// literal, line 32
- if (!(eq_s_b(1, "x")))
+ if (!(eq_s_b(1, (CharSequence) "x")))
{
break lab1;
}
@@ -754,7 +755,7 @@
lab2: do {
// (, line 32
// literal, line 32
- if (!(eq_s_b(1, "s")))
+ if (!(eq_s_b(1, (CharSequence) "s")))
{
break lab2;
}
@@ -763,7 +764,7 @@
v_5 = limit - cursor;
lab3: do {
// literal, line 32
- if (!(eq_s_b(1, "o")))
+ if (!(eq_s_b(1, (CharSequence) "o")))
{
break lab3;
}
@@ -802,7 +803,7 @@
v_2 = limit - cursor;
lab0: do {
// literal, line 33
- if (!(eq_s_b(1, "a")))
+ if (!(eq_s_b(1, (CharSequence) "a")))
{
break lab0;
}
@@ -815,7 +816,7 @@
v_3 = limit - cursor;
lab1: do {
// literal, line 33
- if (!(eq_s_b(1, "c")))
+ if (!(eq_s_b(1, (CharSequence) "c")))
{
break lab1;
}
@@ -828,7 +829,7 @@
v_4 = limit - cursor;
lab2: do {
// literal, line 33
- if (!(eq_s_b(1, "e")))
+ if (!(eq_s_b(1, (CharSequence) "e")))
{
break lab2;
}
@@ -841,7 +842,7 @@
v_5 = limit - cursor;
lab3: do {
// literal, line 33
- if (!(eq_s_b(1, "m")))
+ if (!(eq_s_b(1, (CharSequence) "m")))
{
break lab3;
}
@@ -888,7 +889,7 @@
v_3 = limit - cursor;
lab2: do {
// literal, line 34
- if (!(eq_s_b(1, "s")))
+ if (!(eq_s_b(1, (CharSequence) "s")))
{
break lab2;
}
@@ -933,7 +934,7 @@
v_2 = limit - cursor;
lab1: do {
// literal, line 35
- if (!(eq_s_b(1, "l")))
+ if (!(eq_s_b(1, (CharSequence) "l")))
{
break lab1;
}
@@ -941,7 +942,7 @@
} while (false);
cursor = limit - v_2;
// literal, line 35
- if (!(eq_s_b(1, "i")))
+ if (!(eq_s_b(1, (CharSequence) "i")))
{
return false;
}
@@ -970,7 +971,7 @@
v_2 = limit - cursor;
lab0: do {
// literal, line 36
- if (!(eq_s_b(1, "c")))
+ if (!(eq_s_b(1, (CharSequence) "c")))
{
break lab0;
}
@@ -1016,7 +1017,7 @@
v_3 = limit - cursor;
lab0: do {
// literal, line 37
- if (!(eq_s_b(1, "l")))
+ if (!(eq_s_b(1, (CharSequence) "l")))
{
break lab0;
}
@@ -1029,7 +1030,7 @@
v_4 = limit - cursor;
lab1: do {
// literal, line 37
- if (!(eq_s_b(1, "n")))
+ if (!(eq_s_b(1, (CharSequence) "n")))
{
break lab1;
}
@@ -1061,7 +1062,7 @@
v_2 = limit - cursor;
lab1: do {
// literal, line 38
- if (!(eq_s_b(1, "n")))
+ if (!(eq_s_b(1, (CharSequence) "n")))
{
break lab1;
}
@@ -1069,7 +1070,7 @@
} while (false);
cursor = limit - v_2;
// literal, line 38
- if (!(eq_s_b(1, "r")))
+ if (!(eq_s_b(1, (CharSequence) "r")))
{
return false;
}
@@ -1099,7 +1100,7 @@
v_2 = limit - cursor;
lab1: do {
// literal, line 39
- if (!(eq_s_b(2, "dr")))
+ if (!(eq_s_b(2, (CharSequence) "dr")))
{
break lab1;
}
@@ -1108,7 +1109,7 @@
cursor = limit - v_2;
// (, line 39
// literal, line 39
- if (!(eq_s_b(1, "t")))
+ if (!(eq_s_b(1, (CharSequence) "t")))
{
return false;
}
@@ -1117,7 +1118,7 @@
v_3 = limit - cursor;
lab2: do {
// literal, line 39
- if (!(eq_s_b(1, "t")))
+ if (!(eq_s_b(1, (CharSequence) "t")))
{
break lab2;
}
@@ -1151,7 +1152,7 @@
v_2 = limit - cursor;
lab1: do {
// literal, line 40
- if (!(eq_s_b(1, "s")))
+ if (!(eq_s_b(1, (CharSequence) "s")))
{
break lab1;
}
@@ -1160,7 +1161,7 @@
cursor = limit - v_2;
// (, line 40
// literal, line 40
- if (!(eq_s_b(1, "t")))
+ if (!(eq_s_b(1, (CharSequence) "t")))
{
return false;
}
@@ -1169,7 +1170,7 @@
v_3 = limit - cursor;
lab2: do {
// literal, line 40
- if (!(eq_s_b(1, "o")))
+ if (!(eq_s_b(1, (CharSequence) "o")))
{
break lab2;
}
@@ -1202,7 +1203,7 @@
v_2 = limit - cursor;
lab1: do {
// literal, line 41
- if (!(eq_s_b(1, "l")))
+ if (!(eq_s_b(1, (CharSequence) "l")))
{
break lab1;
}
@@ -1211,7 +1212,7 @@
cursor = limit - v_2;
lab2: do {
// literal, line 41
- if (!(eq_s_b(1, "m")))
+ if (!(eq_s_b(1, (CharSequence) "m")))
{
break lab2;
}
@@ -1220,7 +1221,7 @@
cursor = limit - v_2;
lab3: do {
// literal, line 41
- if (!(eq_s_b(1, "n")))
+ if (!(eq_s_b(1, (CharSequence) "n")))
{
break lab3;
}
@@ -1228,7 +1229,7 @@
} while (false);
cursor = limit - v_2;
// literal, line 41
- if (!(eq_s_b(1, "r")))
+ if (!(eq_s_b(1, (CharSequence) "r")))
{
return false;
}
@@ -1252,7 +1253,7 @@
}
cursor = limit - v_1;
// literal, line 42
- if (!(eq_s_b(1, "c")))
+ if (!(eq_s_b(1, (CharSequence) "c")))
{
return false;
}
@@ -1281,7 +1282,7 @@
v_2 = limit - cursor;
lab0: do {
// literal, line 43
- if (!(eq_s_b(1, "s")))
+ if (!(eq_s_b(1, (CharSequence) "s")))
{
break lab0;
}
@@ -1294,7 +1295,7 @@
v_3 = limit - cursor;
lab1: do {
// literal, line 43
- if (!(eq_s_b(1, "u")))
+ if (!(eq_s_b(1, (CharSequence) "u")))
{
break lab1;
}
@@ -1326,7 +1327,7 @@
v_2 = limit - cursor;
lab1: do {
// literal, line 44
- if (!(eq_s_b(1, "l")))
+ if (!(eq_s_b(1, (CharSequence) "l")))
{
break lab1;
}
@@ -1335,7 +1336,7 @@
cursor = limit - v_2;
lab2: do {
// literal, line 44
- if (!(eq_s_b(1, "i")))
+ if (!(eq_s_b(1, (CharSequence) "i")))
{
break lab2;
}
@@ -1344,7 +1345,7 @@
cursor = limit - v_2;
// (, line 44
// literal, line 44
- if (!(eq_s_b(1, "e")))
+ if (!(eq_s_b(1, (CharSequence) "e")))
{
return false;
}
@@ -1355,7 +1356,7 @@
}
cursor--;
// literal, line 44
- if (!(eq_s_b(1, "u")))
+ if (!(eq_s_b(1, (CharSequence) "u")))
{
return false;
}
@@ -1379,7 +1380,7 @@
}
cursor = limit - v_1;
// literal, line 45
- if (!(eq_s_b(2, "in")))
+ if (!(eq_s_b(2, (CharSequence) "in")))
{
return false;
}
@@ -1407,7 +1408,7 @@
v_2 = limit - cursor;
lab0: do {
// literal, line 46
- if (!(eq_s_b(1, "f")))
+ if (!(eq_s_b(1, (CharSequence) "f")))
{
break lab0;
}
@@ -1463,7 +1464,7 @@
v_2 = limit - cursor;
lab0: do {
// literal, line 49
- if (!(eq_s_b(3, "met")))
+ if (!(eq_s_b(3, (CharSequence) "met")))
{
break lab0;
}
@@ -1476,7 +1477,7 @@
v_3 = limit - cursor;
lab1: do {
// literal, line 49
- if (!(eq_s_b(4, "ryst")))
+ if (!(eq_s_b(4, (CharSequence) "ryst")))
{
break lab1;
}
@@ -1503,7 +1504,7 @@
}
cursor = limit - v_1;
// literal, line 50
- if (!(eq_s_b(1, "l")))
+ if (!(eq_s_b(1, (CharSequence) "l")))
{
return false;
}
@@ -1588,42 +1589,42 @@
case 1:
// (, line 161
// <-, line 161
- slice_from("ief");
+ slice_from((CharSequence) "ief");
break;
case 2:
// (, line 162
// <-, line 162
- slice_from("uc");
+ slice_from((CharSequence) "uc");
break;
case 3:
// (, line 163
// <-, line 163
- slice_from("um");
+ slice_from((CharSequence) "um");
break;
case 4:
// (, line 164
// <-, line 164
- slice_from("rb");
+ slice_from((CharSequence) "rb");
break;
case 5:
// (, line 165
// <-, line 165
- slice_from("ur");
+ slice_from((CharSequence) "ur");
break;
case 6:
// (, line 166
// <-, line 166
- slice_from("ister");
+ slice_from((CharSequence) "ister");
break;
case 7:
// (, line 167
// <-, line 167
- slice_from("meter");
+ slice_from((CharSequence) "meter");
break;
case 8:
// (, line 168
// <-, line 168
- slice_from("olut");
+ slice_from((CharSequence) "olut");
break;
case 9:
// (, line 169
@@ -1632,7 +1633,7 @@
v_1 = limit - cursor;
lab0: do {
// literal, line 169
- if (!(eq_s_b(1, "a")))
+ if (!(eq_s_b(1, (CharSequence) "a")))
{
break lab0;
}
@@ -1645,7 +1646,7 @@
v_2 = limit - cursor;
lab1: do {
// literal, line 169
- if (!(eq_s_b(1, "i")))
+ if (!(eq_s_b(1, (CharSequence) "i")))
{
break lab1;
}
@@ -1658,7 +1659,7 @@
v_3 = limit - cursor;
lab2: do {
// literal, line 169
- if (!(eq_s_b(1, "o")))
+ if (!(eq_s_b(1, (CharSequence) "o")))
{
break lab2;
}
@@ -1667,77 +1668,77 @@
cursor = limit - v_3;
}
// <-, line 169
- slice_from("l");
+ slice_from((CharSequence) "l");
break;
case 10:
// (, line 170
// <-, line 170
- slice_from("bic");
+ slice_from((CharSequence) "bic");
break;
case 11:
// (, line 171
// <-, line 171
- slice_from("dic");
+ slice_from((CharSequence) "dic");
break;
case 12:
// (, line 172
// <-, line 172
- slice_from("pic");
+ slice_from((CharSequence) "pic");
break;
case 13:
// (, line 173
// <-, line 173
- slice_from("tic");
+ slice_from((CharSequence) "tic");
break;
case 14:
// (, line 174
// <-, line 174
- slice_from("ac");
+ slice_from((CharSequence) "ac");
break;
case 15:
// (, line 175
// <-, line 175
- slice_from("ec");
+ slice_from((CharSequence) "ec");
break;
case 16:
// (, line 176
// <-, line 176
- slice_from("ic");
+ slice_from((CharSequence) "ic");
break;
case 17:
// (, line 177
// <-, line 177
- slice_from("luc");
+ slice_from((CharSequence) "luc");
break;
case 18:
// (, line 178
// <-, line 178
- slice_from("uas");
+ slice_from((CharSequence) "uas");
break;
case 19:
// (, line 179
// <-, line 179
- slice_from("vas");
+ slice_from((CharSequence) "vas");
break;
case 20:
// (, line 180
// <-, line 180
- slice_from("cis");
+ slice_from((CharSequence) "cis");
break;
case 21:
// (, line 181
// <-, line 181
- slice_from("lis");
+ slice_from((CharSequence) "lis");
break;
case 22:
// (, line 182
// <-, line 182
- slice_from("eris");
+ slice_from((CharSequence) "eris");
break;
case 23:
// (, line 183
// <-, line 183
- slice_from("pans");
+ slice_from((CharSequence) "pans");
break;
case 24:
// (, line 184
@@ -1746,7 +1747,7 @@
v_4 = limit - cursor;
lab3: do {
// literal, line 184
- if (!(eq_s_b(1, "s")))
+ if (!(eq_s_b(1, (CharSequence) "s")))
{
break lab3;
}
@@ -1755,22 +1756,22 @@
cursor = limit - v_4;
}
// <-, line 184
- slice_from("ens");
+ slice_from((CharSequence) "ens");
break;
case 25:
// (, line 185
// <-, line 185
- slice_from("ons");
+ slice_from((CharSequence) "ons");
break;
case 26:
// (, line 186
// <-, line 186
- slice_from("lus");
+ slice_from((CharSequence) "lus");
break;
case 27:
// (, line 187
// <-, line 187
- slice_from("rus");
+ slice_from((CharSequence) "rus");
break;
case 28:
// (, line 188
@@ -1779,7 +1780,7 @@
v_5 = limit - cursor;
lab4: do {
// literal, line 188
- if (!(eq_s_b(1, "p")))
+ if (!(eq_s_b(1, (CharSequence) "p")))
{
break lab4;
}
@@ -1792,7 +1793,7 @@
v_6 = limit - cursor;
lab5: do {
// literal, line 188
- if (!(eq_s_b(1, "t")))
+ if (!(eq_s_b(1, (CharSequence) "t")))
{
break lab5;
}
@@ -1801,12 +1802,12 @@
cursor = limit - v_6;
}
// <-, line 188
- slice_from("hes");
+ slice_from((CharSequence) "hes");
break;
case 29:
// (, line 189
// <-, line 189
- slice_from("mis");
+ slice_from((CharSequence) "mis");
break;
case 30:
// (, line 190
@@ -1815,7 +1816,7 @@
v_7 = limit - cursor;
lab6: do {
// literal, line 190
- if (!(eq_s_b(1, "m")))
+ if (!(eq_s_b(1, (CharSequence) "m")))
{
break lab6;
}
@@ -1824,12 +1825,12 @@
cursor = limit - v_7;
}
// <-, line 190
- slice_from("ens");
+ slice_from((CharSequence) "ens");
break;
case 31:
// (, line 192
// <-, line 192
- slice_from("ers");
+ slice_from((CharSequence) "ers");
break;
case 32:
// (, line 193
@@ -1838,7 +1839,7 @@
v_8 = limit - cursor;
lab7: do {
// literal, line 193
- if (!(eq_s_b(1, "n")))
+ if (!(eq_s_b(1, (CharSequence) "n")))
{
break lab7;
}
@@ -1847,22 +1848,23 @@
cursor = limit - v_8;
}
// <-, line 193
- slice_from("es");
+ slice_from((CharSequence) "es");
break;
case 33:
// (, line 194
// <-, line 194
- slice_from("ys");
+ slice_from((CharSequence) "ys");
break;
case 34:
// (, line 195
// <-, line 195
- slice_from("ys");
+ slice_from((CharSequence) "ys");
break;
}
return true;
}
+ @Override
public boolean stem() {
int v_1;
int v_2;
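
A note on the recurring change above, which repeats in every stemmer that follows: each bare string literal passed to eq_s, eq_s_b, slice_from, and insert gains an explicit (CharSequence) cast. This reads as an overload-resolution fix: presumably SnowballProgram now exposes CharSequence variants of these methods next to the older String ones, and without the cast javac keeps binding to the String overload, since String is the more specific applicable type. A minimal, self-contained sketch of that mechanism, with hypothetical names ('eq' stands in for eq_s_b):

    class OverloadDemo {
      static boolean eq(int n, String s)       { return false; } // legacy overload
      static boolean eq(int n, CharSequence s) { return true;  } // newer overload

      public static void main(String[] args) {
        // Without a cast, javac picks the more specific String overload:
        System.out.println(eq(1, "e"));                 // prints false
        // The explicit cast pins the call to the CharSequence overload:
        System.out.println(eq(1, (CharSequence) "e"));  // prints true
      }
    }
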
Index: contrib/analyzers/common/src/java/org/tartarus/snowball/ext/NorwegianStemmer.java
===================================================================
--- contrib/analyzers/common/src/java/org/tartarus/snowball/ext/NorwegianStemmer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/tartarus/snowball/ext/NorwegianStemmer.java (working copy)
@@ -7,6 +7,7 @@
/**
* Generated class implementing code defined by a snowball script.
*/
+@SuppressWarnings("unused")
public class NorwegianStemmer extends SnowballProgram {
private Among a_0[] = {
@@ -195,7 +196,7 @@
cursor = limit - v_3;
// (, line 46
// literal, line 46
- if (!(eq_s_b(1, "k")))
+ if (!(eq_s_b(1, (CharSequence) "k")))
{
return false;
}
@@ -210,7 +211,7 @@
case 3:
// (, line 48
// <-, line 48
- slice_from("er");
+ slice_from((CharSequence) "er");
break;
}
return true;
@@ -302,6 +303,7 @@
return true;
}
+ @Override
public boolean stem() {
int v_1;
int v_2;
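
Two more mechanical changes repeat across the generated stemmers: @SuppressWarnings("unused") on the class, since the code generator emits scratch locals (v_1, v_2, ...) that a given method may never read, and @Override on stem(), so the compiler verifies the method still matches the base-class declaration. A hedged sketch of the shape, with hypothetical names (StemmerBase stands in for SnowballProgram):

    abstract class StemmerBase {
      public abstract boolean stem();
    }

    @SuppressWarnings("unused") // generated code declares locals it may never read
    class DemoStemmer extends StemmerBase {
      @Override // fails to compile if the base-class signature ever drifts
      public boolean stem() {
        int v_1; // typical generated scratch variable, possibly unused
        return true;
      }
    }
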
Index: contrib/analyzers/common/src/java/org/tartarus/snowball/ext/PorterStemmer.java
===================================================================
--- contrib/analyzers/common/src/java/org/tartarus/snowball/ext/PorterStemmer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/tartarus/snowball/ext/PorterStemmer.java (working copy)
@@ -7,6 +7,7 @@
/**
* Generated class implementing code defined by a snowball script.
*/
+@SuppressWarnings("unused")
public class PorterStemmer extends SnowballProgram {
private Among a_0[] = {
@@ -160,12 +161,12 @@
case 1:
// (, line 26
// <-, line 26
- slice_from("ss");
+ slice_from((CharSequence) "ss");
break;
case 2:
// (, line 27
// <-, line 27
- slice_from("i");
+ slice_from((CharSequence) "i");
break;
case 3:
// (, line 29
@@ -203,7 +204,7 @@
return false;
}
// <-, line 35
- slice_from("ee");
+ slice_from((CharSequence) "ee");
break;
case 2:
// (, line 37
@@ -245,7 +246,7 @@
// <+, line 41
{
int c = cursor;
- insert(cursor, cursor, "e");
+ insert(cursor, cursor, (CharSequence) "e");
cursor = c;
}
break;
@@ -282,7 +283,7 @@
// <+, line 45
{
int c = cursor;
- insert(cursor, cursor, "e");
+ insert(cursor, cursor, (CharSequence) "e");
cursor = c;
}
break;
@@ -302,7 +303,7 @@
v_1 = limit - cursor;
lab1: do {
// literal, line 52
- if (!(eq_s_b(1, "y")))
+ if (!(eq_s_b(1, (CharSequence) "y")))
{
break lab1;
}
@@ -310,7 +311,7 @@
} while (false);
cursor = limit - v_1;
// literal, line 52
- if (!(eq_s_b(1, "Y")))
+ if (!(eq_s_b(1, (CharSequence) "Y")))
{
return false;
}
@@ -334,7 +335,7 @@
cursor--;
}
// <-, line 54
- slice_from("i");
+ slice_from((CharSequence) "i");
return true;
}
@@ -362,72 +363,72 @@
case 1:
// (, line 59
// <-, line 59
- slice_from("tion");
+ slice_from((CharSequence) "tion");
break;
case 2:
// (, line 60
// <-, line 60
- slice_from("ence");
+ slice_from((CharSequence) "ence");
break;
case 3:
// (, line 61
// <-, line 61
- slice_from("ance");
+ slice_from((CharSequence) "ance");
break;
case 4:
// (, line 62
// <-, line 62
- slice_from("able");
+ slice_from((CharSequence) "able");
break;
case 5:
// (, line 63
// <-, line 63
- slice_from("ent");
+ slice_from((CharSequence) "ent");
break;
case 6:
// (, line 64
// <-, line 64
- slice_from("e");
+ slice_from((CharSequence) "e");
break;
case 7:
// (, line 66
// <-, line 66
- slice_from("ize");
+ slice_from((CharSequence) "ize");
break;
case 8:
// (, line 68
// <-, line 68
- slice_from("ate");
+ slice_from((CharSequence) "ate");
break;
case 9:
// (, line 69
// <-, line 69
- slice_from("al");
+ slice_from((CharSequence) "al");
break;
case 10:
// (, line 71
// <-, line 71
- slice_from("al");
+ slice_from((CharSequence) "al");
break;
case 11:
// (, line 72
// <-, line 72
- slice_from("ful");
+ slice_from((CharSequence) "ful");
break;
case 12:
// (, line 74
// <-, line 74
- slice_from("ous");
+ slice_from((CharSequence) "ous");
break;
case 13:
// (, line 76
// <-, line 76
- slice_from("ive");
+ slice_from((CharSequence) "ive");
break;
case 14:
// (, line 77
// <-, line 77
- slice_from("ble");
+ slice_from((CharSequence) "ble");
break;
}
return true;
@@ -457,12 +458,12 @@
case 1:
// (, line 83
// <-, line 83
- slice_from("al");
+ slice_from((CharSequence) "al");
break;
case 2:
// (, line 85
// <-, line 85
- slice_from("ic");
+ slice_from((CharSequence) "ic");
break;
case 3:
// (, line 87
@@ -507,7 +508,7 @@
v_1 = limit - cursor;
lab1: do {
// literal, line 96
- if (!(eq_s_b(1, "s")))
+ if (!(eq_s_b(1, (CharSequence) "s")))
{
break lab1;
}
@@ -515,7 +516,7 @@
} while (false);
cursor = limit - v_1;
// literal, line 96
- if (!(eq_s_b(1, "t")))
+ if (!(eq_s_b(1, (CharSequence) "t")))
{
return false;
}
@@ -534,7 +535,7 @@
// [, line 101
ket = cursor;
// literal, line 101
- if (!(eq_s_b(1, "e")))
+ if (!(eq_s_b(1, (CharSequence) "e")))
{
return false;
}
@@ -582,7 +583,7 @@
// [, line 107
ket = cursor;
// literal, line 107
- if (!(eq_s_b(1, "l")))
+ if (!(eq_s_b(1, (CharSequence) "l")))
{
return false;
}
@@ -594,7 +595,7 @@
return false;
}
// literal, line 108
- if (!(eq_s_b(1, "l")))
+ if (!(eq_s_b(1, (CharSequence) "l")))
{
return false;
}
@@ -603,6 +604,7 @@
return true;
}
+ @Override
public boolean stem() {
int v_1;
int v_2;
@@ -630,14 +632,14 @@
// [, line 116
bra = cursor;
// literal, line 116
- if (!(eq_s(1, "y")))
+ if (!(eq_s(1, (CharSequence) "y")))
{
break lab0;
}
// ], line 116
ket = cursor;
// <-, line 116
- slice_from("Y");
+ slice_from((CharSequence) "Y");
// set Y_found, line 116
B_Y_found = true;
} while (false);
@@ -664,7 +666,7 @@
// [, line 117
bra = cursor;
// literal, line 117
- if (!(eq_s(1, "y")))
+ if (!(eq_s(1, (CharSequence) "y")))
{
break lab5;
}
@@ -681,7 +683,7 @@
cursor++;
}
// <-, line 117
- slice_from("Y");
+ slice_from((CharSequence) "Y");
// set Y_found, line 117
B_Y_found = true;
continue replab2;
@@ -874,7 +876,7 @@
// [, line 137
bra = cursor;
// literal, line 137
- if (!(eq_s(1, "Y")))
+ if (!(eq_s(1, (CharSequence) "Y")))
{
break lab27;
}
@@ -891,7 +893,7 @@
cursor++;
}
// <-, line 137
- slice_from("y");
+ slice_from((CharSequence) "y");
continue replab24;
} while (false);
cursor = v_19;
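
One variant of the cast appears around insertions: PorterStemmer brackets insert(cursor, cursor, (CharSequence) "e") with int c = cursor; ... cursor = c;, which reads as saving and restoring the cursor across a call that can shift positions in the buffer. A hedged sketch of the idiom, using a plain StringBuilder as a stand-in for the stemmer's buffer:

    public class InsertIdiom {
      public static void main(String[] args) {
        StringBuilder current = new StringBuilder("agre");
        int cursor = current.length(); // position just past the stem
        int c = cursor;                // save the position
        current.insert(cursor, "e");   // insertion shifts offsets at/after it
        cursor = c;                    // restore, as the generated code does
        System.out.println(current + " / cursor=" + cursor); // agree / cursor=4
      }
    }
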
Index: contrib/analyzers/common/src/java/org/tartarus/snowball/ext/PortugueseStemmer.java
===================================================================
--- contrib/analyzers/common/src/java/org/tartarus/snowball/ext/PortugueseStemmer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/tartarus/snowball/ext/PortugueseStemmer.java (working copy)
@@ -7,6 +7,7 @@
/**
* Generated class implementing code defined by a snowball script.
*/
+@SuppressWarnings("unused")
public class PortugueseStemmer extends SnowballProgram {
private Among a_0[] = {
@@ -266,12 +267,12 @@
case 1:
// (, line 38
// <-, line 38
- slice_from("a~");
+ slice_from((CharSequence) "a~");
break;
case 2:
// (, line 39
// <-, line 39
- slice_from("o~");
+ slice_from((CharSequence) "o~");
break;
case 3:
// (, line 40
@@ -519,12 +520,12 @@
case 1:
// (, line 64
// <-, line 64
- slice_from("\u00E3");
+ slice_from((CharSequence) "\u00E3");
break;
case 2:
// (, line 65
// <-, line 65
- slice_from("\u00F5");
+ slice_from((CharSequence) "\u00F5");
break;
case 3:
// (, line 66
@@ -606,7 +607,7 @@
return false;
}
// <-, line 98
- slice_from("log");
+ slice_from((CharSequence) "log");
break;
case 3:
// (, line 101
@@ -616,7 +617,7 @@
return false;
}
// <-, line 102
- slice_from("u");
+ slice_from((CharSequence) "u");
break;
case 4:
// (, line 105
@@ -626,7 +627,7 @@
return false;
}
// <-, line 106
- slice_from("ente");
+ slice_from((CharSequence) "ente");
break;
case 5:
// (, line 109
@@ -669,7 +670,7 @@
// [, line 113
ket = cursor;
// literal, line 113
- if (!(eq_s_b(2, "at")))
+ if (!(eq_s_b(2, (CharSequence) "at")))
{
cursor = limit - v_1;
break lab0;
@@ -788,7 +789,7 @@
// [, line 148
ket = cursor;
// literal, line 148
- if (!(eq_s_b(2, "at")))
+ if (!(eq_s_b(2, (CharSequence) "at")))
{
cursor = limit - v_4;
break lab3;
@@ -813,12 +814,12 @@
return false;
}
// literal, line 153
- if (!(eq_s_b(1, "e")))
+ if (!(eq_s_b(1, (CharSequence) "e")))
{
return false;
}
// <-, line 154
- slice_from("ir");
+ slice_from((CharSequence) "ir");
break;
}
return true;
@@ -931,7 +932,7 @@
lab1: do {
// (, line 194
// literal, line 194
- if (!(eq_s_b(1, "u")))
+ if (!(eq_s_b(1, (CharSequence) "u")))
{
break lab1;
}
@@ -940,7 +941,7 @@
// test, line 194
v_2 = limit - cursor;
// literal, line 194
- if (!(eq_s_b(1, "g")))
+ if (!(eq_s_b(1, (CharSequence) "g")))
{
break lab1;
}
@@ -950,7 +951,7 @@
cursor = limit - v_1;
// (, line 195
// literal, line 195
- if (!(eq_s_b(1, "i")))
+ if (!(eq_s_b(1, (CharSequence) "i")))
{
return false;
}
@@ -959,7 +960,7 @@
// test, line 195
v_3 = limit - cursor;
// literal, line 195
- if (!(eq_s_b(1, "c")))
+ if (!(eq_s_b(1, (CharSequence) "c")))
{
return false;
}
@@ -976,12 +977,13 @@
case 2:
// (, line 196
// <-, line 196
- slice_from("c");
+ slice_from((CharSequence) "c");
break;
}
return true;
}
+ @Override
public boolean stem() {
int v_1;
int v_2;
@@ -1055,7 +1057,7 @@
// [, line 207
ket = cursor;
// literal, line 207
- if (!(eq_s_b(1, "i")))
+ if (!(eq_s_b(1, (CharSequence) "i")))
{
break lab7;
}
@@ -1064,7 +1066,7 @@
// test, line 207
v_8 = limit - cursor;
// literal, line 207
- if (!(eq_s_b(1, "c")))
+ if (!(eq_s_b(1, (CharSequence) "c")))
{
break lab7;
}
Index: contrib/analyzers/common/src/java/org/tartarus/snowball/ext/RomanianStemmer.java
===================================================================
--- contrib/analyzers/common/src/java/org/tartarus/snowball/ext/RomanianStemmer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/tartarus/snowball/ext/RomanianStemmer.java (working copy)
@@ -7,6 +7,7 @@
/**
* Generated class implementing code defined by a snowball script.
*/
+@SuppressWarnings("unused")
public class RomanianStemmer extends SnowballProgram {
private Among a_0[] = {
@@ -296,7 +297,7 @@
lab5: do {
// (, line 33
// literal, line 33
- if (!(eq_s(1, "u")))
+ if (!(eq_s(1, (CharSequence) "u")))
{
break lab5;
}
@@ -307,13 +308,13 @@
break lab5;
}
// <-, line 33
- slice_from("U");
+ slice_from((CharSequence) "U");
break lab4;
} while (false);
cursor = v_3;
// (, line 34
// literal, line 34
- if (!(eq_s(1, "i")))
+ if (!(eq_s(1, (CharSequence) "i")))
{
break lab3;
}
@@ -324,7 +325,7 @@
break lab3;
}
// <-, line 34
- slice_from("I");
+ slice_from((CharSequence) "I");
} while (false);
cursor = v_2;
break golab2;
@@ -572,12 +573,12 @@
case 1:
// (, line 59
// <-, line 59
- slice_from("i");
+ slice_from((CharSequence) "i");
break;
case 2:
// (, line 60
// <-, line 60
- slice_from("u");
+ slice_from((CharSequence) "u");
break;
case 3:
// (, line 61
@@ -651,17 +652,17 @@
case 2:
// (, line 77
// <-, line 77
- slice_from("a");
+ slice_from((CharSequence) "a");
break;
case 3:
// (, line 79
// <-, line 79
- slice_from("e");
+ slice_from((CharSequence) "e");
break;
case 4:
// (, line 81
// <-, line 81
- slice_from("i");
+ slice_from((CharSequence) "i");
break;
case 5:
// (, line 83
@@ -670,7 +671,7 @@
v_1 = limit - cursor;
lab0: do {
// literal, line 83
- if (!(eq_s_b(2, "ab")))
+ if (!(eq_s_b(2, (CharSequence) "ab")))
{
break lab0;
}
@@ -679,17 +680,17 @@
cursor = limit - v_1;
}
// <-, line 83
- slice_from("i");
+ slice_from((CharSequence) "i");
break;
case 6:
// (, line 85
// <-, line 85
- slice_from("at");
+ slice_from((CharSequence) "at");
break;
case 7:
// (, line 87
// <-, line 87
- slice_from("a\u0163i");
+ slice_from((CharSequence) "a\u0163i");
break;
}
return true;
@@ -723,32 +724,32 @@
case 1:
// (, line 100
// <-, line 101
- slice_from("abil");
+ slice_from((CharSequence) "abil");
break;
case 2:
// (, line 103
// <-, line 104
- slice_from("ibil");
+ slice_from((CharSequence) "ibil");
break;
case 3:
// (, line 106
// <-, line 107
- slice_from("iv");
+ slice_from((CharSequence) "iv");
break;
case 4:
// (, line 112
// <-, line 113
- slice_from("ic");
+ slice_from((CharSequence) "ic");
break;
case 5:
// (, line 117
// <-, line 118
- slice_from("at");
+ slice_from((CharSequence) "at");
break;
case 6:
// (, line 121
// <-, line 122
- slice_from("it");
+ slice_from((CharSequence) "it");
break;
}
// set standard_suffix_removed, line 125
@@ -805,19 +806,19 @@
case 2:
// (, line 151
// literal, line 152
- if (!(eq_s_b(1, "\u0163")))
+ if (!(eq_s_b(1, (CharSequence) "\u0163")))
{
return false;
}
// ], line 152
bra = cursor;
// <-, line 152
- slice_from("t");
+ slice_from((CharSequence) "t");
break;
case 3:
// (, line 155
// <-, line 156
- slice_from("ist");
+ slice_from((CharSequence) "ist");
break;
}
// set standard_suffix_removed, line 160
@@ -871,7 +872,7 @@
} while (false);
cursor = limit - v_3;
// literal, line 200
- if (!(eq_s_b(1, "u")))
+ if (!(eq_s_b(1, (CharSequence) "u")))
{
limit_backward = v_2;
return false;
@@ -920,6 +921,7 @@
return true;
}
+ @Override
public boolean stem() {
int v_1;
int v_2;
Index: contrib/analyzers/common/src/java/org/tartarus/snowball/ext/RussianStemmer.java
===================================================================
--- contrib/analyzers/common/src/java/org/tartarus/snowball/ext/RussianStemmer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/tartarus/snowball/ext/RussianStemmer.java (working copy)
@@ -7,6 +7,7 @@
/**
* Generated class implementing code defined by a snowball script.
*/
+@SuppressWarnings("unused")
public class RussianStemmer extends SnowballProgram {
private Among a_0[] = {
@@ -291,7 +292,7 @@
v_1 = limit - cursor;
lab1: do {
// literal, line 76
- if (!(eq_s_b(1, "\u0430")))
+ if (!(eq_s_b(1, (CharSequence) "\u0430")))
{
break lab1;
}
@@ -299,7 +300,7 @@
} while (false);
cursor = limit - v_1;
// literal, line 76
- if (!(eq_s_b(1, "\u044F")))
+ if (!(eq_s_b(1, (CharSequence) "\u044F")))
{
return false;
}
@@ -377,7 +378,7 @@
v_2 = limit - cursor;
lab2: do {
// literal, line 115
- if (!(eq_s_b(1, "\u0430")))
+ if (!(eq_s_b(1, (CharSequence) "\u0430")))
{
break lab2;
}
@@ -385,7 +386,7 @@
} while (false);
cursor = limit - v_2;
// literal, line 115
- if (!(eq_s_b(1, "\u044F")))
+ if (!(eq_s_b(1, (CharSequence) "\u044F")))
{
cursor = limit - v_1;
break lab0;
@@ -453,7 +454,7 @@
v_1 = limit - cursor;
lab1: do {
// literal, line 143
- if (!(eq_s_b(1, "\u0430")))
+ if (!(eq_s_b(1, (CharSequence) "\u0430")))
{
break lab1;
}
@@ -461,7 +462,7 @@
} while (false);
cursor = limit - v_1;
// literal, line 143
- if (!(eq_s_b(1, "\u044F")))
+ if (!(eq_s_b(1, (CharSequence) "\u044F")))
{
return false;
}
@@ -556,14 +557,14 @@
// [, line 189
ket = cursor;
// literal, line 189
- if (!(eq_s_b(1, "\u043D")))
+ if (!(eq_s_b(1, (CharSequence) "\u043D")))
{
return false;
}
// ], line 189
bra = cursor;
// literal, line 189
- if (!(eq_s_b(1, "\u043D")))
+ if (!(eq_s_b(1, (CharSequence) "\u043D")))
{
return false;
}
@@ -573,7 +574,7 @@
case 2:
// (, line 192
// literal, line 192
- if (!(eq_s_b(1, "\u043D")))
+ if (!(eq_s_b(1, (CharSequence) "\u043D")))
{
return false;
}
@@ -589,6 +590,7 @@
return true;
}
+ @Override
public boolean stem() {
int v_1;
int v_2;
@@ -689,7 +691,7 @@
// [, line 209
ket = cursor;
// literal, line 209
- if (!(eq_s_b(1, "\u0438")))
+ if (!(eq_s_b(1, (CharSequence) "\u0438")))
{
cursor = limit - v_8;
break lab8;
Index: contrib/analyzers/common/src/java/org/tartarus/snowball/ext/SpanishStemmer.java
===================================================================
--- contrib/analyzers/common/src/java/org/tartarus/snowball/ext/SpanishStemmer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/tartarus/snowball/ext/SpanishStemmer.java (working copy)
@@ -7,6 +7,7 @@
/**
* Generated class implementing code defined by a snowball script.
*/
+@SuppressWarnings("unused")
public class SpanishStemmer extends SnowballProgram {
private Among a_0[] = {
@@ -482,27 +483,27 @@
case 1:
// (, line 51
// <-, line 51
- slice_from("a");
+ slice_from((CharSequence) "a");
break;
case 2:
// (, line 52
// <-, line 52
- slice_from("e");
+ slice_from((CharSequence) "e");
break;
case 3:
// (, line 53
// <-, line 53
- slice_from("i");
+ slice_from((CharSequence) "i");
break;
case 4:
// (, line 54
// <-, line 54
- slice_from("o");
+ slice_from((CharSequence) "o");
break;
case 5:
// (, line 55
// <-, line 55
- slice_from("u");
+ slice_from((CharSequence) "u");
break;
case 6:
// (, line 57
@@ -577,35 +578,35 @@
// ], line 73
bra = cursor;
// <-, line 73
- slice_from("iendo");
+ slice_from((CharSequence) "iendo");
break;
case 2:
// (, line 74
// ], line 74
bra = cursor;
// <-, line 74
- slice_from("ando");
+ slice_from((CharSequence) "ando");
break;
case 3:
// (, line 75
// ], line 75
bra = cursor;
// <-, line 75
- slice_from("ar");
+ slice_from((CharSequence) "ar");
break;
case 4:
// (, line 76
// ], line 76
bra = cursor;
// <-, line 76
- slice_from("er");
+ slice_from((CharSequence) "er");
break;
case 5:
// (, line 77
// ], line 77
bra = cursor;
// <-, line 77
- slice_from("ir");
+ slice_from((CharSequence) "ir");
break;
case 6:
// (, line 81
@@ -615,7 +616,7 @@
case 7:
// (, line 82
// literal, line 82
- if (!(eq_s_b(1, "u")))
+ if (!(eq_s_b(1, (CharSequence) "u")))
{
return false;
}
@@ -673,7 +674,7 @@
// [, line 106
ket = cursor;
// literal, line 106
- if (!(eq_s_b(2, "ic")))
+ if (!(eq_s_b(2, (CharSequence) "ic")))
{
cursor = limit - v_1;
break lab0;
@@ -698,7 +699,7 @@
return false;
}
// <-, line 111
- slice_from("log");
+ slice_from((CharSequence) "log");
break;
case 4:
// (, line 114
@@ -708,7 +709,7 @@
return false;
}
// <-, line 115
- slice_from("u");
+ slice_from((CharSequence) "u");
break;
case 5:
// (, line 118
@@ -718,7 +719,7 @@
return false;
}
// <-, line 119
- slice_from("ente");
+ slice_from((CharSequence) "ente");
break;
case 6:
// (, line 122
@@ -761,7 +762,7 @@
// [, line 126
ket = cursor;
// literal, line 126
- if (!(eq_s_b(2, "at")))
+ if (!(eq_s_b(2, (CharSequence) "at")))
{
cursor = limit - v_2;
break lab1;
@@ -880,7 +881,7 @@
// [, line 161
ket = cursor;
// literal, line 161
- if (!(eq_s_b(2, "at")))
+ if (!(eq_s_b(2, (CharSequence) "at")))
{
cursor = limit - v_5;
break lab4;
@@ -936,7 +937,7 @@
case 1:
// (, line 171
// literal, line 171
- if (!(eq_s_b(1, "u")))
+ if (!(eq_s_b(1, (CharSequence) "u")))
{
return false;
}
@@ -988,7 +989,7 @@
lab0: do {
// (, line 179
// literal, line 179
- if (!(eq_s_b(1, "u")))
+ if (!(eq_s_b(1, (CharSequence) "u")))
{
cursor = limit - v_3;
break lab0;
@@ -996,7 +997,7 @@
// test, line 179
v_4 = limit - cursor;
// literal, line 179
- if (!(eq_s_b(1, "g")))
+ if (!(eq_s_b(1, (CharSequence) "g")))
{
cursor = limit - v_3;
break lab0;
@@ -1061,7 +1062,7 @@
// [, line 210
ket = cursor;
// literal, line 210
- if (!(eq_s_b(1, "u")))
+ if (!(eq_s_b(1, (CharSequence) "u")))
{
cursor = limit - v_1;
break lab0;
@@ -1071,7 +1072,7 @@
// test, line 210
v_2 = limit - cursor;
// literal, line 210
- if (!(eq_s_b(1, "g")))
+ if (!(eq_s_b(1, (CharSequence) "g")))
{
cursor = limit - v_1;
break lab0;
@@ -1091,6 +1092,7 @@
return true;
}
+ @Override
public boolean stem() {
int v_1;
int v_2;
Index: contrib/analyzers/common/src/java/org/tartarus/snowball/ext/SwedishStemmer.java
===================================================================
--- contrib/analyzers/common/src/java/org/tartarus/snowball/ext/SwedishStemmer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/tartarus/snowball/ext/SwedishStemmer.java (working copy)
@@ -7,6 +7,7 @@
/**
* Generated class implementing code defined by a snowball script.
*/
+@SuppressWarnings("unused")
public class SwedishStemmer extends SnowballProgram {
private Among a_0[] = {
@@ -281,18 +282,19 @@
case 2:
// (, line 58
// <-, line 58
- slice_from("l\u00F6s");
+ slice_from((CharSequence) "l\u00F6s");
break;
case 3:
// (, line 59
// <-, line 59
- slice_from("full");
+ slice_from((CharSequence) "full");
break;
}
limit_backward = v_2;
return true;
}
+ @Override
public boolean stem() {
int v_1;
int v_2;
Index: contrib/analyzers/common/src/java/org/tartarus/snowball/ext/TurkishStemmer.java
===================================================================
--- contrib/analyzers/common/src/java/org/tartarus/snowball/ext/TurkishStemmer.java (revision 916146)
+++ contrib/analyzers/common/src/java/org/tartarus/snowball/ext/TurkishStemmer.java (working copy)
@@ -7,6 +7,7 @@
/**
* Generated class implementing code defined by a snowball script.
*/
+@SuppressWarnings("unused")
public class TurkishStemmer extends SnowballProgram {
private Among a_0[] = {
@@ -269,7 +270,7 @@
lab3: do {
// (, line 116
// literal, line 116
- if (!(eq_s_b(1, "a")))
+ if (!(eq_s_b(1, (CharSequence) "a")))
{
break lab3;
}
@@ -298,7 +299,7 @@
lab6: do {
// (, line 117
// literal, line 117
- if (!(eq_s_b(1, "e")))
+ if (!(eq_s_b(1, (CharSequence) "e")))
{
break lab6;
}
@@ -327,7 +328,7 @@
lab9: do {
// (, line 118
// literal, line 118
- if (!(eq_s_b(1, "\u0131")))
+ if (!(eq_s_b(1, (CharSequence) "\u0131")))
{
break lab9;
}
@@ -356,7 +357,7 @@
lab12: do {
// (, line 119
// literal, line 119
- if (!(eq_s_b(1, "i")))
+ if (!(eq_s_b(1, (CharSequence) "i")))
{
break lab12;
}
@@ -385,7 +386,7 @@
lab15: do {
// (, line 120
// literal, line 120
- if (!(eq_s_b(1, "o")))
+ if (!(eq_s_b(1, (CharSequence) "o")))
{
break lab15;
}
@@ -414,7 +415,7 @@
lab18: do {
// (, line 121
// literal, line 121
- if (!(eq_s_b(1, "\u00F6")))
+ if (!(eq_s_b(1, (CharSequence) "\u00F6")))
{
break lab18;
}
@@ -443,7 +444,7 @@
lab21: do {
// (, line 122
// literal, line 122
- if (!(eq_s_b(1, "u")))
+ if (!(eq_s_b(1, (CharSequence) "u")))
{
break lab21;
}
@@ -471,7 +472,7 @@
cursor = limit - v_3;
// (, line 123
// literal, line 123
- if (!(eq_s_b(1, "\u00FC")))
+ if (!(eq_s_b(1, (CharSequence) "\u00FC")))
{
return false;
}
@@ -517,7 +518,7 @@
// test, line 133
v_2 = limit - cursor;
// literal, line 133
- if (!(eq_s_b(1, "n")))
+ if (!(eq_s_b(1, (CharSequence) "n")))
{
break lab1;
}
@@ -549,7 +550,7 @@
// test, line 135
v_5 = limit - cursor;
// literal, line 135
- if (!(eq_s_b(1, "n")))
+ if (!(eq_s_b(1, (CharSequence) "n")))
{
break lab2;
}
@@ -598,7 +599,7 @@
// test, line 144
v_2 = limit - cursor;
// literal, line 144
- if (!(eq_s_b(1, "s")))
+ if (!(eq_s_b(1, (CharSequence) "s")))
{
break lab1;
}
@@ -630,7 +631,7 @@
// test, line 146
v_5 = limit - cursor;
// literal, line 146
- if (!(eq_s_b(1, "s")))
+ if (!(eq_s_b(1, (CharSequence) "s")))
{
break lab2;
}
@@ -679,7 +680,7 @@
// test, line 154
v_2 = limit - cursor;
// literal, line 154
- if (!(eq_s_b(1, "y")))
+ if (!(eq_s_b(1, (CharSequence) "y")))
{
break lab1;
}
@@ -711,7 +712,7 @@
// test, line 156
v_5 = limit - cursor;
// literal, line 156
- if (!(eq_s_b(1, "y")))
+ if (!(eq_s_b(1, (CharSequence) "y")))
{
break lab2;
}
@@ -1043,7 +1044,7 @@
private boolean r_mark_ki() {
// (, line 236
// literal, line 237
- if (!(eq_s_b(2, "ki")))
+ if (!(eq_s_b(2, (CharSequence) "ki")))
{
return false;
}
@@ -1254,7 +1255,7 @@
private boolean r_mark_yken() {
// (, line 307
// literal, line 308
- if (!(eq_s_b(3, "ken")))
+ if (!(eq_s_b(3, (CharSequence) "ken")))
{
return false;
}
@@ -2640,22 +2641,22 @@
case 1:
// (, line 417
// <-, line 417
- slice_from("p");
+ slice_from((CharSequence) "p");
break;
case 2:
// (, line 418
// <-, line 418
- slice_from("\u00E7");
+ slice_from((CharSequence) "\u00E7");
break;
case 3:
// (, line 419
// <-, line 419
- slice_from("t");
+ slice_from((CharSequence) "t");
break;
case 4:
// (, line 420
// <-, line 420
- slice_from("k");
+ slice_from((CharSequence) "k");
break;
}
return true;
@@ -2686,7 +2687,7 @@
v_2 = limit - cursor;
lab1: do {
// literal, line 431
- if (!(eq_s_b(1, "d")))
+ if (!(eq_s_b(1, (CharSequence) "d")))
{
break lab1;
}
@@ -2694,7 +2695,7 @@
} while (false);
cursor = limit - v_2;
// literal, line 431
- if (!(eq_s_b(1, "g")))
+ if (!(eq_s_b(1, (CharSequence) "g")))
{
return false;
}
@@ -2733,7 +2734,7 @@
v_6 = limit - cursor;
lab7: do {
// literal, line 432
- if (!(eq_s_b(1, "a")))
+ if (!(eq_s_b(1, (CharSequence) "a")))
{
break lab7;
}
@@ -2741,7 +2742,7 @@
} while (false);
cursor = limit - v_6;
// literal, line 432
- if (!(eq_s_b(1, "\u0131")))
+ if (!(eq_s_b(1, (CharSequence) "\u0131")))
{
break lab3;
}
@@ -2750,7 +2751,7 @@
// <+, line 432
{
int c = cursor;
- insert(cursor, cursor, "\u0131");
+ insert(cursor, cursor, (CharSequence) "\u0131");
cursor = c;
}
break lab2;
@@ -2786,7 +2787,7 @@
v_9 = limit - cursor;
lab12: do {
// literal, line 434
- if (!(eq_s_b(1, "e")))
+ if (!(eq_s_b(1, (CharSequence) "e")))
{
break lab12;
}
@@ -2794,7 +2795,7 @@
} while (false);
cursor = limit - v_9;
// literal, line 434
- if (!(eq_s_b(1, "i")))
+ if (!(eq_s_b(1, (CharSequence) "i")))
{
break lab8;
}
@@ -2803,7 +2804,7 @@
// <+, line 434
{
int c = cursor;
- insert(cursor, cursor, "i");
+ insert(cursor, cursor, (CharSequence) "i");
cursor = c;
}
break lab2;
@@ -2839,7 +2840,7 @@
v_12 = limit - cursor;
lab17: do {
// literal, line 436
- if (!(eq_s_b(1, "o")))
+ if (!(eq_s_b(1, (CharSequence) "o")))
{
break lab17;
}
@@ -2847,7 +2848,7 @@
} while (false);
cursor = limit - v_12;
// literal, line 436
- if (!(eq_s_b(1, "u")))
+ if (!(eq_s_b(1, (CharSequence) "u")))
{
break lab13;
}
@@ -2856,7 +2857,7 @@
// <+, line 436
{
int c = cursor;
- insert(cursor, cursor, "u");
+ insert(cursor, cursor, (CharSequence) "u");
cursor = c;
}
break lab2;
@@ -2891,7 +2892,7 @@
v_15 = limit - cursor;
lab21: do {
// literal, line 438
- if (!(eq_s_b(1, "\u00F6")))
+ if (!(eq_s_b(1, (CharSequence) "\u00F6")))
{
break lab21;
}
@@ -2899,7 +2900,7 @@
} while (false);
cursor = limit - v_15;
// literal, line 438
- if (!(eq_s_b(1, "\u00FC")))
+ if (!(eq_s_b(1, (CharSequence) "\u00FC")))
{
return false;
}
@@ -2908,7 +2909,7 @@
// <+, line 438
{
int c = cursor;
- insert(cursor, cursor, "\u00FC");
+ insert(cursor, cursor, (CharSequence) "\u00FC");
cursor = c;
}
} while (false);
@@ -2979,7 +2980,7 @@
{
lab3: do {
// literal, line 450
- if (!(eq_s(2, "ad")))
+ if (!(eq_s(2, (CharSequence) "ad")))
{
break lab3;
}
@@ -3010,7 +3011,7 @@
{
lab5: do {
// literal, line 452
- if (!(eq_s(5, "soyad")))
+ if (!(eq_s(5, (CharSequence) "soyad")))
{
break lab5;
}
@@ -3079,6 +3080,7 @@
cursor = limit_backward; return true;
}
+ @Override
public boolean stem() {
int v_1;
int v_2;
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicAnalyzer.java (working copy)
@@ -24,7 +24,6 @@
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
-import org.apache.lucene.util.Version;
/**
* Test the Arabic Analyzer
@@ -35,14 +34,14 @@
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new ArabicAnalyzer(Version.LUCENE_CURRENT);
+ new ArabicAnalyzer(TEST_VERSION_CURRENT);
}
/**
* Some simple tests showing some features of the analyzer, how some regular forms will conflate
*/
public void testBasicFeatures() throws Exception {
- ArabicAnalyzer a = new ArabicAnalyzer(Version.LUCENE_CURRENT);
+ ArabicAnalyzer a = new ArabicAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(a, "كبير", new String[] { "كبير" });
assertAnalyzesTo(a, "كبيرة", new String[] { "كبير" }); // feminine marker
@@ -63,7 +62,7 @@
* Simple tests to show things are getting reset correctly, etc.
*/
public void testReusableTokenStream() throws Exception {
- ArabicAnalyzer a = new ArabicAnalyzer(Version.LUCENE_CURRENT);
+ ArabicAnalyzer a = new ArabicAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesToReuse(a, "كبير", new String[] { "كبير" });
assertAnalyzesToReuse(a, "كبيرة", new String[] { "كبير" }); // feminine marker
}
@@ -72,7 +71,7 @@
* Non-arabic text gets treated in a similar way as SimpleAnalyzer.
*/
public void testEnglishInput() throws Exception {
- assertAnalyzesTo(new ArabicAnalyzer(Version.LUCENE_CURRENT), "English text.", new String[] {
+ assertAnalyzesTo(new ArabicAnalyzer(TEST_VERSION_CURRENT), "English text.", new String[] {
"english", "text" });
}
@@ -82,7 +81,7 @@
public void testCustomStopwords() throws Exception {
Set set = new HashSet();
Collections.addAll(set, "the", "and", "a");
- ArabicAnalyzer a = new ArabicAnalyzer(Version.LUCENE_CURRENT, set);
+ ArabicAnalyzer a = new ArabicAnalyzer(TEST_VERSION_CURRENT, set);
assertAnalyzesTo(a, "The quick brown fox.", new String[] { "quick",
"brown", "fox" });
}
@@ -90,12 +89,12 @@
public void testWithStemExclusionSet() throws IOException {
Set set = new HashSet();
set.add("ساهدهات");
- ArabicAnalyzer a = new ArabicAnalyzer(Version.LUCENE_CURRENT, CharArraySet.EMPTY_SET, set);
+ ArabicAnalyzer a = new ArabicAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET, set);
assertAnalyzesTo(a, "كبيرة the quick ساهدهات", new String[] { "كبير","the", "quick", "ساهدهات" });
assertAnalyzesToReuse(a, "كبيرة the quick ساهدهات", new String[] { "كبير","the", "quick", "ساهدهات" });
- a = new ArabicAnalyzer(Version.LUCENE_CURRENT, CharArraySet.EMPTY_SET, CharArraySet.EMPTY_SET);
+ a = new ArabicAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET, CharArraySet.EMPTY_SET);
assertAnalyzesTo(a, "كبيرة the quick ساهدهات", new String[] { "كبير","the", "quick", "ساهد" });
assertAnalyzesToReuse(a, "كبيرة the quick ساهدهات", new String[] { "كبير","the", "quick", "ساهد" });
}
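
From here on the test files follow one pattern: each hard-coded Version.LUCENE_CURRENT becomes TEST_VERSION_CURRENT, presumably a constant inherited from the shared test base class (BaseTokenStreamTestCase or its LuceneTestCase ancestor), so the version under test is defined in one place. The org.apache.lucene.util.Version import is dropped only where no other reference remains; files that still pin a concrete constant such as Version.LUCENE_31 (see TestBulgarianAnalyzer below) keep it. A hedged sketch of the inheritance pattern, with hypothetical names (BaseTest stands in for the Lucene test base class):

    abstract class BaseTest {
      // one shared definition instead of Version.LUCENE_CURRENT in every test
      protected static final String TEST_VERSION_CURRENT = "LUCENE_CURRENT";
    }

    public class SomeAnalyzerTest extends BaseTest {
      public static void main(String[] args) {
        // Subclasses see the constant directly; no Version import needed.
        System.out.println(TEST_VERSION_CURRENT);
      }
    }
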
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicNormalizationFilter.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicNormalizationFilter.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicNormalizationFilter.java (working copy)
@@ -21,11 +21,9 @@
import java.io.StringReader;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
/**
* Test the Arabic Normalization Filter
- *
*/
public class TestArabicNormalizationFilter extends BaseTokenStreamTestCase {
@@ -86,7 +84,7 @@
}
private void check(final String input, final String expected) throws IOException {
- ArabicLetterTokenizer tokenStream = new ArabicLetterTokenizer(Version.LUCENE_CURRENT, new StringReader(input));
+ ArabicLetterTokenizer tokenStream = new ArabicLetterTokenizer(TEST_VERSION_CURRENT, new StringReader(input));
ArabicNormalizationFilter filter = new ArabicNormalizationFilter(tokenStream);
assertTokenStreamContents(filter, new String[]{expected});
}
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicStemFilter.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicStemFilter.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/ar/TestArabicStemFilter.java (working copy)
@@ -23,7 +23,6 @@
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.KeywordMarkerTokenFilter;
-import org.apache.lucene.util.Version;
/**
* Test the Arabic Normalization Filter
@@ -116,16 +115,16 @@
}
public void testWithKeywordAttribute() throws IOException {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("ساهدهات");
- ArabicLetterTokenizer tokenStream = new ArabicLetterTokenizer(Version.LUCENE_CURRENT, new StringReader("ساهدهات"));
+ ArabicLetterTokenizer tokenStream = new ArabicLetterTokenizer(TEST_VERSION_CURRENT, new StringReader("ساهدهات"));
ArabicStemFilter filter = new ArabicStemFilter(new KeywordMarkerTokenFilter(tokenStream, set));
assertTokenStreamContents(filter, new String[]{"ساهدهات"});
}
private void check(final String input, final String expected) throws IOException {
- ArabicLetterTokenizer tokenStream = new ArabicLetterTokenizer(Version.LUCENE_CURRENT, new StringReader(input));
+ ArabicLetterTokenizer tokenStream = new ArabicLetterTokenizer(TEST_VERSION_CURRENT, new StringReader(input));
ArabicStemFilter filter = new ArabicStemFilter(tokenStream);
assertTokenStreamContents(filter, new String[]{expected});
}
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianAnalyzer.java (working copy)
@@ -34,23 +34,23 @@
* This test fails with NPE when the stopwords file is missing in classpath
*/
public void testResourcesAvailable() {
- new BulgarianAnalyzer(Version.LUCENE_CURRENT);
+ new BulgarianAnalyzer(TEST_VERSION_CURRENT);
}
public void testStopwords() throws IOException {
- Analyzer a = new BulgarianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new BulgarianAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(a, "Как се казваш?", new String[] {"казваш"});
}
public void testCustomStopwords() throws IOException {
- Analyzer a = new BulgarianAnalyzer(Version.LUCENE_CURRENT, Collections
+ Analyzer a = new BulgarianAnalyzer(TEST_VERSION_CURRENT, Collections
.emptySet());
assertAnalyzesTo(a, "Как се казваш?",
new String[] {"как", "се", "казваш"});
}
public void testReusableTokenStream() throws IOException {
- Analyzer a = new BulgarianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new BulgarianAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesToReuse(a, "документи", new String[] {"документ"});
assertAnalyzesToReuse(a, "документ", new String[] {"документ"});
}
@@ -59,7 +59,7 @@
* Test some examples from the paper
*/
public void testBasicExamples() throws IOException {
- Analyzer a = new BulgarianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new BulgarianAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(a, "енергийни кризи", new String[] {"енергийн", "криз"});
assertAnalyzesTo(a, "Атомната енергия", new String[] {"атомн", "енерг"});
@@ -72,7 +72,7 @@
public void testWithStemExclusionSet() throws IOException {
CharArraySet set = new CharArraySet(Version.LUCENE_31, 1, true);
set.add("строеве");
- Analyzer a = new BulgarianAnalyzer(Version.LUCENE_CURRENT, CharArraySet.EMPTY_SET, set);
+ Analyzer a = new BulgarianAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET, set);
assertAnalyzesTo(a, "строевете строеве", new String[] { "строй", "строеве" });
}
}
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianStemmer.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianStemmer.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/bg/TestBulgarianStemmer.java (working copy)
@@ -35,7 +35,7 @@
* common (and some rare) plural pattern is listed.
*/
public void testMasculineNouns() throws IOException {
- BulgarianAnalyzer a = new BulgarianAnalyzer(Version.LUCENE_CURRENT);
+ BulgarianAnalyzer a = new BulgarianAnalyzer(TEST_VERSION_CURRENT);
// -и pattern
assertAnalyzesTo(a, "град", new String[] {"град"});
@@ -101,7 +101,7 @@
* Test showing how feminine noun forms conflate
*/
public void testFeminineNouns() throws IOException {
- BulgarianAnalyzer a = new BulgarianAnalyzer(Version.LUCENE_CURRENT);
+ BulgarianAnalyzer a = new BulgarianAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(a, "вест", new String[] {"вест"});
assertAnalyzesTo(a, "вестта", new String[] {"вест"});
@@ -114,7 +114,7 @@
* plural pattern is listed
*/
public void testNeuterNouns() throws IOException {
- BulgarianAnalyzer a = new BulgarianAnalyzer(Version.LUCENE_CURRENT);
+ BulgarianAnalyzer a = new BulgarianAnalyzer(TEST_VERSION_CURRENT);
// -а pattern
assertAnalyzesTo(a, "дърво", new String[] {"дърв"});
@@ -142,7 +142,7 @@
* Test showing how adjectival forms conflate
*/
public void testAdjectives() throws IOException {
- BulgarianAnalyzer a = new BulgarianAnalyzer(Version.LUCENE_CURRENT);
+ BulgarianAnalyzer a = new BulgarianAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(a, "красив", new String[] {"красив"});
assertAnalyzesTo(a, "красивия", new String[] {"красив"});
assertAnalyzesTo(a, "красивият", new String[] {"красив"});
@@ -158,7 +158,7 @@
* Test some exceptional rules, implemented as rewrites.
*/
public void testExceptions() throws IOException {
- BulgarianAnalyzer a = new BulgarianAnalyzer(Version.LUCENE_CURRENT);
+ BulgarianAnalyzer a = new BulgarianAnalyzer(TEST_VERSION_CURRENT);
// ци -> к
assertAnalyzesTo(a, "собственик", new String[] {"собственик"});
@@ -215,7 +215,7 @@
public void testWithKeywordAttribute() throws IOException {
CharArraySet set = new CharArraySet(Version.LUCENE_31, 1, true);
set.add("строеве");
- WhitespaceTokenizer tokenStream = new WhitespaceTokenizer(Version.LUCENE_CURRENT,
+ WhitespaceTokenizer tokenStream = new WhitespaceTokenizer(TEST_VERSION_CURRENT,
new StringReader("строевете строеве"));
BulgarianStemFilter filter = new BulgarianStemFilter(
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemmer.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemmer.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/br/TestBrazilianStemmer.java (working copy)
@@ -25,7 +25,6 @@
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.KeywordMarkerTokenFilter;
import org.apache.lucene.analysis.LowerCaseTokenizer;
-import org.apache.lucene.util.Version;
/**
* Test the Brazilian Stem Filter, which only modifies the term text.
@@ -128,7 +127,7 @@
}
public void testReusableTokenStream() throws Exception {
- Analyzer a = new BrazilianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new BrazilianAnalyzer(TEST_VERSION_CURRENT);
checkReuse(a, "boa", "boa");
checkReuse(a, "boainain", "boainain");
checkReuse(a, "boas", "boas");
@@ -136,35 +135,35 @@
}
public void testStemExclusionTable() throws Exception {
- BrazilianAnalyzer a = new BrazilianAnalyzer(Version.LUCENE_CURRENT);
+ BrazilianAnalyzer a = new BrazilianAnalyzer(TEST_VERSION_CURRENT);
a.setStemExclusionTable(new String[] { "quintessência" });
checkReuse(a, "quintessência", "quintessência"); // excluded words will be completely unchanged.
}
public void testStemExclusionTableBWCompat() throws IOException {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("Brasília");
BrazilianStemFilter filter = new BrazilianStemFilter(
- new LowerCaseTokenizer(Version.LUCENE_CURRENT, new StringReader("Brasília Brasilia")), set);
+ new LowerCaseTokenizer(TEST_VERSION_CURRENT, new StringReader("Brasília Brasilia")), set);
assertTokenStreamContents(filter, new String[] { "brasília", "brasil" });
}
public void testWithKeywordAttribute() throws IOException {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("Brasília");
BrazilianStemFilter filter = new BrazilianStemFilter(
- new KeywordMarkerTokenFilter(new LowerCaseTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ new KeywordMarkerTokenFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, new StringReader(
"Brasília Brasilia")), set));
assertTokenStreamContents(filter, new String[] { "brasília", "brasil" });
}
public void testWithKeywordAttributeAndExclusionTable() throws IOException {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("Brasília");
- CharArraySet set1 = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set1 = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set1.add("Brasilia");
BrazilianStemFilter filter = new BrazilianStemFilter(
- new KeywordMarkerTokenFilter(new LowerCaseTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ new KeywordMarkerTokenFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, new StringReader(
"Brasília Brasilia")), set), set1);
assertTokenStreamContents(filter, new String[] { "brasília", "brasilia" });
}
@@ -174,14 +173,14 @@
* when using reusable token streams.
*/
public void testExclusionTableReuse() throws Exception {
- BrazilianAnalyzer a = new BrazilianAnalyzer(Version.LUCENE_CURRENT);
+ BrazilianAnalyzer a = new BrazilianAnalyzer(TEST_VERSION_CURRENT);
checkReuse(a, "quintessência", "quintessente");
a.setStemExclusionTable(new String[] { "quintessência" });
checkReuse(a, "quintessência", "quintessência");
}
private void check(final String input, final String expected) throws Exception {
- checkOneTerm(new BrazilianAnalyzer(Version.LUCENE_CURRENT), input, expected);
+ checkOneTerm(new BrazilianAnalyzer(TEST_VERSION_CURRENT), input, expected);
}
private void checkReuse(Analyzer a, String input, String expected) throws Exception {
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/cjk/TestCJKTokenizer.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/cjk/TestCJKTokenizer.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/cjk/TestCJKTokenizer.java (working copy)
@@ -21,7 +21,6 @@
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.util.Version;
public class TestCJKTokenizer extends BaseTokenStreamTestCase {
@@ -42,7 +41,7 @@
}
public void checkCJKToken(final String str, final TestToken[] out_tokens) throws IOException {
- Analyzer analyzer = new CJKAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new CJKAnalyzer(TEST_VERSION_CURRENT);
String terms[] = new String[out_tokens.length];
int startOffsets[] = new int[out_tokens.length];
int endOffsets[] = new int[out_tokens.length];
@@ -57,7 +56,7 @@
}
public void checkCJKTokenReusable(final Analyzer a, final String str, final TestToken[] out_tokens) throws IOException {
- Analyzer analyzer = new CJKAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new CJKAnalyzer(TEST_VERSION_CURRENT);
String terms[] = new String[out_tokens.length];
int startOffsets[] = new int[out_tokens.length];
int endOffsets[] = new int[out_tokens.length];
@@ -213,13 +212,13 @@
}
public void testTokenStream() throws Exception {
- Analyzer analyzer = new CJKAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new CJKAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(analyzer, "\u4e00\u4e01\u4e02",
new String[] { "\u4e00\u4e01", "\u4e01\u4e02"});
}
public void testReusableTokenStream() throws Exception {
- Analyzer analyzer = new CJKAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new CJKAnalyzer(TEST_VERSION_CURRENT);
String str = "\u3042\u3044\u3046\u3048\u304aabc\u304b\u304d\u304f\u3051\u3053";
TestToken[] out_tokens = {
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/compound/TestCompoundWordTokenFilter.java (working copy)
@@ -28,17 +28,11 @@
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.compound.hyphenation.HyphenationTree;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.util.Version;
public class TestCompoundWordTokenFilter extends BaseTokenStreamTestCase {
static final File dataDir = new File(System.getProperty("dataDir", "./bin"));
static final File testFile = new File(dataDir, "org/apache/lucene/analysis/compound/da_UTF8.xml");
- @Override
- protected void setUp() throws Exception {
- super.setUp();
- }
-
public void testHyphenationCompoundWordsDA() throws Exception {
String[] dict = { "læse", "hest" };
@@ -47,8 +41,8 @@
HyphenationTree hyphenator = HyphenationCompoundWordTokenFilter
.getHyphenationTree(reader);
- HyphenationCompoundWordTokenFilter tf = new HyphenationCompoundWordTokenFilter(Version.LUCENE_CURRENT,
- new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ HyphenationCompoundWordTokenFilter tf = new HyphenationCompoundWordTokenFilter(TEST_VERSION_CURRENT,
+ new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
"min veninde som er lidt af en læsehest")), hyphenator,
dict, CompoundWordTokenFilterBase.DEFAULT_MIN_WORD_SIZE,
CompoundWordTokenFilterBase.DEFAULT_MIN_SUBWORD_SIZE,
@@ -67,8 +61,8 @@
.getHyphenationTree(reader);
// the word basket will not be added due to the longest match option
- HyphenationCompoundWordTokenFilter tf = new HyphenationCompoundWordTokenFilter(Version.LUCENE_CURRENT,
- new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ HyphenationCompoundWordTokenFilter tf = new HyphenationCompoundWordTokenFilter(TEST_VERSION_CURRENT,
+ new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
"basketballkurv")), hyphenator, dict,
CompoundWordTokenFilterBase.DEFAULT_MIN_WORD_SIZE,
CompoundWordTokenFilterBase.DEFAULT_MIN_SUBWORD_SIZE, 40, true);
@@ -84,8 +78,8 @@
"Pelar", "Glas", "Ögon", "Fodral", "Bas", "Fiol", "Makare", "Gesäll",
"Sko", "Vind", "Rute", "Torkare", "Blad" };
- DictionaryCompoundWordTokenFilter tf = new DictionaryCompoundWordTokenFilter(Version.LUCENE_CURRENT,
- new WhitespaceTokenizer(Version.LUCENE_CURRENT,
+ DictionaryCompoundWordTokenFilter tf = new DictionaryCompoundWordTokenFilter(TEST_VERSION_CURRENT,
+ new WhitespaceTokenizer(TEST_VERSION_CURRENT,
new StringReader(
"Bildörr Bilmotor Biltak Slagborr Hammarborr Pelarborr Glasögonfodral Basfiolsfodral Basfiolsfodralmakaregesäll Skomakare Vindrutetorkare Vindrutetorkarblad abba")),
dict);
@@ -113,8 +107,8 @@
"Pelar", "Glas", "Ögon", "Fodral", "Bas", "Fiols", "Makare", "Gesäll",
"Sko", "Vind", "Rute", "Torkare", "Blad", "Fiolsfodral" };
- DictionaryCompoundWordTokenFilter tf = new DictionaryCompoundWordTokenFilter(Version.LUCENE_CURRENT,
- new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("Basfiolsfodralmakaregesäll")),
+ DictionaryCompoundWordTokenFilter tf = new DictionaryCompoundWordTokenFilter(TEST_VERSION_CURRENT,
+ new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("Basfiolsfodralmakaregesäll")),
dict, CompoundWordTokenFilterBase.DEFAULT_MIN_WORD_SIZE,
CompoundWordTokenFilterBase.DEFAULT_MIN_SUBWORD_SIZE,
CompoundWordTokenFilterBase.DEFAULT_MAX_SUBWORD_SIZE, true);
@@ -129,9 +123,9 @@
String[] dict = { "Rind", "Fleisch", "Draht", "Schere", "Gesetz",
"Aufgabe", "Überwachung" };
- Tokenizer wsTokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ Tokenizer wsTokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
"Rindfleischüberwachungsgesetz"));
- DictionaryCompoundWordTokenFilter tf = new DictionaryCompoundWordTokenFilter(Version.LUCENE_CURRENT,
+ DictionaryCompoundWordTokenFilter tf = new DictionaryCompoundWordTokenFilter(TEST_VERSION_CURRENT,
wsTokenizer, dict,
CompoundWordTokenFilterBase.DEFAULT_MIN_WORD_SIZE,
CompoundWordTokenFilterBase.DEFAULT_MIN_SUBWORD_SIZE,
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/cz/TestCzechAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/cz/TestCzechAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/cz/TestCzechAnalyzer.java (working copy)
@@ -48,7 +48,7 @@
}
public void testStopWord() throws Exception {
- assertAnalyzesTo(new CzechAnalyzer(Version.LUCENE_CURRENT), "Pokud mluvime o volnem",
+ assertAnalyzesTo(new CzechAnalyzer(TEST_VERSION_CURRENT), "Pokud mluvime o volnem",
new String[] { "mluvim", "voln" });
}
@@ -63,7 +63,7 @@
}
public void testReusableTokenStream() throws Exception {
- Analyzer analyzer = new CzechAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new CzechAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesToReuse(analyzer, "Pokud mluvime o volnem", new String[] { "mluvim", "voln" });
assertAnalyzesToReuse(analyzer, "Česká Republika", new String[] { "česk", "republik" });
}
@@ -112,9 +112,9 @@
}
public void testWithStemExclusionSet() throws IOException{
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("hole");
- CzechAnalyzer cz = new CzechAnalyzer(Version.LUCENE_CURRENT, CharArraySet.EMPTY_SET, set);
+ CzechAnalyzer cz = new CzechAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET, set);
assertAnalyzesTo(cz, "hole desek", new String[] {"hole", "desk"});
}
}
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/cz/TestCzechStemmer.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/cz/TestCzechStemmer.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/cz/TestCzechStemmer.java (working copy)
@@ -24,7 +24,6 @@
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.KeywordMarkerTokenFilter;
import org.apache.lucene.analysis.WhitespaceTokenizer;
-import org.apache.lucene.util.Version;
/**
* Test the Czech Stemmer.
@@ -38,7 +37,7 @@
* Test showing how masculine noun forms conflate
*/
public void testMasculineNouns() throws IOException {
- CzechAnalyzer cz = new CzechAnalyzer(Version.LUCENE_CURRENT);
+ CzechAnalyzer cz = new CzechAnalyzer(TEST_VERSION_CURRENT);
/* animate ending with a hard consonant */
assertAnalyzesTo(cz, "pán", new String[] { "pán" });
@@ -106,7 +105,7 @@
* Test showing how feminine noun forms conflate
*/
public void testFeminineNouns() throws IOException {
- CzechAnalyzer cz = new CzechAnalyzer(Version.LUCENE_CURRENT);
+ CzechAnalyzer cz = new CzechAnalyzer(TEST_VERSION_CURRENT);
/* ending with hard consonant */
assertAnalyzesTo(cz, "kost", new String[] { "kost" });
@@ -150,7 +149,7 @@
* Test showing how neuter noun forms conflate
*/
public void testNeuterNouns() throws IOException {
- CzechAnalyzer cz = new CzechAnalyzer(Version.LUCENE_CURRENT);
+ CzechAnalyzer cz = new CzechAnalyzer(TEST_VERSION_CURRENT);
/* ending with o */
assertAnalyzesTo(cz, "město", new String[] { "měst" });
@@ -193,7 +192,7 @@
* Test showing how adjectival forms conflate
*/
public void testAdjectives() throws IOException {
- CzechAnalyzer cz = new CzechAnalyzer(Version.LUCENE_CURRENT);
+ CzechAnalyzer cz = new CzechAnalyzer(TEST_VERSION_CURRENT);
/* ending with ý/á/é */
assertAnalyzesTo(cz, "mladý", new String[] { "mlad" });
@@ -221,7 +220,7 @@
* Test some possessive suffixes
*/
public void testPossessive() throws IOException {
- CzechAnalyzer cz = new CzechAnalyzer(Version.LUCENE_CURRENT);
+ CzechAnalyzer cz = new CzechAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(cz, "Karlův", new String[] { "karl" });
assertAnalyzesTo(cz, "jazykový", new String[] { "jazyk" });
}
@@ -230,7 +229,7 @@
* Test some exceptional rules, implemented as rewrites.
*/
public void testExceptions() throws IOException {
- CzechAnalyzer cz = new CzechAnalyzer(Version.LUCENE_CURRENT);
+ CzechAnalyzer cz = new CzechAnalyzer(TEST_VERSION_CURRENT);
/* rewrite of št -> sk */
assertAnalyzesTo(cz, "český", new String[] { "česk" });
@@ -270,16 +269,16 @@
* Test that very short words are not stemmed.
*/
public void testDontStem() throws IOException {
- CzechAnalyzer cz = new CzechAnalyzer(Version.LUCENE_CURRENT);
+ CzechAnalyzer cz = new CzechAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(cz, "e", new String[] { "e" });
assertAnalyzesTo(cz, "zi", new String[] { "zi" });
}
public void testWithKeywordAttribute() throws IOException {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("hole");
CzechStemFilter filter = new CzechStemFilter(new KeywordMarkerTokenFilter(
- new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("hole desek")), set));
+ new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("hole desek")), set));
assertTokenStreamContents(filter, new String[] { "hole", "desk" });
}
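The KeywordMarkerTokenFilter pattern that recurs throughout these tests protects marked terms from stemming. A minimal standalone sketch of the wiring asserted above, under the same 3.1-dev API (class name illustrative; Version.LUCENE_CURRENT stands in for TEST_VERSION_CURRENT):

import java.io.StringReader;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.KeywordMarkerTokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.cz.CzechStemFilter;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.Version;

public class KeywordMarkerSketch {
  public static void main(String[] args) throws Exception {
    CharArraySet keywords = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
    keywords.add("hole");
    // "hole" is marked as a keyword and passes the stemmer untouched;
    // "desek" is stemmed to "desk".
    TokenStream ts = new CzechStemFilter(new KeywordMarkerTokenFilter(
        new WhitespaceTokenizer(Version.LUCENE_CURRENT,
            new StringReader("hole desek")), keywords));
    TermAttribute term = ts.getAttribute(TermAttribute.class);
    while (ts.incrementToken()) {
      System.out.println(term.term());
    }
  }
}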
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/da/TestDanishAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/da/TestDanishAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/da/TestDanishAnalyzer.java (working copy)
@@ -23,18 +23,17 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestDanishAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new DanishAnalyzer(Version.LUCENE_CURRENT);
+ new DanishAnalyzer(TEST_VERSION_CURRENT);
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new DanishAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new DanishAnalyzer(TEST_VERSION_CURRENT);
// stemming
checkOneTermReuse(a, "undersøg", "undersøg");
checkOneTermReuse(a, "undersøgelse", "undersøg");
@@ -46,7 +45,7 @@
public void testExclude() throws IOException {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("undersøgelse");
- Analyzer a = new DanishAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new DanishAnalyzer(TEST_VERSION_CURRENT,
DanishAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "undersøgelse", "undersøgelse");
checkOneTermReuse(a, "undersøg", "undersøg");
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/de/TestGermanAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/de/TestGermanAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/de/TestGermanAnalyzer.java (working copy)
@@ -29,38 +29,38 @@
public class TestGermanAnalyzer extends BaseTokenStreamTestCase {
public void testReusableTokenStream() throws Exception {
- Analyzer a = new GermanAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new GermanAnalyzer(TEST_VERSION_CURRENT);
checkOneTermReuse(a, "Tisch", "tisch");
checkOneTermReuse(a, "Tische", "tisch");
checkOneTermReuse(a, "Tischen", "tisch");
}
public void testExclusionTableBWCompat() throws IOException {
- GermanStemFilter filter = new GermanStemFilter(new LowerCaseTokenizer(Version.LUCENE_CURRENT,
+ GermanStemFilter filter = new GermanStemFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT,
new StringReader("Fischen Trinken")));
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("fischen");
filter.setExclusionSet(set);
assertTokenStreamContents(filter, new String[] { "fischen", "trink" });
}
public void testWithKeywordAttribute() throws IOException {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("fischen");
GermanStemFilter filter = new GermanStemFilter(
- new KeywordMarkerTokenFilter(new LowerCaseTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ new KeywordMarkerTokenFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, new StringReader(
"Fischen Trinken")), set));
assertTokenStreamContents(filter, new String[] { "fischen", "trink" });
}
public void testWithKeywordAttributeAndExclusionTable() throws IOException {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("fischen");
- CharArraySet set1 = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set1 = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set1.add("trinken");
set1.add("fischen");
GermanStemFilter filter = new GermanStemFilter(
- new KeywordMarkerTokenFilter(new LowerCaseTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ new KeywordMarkerTokenFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, new StringReader(
"Fischen Trinken")), set));
filter.setExclusionSet(set1);
assertTokenStreamContents(filter, new String[] { "fischen", "trinken" });
@@ -71,7 +71,7 @@
* when using reusable token streams.
*/
public void testExclusionTableReuse() throws Exception {
- GermanAnalyzer a = new GermanAnalyzer(Version.LUCENE_CURRENT);
+ GermanAnalyzer a = new GermanAnalyzer(TEST_VERSION_CURRENT);
checkOneTermReuse(a, "tischen", "tisch");
a.setStemExclusionTable(new String[] { "tischen" });
checkOneTermReuse(a, "tischen", "tischen");
@@ -81,7 +81,7 @@
* these only pass with LUCENE_CURRENT, not if you use o.a.l.a.de.GermanStemmer
*/
public void testGermanSpecials() throws Exception {
- GermanAnalyzer a = new GermanAnalyzer(Version.LUCENE_CURRENT);
+ GermanAnalyzer a = new GermanAnalyzer(TEST_VERSION_CURRENT);
// a/o/u + e is equivalent to the umlaut form
checkOneTermReuse(a, "Schaltflächen", "schaltflach");
checkOneTermReuse(a, "Schaltflaechen", "schaltflach");
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/de/TestGermanStemFilter.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/de/TestGermanStemFilter.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/de/TestGermanStemFilter.java (working copy)
@@ -28,7 +28,6 @@
import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.util.Version;
/**
* Test the German stemmer. The stemming algorithm is known to work less
@@ -40,7 +39,7 @@
public void testStemming() throws Exception {
Tokenizer tokenizer = new KeywordTokenizer(new StringReader(""));
- TokenFilter filter = new GermanStemFilter(new LowerCaseFilter(Version.LUCENE_CURRENT, tokenizer));
+ TokenFilter filter = new GermanStemFilter(new LowerCaseFilter(TEST_VERSION_CURRENT, tokenizer));
// read test cases from external file:
File dataDir = new File(System.getProperty("dataDir", "./bin"));
File testFile = new File(dataDir, "org/apache/lucene/analysis/de/data.txt");
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/el/GreekAnalyzerTest.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/el/GreekAnalyzerTest.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/el/GreekAnalyzerTest.java (working copy)
@@ -32,7 +32,7 @@
* @throws Exception in case an error occurs
*/
public void testAnalyzer() throws Exception {
- Analyzer a = new GreekAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new GreekAnalyzer(TEST_VERSION_CURRENT);
// Verify the correct analysis of capitals and small accented letters
assertAnalyzesTo(a, "\u039c\u03af\u03b1 \u03b5\u03be\u03b1\u03b9\u03c1\u03b5\u03c4\u03b9\u03ba\u03ac \u03ba\u03b1\u03bb\u03ae \u03ba\u03b1\u03b9 \u03c0\u03bb\u03bf\u03cd\u03c3\u03b9\u03b1 \u03c3\u03b5\u03b9\u03c1\u03ac \u03c7\u03b1\u03c1\u03b1\u03ba\u03c4\u03ae\u03c1\u03c9\u03bd \u03c4\u03b7\u03c2 \u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ae\u03c2 \u03b3\u03bb\u03ce\u03c3\u03c3\u03b1\u03c2",
new String[] { "\u03bc\u03b9\u03b1", "\u03b5\u03be\u03b1\u03b9\u03c1\u03b5\u03c4\u03b9\u03ba\u03b1", "\u03ba\u03b1\u03bb\u03b7", "\u03c0\u03bb\u03bf\u03c5\u03c3\u03b9\u03b1", "\u03c3\u03b5\u03b9\u03c1\u03b1", "\u03c7\u03b1\u03c1\u03b1\u03ba\u03c4\u03b7\u03c1\u03c9\u03bd",
@@ -48,7 +48,7 @@
}
public void testReusableTokenStream() throws Exception {
- Analyzer a = new GreekAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new GreekAnalyzer(TEST_VERSION_CURRENT);
// Verify the correct analysis of capitals and small accented letters
assertAnalyzesToReuse(a, "\u039c\u03af\u03b1 \u03b5\u03be\u03b1\u03b9\u03c1\u03b5\u03c4\u03b9\u03ba\u03ac \u03ba\u03b1\u03bb\u03ae \u03ba\u03b1\u03b9 \u03c0\u03bb\u03bf\u03cd\u03c3\u03b9\u03b1 \u03c3\u03b5\u03b9\u03c1\u03ac \u03c7\u03b1\u03c1\u03b1\u03ba\u03c4\u03ae\u03c1\u03c9\u03bd \u03c4\u03b7\u03c2 \u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ae\u03c2 \u03b3\u03bb\u03ce\u03c3\u03c3\u03b1\u03c2",
new String[] { "\u03bc\u03b9\u03b1", "\u03b5\u03be\u03b1\u03b9\u03c1\u03b5\u03c4\u03b9\u03ba\u03b1", "\u03ba\u03b1\u03bb\u03b7", "\u03c0\u03bb\u03bf\u03c5\u03c3\u03b9\u03b1", "\u03c3\u03b5\u03b9\u03c1\u03b1", "\u03c7\u03b1\u03c1\u03b1\u03ba\u03c4\u03b7\u03c1\u03c9\u03bd",
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/en/TestEnglishAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/en/TestEnglishAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/en/TestEnglishAnalyzer.java (working copy)
@@ -23,18 +23,17 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestEnglishAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new EnglishAnalyzer(Version.LUCENE_CURRENT);
+ new EnglishAnalyzer(TEST_VERSION_CURRENT);
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new EnglishAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new EnglishAnalyzer(TEST_VERSION_CURRENT);
// stemming
checkOneTermReuse(a, "books", "book");
checkOneTermReuse(a, "book", "book");
@@ -46,7 +45,7 @@
public void testExclude() throws IOException {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("books");
- Analyzer a = new EnglishAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new EnglishAnalyzer(TEST_VERSION_CURRENT,
EnglishAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "books", "books");
checkOneTermReuse(a, "book", "book");
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/es/TestSpanishAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/es/TestSpanishAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/es/TestSpanishAnalyzer.java (working copy)
@@ -23,18 +23,17 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestSpanishAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new SpanishAnalyzer(Version.LUCENE_CURRENT);
+ new SpanishAnalyzer(TEST_VERSION_CURRENT);
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new SpanishAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new SpanishAnalyzer(TEST_VERSION_CURRENT);
// stemming
checkOneTermReuse(a, "chicana", "chican");
checkOneTermReuse(a, "chicano", "chican");
@@ -46,7 +45,7 @@
public void testExclude() throws IOException {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("chicano");
- Analyzer a = new SpanishAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new SpanishAnalyzer(TEST_VERSION_CURRENT,
SpanishAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "chicana", "chican");
checkOneTermReuse(a, "chicano", "chicano");
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianAnalyzer.java (working copy)
@@ -19,7 +19,6 @@
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.util.Version;
/**
* Test the Persian Analyzer
@@ -31,7 +30,7 @@
* This test fails with NPE when the stopwords file is missing in classpath
*/
public void testResourcesAvailable() {
- new PersianAnalyzer(Version.LUCENE_CURRENT);
+ new PersianAnalyzer(TEST_VERSION_CURRENT);
}
/**
@@ -42,7 +41,7 @@
* These verb forms are from http://en.wikipedia.org/wiki/Persian_grammar
*/
public void testBehaviorVerbs() throws Exception {
- Analyzer a = new PersianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new PersianAnalyzer(TEST_VERSION_CURRENT);
// active present indicative
assertAnalyzesTo(a, "میخورد", new String[] { "خورد" });
// active preterite indicative
@@ -118,7 +117,7 @@
* These verb forms are from http://en.wikipedia.org/wiki/Persian_grammar
*/
public void testBehaviorVerbsDefective() throws Exception {
- Analyzer a = new PersianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new PersianAnalyzer(TEST_VERSION_CURRENT);
// active present indicative
assertAnalyzesTo(a, "مي خورد", new String[] { "خورد" });
// active preterite indicative
@@ -189,7 +188,7 @@
* nouns, removing the plural -ha.
*/
public void testBehaviorNouns() throws Exception {
- Analyzer a = new PersianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new PersianAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(a, "برگ ها", new String[] { "برگ" });
assertAnalyzesTo(a, "برگها", new String[] { "برگ" });
}
@@ -199,7 +198,7 @@
* (lowercased, etc)
*/
public void testBehaviorNonPersian() throws Exception {
- Analyzer a = new PersianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new PersianAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(a, "English test.", new String[] { "english", "test" });
}
@@ -207,7 +206,7 @@
* Basic test ensuring that reusableTokenStream works correctly.
*/
public void testReusableTokenStream() throws Exception {
- Analyzer a = new PersianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new PersianAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesToReuse(a, "خورده مي شده بوده باشد", new String[] { "خورده" });
assertAnalyzesToReuse(a, "برگها", new String[] { "برگ" });
}
@@ -216,7 +215,7 @@
* Test that custom stopwords work, and are not case-sensitive.
*/
public void testCustomStopwords() throws Exception {
- PersianAnalyzer a = new PersianAnalyzer(Version.LUCENE_CURRENT, new String[] { "the", "and", "a" });
+ PersianAnalyzer a = new PersianAnalyzer(TEST_VERSION_CURRENT, new String[] { "the", "and", "a" });
assertAnalyzesTo(a, "The quick brown fox.", new String[] { "quick",
"brown", "fox" });
}
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianNormalizationFilter.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianNormalizationFilter.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/fa/TestPersianNormalizationFilter.java (working copy)
@@ -22,7 +22,6 @@
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.ar.ArabicLetterTokenizer;
-import org.apache.lucene.util.Version;
/**
* Test the Persian Normalization Filter
@@ -55,7 +54,7 @@
}
private void check(final String input, final String expected) throws IOException {
- ArabicLetterTokenizer tokenStream = new ArabicLetterTokenizer(Version.LUCENE_CURRENT,
+ ArabicLetterTokenizer tokenStream = new ArabicLetterTokenizer(TEST_VERSION_CURRENT,
new StringReader(input));
PersianNormalizationFilter filter = new PersianNormalizationFilter(
tokenStream);
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/fi/TestFinnishAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/fi/TestFinnishAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/fi/TestFinnishAnalyzer.java (working copy)
@@ -23,18 +23,17 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestFinnishAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new FinnishAnalyzer(Version.LUCENE_CURRENT);
+ new FinnishAnalyzer(TEST_VERSION_CURRENT);
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new FinnishAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new FinnishAnalyzer(TEST_VERSION_CURRENT);
// stemming
checkOneTermReuse(a, "edeltäjiinsä", "edeltäj");
checkOneTermReuse(a, "edeltäjistään", "edeltäj");
@@ -46,7 +45,7 @@
public void testExclude() throws IOException {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("edeltäjistään");
- Analyzer a = new FinnishAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new FinnishAnalyzer(TEST_VERSION_CURRENT,
FinnishAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "edeltäjiinsä", "edeltäj");
checkOneTermReuse(a, "edeltäjistään", "edeltäjistään");
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestElision.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestElision.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestElision.java (working copy)
@@ -29,7 +29,6 @@
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.util.Version;
/**
*
@@ -38,19 +37,19 @@
public void testElision() throws Exception {
String test = "Plop, juste pour voir l'embrouille avec O'brian. M'enfin.";
- Tokenizer tokenizer = new StandardTokenizer(Version.LUCENE_CURRENT, new StringReader(test));
- Set articles = new HashSet();
+ Tokenizer tokenizer = new StandardTokenizer(TEST_VERSION_CURRENT, new StringReader(test));
+ Set<String> articles = new HashSet<String>();
articles.add("l");
articles.add("M");
- TokenFilter filter = new ElisionFilter(Version.LUCENE_CURRENT, tokenizer, articles);
- List tas = filtre(filter);
+ TokenFilter filter = new ElisionFilter(TEST_VERSION_CURRENT, tokenizer, articles);
+ List<String> tas = filter(filter);
assertEquals("embrouille", tas.get(4));
assertEquals("O'brian", tas.get(6));
assertEquals("enfin", tas.get(7));
}
- private List filtre(TokenFilter filter) throws IOException {
- List tas = new ArrayList();
+ private List<String> filter(TokenFilter filter) throws IOException {
+ List<String> tas = new ArrayList<String>();
TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
while (filter.incrementToken()) {
tas.add(termAtt.term());
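ElisionFilter strips contracted articles such as l' and M' before the apostrophe-bearing token reaches downstream filters. A small sketch mirroring the test above; the class name and input are illustrative, and Version.LUCENE_CURRENT stands in for TEST_VERSION_CURRENT:

import java.io.StringReader;
import java.util.HashSet;
import java.util.Set;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.fr.ElisionFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.Version;

public class ElisionSketch {
  public static void main(String[] args) throws Exception {
    Set<String> articles = new HashSet<String>();
    articles.add("l");
    Tokenizer tok = new StandardTokenizer(Version.LUCENE_CURRENT,
        new StringReader("l'embrouille"));
    TokenFilter f = new ElisionFilter(Version.LUCENE_CURRENT, tok, articles);
    TermAttribute term = f.getAttribute(TermAttribute.class);
    while (f.incrementToken()) {
      System.out.println(term.term()); // expect: embrouille
    }
  }
}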
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestFrenchAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestFrenchAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/fr/TestFrenchAnalyzer.java (working copy)
@@ -32,7 +32,7 @@
public class TestFrenchAnalyzer extends BaseTokenStreamTestCase {
public void testAnalyzer() throws Exception {
- FrenchAnalyzer fa = new FrenchAnalyzer(Version.LUCENE_CURRENT);
+ FrenchAnalyzer fa = new FrenchAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(fa, "", new String[] {
});
@@ -204,7 +204,7 @@
}
public void testReusableTokenStream() throws Exception {
- FrenchAnalyzer fa = new FrenchAnalyzer(Version.LUCENE_CURRENT);
+ FrenchAnalyzer fa = new FrenchAnalyzer(TEST_VERSION_CURRENT);
// stopwords
assertAnalyzesToReuse(
fa,
@@ -229,27 +229,27 @@
* when using reusable token streams.
*/
public void testExclusionTableReuse() throws Exception {
- FrenchAnalyzer fa = new FrenchAnalyzer(Version.LUCENE_CURRENT);
+ FrenchAnalyzer fa = new FrenchAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesToReuse(fa, "habitable", new String[] { "habit" });
fa.setStemExclusionTable(new String[] { "habitable" });
assertAnalyzesToReuse(fa, "habitable", new String[] { "habitable" });
}
public void testExclusionTableViaCtor() throws Exception {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("habitable");
- FrenchAnalyzer fa = new FrenchAnalyzer(Version.LUCENE_CURRENT,
+ FrenchAnalyzer fa = new FrenchAnalyzer(TEST_VERSION_CURRENT,
CharArraySet.EMPTY_SET, set);
assertAnalyzesToReuse(fa, "habitable chiste", new String[] { "habitable",
"chist" });
- fa = new FrenchAnalyzer(Version.LUCENE_CURRENT, CharArraySet.EMPTY_SET, set);
+ fa = new FrenchAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET, set);
assertAnalyzesTo(fa, "habitable chiste", new String[] { "habitable",
"chist" });
}
public void testElision() throws Exception {
- FrenchAnalyzer fa = new FrenchAnalyzer(Version.LUCENE_CURRENT);
+ FrenchAnalyzer fa = new FrenchAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(fa, "voir l'embrouille", new String[] { "voir", "embrouill" });
}
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/hi/TestHindiAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/hi/TestHindiAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/hi/TestHindiAnalyzer.java (working copy)
@@ -5,7 +5,6 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
@@ -31,11 +30,11 @@
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new HindiAnalyzer(Version.LUCENE_CURRENT);
+ new HindiAnalyzer(TEST_VERSION_CURRENT);
}
public void testBasics() throws Exception {
- Analyzer a = new HindiAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new HindiAnalyzer(TEST_VERSION_CURRENT);
// two ways to write 'hindi' itself.
checkOneTermReuse(a, "हिन्दी", "हिंद");
checkOneTermReuse(a, "हिंदी", "हिंद");
@@ -44,7 +43,7 @@
public void testExclusionSet() throws Exception {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("हिंदी");
- Analyzer a = new HindiAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new HindiAnalyzer(TEST_VERSION_CURRENT,
HindiAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "हिंदी", "हिंदी");
}
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/hi/TestHindiNormalizer.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/hi/TestHindiNormalizer.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/hi/TestHindiNormalizer.java (working copy)
@@ -24,7 +24,6 @@
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WhitespaceTokenizer;
-import org.apache.lucene.util.Version;
/**
* Test HindiNormalizer
@@ -60,7 +59,7 @@
check("आईऊॠॡऐऔीूॄॣैौ", "अइउऋऌएओिुृॢेो");
}
private void check(String input, String output) throws IOException {
- Tokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT,
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT,
new StringReader(input));
TokenFilter tf = new HindiNormalizationFilter(tokenizer);
assertTokenStreamContents(tf, new String[] { output });
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/hi/TestHindiStemmer.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/hi/TestHindiStemmer.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/hi/TestHindiStemmer.java (working copy)
@@ -24,7 +24,6 @@
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WhitespaceTokenizer;
-import org.apache.lucene.util.Version;
/**
* Test HindiStemmer
@@ -82,7 +81,7 @@
}
private void check(String input, String output) throws IOException {
- Tokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT,
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT,
new StringReader(input));
TokenFilter tf = new HindiStemFilter(tokenizer);
assertTokenStreamContents(tf, new String[] { output });
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/hu/TestHungarianAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/hu/TestHungarianAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/hu/TestHungarianAnalyzer.java (working copy)
@@ -23,18 +23,17 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestHungarianAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new HungarianAnalyzer(Version.LUCENE_CURRENT);
+ new HungarianAnalyzer(TEST_VERSION_CURRENT);
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new HungarianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new HungarianAnalyzer(TEST_VERSION_CURRENT);
// stemming
checkOneTermReuse(a, "babakocsi", "babakocs");
checkOneTermReuse(a, "babakocsijáért", "babakocs");
@@ -46,7 +45,7 @@
public void testExclude() throws IOException {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("babakocsi");
- Analyzer a = new HungarianAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new HungarianAnalyzer(TEST_VERSION_CURRENT,
HungarianAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "babakocsi", "babakocsi");
checkOneTermReuse(a, "babakocsijáért", "babakocs");
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/in/TestIndicNormalizer.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/in/TestIndicNormalizer.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/in/TestIndicNormalizer.java (working copy)
@@ -24,7 +24,6 @@
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WhitespaceTokenizer;
-import org.apache.lucene.util.Version;
/**
* Test IndicNormalizer
@@ -45,7 +44,7 @@
}
private void check(String input, String output) throws IOException {
- Tokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT,
+ Tokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT,
new StringReader(input));
TokenFilter tf = new IndicNormalizationFilter(tokenizer);
assertTokenStreamContents(tf, new String[] { output });
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/in/TestIndicTokenizer.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/in/TestIndicTokenizer.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/in/TestIndicTokenizer.java (working copy)
@@ -22,7 +22,6 @@
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.util.Version;
/**
* Test IndicTokenizer
@@ -30,7 +29,7 @@
public class TestIndicTokenizer extends BaseTokenStreamTestCase {
/** Test tokenizing Indic vowels, signs, and punctuation */
public void testBasics() throws IOException {
- TokenStream ts = new IndicTokenizer(Version.LUCENE_CURRENT,
+ TokenStream ts = new IndicTokenizer(TEST_VERSION_CURRENT,
new StringReader("मुझे हिंदी का और अभ्यास करना होगा ।"));
assertTokenStreamContents(ts,
new String[] { "मुझे", "हिंदी", "का", "और", "अभ्यास", "करना", "होगा" });
@@ -38,7 +37,7 @@
/** Test that words with format chars such as ZWJ are kept */
public void testFormat() throws Exception {
- TokenStream ts = new IndicTokenizer(Version.LUCENE_CURRENT,
+ TokenStream ts = new IndicTokenizer(TEST_VERSION_CURRENT,
new StringReader("शार्मा शार्मा"));
assertTokenStreamContents(ts, new String[] { "शार्मा", "शार्मा" });
}
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/it/TestItalianAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/it/TestItalianAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/it/TestItalianAnalyzer.java (working copy)
@@ -23,18 +23,17 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestItalianAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new ItalianAnalyzer(Version.LUCENE_CURRENT);
+ new ItalianAnalyzer(TEST_VERSION_CURRENT);
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new ItalianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new ItalianAnalyzer(TEST_VERSION_CURRENT);
// stemming
checkOneTermReuse(a, "abbandonata", "abbandon");
checkOneTermReuse(a, "abbandonati", "abbandon");
@@ -46,7 +45,7 @@
public void testExclude() throws IOException {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("abbandonata");
- Analyzer a = new ItalianAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new ItalianAnalyzer(TEST_VERSION_CURRENT,
ItalianAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "abbandonata", "abbandonata");
checkOneTermReuse(a, "abbandonati", "abbandon");
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/PatternAnalyzerTest.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/PatternAnalyzerTest.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/PatternAnalyzerTest.java (working copy)
@@ -24,7 +24,6 @@
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.StopAnalyzer;
import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.util.Version;
/**
* Verifies the behavior of PatternAnalyzer.
@@ -37,13 +36,13 @@
*/
public void testNonWordPattern() throws IOException {
// Split on non-letter pattern, do not lowercase, no stopwords
- PatternAnalyzer a = new PatternAnalyzer(Version.LUCENE_CURRENT, PatternAnalyzer.NON_WORD_PATTERN,
+ PatternAnalyzer a = new PatternAnalyzer(TEST_VERSION_CURRENT, PatternAnalyzer.NON_WORD_PATTERN,
false, null);
check(a, "The quick brown Fox,the abcd1234 (56.78) dc.", new String[] {
"The", "quick", "brown", "Fox", "the", "abcd", "dc" });
// split on non-letter pattern, lowercase, english stopwords
- PatternAnalyzer b = new PatternAnalyzer(Version.LUCENE_CURRENT, PatternAnalyzer.NON_WORD_PATTERN,
+ PatternAnalyzer b = new PatternAnalyzer(TEST_VERSION_CURRENT, PatternAnalyzer.NON_WORD_PATTERN,
true, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
check(b, "The quick brown Fox,the abcd1234 (56.78) dc.", new String[] {
"quick", "brown", "fox", "abcd", "dc" });
@@ -55,13 +54,13 @@
*/
public void testWhitespacePattern() throws IOException {
// Split on whitespace patterns, do not lowercase, no stopwords
- PatternAnalyzer a = new PatternAnalyzer(Version.LUCENE_CURRENT, PatternAnalyzer.WHITESPACE_PATTERN,
+ PatternAnalyzer a = new PatternAnalyzer(TEST_VERSION_CURRENT, PatternAnalyzer.WHITESPACE_PATTERN,
false, null);
check(a, "The quick brown Fox,the abcd1234 (56.78) dc.", new String[] {
"The", "quick", "brown", "Fox,the", "abcd1234", "(56.78)", "dc." });
// Split on whitespace patterns, lowercase, english stopwords
- PatternAnalyzer b = new PatternAnalyzer(Version.LUCENE_CURRENT, PatternAnalyzer.WHITESPACE_PATTERN,
+ PatternAnalyzer b = new PatternAnalyzer(TEST_VERSION_CURRENT, PatternAnalyzer.WHITESPACE_PATTERN,
true, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
check(b, "The quick brown Fox,the abcd1234 (56.78) dc.", new String[] {
"quick", "brown", "fox,the", "abcd1234", "(56.78)", "dc." });
@@ -73,12 +72,12 @@
*/
public void testCustomPattern() throws IOException {
// Split on comma, do not lowercase, no stopwords
- PatternAnalyzer a = new PatternAnalyzer(Version.LUCENE_CURRENT, Pattern.compile(","), false, null);
+ PatternAnalyzer a = new PatternAnalyzer(TEST_VERSION_CURRENT, Pattern.compile(","), false, null);
check(a, "Here,Are,some,Comma,separated,words,", new String[] { "Here",
"Are", "some", "Comma", "separated", "words" });
// split on comma, lowercase, english stopwords
- PatternAnalyzer b = new PatternAnalyzer(Version.LUCENE_CURRENT, Pattern.compile(","), true,
+ PatternAnalyzer b = new PatternAnalyzer(TEST_VERSION_CURRENT, Pattern.compile(","), true,
StopAnalyzer.ENGLISH_STOP_WORDS_SET);
check(b, "Here,Are,some,Comma,separated,words,", new String[] { "here",
"some", "comma", "separated", "words" });
@@ -103,7 +102,7 @@
document.append(largeWord2);
// Split on whitespace patterns, do not lowercase, no stopwords
- PatternAnalyzer a = new PatternAnalyzer(Version.LUCENE_CURRENT, PatternAnalyzer.WHITESPACE_PATTERN,
+ PatternAnalyzer a = new PatternAnalyzer(TEST_VERSION_CURRENT, PatternAnalyzer.WHITESPACE_PATTERN,
false, null);
check(a, document.toString(), new String[] { new String(largeWord),
new String(largeWord2) });
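PatternAnalyzer tokenizes by regex match of the separator, optionally lowercasing and stop-filtering the result. A standalone sketch of the comma case tested above (class name illustrative; Version.LUCENE_CURRENT stands in for TEST_VERSION_CURRENT):

import java.io.StringReader;
import java.util.regex.Pattern;
import org.apache.lucene.analysis.StopAnalyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.PatternAnalyzer;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.Version;

public class PatternAnalyzerSketch {
  public static void main(String[] args) throws Exception {
    // Split on commas, lowercase, and drop English stopwords.
    PatternAnalyzer a = new PatternAnalyzer(Version.LUCENE_CURRENT,
        Pattern.compile(","), true, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
    TokenStream ts = a.tokenStream("field",
        new StringReader("Here,Are,some,Comma,separated,words,"));
    TermAttribute term = ts.getAttribute(TermAttribute.class);
    while (ts.incrementToken()) {
      System.out.println(term.term()); // here, some, comma, separated, words
    }
  }
}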
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAndSuffixAwareTokenFilter.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAndSuffixAwareTokenFilter.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAndSuffixAwareTokenFilter.java (working copy)
@@ -20,7 +20,6 @@
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.WhitespaceTokenizer;
-import org.apache.lucene.util.Version;
import java.io.IOException;
import java.io.StringReader;
@@ -31,7 +30,7 @@
PrefixAndSuffixAwareTokenFilter ts = new PrefixAndSuffixAwareTokenFilter(
new SingleTokenTokenStream(createToken("^", 0, 0)),
- new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("hello world")),
+ new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("hello world")),
new SingleTokenTokenStream(createToken("$", 0, 0)));
assertTokenStreamContents(ts,
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAwareTokenFilter.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAwareTokenFilter.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestPrefixAwareTokenFilter.java (working copy)
@@ -20,7 +20,6 @@
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.WhitespaceTokenizer;
-import org.apache.lucene.util.Version;
import java.io.IOException;
import java.io.StringReader;
@@ -42,7 +41,7 @@
// prefix and suffix using 2x prefix
ts = new PrefixAwareTokenFilter(new SingleTokenTokenStream(createToken("^", 0, 0)),
- new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("hello world")));
+ new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("hello world")));
ts = new PrefixAwareTokenFilter(ts, new SingleTokenTokenStream(createToken("$", 0, 0)));
assertTokenStreamContents(ts,
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestStemmerOverrideFilter.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestStemmerOverrideFilter.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/miscellaneous/TestStemmerOverrideFilter.java (working copy)
@@ -10,7 +10,6 @@
import org.apache.lucene.analysis.PorterStemFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.util.Version;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
@@ -38,7 +37,7 @@
dictionary.put("booked", "books");
Tokenizer tokenizer = new KeywordTokenizer(new StringReader("booked"));
TokenStream stream = new PorterStemFilter(
- new StemmerOverrideFilter(Version.LUCENE_CURRENT, tokenizer, dictionary));
+ new StemmerOverrideFilter(TEST_VERSION_CURRENT, tokenizer, dictionary));
assertTokenStreamContents(stream, new String[] { "books" });
}
}
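StemmerOverrideFilter pins selected surface forms to fixed stems and marks them as keywords, so a later stemmer leaves them alone; that is exactly what this test asserts. A hedged sketch of the same wiring (class name illustrative; Version.LUCENE_CURRENT stands in for TEST_VERSION_CURRENT):

import java.io.StringReader;
import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.analysis.KeywordTokenizer;
import org.apache.lucene.analysis.PorterStemFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.StemmerOverrideFilter;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.Version;

public class StemmerOverrideSketch {
  public static void main(String[] args) throws Exception {
    Map<String, String> dictionary = new HashMap<String, String>();
    dictionary.put("booked", "books"); // force "booked" -> "books"
    TokenStream ts = new PorterStemFilter(new StemmerOverrideFilter(
        Version.LUCENE_CURRENT,
        new KeywordTokenizer(new StringReader("booked")), dictionary));
    TermAttribute term = ts.getAttribute(TermAttribute.class);
    while (ts.incrementToken()) {
      System.out.println(term.term()); // books (plain Porter would give "book")
    }
  }
}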
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilterTest.java (working copy)
@@ -20,7 +20,6 @@
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
import java.io.StringReader;
@@ -31,9 +30,9 @@
private TokenStream input;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
- input = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("abcde"));
+ input = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("abcde"));
}
public void testInvalidInput() throws Exception {
@@ -92,13 +91,13 @@
}
public void testSmallTokenInStream() throws Exception {
- input = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("abc de fgh"));
+ input = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("abc de fgh"));
EdgeNGramTokenFilter tokenizer = new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.Side.FRONT, 3, 3);
assertTokenStreamContents(tokenizer, new String[]{"abc","fgh"}, new int[]{0,7}, new int[]{3,10});
}
public void testReset() throws Exception {
- WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("abcde"));
+ WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("abcde"));
EdgeNGramTokenFilter filter = new EdgeNGramTokenFilter(tokenizer, EdgeNGramTokenFilter.Side.FRONT, 1, 3);
assertTokenStreamContents(filter, new String[]{"a","ab","abc"}, new int[]{0,0,0}, new int[]{1,2,3});
tokenizer.reset(new StringReader("abcde"));
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/EdgeNGramTokenizerTest.java (working copy)
@@ -29,7 +29,7 @@
private StringReader input;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
input = new StringReader("abcde");
}
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenFilterTest.java (working copy)
@@ -20,7 +20,6 @@
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
import java.io.StringReader;
@@ -31,9 +30,9 @@
private TokenStream input;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
- input = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("abcde"));
+ input = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("abcde"));
}
public void testInvalidInput() throws Exception {
@@ -81,13 +80,13 @@
}
public void testSmallTokenInStream() throws Exception {
- input = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("abc de fgh"));
+ input = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("abc de fgh"));
NGramTokenFilter filter = new NGramTokenFilter(input, 3, 3);
assertTokenStreamContents(filter, new String[]{"abc","fgh"}, new int[]{0,7}, new int[]{3,10});
}
public void testReset() throws Exception {
- WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("abcde"));
+ WhitespaceTokenizer tokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("abcde"));
NGramTokenFilter filter = new NGramTokenFilter(tokenizer, 1, 1);
assertTokenStreamContents(filter, new String[]{"a","b","c","d","e"}, new int[]{0,1,2,3,4}, new int[]{1,2,3,4,5});
tokenizer.reset(new StringReader("abcde"));
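NGramTokenFilter emits every substring of the configured gram sizes from each incoming token. A sketch of the 3-gram case exercised above (class name illustrative; Version.LUCENE_CURRENT stands in for TEST_VERSION_CURRENT):

import java.io.StringReader;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.ngram.NGramTokenFilter;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.Version;

public class NGramSketch {
  public static void main(String[] args) throws Exception {
    NGramTokenFilter filter = new NGramTokenFilter(
        new WhitespaceTokenizer(Version.LUCENE_CURRENT,
            new StringReader("abcde")), 3, 3);
    TermAttribute term = filter.getAttribute(TermAttribute.class);
    while (filter.incrementToken()) {
      System.out.println(term.term()); // abc, bcd, cde
    }
  }
}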
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/ngram/NGramTokenizerTest.java (working copy)
@@ -29,7 +29,7 @@
private StringReader input;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
input = new StringReader("abcde");
}
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/nl/TestDutchStemmer.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/nl/TestDutchStemmer.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/nl/TestDutchStemmer.java (working copy)
@@ -127,14 +127,14 @@
}
public void testSnowballCorrectness() throws Exception {
- Analyzer a = new DutchAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new DutchAnalyzer(TEST_VERSION_CURRENT);
checkOneTermReuse(a, "opheffen", "opheff");
checkOneTermReuse(a, "opheffende", "opheff");
checkOneTermReuse(a, "opheffing", "opheff");
}
public void testReusableTokenStream() throws Exception {
- Analyzer a = new DutchAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new DutchAnalyzer(TEST_VERSION_CURRENT);
checkOneTermReuse(a, "lichaamsziek", "lichaamsziek");
checkOneTermReuse(a, "lichamelijk", "licham");
checkOneTermReuse(a, "lichamelijke", "licham");
@@ -146,7 +146,7 @@
* when using reusable token streams.
*/
public void testExclusionTableReuse() throws Exception {
- DutchAnalyzer a = new DutchAnalyzer(Version.LUCENE_CURRENT);
+ DutchAnalyzer a = new DutchAnalyzer(TEST_VERSION_CURRENT);
checkOneTermReuse(a, "lichamelijk", "licham");
a.setStemExclusionTable(new String[] { "lichamelijk" });
checkOneTermReuse(a, "lichamelijk", "lichamelijk");
@@ -157,10 +157,10 @@
public void testExclusionTableViaCtor() throws IOException {
CharArraySet set = new CharArraySet(Version.LUCENE_30, 1, true);
set.add("lichamelijk");
- DutchAnalyzer a = new DutchAnalyzer(Version.LUCENE_CURRENT, CharArraySet.EMPTY_SET, set);
+ DutchAnalyzer a = new DutchAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET, set);
assertAnalyzesToReuse(a, "lichamelijk lichamelijke", new String[] { "lichamelijk", "licham" });
- a = new DutchAnalyzer(Version.LUCENE_CURRENT, CharArraySet.EMPTY_SET, set);
+ a = new DutchAnalyzer(TEST_VERSION_CURRENT, CharArraySet.EMPTY_SET, set);
assertAnalyzesTo(a, "lichamelijk lichamelijke", new String[] { "lichamelijk", "licham" });
}
@@ -170,7 +170,7 @@
* when using reusable token streams.
*/
public void testStemDictionaryReuse() throws Exception {
- DutchAnalyzer a = new DutchAnalyzer(Version.LUCENE_CURRENT);
+ DutchAnalyzer a = new DutchAnalyzer(TEST_VERSION_CURRENT);
checkOneTermReuse(a, "lichamelijk", "licham");
a.setStemDictionary(customDictFile);
checkOneTermReuse(a, "lichamelijk", "somethingentirelydifferent");
@@ -196,7 +196,7 @@
}
private void check(final String input, final String expected) throws Exception {
- checkOneTerm(new DutchAnalyzer(Version.LUCENE_CURRENT), input, expected);
+ checkOneTerm(new DutchAnalyzer(TEST_VERSION_CURRENT), input, expected);
}
}
\ No newline at end of file
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/no/TestNorwegianAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/no/TestNorwegianAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/no/TestNorwegianAnalyzer.java (working copy)
@@ -23,18 +23,17 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestNorwegianAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new NorwegianAnalyzer(Version.LUCENE_CURRENT);
+ new NorwegianAnalyzer(TEST_VERSION_CURRENT);
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new NorwegianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new NorwegianAnalyzer(TEST_VERSION_CURRENT);
// stemming
checkOneTermReuse(a, "havnedistriktene", "havnedistrikt");
checkOneTermReuse(a, "havnedistrikter", "havnedistrikt");
@@ -46,7 +45,7 @@
public void testExclude() throws IOException {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("havnedistriktene");
- Analyzer a = new NorwegianAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new NorwegianAnalyzer(TEST_VERSION_CURRENT,
NorwegianAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "havnedistriktene", "havnedistriktene");
checkOneTermReuse(a, "havnedistrikter", "havnedistrikt");
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/DelimitedPayloadTokenFilterTest.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/DelimitedPayloadTokenFilterTest.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/DelimitedPayloadTokenFilterTest.java (working copy)
@@ -22,21 +22,15 @@
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.index.Payload;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import java.io.StringReader;
-
-/**
- *
- *
- **/
public class DelimitedPayloadTokenFilterTest extends LuceneTestCase {
public void testPayloads() throws Exception {
String test = "The quick|JJ red|JJ fox|NN jumped|VB over the lazy|JJ brown|JJ dogs|NN";
DelimitedPayloadTokenFilter filter = new DelimitedPayloadTokenFilter
- (new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(test)),
+ (new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(test)),
DelimitedPayloadTokenFilter.DEFAULT_DELIMITER, new IdentityEncoder());
TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
PayloadAttribute payAtt = filter.getAttribute(PayloadAttribute.class);
@@ -57,7 +51,7 @@
String test = "The quick|JJ red|JJ fox|NN jumped|VB over the lazy|JJ brown|JJ dogs|NN";
DelimitedPayloadTokenFilter filter = new DelimitedPayloadTokenFilter
- (new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(test)),
+ (new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(test)),
DelimitedPayloadTokenFilter.DEFAULT_DELIMITER, new IdentityEncoder());
assertTermEquals("The", filter, null);
assertTermEquals("quick", filter, "JJ".getBytes("UTF-8"));
@@ -75,7 +69,7 @@
public void testFloatEncoding() throws Exception {
String test = "The quick|1.0 red|2.0 fox|3.5 jumped|0.5 over the lazy|5 brown|99.3 dogs|83.7";
- DelimitedPayloadTokenFilter filter = new DelimitedPayloadTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(test)), '|', new FloatEncoder());
+ DelimitedPayloadTokenFilter filter = new DelimitedPayloadTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(test)), '|', new FloatEncoder());
TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
PayloadAttribute payAtt = filter.getAttribute(PayloadAttribute.class);
assertTermEquals("The", filter, termAtt, payAtt, null);
@@ -93,7 +87,7 @@
public void testIntEncoding() throws Exception {
String test = "The quick|1 red|2 fox|3 jumped over the lazy|5 brown|99 dogs|83";
- DelimitedPayloadTokenFilter filter = new DelimitedPayloadTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(test)), '|', new IntegerEncoder());
+ DelimitedPayloadTokenFilter filter = new DelimitedPayloadTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(test)), '|', new IntegerEncoder());
TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
PayloadAttribute payAtt = filter.getAttribute(PayloadAttribute.class);
assertTermEquals("The", filter, termAtt, payAtt, null);
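DelimitedPayloadTokenFilter splits each token at the delimiter and stores the encoded suffix as a payload. A sketch that decodes the float payloads back out; it assumes PayloadHelper.decodeFloat from the same payloads package (not shown in this patch), with Version.LUCENE_CURRENT standing in for TEST_VERSION_CURRENT:

import java.io.StringReader;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilter;
import org.apache.lucene.analysis.payloads.FloatEncoder;
import org.apache.lucene.analysis.payloads.PayloadHelper;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.Version;

public class PayloadSketch {
  public static void main(String[] args) throws Exception {
    DelimitedPayloadTokenFilter f = new DelimitedPayloadTokenFilter(
        new WhitespaceTokenizer(Version.LUCENE_CURRENT,
            new StringReader("fox|3.5 dogs|83.7")), '|', new FloatEncoder());
    TermAttribute term = f.getAttribute(TermAttribute.class);
    PayloadAttribute pay = f.getAttribute(PayloadAttribute.class);
    while (f.incrementToken()) {
      // The delimiter and suffix are stripped from the term; the suffix
      // survives as an encoded payload on the token.
      float weight = PayloadHelper.decodeFloat(pay.getPayload().getData());
      System.out.println(term.term() + " -> " + weight);
    }
  }
}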
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilterTest.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilterTest.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/NumericPayloadTokenFilterTest.java (working copy)
@@ -23,7 +23,6 @@
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-import org.apache.lucene.util.Version;
import java.io.IOException;
import java.io.StringReader;
@@ -38,7 +37,7 @@
public void test() throws IOException {
String test = "The quick red fox jumped over the lazy brown dogs";
- NumericPayloadTokenFilter nptf = new NumericPayloadTokenFilter(new WordTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(test))), 3, "D");
+ NumericPayloadTokenFilter nptf = new NumericPayloadTokenFilter(new WordTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(test))), 3, "D");
boolean seenDogs = false;
TermAttribute termAtt = nptf.getAttribute(TermAttribute.class);
TypeAttribute typeAtt = nptf.getAttribute(TypeAttribute.class);
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TokenOffsetPayloadTokenFilterTest.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TokenOffsetPayloadTokenFilterTest.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TokenOffsetPayloadTokenFilterTest.java (working copy)
@@ -21,7 +21,6 @@
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.index.Payload;
-import org.apache.lucene.util.Version;
import java.io.IOException;
import java.io.StringReader;
@@ -36,7 +35,7 @@
public void test() throws IOException {
String test = "The quick red fox jumped over the lazy brown dogs";
- TokenOffsetPayloadTokenFilter nptf = new TokenOffsetPayloadTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(test)));
+ TokenOffsetPayloadTokenFilter nptf = new TokenOffsetPayloadTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(test)));
int count = 0;
PayloadAttribute payloadAtt = nptf.getAttribute(PayloadAttribute.class);
OffsetAttribute offsetAtt = nptf.getAttribute(OffsetAttribute.class);
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilterTest.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilterTest.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/payloads/TypeAsPayloadTokenFilterTest.java (working copy)
@@ -23,7 +23,6 @@
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-import org.apache.lucene.util.Version;
import java.io.IOException;
import java.io.StringReader;
@@ -38,7 +37,7 @@
public void test() throws IOException {
String test = "The quick red fox jumped over the lazy brown dogs";
- TypeAsPayloadTokenFilter nptf = new TypeAsPayloadTokenFilter(new WordTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(test))));
+ TypeAsPayloadTokenFilter nptf = new TypeAsPayloadTokenFilter(new WordTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(test))));
int count = 0;
TermAttribute termAtt = nptf.getAttribute(TermAttribute.class);
TypeAttribute typeAtt = nptf.getAttribute(TypeAttribute.class);
@@ -48,7 +47,6 @@
assertTrue(typeAtt.type() + " is not null and it should be", typeAtt.type().equals(String.valueOf(Character.toUpperCase(termAtt.termBuffer()[0]))));
assertTrue("nextToken.getPayload() is null and it shouldn't be", payloadAtt.getPayload() != null);
String type = new String(payloadAtt.getPayload().getData(), "UTF-8");
- assertTrue("type is null and it shouldn't be", type != null);
assertTrue(type + " is not equal to " + typeAtt.type(), type.equals(typeAtt.type()) == true);
count++;
}
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/pt/TestPortugueseAnalyzer.java (working copy)
@@ -23,18 +23,17 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestPortugueseAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new PortugueseAnalyzer(Version.LUCENE_CURRENT);
+ new PortugueseAnalyzer(TEST_VERSION_CURRENT);
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new PortugueseAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new PortugueseAnalyzer(TEST_VERSION_CURRENT);
// stemming
checkOneTermReuse(a, "quilométricas", "quilométr");
checkOneTermReuse(a, "quilométricos", "quilométr");
@@ -46,7 +45,7 @@
public void testExclude() throws IOException {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("quilométricas");
- Analyzer a = new PortugueseAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new PortugueseAnalyzer(TEST_VERSION_CURRENT,
PortugueseAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "quilométricas", "quilométricas");
checkOneTermReuse(a, "quilométricos", "quilométr");
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java (working copy)
@@ -37,7 +37,6 @@
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
public class QueryAutoStopWordAnalyzerTest extends BaseTokenStreamTestCase {
String variedFieldValues[] = {"the", "quick", "brown", "fox", "jumped", "over", "the", "lazy", "boring", "dog"};
@@ -51,7 +50,7 @@
protected void setUp() throws Exception {
super.setUp();
dir = new RAMDirectory();
- appAnalyzer = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ appAnalyzer = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
IndexWriter writer = new IndexWriter(dir, appAnalyzer, true, IndexWriter.MaxFieldLength.UNLIMITED);
int numDocs = 200;
for (int i = 0; i < numDocs; i++) {
@@ -64,7 +63,7 @@
}
writer.close();
reader = IndexReader.open(dir, true);
- protectedAnalyzer = new QueryAutoStopWordAnalyzer(Version.LUCENE_CURRENT, appAnalyzer);
+ protectedAnalyzer = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, appAnalyzer);
}
@Override
@@ -75,7 +74,7 @@
//Helper method to query
private int search(Analyzer a, String queryString) throws IOException, ParseException {
- QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "repetitiveField", a);
+ QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "repetitiveField", a);
Query q = qp.parse(queryString);
return new IndexSearcher(reader).search(q, null, 1000).totalHits;
}
@@ -157,14 +156,14 @@
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
if (++invocationCount % 2 == 0)
- return new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader);
+ return new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader);
else
- return new LetterTokenizer(Version.LUCENE_CURRENT, reader);
+ return new LetterTokenizer(TEST_VERSION_CURRENT, reader);
}
}
public void testWrappingNonReusableAnalyzer() throws Exception {
- QueryAutoStopWordAnalyzer a = new QueryAutoStopWordAnalyzer(Version.LUCENE_CURRENT, new NonreusableAnalyzer());
+ QueryAutoStopWordAnalyzer a = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, new NonreusableAnalyzer());
a.addStopWords(reader, 10);
int numHits = search(a, "repetitiveField:boring");
assertTrue(numHits == 0);
@@ -173,7 +172,7 @@
}
public void testTokenStream() throws Exception {
- QueryAutoStopWordAnalyzer a = new QueryAutoStopWordAnalyzer(Version.LUCENE_CURRENT, new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ QueryAutoStopWordAnalyzer a = new QueryAutoStopWordAnalyzer(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
a.addStopWords(reader, 10);
TokenStream ts = a.tokenStream("repetitiveField", new StringReader("this boring"));
TermAttribute termAtt = ts.getAttribute(TermAttribute.class);
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilter.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilter.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/reverse/TestReverseStringFilter.java (working copy)
@@ -27,9 +27,9 @@
public class TestReverseStringFilter extends BaseTokenStreamTestCase {
public void testFilter() throws Exception {
- TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT,
+ TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT,
new StringReader("Do have a nice day")); // 1-4 length string
- ReverseStringFilter filter = new ReverseStringFilter(Version.LUCENE_CURRENT, stream);
+ ReverseStringFilter filter = new ReverseStringFilter(TEST_VERSION_CURRENT, stream);
TermAttribute text = filter.getAttribute(TermAttribute.class);
assertTrue(filter.incrementToken());
assertEquals("oD", text.term());
@@ -45,9 +45,9 @@
}
public void testFilterWithMark() throws Exception {
- TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
"Do have a nice day")); // 1-4 length string
- ReverseStringFilter filter = new ReverseStringFilter(Version.LUCENE_CURRENT, stream, '\u0001');
+ ReverseStringFilter filter = new ReverseStringFilter(TEST_VERSION_CURRENT, stream, '\u0001');
TermAttribute text = filter
.getAttribute(TermAttribute.class);
assertTrue(filter.incrementToken());
@@ -64,14 +64,14 @@
}
public void testReverseString() throws Exception {
- assertEquals( "A", ReverseStringFilter.reverse( "A" ) );
- assertEquals( "BA", ReverseStringFilter.reverse( "AB" ) );
- assertEquals( "CBA", ReverseStringFilter.reverse( "ABC" ) );
+ assertEquals( "A", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "A" ) );
+ assertEquals( "BA", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "AB" ) );
+ assertEquals( "CBA", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "ABC" ) );
}
public void testReverseChar() throws Exception {
char[] buffer = { 'A', 'B', 'C', 'D', 'E', 'F' };
- ReverseStringFilter.reverse( buffer, 2, 3 );
+ ReverseStringFilter.reverse(TEST_VERSION_CURRENT, buffer, 2, 3 );
assertEquals( "ABEDCF", new String( buffer ) );
}
@@ -84,37 +84,37 @@
public void testReverseSupplementary() throws Exception {
// supplementary at end
- assertEquals("𩬅艱鍟䇹愯瀛", ReverseStringFilter.reverse(Version.LUCENE_CURRENT, "瀛愯䇹鍟艱𩬅"));
+ assertEquals("𩬅艱鍟䇹愯瀛", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "瀛愯䇹鍟艱𩬅"));
// supplementary at end - 1
- assertEquals("a𩬅艱鍟䇹愯瀛", ReverseStringFilter.reverse(Version.LUCENE_CURRENT, "瀛愯䇹鍟艱𩬅a"));
+ assertEquals("a𩬅艱鍟䇹愯瀛", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "瀛愯䇹鍟艱𩬅a"));
// supplementary at start
- assertEquals("fedcba𩬅", ReverseStringFilter.reverse(Version.LUCENE_CURRENT, "𩬅abcdef"));
+ assertEquals("fedcba𩬅", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "𩬅abcdef"));
// supplementary at start + 1
- assertEquals("fedcba𩬅z", ReverseStringFilter.reverse(Version.LUCENE_CURRENT, "z𩬅abcdef"));
+ assertEquals("fedcba𩬅z", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "z𩬅abcdef"));
// supplementary medial
- assertEquals("gfe𩬅dcba", ReverseStringFilter.reverse(Version.LUCENE_CURRENT, "abcd𩬅efg"));
+ assertEquals("gfe𩬅dcba", ReverseStringFilter.reverse(TEST_VERSION_CURRENT, "abcd𩬅efg"));
}
public void testReverseSupplementaryChar() throws Exception {
// supplementary at end
char[] buffer = "abc瀛愯䇹鍟艱𩬅".toCharArray();
- ReverseStringFilter.reverse(Version.LUCENE_CURRENT, buffer, 3, 7);
+ ReverseStringFilter.reverse(TEST_VERSION_CURRENT, buffer, 3, 7);
assertEquals("abc𩬅艱鍟䇹愯瀛", new String(buffer));
// supplementary at end - 1
buffer = "abc瀛愯䇹鍟艱𩬅d".toCharArray();
- ReverseStringFilter.reverse(Version.LUCENE_CURRENT, buffer, 3, 8);
+ ReverseStringFilter.reverse(TEST_VERSION_CURRENT, buffer, 3, 8);
assertEquals("abcd𩬅艱鍟䇹愯瀛", new String(buffer));
// supplementary at start
buffer = "abc𩬅瀛愯䇹鍟艱".toCharArray();
- ReverseStringFilter.reverse(Version.LUCENE_CURRENT, buffer, 3, 7);
+ ReverseStringFilter.reverse(TEST_VERSION_CURRENT, buffer, 3, 7);
assertEquals("abc艱鍟䇹愯瀛𩬅", new String(buffer));
// supplementary at start + 1
buffer = "abcd𩬅瀛愯䇹鍟艱".toCharArray();
- ReverseStringFilter.reverse(Version.LUCENE_CURRENT, buffer, 3, 8);
+ ReverseStringFilter.reverse(TEST_VERSION_CURRENT, buffer, 3, 8);
assertEquals("abc艱鍟䇹愯瀛𩬅d", new String(buffer));
// supplementary medial
buffer = "abc瀛愯𩬅def".toCharArray();
- ReverseStringFilter.reverse(Version.LUCENE_CURRENT, buffer, 3, 7);
+ ReverseStringFilter.reverse(TEST_VERSION_CURRENT, buffer, 3, 7);
assertEquals("abcfed𩬅愯瀛", new String(buffer));
}
}
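As an aside on why these supplementary-character cases need a Version-aware reverse: naive char-by-char reversal splits surrogate pairs. A minimal JDK-only sketch contrasting the two behaviors (illustrative only, not part of the patch; the demo class name is invented):

public class SurrogateReverseDemo {
  public static void main(String[] args) {
    String s = "a\uD866\uDF05b"; // 'a', a supplementary character (one surrogate pair), 'b'

    // Naive char-by-char reversal emits the pair in the wrong order, corrupting the text.
    StringBuilder naive = new StringBuilder();
    for (int i = s.length() - 1; i >= 0; i--) {
      naive.append(s.charAt(i));
    }

    // StringBuilder.reverse() keeps surrogate pairs intact, which is the behavior
    // the Version-aware ReverseStringFilter.reverse() tests above expect.
    String correct = new StringBuilder(s).reverse().toString();

    System.out.println(naive);   // broken: unpaired surrogates
    System.out.println(correct); // 'b', the supplementary character, 'a'
  }
}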
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/ro/TestRomanianAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/ro/TestRomanianAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/ro/TestRomanianAnalyzer.java (working copy)
@@ -23,18 +23,17 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestRomanianAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new RomanianAnalyzer(Version.LUCENE_CURRENT);
+ new RomanianAnalyzer(TEST_VERSION_CURRENT);
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new RomanianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new RomanianAnalyzer(TEST_VERSION_CURRENT);
// stemming
checkOneTermReuse(a, "absenţa", "absenţ");
checkOneTermReuse(a, "absenţi", "absenţ");
@@ -46,7 +45,7 @@
public void testExclude() throws IOException {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("absenţa");
- Analyzer a = new RomanianAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new RomanianAnalyzer(TEST_VERSION_CURRENT,
RomanianAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "absenţa", "absenţa");
checkOneTermReuse(a, "absenţi", "absenţ");
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/ru/TestRussianAnalyzer.java (working copy)
@@ -44,8 +44,7 @@
private File dataDir;
@Override
- protected void setUp() throws Exception
- {
+ protected void setUp() throws Exception {
super.setUp();
dataDir = new File(System.getProperty("dataDir", "./bin"));
}
@@ -71,7 +70,7 @@
TokenStream in = ra.tokenStream("all", inWords);
RussianLetterTokenizer sample =
- new RussianLetterTokenizer(Version.LUCENE_CURRENT,
+ new RussianLetterTokenizer(TEST_VERSION_CURRENT,
sampleUnicode);
TermAttribute text = in.getAttribute(TermAttribute.class);
@@ -98,7 +97,7 @@
public void testDigitsInRussianCharset()
{
Reader reader = new StringReader("text 1000");
- RussianAnalyzer ra = new RussianAnalyzer(Version.LUCENE_CURRENT);
+ RussianAnalyzer ra = new RussianAnalyzer(TEST_VERSION_CURRENT);
TokenStream stream = ra.tokenStream("", reader);
TermAttribute termText = stream.getAttribute(TermAttribute.class);
@@ -126,7 +125,7 @@
}
public void testReusableTokenStream() throws Exception {
- Analyzer a = new RussianAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new RussianAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesToReuse(a, "Вместе с тем о силе электромагнитной энергии имели представление еще",
new String[] { "вмест", "сил", "электромагнитн", "энерг", "имел", "представлен" });
assertAnalyzesToReuse(a, "Но знание это хранилось в тайне",
@@ -135,9 +134,9 @@
public void testWithStemExclusionSet() throws Exception {
- CharArraySet set = new CharArraySet(Version.LUCENE_CURRENT, 1, true);
+ CharArraySet set = new CharArraySet(TEST_VERSION_CURRENT, 1, true);
set.add("представление");
- Analyzer a = new RussianAnalyzer(Version.LUCENE_CURRENT, RussianAnalyzer.getDefaultStopSet() , set);
+ Analyzer a = new RussianAnalyzer(TEST_VERSION_CURRENT, RussianAnalyzer.getDefaultStopSet() , set);
assertAnalyzesToReuse(a, "Вместе с тем о силе электромагнитной энергии имели представление еще",
new String[] { "вмест", "сил", "электромагнитн", "энерг", "имел", "представление" });
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/ru/TestRussianStem.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/ru/TestRussianStem.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/ru/TestRussianStem.java (working copy)
@@ -30,8 +30,8 @@
@Deprecated
public class TestRussianStem extends LuceneTestCase
{
- private ArrayList words = new ArrayList();
- private ArrayList stems = new ArrayList();
+ private ArrayList<String> words = new ArrayList<String>();
+ private ArrayList<String> stems = new ArrayList<String>();
public TestRussianStem(String name)
{
@@ -42,8 +42,7 @@
* @see TestCase#setUp()
*/
@Override
- protected void setUp() throws Exception
- {
+ protected void setUp() throws Exception {
super.setUp();
//System.out.println(new java.util.Date());
String str;
@@ -75,15 +74,6 @@
inStems.close();
}
- /**
- * @see TestCase#tearDown()
- */
- @Override
- protected void tearDown() throws Exception
- {
- super.tearDown();
- }
-
public void testStem()
{
for (int i = 0; i < words.size(); i++)
@@ -91,7 +81,7 @@
//if ( (i % 100) == 0 ) System.err.println(i);
String realStem =
RussianStemmer.stemWord(
- (String) words.get(i));
+ words.get(i));
assertEquals("unicode", stems.get(i), realStem);
}
}
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java (working copy)
@@ -42,7 +42,6 @@
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
/**
* A test class for ShingleAnalyzerWrapper as regards queries and scoring.
@@ -86,7 +85,7 @@
protected ScoreDoc[] queryParsingTest(Analyzer analyzer, String qs) throws Exception {
searcher = setUpSearcher(analyzer);
- QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "content", analyzer);
+ QueryParser qp = new QueryParser(TEST_VERSION_CURRENT, "content", analyzer);
Query q = qp.parse(qs);
@@ -106,7 +105,7 @@
*/
public void testShingleAnalyzerWrapperQueryParsing() throws Exception {
ScoreDoc[] hits = queryParsingTest(new ShingleAnalyzerWrapper
- (new WhitespaceAnalyzer(Version.LUCENE_CURRENT), 2),
+ (new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 2),
"test sentence");
int[] ranks = new int[] { 1, 2, 0 };
compareRanks(hits, ranks);
@@ -117,7 +116,7 @@
*/
public void testShingleAnalyzerWrapperPhraseQueryParsingFails() throws Exception {
ScoreDoc[] hits = queryParsingTest(new ShingleAnalyzerWrapper
- (new WhitespaceAnalyzer(Version.LUCENE_CURRENT), 2),
+ (new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 2),
"\"this sentence\"");
int[] ranks = new int[] { 0 };
compareRanks(hits, ranks);
@@ -128,7 +127,7 @@
*/
public void testShingleAnalyzerWrapperPhraseQueryParsing() throws Exception {
ScoreDoc[] hits = queryParsingTest(new ShingleAnalyzerWrapper
- (new WhitespaceAnalyzer(Version.LUCENE_CURRENT), 2),
+ (new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 2),
"\"test sentence\"");
int[] ranks = new int[] { 1 };
compareRanks(hits, ranks);
@@ -139,7 +138,7 @@
*/
public void testShingleAnalyzerWrapperRequiredQueryParsing() throws Exception {
ScoreDoc[] hits = queryParsingTest(new ShingleAnalyzerWrapper
- (new WhitespaceAnalyzer(Version.LUCENE_CURRENT), 2),
+ (new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 2),
"+test +sentence");
int[] ranks = new int[] { 1, 2 };
compareRanks(hits, ranks);
@@ -149,7 +148,7 @@
* This shows how to construct a phrase query containing shingles.
*/
public void testShingleAnalyzerWrapperPhraseQuery() throws Exception {
- Analyzer analyzer = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(Version.LUCENE_CURRENT), 2);
+ Analyzer analyzer = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 2);
searcher = setUpSearcher(analyzer);
PhraseQuery q = new PhraseQuery();
@@ -178,7 +177,7 @@
* in the right order and adjacent to each other.
*/
public void testShingleAnalyzerWrapperBooleanQuery() throws Exception {
- Analyzer analyzer = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(Version.LUCENE_CURRENT), 2);
+ Analyzer analyzer = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 2);
searcher = setUpSearcher(analyzer);
BooleanQuery q = new BooleanQuery();
@@ -200,7 +199,7 @@
}
public void testReusableTokenStream() throws Exception {
- Analyzer a = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(Version.LUCENE_CURRENT), 2);
+ Analyzer a = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 2);
assertAnalyzesToReuse(a, "please divide into shingles",
new String[] { "please", "please divide", "divide", "divide into", "into", "into shingles", "shingles" },
new int[] { 0, 0, 7, 7, 14, 14, 19 },
@@ -222,9 +221,9 @@
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
if (++invocationCount % 2 == 0)
- return new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader);
+ return new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader);
else
- return new LetterTokenizer(Version.LUCENE_CURRENT, reader);
+ return new LetterTokenizer(TEST_VERSION_CURRENT, reader);
}
}
@@ -249,7 +248,7 @@
public void testNonDefaultMinShingleSize() throws Exception {
ShingleAnalyzerWrapper analyzer
- = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(), 3, 4);
+ = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 3, 4);
assertAnalyzesToReuse(analyzer, "please divide this sentence into shingles",
new String[] { "please", "please divide this", "please divide this sentence",
"divide", "divide this sentence", "divide this sentence into",
@@ -273,7 +272,7 @@
public void testNonDefaultMinAndSameMaxShingleSize() throws Exception {
ShingleAnalyzerWrapper analyzer
- = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(), 3, 3);
+ = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT), 3, 3);
assertAnalyzesToReuse(analyzer, "please divide this sentence into shingles",
new String[] { "please", "please divide this",
"divide", "divide this sentence",
@@ -297,7 +296,7 @@
public void testNoTokenSeparator() throws Exception {
ShingleAnalyzerWrapper analyzer
- = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer());
+ = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
analyzer.setTokenSeparator("");
assertAnalyzesToReuse(analyzer, "please divide into shingles",
new String[] { "please", "pleasedivide",
@@ -319,7 +318,7 @@
public void testNullTokenSeparator() throws Exception {
ShingleAnalyzerWrapper analyzer
- = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer());
+ = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
analyzer.setTokenSeparator(null);
assertAnalyzesToReuse(analyzer, "please divide into shingles",
new String[] { "please", "pleasedivide",
@@ -340,7 +339,7 @@
}
public void testAltTokenSeparator() throws Exception {
ShingleAnalyzerWrapper analyzer
- = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer());
+ = new ShingleAnalyzerWrapper(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
analyzer.setTokenSeparator("");
assertAnalyzesToReuse(analyzer, "please divide into shingles",
new String[] { "please", "pleasedivide",
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleFilterTest.java (working copy)
@@ -26,7 +26,6 @@
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.*;
-import org.apache.lucene.util.Version;
public class ShingleFilterTest extends BaseTokenStreamTestCase {
@@ -836,7 +835,7 @@
public void testReset() throws Exception {
- Tokenizer wsTokenizer = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("please divide this sentence"));
+ Tokenizer wsTokenizer = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("please divide this sentence"));
TokenStream filter = new ShingleFilter(wsTokenizer, 2);
assertTokenStreamContents(filter,
new String[]{"please","please divide","divide","divide this","this","this sentence","sentence"},
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/TestShingleMatrixFilter.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/TestShingleMatrixFilter.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/TestShingleMatrixFilter.java (working copy)
@@ -31,7 +31,6 @@
import org.apache.lucene.analysis.shingle.ShingleMatrixFilter.Matrix;
import org.apache.lucene.analysis.shingle.ShingleMatrixFilter.Matrix.Column;
import org.apache.lucene.analysis.tokenattributes.*;
-import org.apache.lucene.util.Version;
public class TestShingleMatrixFilter extends BaseTokenStreamTestCase {
@@ -41,11 +40,11 @@
public void testIterator() throws IOException {
- WhitespaceTokenizer wst = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader("one two three four five"));
+ WhitespaceTokenizer wst = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("one two three four five"));
ShingleMatrixFilter smf = new ShingleMatrixFilter(wst, 2, 2, '_', false, new ShingleMatrixFilter.OneDimensionalNonWeightedTokenSettingsCodec());
int i;
- for(i=0; smf.incrementToken(); i++);
+ for(i=0; smf.incrementToken(); i++) {}
assertEquals(4, i);
// call next once more. this should return false again rather than throwing an exception (LUCENE-1939)
@@ -65,11 +64,11 @@
assertFalse(ts.incrementToken());
TokenListStream tls;
- LinkedList tokens;
+ LinkedList<Token> tokens;
// test a plain old token stream with synonyms translated to rows.
- tokens = new LinkedList();
+ tokens = new LinkedList<Token>();
tokens.add(createToken("please", 0, 6));
tokens.add(createToken("divide", 7, 13));
tokens.add(createToken("this", 14, 18));
@@ -101,11 +100,11 @@
TokenStream ts;
TokenStream tls;
- LinkedList tokens;
+ LinkedList<Token> tokens;
// test a plain old token stream with synonyms translated to rows.
- tokens = new LinkedList();
+ tokens = new LinkedList<Token>();
tokens.add(tokenFactory("hello", 1, 0, 4));
tokens.add(tokenFactory("greetings", 0, 0, 4));
tokens.add(tokenFactory("world", 1, 5, 10));
@@ -145,7 +144,7 @@
ShingleMatrixFilter.defaultSettingsCodec = new ShingleMatrixFilter.SimpleThreeDimensionalTokenSettingsCodec();
- tokens = new LinkedList();
+ tokens = new LinkedList<Token>();
tokens.add(tokenFactory("hello", 1, 1f, 0, 4, ShingleMatrixFilter.TokenPositioner.newColumn));
tokens.add(tokenFactory("greetings", 0, 1f, 0, 4, ShingleMatrixFilter.TokenPositioner.newRow));
tokens.add(tokenFactory("world", 1, 1f, 5, 10, ShingleMatrixFilter.TokenPositioner.newColumn));
@@ -286,7 +285,7 @@
//
- tokens = new LinkedList();
+ tokens = new LinkedList<Token>();
tokens.add(tokenFactory("hello", 1, 1f, 0, 4, ShingleMatrixFilter.TokenPositioner.newColumn));
tokens.add(tokenFactory("greetings", 1, 1f, 0, 4, ShingleMatrixFilter.TokenPositioner.newRow));
tokens.add(tokenFactory("and", 1, 1f, 0, 4, ShingleMatrixFilter.TokenPositioner.sameRow));
@@ -413,11 +412,6 @@
}
- private Token tokenFactory(String text, int startOffset, int endOffset) {
- return tokenFactory(text, 1, 1f, startOffset, endOffset);
- }
-
-
private Token tokenFactory(String text, int posIncr, int startOffset, int endOffset) {
Token token = new Token(startOffset, endOffset);
token.setTermBuffer(text);
@@ -430,10 +424,6 @@
return tokenFactory(text, posIncr, 1f, 0, 0);
}
- private Token tokenFactory(String text, int posIncr, float weight) {
- return tokenFactory(text, posIncr, weight, 0, 0);
- }
-
private Token tokenFactory(String text, int posIncr, float weight, int startOffset, int endOffset) {
Token token = new Token(startOffset, endOffset);
token.setTermBuffer(text);
@@ -460,17 +450,6 @@
assertEquals(text, termAtt.term());
}
- private void assertNext(TokenStream ts, String text, int positionIncrement, float boost) throws IOException {
- TermAttribute termAtt = ts.addAttribute(TermAttribute.class);
- PositionIncrementAttribute posIncrAtt = ts.addAttribute(PositionIncrementAttribute.class);
- PayloadAttribute payloadAtt = ts.addAttribute(PayloadAttribute.class);
-
- assertTrue(ts.incrementToken());
- assertEquals(text, termAtt.term());
- assertEquals(positionIncrement, posIncrAtt.getPositionIncrement());
- assertEquals(boost, payloadAtt.getPayload() == null ? 1f : PayloadHelper.decodeFloat(payloadAtt.getPayload().getData()), 0);
- }
-
private void assertNext(TokenStream ts, String text, int positionIncrement, float boost, int startOffset, int endOffset) throws IOException {
TermAttribute termAtt = ts.addAttribute(TermAttribute.class);
PositionIncrementAttribute posIncrAtt = ts.addAttribute(PositionIncrementAttribute.class);
@@ -505,7 +484,7 @@
public static class TokenListStream extends TokenStream {
- private Collection tokens;
+ private Collection<Token> tokens;
TermAttribute termAtt;
PositionIncrementAttribute posIncrAtt;
PayloadAttribute payloadAtt;
@@ -513,7 +492,7 @@
TypeAttribute typeAtt;
FlagsAttribute flagsAtt;
- public TokenListStream(Collection tokens) {
+ public TokenListStream(Collection<Token> tokens) {
this.tokens = tokens;
termAtt = addAttribute(TermAttribute.class);
posIncrAtt = addAttribute(PositionIncrementAttribute.class);
@@ -523,7 +502,7 @@
flagsAtt = addAttribute(FlagsAttribute.class);
}
- private Iterator iterator;
+ private Iterator<Token> iterator;
@Override
public boolean incrementToken() throws IOException {
@@ -533,7 +512,7 @@
if (!iterator.hasNext()) {
return false;
}
- Token prototype = (Token) iterator.next();
+ Token prototype = iterator.next();
clearAttributes();
termAtt.setTermBuffer(prototype.termBuffer(), 0, prototype.termLength());
posIncrAtt.setPositionIncrement(prototype.getPositionIncrement());
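For readers skimming the raw-type cleanups above: parameterizing the collection is what lets the (Token) cast at the iterator disappear. A small standalone sketch of the same before/after pattern (illustrative only, not part of the patch):

import java.util.Iterator;
import java.util.LinkedList;

public class GenericsCastDemo {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) {
    // Before: a raw LinkedList forces a cast at every read.
    LinkedList raw = new LinkedList();
    raw.add("token");
    String first = (String) raw.iterator().next();

    // After: the type parameter moves the check to compile time; no cast needed.
    LinkedList<String> typed = new LinkedList<String>();
    typed.add("token");
    Iterator<String> it = typed.iterator();
    String second = it.next();

    System.out.println(first + " " + second);
  }
}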
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/DateRecognizerSinkTokenizerTest.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/DateRecognizerSinkTokenizerTest.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/DateRecognizerSinkTokenizerTest.java (working copy)
@@ -25,7 +25,6 @@
import org.apache.lucene.analysis.TeeSinkTokenFilter;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.TeeSinkTokenFilter.SinkTokenStream;
-import org.apache.lucene.util.Version;
public class DateRecognizerSinkTokenizerTest extends BaseTokenStreamTestCase {
@@ -37,7 +36,7 @@
public void test() throws IOException {
DateRecognizerSinkFilter sinkFilter = new DateRecognizerSinkFilter(new SimpleDateFormat("MM/dd/yyyy", Locale.US));
String test = "The quick red fox jumped over the lazy brown dogs on 7/11/2006 The dogs finally reacted on 7/12/2006";
- TeeSinkTokenFilter tee = new TeeSinkTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(test)));
+ TeeSinkTokenFilter tee = new TeeSinkTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(test)));
SinkTokenStream sink = tee.newSinkTokenStream(sinkFilter);
int count = 0;
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/TokenRangeSinkTokenizerTest.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/TokenRangeSinkTokenizerTest.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/TokenRangeSinkTokenizerTest.java (working copy)
@@ -23,7 +23,6 @@
import org.apache.lucene.analysis.TeeSinkTokenFilter;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.TeeSinkTokenFilter.SinkTokenStream;
-import org.apache.lucene.util.Version;
public class TokenRangeSinkTokenizerTest extends BaseTokenStreamTestCase {
@@ -35,7 +34,7 @@
public void test() throws IOException {
TokenRangeSinkFilter sinkFilter = new TokenRangeSinkFilter(2, 4);
String test = "The quick red fox jumped over the lazy brown dogs";
- TeeSinkTokenFilter tee = new TeeSinkTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(test)));
+ TeeSinkTokenFilter tee = new TeeSinkTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(test)));
SinkTokenStream rangeToks = tee.newSinkTokenStream(sinkFilter);
int count = 0;
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/TokenTypeSinkTokenizerTest.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/TokenTypeSinkTokenizerTest.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/sinks/TokenTypeSinkTokenizerTest.java (working copy)
@@ -27,11 +27,9 @@
import org.apache.lucene.analysis.TeeSinkTokenFilter.SinkTokenStream;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-import org.apache.lucene.util.Version;
public class TokenTypeSinkTokenizerTest extends BaseTokenStreamTestCase {
-
public TokenTypeSinkTokenizerTest(String s) {
super(s);
}
@@ -40,7 +38,7 @@
TokenTypeSinkFilter sinkFilter = new TokenTypeSinkFilter("D");
String test = "The quick red fox jumped over the lazy brown dogs";
- TeeSinkTokenFilter ttf = new TeeSinkTokenFilter(new WordTokenFilter(new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(test))));
+ TeeSinkTokenFilter ttf = new TeeSinkTokenFilter(new WordTokenFilter(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(test))));
SinkTokenStream sink = ttf.newSinkTokenStream(sinkFilter);
boolean seenDogs = false;
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/snowball/TestSnowball.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/snowball/TestSnowball.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/snowball/TestSnowball.java (working copy)
@@ -33,13 +33,13 @@
public class TestSnowball extends BaseTokenStreamTestCase {
public void testEnglish() throws Exception {
- Analyzer a = new SnowballAnalyzer(Version.LUCENE_CURRENT, "English");
+ Analyzer a = new SnowballAnalyzer(TEST_VERSION_CURRENT, "English");
assertAnalyzesTo(a, "he abhorred accents",
new String[]{"he", "abhor", "accent"});
}
public void testStopwords() throws Exception {
- Analyzer a = new SnowballAnalyzer(Version.LUCENE_CURRENT, "English",
+ Analyzer a = new SnowballAnalyzer(TEST_VERSION_CURRENT, "English",
StandardAnalyzer.STOP_WORDS_SET);
assertAnalyzesTo(a, "the quick brown fox jumped",
new String[]{"quick", "brown", "fox", "jump"});
@@ -50,7 +50,7 @@
* we lowercase I correctly for non-Turkish languages in either case.
*/
public void testEnglishLowerCase() throws Exception {
- Analyzer a = new SnowballAnalyzer(Version.LUCENE_CURRENT, "English");
+ Analyzer a = new SnowballAnalyzer(TEST_VERSION_CURRENT, "English");
assertAnalyzesTo(a, "cryogenic", new String[] { "cryogen" });
assertAnalyzesTo(a, "CRYOGENIC", new String[] { "cryogen" });
@@ -63,7 +63,7 @@
* Test turkish lowercasing
*/
public void testTurkish() throws Exception {
- Analyzer a = new SnowballAnalyzer(Version.LUCENE_CURRENT, "Turkish");
+ Analyzer a = new SnowballAnalyzer(TEST_VERSION_CURRENT, "Turkish");
assertAnalyzesTo(a, "ağacı", new String[] { "ağaç" });
assertAnalyzesTo(a, "AĞACI", new String[] { "ağaç" });
@@ -84,7 +84,7 @@
public void testReusableTokenStream() throws Exception {
- Analyzer a = new SnowballAnalyzer(Version.LUCENE_CURRENT, "English");
+ Analyzer a = new SnowballAnalyzer(TEST_VERSION_CURRENT, "English");
assertAnalyzesToReuse(a, "he abhorred accents",
new String[]{"he", "abhor", "accent"});
assertAnalyzesToReuse(a, "she abhorred him",
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/sv/TestSwedishAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/sv/TestSwedishAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/sv/TestSwedishAnalyzer.java (working copy)
@@ -23,18 +23,17 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestSwedishAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new SwedishAnalyzer(Version.LUCENE_CURRENT);
+ new SwedishAnalyzer(TEST_VERSION_CURRENT);
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new SwedishAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new SwedishAnalyzer(TEST_VERSION_CURRENT);
// stemming
checkOneTermReuse(a, "jaktkarlarne", "jaktkarl");
checkOneTermReuse(a, "jaktkarlens", "jaktkarl");
@@ -46,7 +45,7 @@
public void testExclude() throws IOException {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("jaktkarlarne");
- Analyzer a = new SwedishAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new SwedishAnalyzer(TEST_VERSION_CURRENT,
SwedishAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "jaktkarlarne", "jaktkarlarne");
checkOneTermReuse(a, "jaktkarlens", "jaktkarl");
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/th/TestThaiAnalyzer.java (working copy)
@@ -18,7 +18,6 @@
*/
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
/**
* Test case for ThaiAnalyzer, modified from TestFrenchAnalyzer
@@ -32,7 +31,7 @@
* testcase for offsets
*/
public void testOffsets() throws Exception {
- assertAnalyzesTo(new ThaiAnalyzer(Version.LUCENE_CURRENT), "เดอะนิวยอร์กไทมส์",
+ assertAnalyzesTo(new ThaiAnalyzer(TEST_VERSION_CURRENT), "เดอะนิวยอร์กไทมส์",
new String[] { "เด", "อะนิว", "ยอ", "ร์ก", "ไทมส์"},
new int[] { 0, 2, 7, 9, 12 },
new int[] { 2, 7, 9, 12, 17});
@@ -50,7 +49,7 @@
* Instead, allow the definition of alphanum to include relevant categories like nonspacing marks!
*/
public void testBuggyTokenType() throws Exception {
- assertAnalyzesTo(new ThaiAnalyzer(Version.LUCENE_CURRENT), "เดอะนิวยอร์กไทมส์ ๑๒๓",
+ assertAnalyzesTo(new ThaiAnalyzer(TEST_VERSION_CURRENT), "เดอะนิวยอร์กไทมส์ ๑๒๓",
new String[] { "เด", "อะนิว", "ยอ", "ร์ก", "ไทมส์", "๑๒๓" },
new String[] { "", "", "", "", "", "" });
}
@@ -64,7 +63,7 @@
*/
public void testAnalyzer() throws Exception {
- ThaiAnalyzer analyzer = new ThaiAnalyzer(Version.LUCENE_CURRENT);
+ ThaiAnalyzer analyzer = new ThaiAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(analyzer, "", new String[] {});
@@ -89,7 +88,7 @@
* Test that position increments are adjusted correctly for stopwords.
*/
public void testPositionIncrements() throws Exception {
- ThaiAnalyzer analyzer = new ThaiAnalyzer(Version.LUCENE_CURRENT);
+ ThaiAnalyzer analyzer = new ThaiAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesTo(analyzer, "ประโยคว่า the ประโยคว่า",
new String[] { "ประโยค", "ว่า", "ประโยค", "ว่า" },
@@ -106,7 +105,7 @@
}
public void testReusableTokenStream() throws Exception {
- ThaiAnalyzer analyzer = new ThaiAnalyzer(Version.LUCENE_CURRENT);
+ ThaiAnalyzer analyzer = new ThaiAnalyzer(TEST_VERSION_CURRENT);
assertAnalyzesToReuse(analyzer, "", new String[] {});
assertAnalyzesToReuse(
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/tr/TestTurkishAnalyzer.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/tr/TestTurkishAnalyzer.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/tr/TestTurkishAnalyzer.java (working copy)
@@ -23,18 +23,17 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestTurkishAnalyzer extends BaseTokenStreamTestCase {
/** This test fails with NPE when the
* stopwords file is missing in classpath */
public void testResourcesAvailable() {
- new TurkishAnalyzer(Version.LUCENE_CURRENT);
+ new TurkishAnalyzer(TEST_VERSION_CURRENT);
}
/** test stopwords and stemming */
public void testBasics() throws IOException {
- Analyzer a = new TurkishAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new TurkishAnalyzer(TEST_VERSION_CURRENT);
// stemming
checkOneTermReuse(a, "ağacı", "ağaç");
checkOneTermReuse(a, "ağaç", "ağaç");
@@ -46,7 +45,7 @@
public void testExclude() throws IOException {
Set<String> exclusionSet = new HashSet<String>();
exclusionSet.add("ağacı");
- Analyzer a = new TurkishAnalyzer(Version.LUCENE_CURRENT,
+ Analyzer a = new TurkishAnalyzer(TEST_VERSION_CURRENT,
TurkishAnalyzer.getDefaultStopSet(), exclusionSet);
checkOneTermReuse(a, "ağacı", "ağacı");
checkOneTermReuse(a, "ağaç", "ağaç");
Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/tr/TestTurkishLowerCaseFilter.java
===================================================================
--- contrib/analyzers/common/src/test/org/apache/lucene/analysis/tr/TestTurkishLowerCaseFilter.java (revision 916146)
+++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/tr/TestTurkishLowerCaseFilter.java (working copy)
@@ -22,7 +22,6 @@
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceTokenizer;
-import org.apache.lucene.util.Version;
/**
* Test the Turkish lowercase filter.
@@ -33,7 +32,7 @@
* Test composed forms
*/
public void testTurkishLowerCaseFilter() throws Exception {
- TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
"\u0130STANBUL \u0130ZM\u0130R ISPARTA"));
TurkishLowerCaseFilter filter = new TurkishLowerCaseFilter(stream);
assertTokenStreamContents(filter, new String[] {"istanbul", "izmir",
@@ -44,7 +43,7 @@
* Test decomposed forms
*/
public void testDecomposed() throws Exception {
- TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
"\u0049\u0307STANBUL \u0049\u0307ZM\u0049\u0307R ISPARTA"));
TurkishLowerCaseFilter filter = new TurkishLowerCaseFilter(stream);
assertTokenStreamContents(filter, new String[] {"istanbul", "izmir",
@@ -57,7 +56,7 @@
* to U+0130 + U+0316, and is lowercased the same way.
*/
public void testDecomposed2() throws Exception {
- TokenStream stream = new WhitespaceTokenizer(Version.LUCENE_CURRENT, new StringReader(
+ TokenStream stream = new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader(
"\u0049\u0316\u0307STANBUL \u0049\u0307ZM\u0049\u0307R I\u0316SPARTA"));
TurkishLowerCaseFilter filter = new TurkishLowerCaseFilter(stream);
assertTokenStreamContents(filter, new String[] {"i\u0316stanbul", "izmir",
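For context on what TurkishLowerCaseFilter must get right in these composed and decomposed cases: Turkish distinguishes dotted and dotless I, so locale-insensitive lowercasing is wrong for it. A minimal JDK-only illustration (not part of the patch; the demo class name is invented):

import java.util.Locale;

public class TurkishCaseDemo {
  public static void main(String[] args) {
    Locale tr = new Locale("tr");
    // Dotted capital I (U+0130) lowercases to a plain 'i' in Turkish.
    System.out.println("\u0130STANBUL".toLowerCase(tr)); // istanbul
    // Plain ASCII 'I' lowercases to dotless 'ı' (U+0131) in Turkish.
    System.out.println("ISPARTA".toLowerCase(tr));       // ısparta
    // An English-locale lowercase gives "isparta", which is incorrect Turkish.
    System.out.println("ISPARTA".toLowerCase(Locale.ENGLISH));
  }
}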
Index: contrib/ant/src/test/org/apache/lucene/ant/IndexTaskTest.java
===================================================================
--- contrib/ant/src/test/org/apache/lucene/ant/IndexTaskTest.java (revision 916146)
+++ contrib/ant/src/test/org/apache/lucene/ant/IndexTaskTest.java (working copy)
@@ -20,8 +20,6 @@
import java.io.File;
import java.io.IOException;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.StopAnalyzer;
import org.apache.lucene.queryParser.QueryParser;
@@ -31,13 +29,13 @@
import org.apache.lucene.store.FSDirectory;
import org.apache.tools.ant.Project;
import org.apache.tools.ant.types.FileSet;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
/**
* Test cases for index task
*
*/
-public class IndexTaskTest extends TestCase {
+public class IndexTaskTest extends LuceneTestCase {
private final static String docHandler =
"org.apache.lucene.ant.FileExtensionDocumentHandler";
@@ -55,7 +53,8 @@
*@exception IOException Description of Exception
*/
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
+ super.setUp();
Project project = new Project();
IndexTask task = new IndexTask();
@@ -71,12 +70,12 @@
dir = FSDirectory.open(indexDir);
searcher = new IndexSearcher(dir, true);
- analyzer = new StopAnalyzer(Version.LUCENE_CURRENT);
+ analyzer = new StopAnalyzer(TEST_VERSION_CURRENT);
}
public void testSearch() throws Exception {
- Query query = new QueryParser(Version.LUCENE_CURRENT, "contents",analyzer).parse("test");
+ Query query = new QueryParser(TEST_VERSION_CURRENT, "contents",analyzer).parse("test");
int numHits = searcher.search(query, null, 1000).totalHits;
@@ -88,9 +87,10 @@
* TODO: remove indexDir?
*/
@Override
- public void tearDown() throws IOException {
+ protected void tearDown() throws Exception {
searcher.close();
dir.close();
+ super.tearDown();
}
}
Index: contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LongToEnglishContentSource.java
===================================================================
--- contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LongToEnglishContentSource.java (revision 916146)
+++ contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LongToEnglishContentSource.java (working copy)
@@ -13,10 +13,12 @@
public class LongToEnglishContentSource extends ContentSource{
private long counter = Long.MIN_VALUE + 10;
+ @Override
public void close() throws IOException {
}
//TODO: reduce/clean up synchronization
+ @Override
public synchronized DocData getNextDocData(DocData docData) throws NoMoreDataException, IOException {
docData.clear();
docData.setBody(English.longToEnglish(counter));
Index: contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/QueryMaker.java
===================================================================
--- contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/QueryMaker.java (revision 916146)
+++ contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/QueryMaker.java (working copy)
@@ -30,7 +30,7 @@
/**
* Create the next query, of the given size.
* @param size the size of the query - number of terms, etc.
- * @exception if cannot make the query, or if size>0 was specified but this feature is not supported.
+ * @exception Exception if cannot make the query, or if size>0 was specified but this feature is not supported.
*/
public Query makeQuery (int size) throws Exception;
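To make the corrected @exception contract concrete, here is a hypothetical sketch of a conforming implementation. SimpleQueryMaker, the field name, and the term are invented for illustration, and the interface's remaining methods are left abstract to keep the sketch short:

import org.apache.lucene.benchmark.byTask.feeds.QueryMaker;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

// Hypothetical sketch: only makeQuery(int) is shown; the other QueryMaker
// methods are inherited as abstract.
public abstract class SimpleQueryMaker implements QueryMaker {
  public Query makeQuery(int size) throws Exception {
    if (size > 0) {
      // Per the javadoc above: fail if a sized query is requested but unsupported.
      throw new Exception("sized queries are not supported by this maker");
    }
    return new TermQuery(new Term("body", "lucene"));
  }
}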
Index: contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/stats/Points.java
===================================================================
--- contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/stats/Points.java (revision 916146)
+++ contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/stats/Points.java (working copy)
@@ -29,8 +29,6 @@
*/
public class Points {
- private Config config;
-
// stat points ordered by their start time.
// for now we collect points as TaskStats objects.
// later might optimize to collect only native data.
@@ -44,7 +42,6 @@
* Create a Points statistics object.
*/
public Points (Config config) {
- this.config = config;
}
/**
Index: contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/stats/TaskStats.java
===================================================================
--- contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/stats/TaskStats.java (revision 916146)
+++ contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/stats/TaskStats.java (working copy)
@@ -211,7 +211,7 @@
public Object clone() throws CloneNotSupportedException {
TaskStats c = (TaskStats) super.clone();
if (c.countsByTime != null) {
- c.countsByTime = (int[]) c.countsByTime.clone();
+ c.countsByTime = c.countsByTime.clone();
}
return c;
}
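The cast removal in clone() above relies on covariant array clones: since Java 5, invoking clone() on an int[] is typed as int[], so no cast is required. A tiny standalone check (not part of the patch; the demo class name is invented):

public class ArrayCloneDemo {
  public static void main(String[] args) {
    int[] counts = {1, 2, 3};
    int[] copy = counts.clone(); // no (int[]) cast needed: array clone() returns int[]
    copy[0] = 99;
    // The clone is independent of the original (a shallow copy of primitives).
    System.out.println(counts[0] + " " + copy[0]); // prints: 1 99
  }
}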
Index: contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NearRealtimeReaderTask.java
===================================================================
--- contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NearRealtimeReaderTask.java (revision 916146)
+++ contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NearRealtimeReaderTask.java (working copy)
@@ -70,7 +70,7 @@
// Parent sequence sets stopNow
reopenCount = 0;
while(!stopNow) {
- long waitForMsec = (long) (pauseMSec - (System.currentTimeMillis() - t));
+ long waitForMsec = (pauseMSec - (System.currentTimeMillis() - t));
if (waitForMsec > 0) {
Thread.sleep(waitForMsec);
//System.out.println("NRT wait: " + waitForMsec + " msec");
Index: contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewShingleAnalyzerTask.java
===================================================================
--- contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewShingleAnalyzerTask.java (revision 916146)
+++ contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/NewShingleAnalyzerTask.java (working copy)
@@ -18,12 +18,10 @@
*/
import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
import java.util.StringTokenizer;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.shingle.ShingleAnalyzerWrapper;
-import org.apache.lucene.analysis.shingle.ShingleFilter;
import org.apache.lucene.benchmark.byTask.PerfRunData;
import org.apache.lucene.util.Version;
@@ -39,8 +37,6 @@
public class NewShingleAnalyzerTask extends PerfTask {
private String analyzerClassName = "standard.StandardAnalyzer";
- private static final String shingleAnalyzerClassName
- = "org.apache.lucene.analysis.shingle.ShingleAnalyzerWrapper";
private int maxShingleSize = 2;
private boolean outputUnigrams = true;
Index: contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTokensTask.java
===================================================================
--- contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTokensTask.java (revision 916146)
+++ contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTokensTask.java (working copy)
@@ -140,6 +140,6 @@
}
}
@Override
- public void close() {};
+ public void close() {}
}
}
Index: contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/utils/Config.java
===================================================================
--- contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/utils/Config.java (revision 916146)
+++ contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/utils/Config.java (working copy)
@@ -100,7 +100,6 @@
/**
* Create config without algorithm - useful for a programmatic perf test.
* @param props - configuration properties.
- * @throws IOException
*/
public Config (Properties props) {
this.props = props;
@@ -109,6 +108,7 @@
}
}
+ @SuppressWarnings("unchecked")
private void printProps() {
System.out.println("------------> config properties:");
List propKeys = new ArrayList(props.keySet());
Index: contrib/benchmark/src/java/org/apache/lucene/benchmark/stats/TimeData.java
===================================================================
--- contrib/benchmark/src/java/org/apache/lucene/benchmark/stats/TimeData.java (revision 916146)
+++ contrib/benchmark/src/java/org/apache/lucene/benchmark/stats/TimeData.java (working copy)
@@ -34,7 +34,7 @@
/** Total memory at the end of measurement interval. */
public long totalMem = 0L;
- public TimeData() {};
+ public TimeData() {}
public TimeData(String name) {
this.name = name;
@@ -78,7 +78,7 @@
/** Get rate of processing, defined as number of processed records per second. */
public double getRate() {
- double rps = (double) count * 1000.0 / (double) (elapsed>0 ? elapsed : 1); // assume at least 1ms for any countable op
+ double rps = count * 1000.0 / (elapsed > 0 ? elapsed : 1); // assume at least 1ms for any countable op
return rps;
}
Index: contrib/benchmark/src/java/org/apache/lucene/benchmark/utils/ExtractReuters.java
===================================================================
--- contrib/benchmark/src/java/org/apache/lucene/benchmark/utils/ExtractReuters.java (revision 916146)
+++ contrib/benchmark/src/java/org/apache/lucene/benchmark/utils/ExtractReuters.java (working copy)
@@ -94,14 +94,12 @@
StringBuffer outBuffer = new StringBuffer(1024);
String line = null;
- int index = -1;
int docNumber = 0;
while ((line = reader.readLine()) != null)
{
//when we see a closing reuters tag, flush the file
- if ((index = line.indexOf(" fragInfos = new ArrayList();
/**
@@ -38,7 +37,6 @@
* @param fragCharSize the length (number of chars) of a fragment
*/
public FieldFragList( int fragCharSize ){
- this.fragCharSize = fragCharSize;
}
/**
Index: contrib/fast-vector-highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java
===================================================================
--- contrib/fast-vector-highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java (revision 916146)
+++ contrib/fast-vector-highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java (working copy)
@@ -64,7 +64,7 @@
writer.close();
IndexReader reader = IndexReader.open( dir, true );
- FieldTermStack ftl = new FieldTermStack( reader, 0, "f", fieldQuery );
+ new FieldTermStack( reader, 0, "f", fieldQuery );
reader.close();
}
Index: contrib/fast-vector-highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java
===================================================================
--- contrib/fast-vector-highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java (revision 916146)
+++ contrib/fast-vector-highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java (working copy)
@@ -21,11 +21,8 @@
import java.io.Reader;
import java.util.Collection;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.KeywordAnalyzer;
-import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
@@ -47,9 +44,9 @@
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
-public abstract class AbstractTestCase extends TestCase {
+public abstract class AbstractTestCase extends LuceneTestCase {
protected final String F = "f";
protected final String F1 = "f1";
@@ -87,11 +84,12 @@
@Override
protected void setUp() throws Exception {
- analyzerW = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ super.setUp();
+ analyzerW = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
analyzerB = new BigramAnalyzer();
analyzerK = new KeywordAnalyzer();
- paW = new QueryParser(Version.LUCENE_CURRENT, F, analyzerW );
- paB = new QueryParser(Version.LUCENE_CURRENT, F, analyzerB );
+ paW = new QueryParser(TEST_VERSION_CURRENT, F, analyzerW );
+ paB = new QueryParser(TEST_VERSION_CURRENT, F, analyzerB );
dir = new RAMDirectory();
}
@@ -101,6 +99,7 @@
reader.close();
reader = null;
}
+ super.tearDown();
}
protected Query tq( String text ){
@@ -282,7 +281,7 @@
}
charBufferIndex = 0;
}
- int c = (int)charBuffer[charBufferIndex++];
+ int c = charBuffer[charBufferIndex++];
nextStartOffset++;
return c;
}
@@ -291,11 +290,13 @@
return delimiters.indexOf( c ) >= 0;
}
+ @Override
public void reset( Reader input ) throws IOException {
super.reset( input );
reset();
}
+ @Override
public void reset() throws IOException {
startTerm = 0;
nextStartOffset = 0;
Index: contrib/fast-vector-highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java
===================================================================
--- contrib/fast-vector-highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java (revision 916146)
+++ contrib/fast-vector-highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java (working copy)
@@ -17,7 +17,6 @@
* limitations under the License.
*/
-import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Index;
Index: contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java
===================================================================
--- contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java (revision 916146)
+++ contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java (working copy)
@@ -48,12 +48,10 @@
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.OpenBitSet;
-import org.apache.lucene.util.Version;
-import junit.framework.TestCase;
-
-public class HighlighterPhraseTest extends TestCase {
+public class HighlighterPhraseTest extends LuceneTestCase {
private static final String FIELD = "text";
public void testConcurrentPhrase() throws CorruptIndexException,
@@ -61,7 +59,7 @@
final String TEXT = "the fox jumped";
final Directory directory = new RAMDirectory();
final IndexWriter indexWriter = new IndexWriter(directory,
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT), MaxFieldLength.UNLIMITED);
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT), MaxFieldLength.UNLIMITED);
try {
final Document document = new Document();
document.add(new Field(FIELD, new TokenStreamConcurrent(),
@@ -104,7 +102,7 @@
final String TEXT = "the fox jumped";
final Directory directory = new RAMDirectory();
final IndexWriter indexWriter = new IndexWriter(directory,
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT), MaxFieldLength.UNLIMITED);
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT), MaxFieldLength.UNLIMITED);
try {
final Document document = new Document();
document.add(new Field(FIELD, new TokenStreamConcurrent(),
@@ -125,19 +123,23 @@
indexSearcher.search(phraseQuery, new Collector() {
private int baseDoc;
+ @Override
public boolean acceptsDocsOutOfOrder() {
return true;
}
+ @Override
public void collect(int i) throws IOException {
bitset.set(this.baseDoc + i);
}
+ @Override
public void setNextReader(IndexReader indexreader, int i)
throws IOException {
this.baseDoc = i;
}
+ @Override
public void setScorer(org.apache.lucene.search.Scorer scorer)
throws IOException {
// Do Nothing
@@ -169,7 +171,7 @@
final String TEXT = "the fox did not jump";
final Directory directory = new RAMDirectory();
final IndexWriter indexWriter = new IndexWriter(directory,
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT), MaxFieldLength.UNLIMITED);
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT), MaxFieldLength.UNLIMITED);
try {
final Document document = new Document();
document.add(new Field(FIELD, new TokenStreamSparse(),
@@ -211,7 +213,7 @@
final String TEXT = "the fox did not jump";
final Directory directory = new RAMDirectory();
final IndexWriter indexWriter = new IndexWriter(directory,
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT), MaxFieldLength.UNLIMITED);
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT), MaxFieldLength.UNLIMITED);
try {
final Document document = new Document();
document.add(new Field(FIELD, TEXT, Store.YES, Index.ANALYZED,
@@ -251,7 +253,7 @@
final String TEXT = "the fox did not jump";
final Directory directory = new RAMDirectory();
final IndexWriter indexWriter = new IndexWriter(directory,
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT), MaxFieldLength.UNLIMITED);
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT), MaxFieldLength.UNLIMITED);
try {
final Document document = new Document();
document.add(new Field(FIELD, new TokenStreamSparse(),
@@ -322,6 +324,7 @@
return true;
}
+ @Override
public void reset() {
this.i = -1;
this.tokens = new Token[] {
@@ -367,6 +370,7 @@
return true;
}
+ @Override
public void reset() {
this.i = -1;
this.tokens = new Token[] {
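
The @Override annotations added throughout this test are compile-time guards: if a supertype method is renamed or its signature drifts, the compiler reports the stale override instead of silently treating it as a new method. For reference, the anonymous Collector pattern used above looks like this as a standalone class (a sketch with an illustrative name, not code from the patch):

    import java.io.IOException;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.search.Collector;
    import org.apache.lucene.search.Scorer;
    import org.apache.lucene.util.OpenBitSet;

    public class BitsetCollector extends Collector {
      private final OpenBitSet bitset = new OpenBitSet();
      private int docBase;

      @Override
      public void setScorer(Scorer scorer) throws IOException {
        // scores are not needed just to collect matching doc ids
      }

      @Override
      public void collect(int doc) throws IOException {
        bitset.set(docBase + doc); // doc is relative to the current segment
      }

      @Override
      public void setNextReader(IndexReader reader, int docBase) throws IOException {
        this.docBase = docBase;    // remember the segment's doc id offset
      }

      @Override
      public boolean acceptsDocsOutOfOrder() {
        return true;               // setting bits is order independent
      }
    }
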
Index: contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
===================================================================
--- contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (revision 916146)
+++ contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (working copy)
@@ -79,6 +79,7 @@
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.Version;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;
@@ -89,7 +90,7 @@
*/
public class HighlighterTest extends BaseTokenStreamTestCase implements Formatter {
// TODO: change to CURRENT, does not work because posIncr:
- static final Version TEST_VERSION = Version.LUCENE_CURRENT;
+ static final Version TEST_VERSION = TEST_VERSION_CURRENT;
private IndexReader reader;
static final String FIELD_NAME = "contents";
@@ -118,7 +119,7 @@
}
public void testQueryScorerHits() throws Exception {
- Analyzer analyzer = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new SimpleAnalyzer(TEST_VERSION_CURRENT);
QueryParser qp = new QueryParser(TEST_VERSION, FIELD_NAME, analyzer);
query = qp.parse("\"very long\"");
searcher = new IndexSearcher(ramDir, true);
@@ -226,7 +227,7 @@
String f2c = f2 + ":";
String q = "(" + f1c + ph1 + " OR " + f2c + ph1 + ") AND (" + f1c + ph2
+ " OR " + f2c + ph2 + ")";
- Analyzer analyzer = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
QueryParser qp = new QueryParser(TEST_VERSION, f1, analyzer);
Query query = qp.parse(q);
@@ -374,8 +375,8 @@
highlighter.setTextFragmenter(new SimpleFragmenter(40));
- String result = highlighter.getBestFragments(tokenStream, text, maxNumFragmentsRequired,
- "...");
+// String result =
+ highlighter.getBestFragments(tokenStream, text, maxNumFragmentsRequired,"...");
//System.out.println("\t" + result);
}
@@ -1389,9 +1390,9 @@
// highlighting respects fieldnames used in query
Scorer fieldSpecificScorer = null;
- if (mode == this.QUERY) {
+ if (mode == TestHighlightRunner.QUERY) {
fieldSpecificScorer = new QueryScorer(query, FIELD_NAME);
- } else if (mode == this.QUERY_TERM) {
+ } else if (mode == TestHighlightRunner.QUERY_TERM) {
fieldSpecificScorer = new QueryTermScorer(query, "contents");
}
Highlighter fieldSpecificHighlighter = new Highlighter(new SimpleHTMLFormatter(),
@@ -1402,9 +1403,9 @@
// highlighting does not respect fieldnames used in query
Scorer fieldInSpecificScorer = null;
- if (mode == this.QUERY) {
+ if (mode == TestHighlightRunner.QUERY) {
fieldInSpecificScorer = new QueryScorer(query, null);
- } else if (mode == this.QUERY_TERM) {
+ } else if (mode == TestHighlightRunner.QUERY_TERM) {
fieldInSpecificScorer = new QueryTermScorer(query);
}
@@ -1529,64 +1530,64 @@
Highlighter highlighter;
String result;
- query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("foo");
+ query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("foo");
highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2(), s, 3, "...");
assertEquals("Hi-Speed10 foo ", result);
- query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("10");
+ query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("10");
highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2(), s, 3, "...");
assertEquals("Hi-Speed10 foo", result);
- query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("hi");
+ query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("hi");
highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2(), s, 3, "...");
assertEquals("Hi -Speed10 foo", result);
- query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("speed");
+ query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("speed");
highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2(), s, 3, "...");
assertEquals("Hi-Speed 10 foo", result);
- query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("hispeed");
+ query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("hispeed");
highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2(), s, 3, "...");
assertEquals("Hi-Speed 10 foo", result);
- query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("hi speed");
+ query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("hi speed");
highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2(), s, 3, "...");
assertEquals("Hi-Speed 10 foo", result);
// ///////////////// same tests, just put the bigger overlapping token
// first
- query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("foo");
+ query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("foo");
highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
assertEquals("Hi-Speed10 foo ", result);
- query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("10");
+ query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("10");
highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
assertEquals("Hi-Speed10 foo", result);
- query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("hi");
+ query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("hi");
highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
assertEquals("Hi -Speed10 foo", result);
- query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("speed");
+ query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("speed");
highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
assertEquals("Hi-Speed 10 foo", result);
- query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("hispeed");
+ query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("hispeed");
highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
assertEquals("Hi-Speed 10 foo", result);
- query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("hi speed");
+ query = new QueryParser(TEST_VERSION, "text", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("hi speed");
highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
assertEquals("Hi-Speed 10 foo", result);
@@ -1597,7 +1598,7 @@
}
private Directory dir = new RAMDirectory();
- private Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ private Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
public void testWeightedTermsWithDeletes() throws IOException, ParseException, InvalidTokenOffsetsException {
makeIndex();
@@ -1762,11 +1763,6 @@
}
- @Override
- protected void tearDown() throws Exception {
- super.tearDown();
- }
-
private static Token createToken(String term, int start, int offset)
{
Token token = new Token(start, offset);
@@ -1801,7 +1797,7 @@
*/
@Override
public TokenStream tokenStream(String arg0, Reader arg1) {
- LowerCaseTokenizer stream = new LowerCaseTokenizer(Version.LUCENE_CURRENT, arg1);
+ LowerCaseTokenizer stream = new LowerCaseTokenizer(LuceneTestCase.TEST_VERSION_CURRENT, arg1);
stream.addAttribute(TermAttribute.class);
stream.addAttribute(PositionIncrementAttribute.class);
stream.addAttribute(OffsetAttribute.class);
@@ -1816,7 +1812,6 @@
class SynonymTokenizer extends TokenStream {
private TokenStream realStream;
private Token currentRealToken = null;
- private org.apache.lucene.analysis.Token cRealToken = null;
private Map synonyms;
StringTokenizer st = null;
private TermAttribute realTermAtt;
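
The `this.QUERY` to `TestHighlightRunner.QUERY` changes above fix the "static member accessed through an instance" warning. Accessing a static constant through `this` compiles but suggests instance state is involved; qualifying by class name states the intent. A tiny illustration (hypothetical class):

    public class ModeExample {
      static final int QUERY = 1;
      static final int QUERY_TERM = 2;

      String describe(int mode) {
        // this.QUERY would compile, but the class-qualified form makes the
        // constant's static nature explicit and silences the warning.
        if (mode == ModeExample.QUERY) return "query scorer";
        if (mode == ModeExample.QUERY_TERM) return "query term scorer";
        return "unknown";
      }
    }
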
Index: contrib/icu/src/test/org/apache/lucene/collation/TestICUCollationKeyFilter.java
===================================================================
--- contrib/icu/src/test/org/apache/lucene/collation/TestICUCollationKeyFilter.java (revision 916146)
+++ contrib/icu/src/test/org/apache/lucene/collation/TestICUCollationKeyFilter.java (working copy)
@@ -43,16 +43,16 @@
public class TestAnalyzer extends Analyzer {
- private Collator collator;
+ private Collator _collator;
TestAnalyzer(Collator collator) {
- this.collator = collator;
+ _collator = collator;
}
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
TokenStream result = new KeywordTokenizer(reader);
- result = new ICUCollationKeyFilter(result, collator);
+ result = new ICUCollationKeyFilter(result, _collator);
return result;
}
}
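
The rename to `_collator` resolves the "parameter hides a field" warning: the constructor argument `collator` previously shadowed the field of the same name. A standalone sketch of the same fix, assuming java.text.Collator for brevity (the test itself uses the ICU Collator):

    import java.text.Collator;

    public class ShadowingExample {
      private final Collator _collator; // renamed so the parameter no longer hides it

      ShadowingExample(Collator collator) {
        _collator = collator; // unambiguous without a this. qualifier
      }
    }

An equally common fix is to keep the field name and write `this.collator = collator;`; the underscore rename is simply the variant this patch chose.
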
Index: contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedDocument.java
===================================================================
--- contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedDocument.java (revision 916146)
+++ contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedDocument.java (working copy)
@@ -30,7 +30,7 @@
public class InstantiatedDocument
implements Serializable {
- private static long serialVersionUID = 1l;
+ private static final long serialVersionUID = 1l;
private Document document;
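
Adding `final` here is more than style. Java serialization only honors a serialVersionUID that is both static and final (ObjectStreamClass checks the modifiers); a non-final declaration is ignored and a default UID is computed from the class shape, which defeats explicit versioning. Sketch (hypothetical class):

    import java.io.Serializable;

    public class VersionedRecord implements Serializable {
      // Must be static AND final to be honored; without final, the runtime
      // silently falls back to a computed default serialVersionUID.
      private static final long serialVersionUID = 1L;

      private String payload;
    }
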
Index: contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java
===================================================================
--- contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java (revision 916146)
+++ contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java (working copy)
@@ -16,13 +16,12 @@
* limitations under the License.
*/
+import java.io.Closeable;
import java.io.IOException;
import java.io.Serializable;
-import java.io.Closeable;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
-import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
Index: contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java
===================================================================
--- contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java (revision 916146)
+++ contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java (working copy)
@@ -167,11 +167,9 @@
}
@Override
- protected void doCommit(Map commitUserData) throws IOException {
+ protected void doCommit(Map<String,String> commitUserData) throws IOException {
// todo: read/write lock
- boolean updated = false;
-
// 1. update norms
if (uncommittedNormsByFieldNameAndDocumentNumber != null) {
for (Map.Entry<String, List<NormUpdate>> e : uncommittedNormsByFieldNameAndDocumentNumber.entrySet()) {
@@ -181,8 +179,6 @@
}
}
uncommittedNormsByFieldNameAndDocumentNumber = null;
-
- updated = true;
}
// 2. remove deleted documents
@@ -197,9 +193,6 @@
}
}
uncommittedDeletedDocuments = null;
-
- updated = true;
-
}
// todo unlock read/writelock
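
The deleted `updated` flag was a dead store: assigned on both commit paths but never read afterwards, which Eclipse flags as "the value of the local variable is not used". Removing it changes nothing observable. For illustration (hypothetical method):

    import java.util.List;

    public class DeadStoreExample {
      int applyAll(List<Runnable> pending) {
        boolean updated = false; // dead store: written below, never read
        for (Runnable r : pending) {
          r.run();
          updated = true;        // no branch or return ever consults the flag,
        }                        // so both assignments can simply be deleted
        return pending.size();
      }
    }
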
Index: contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java
===================================================================
--- contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java (revision 916146)
+++ contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java (working copy)
@@ -37,7 +37,6 @@
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
@@ -470,7 +469,7 @@
// normalize settings per field name in document
Map<String, FieldSetting> fieldSettingsByFieldName = new HashMap<String, FieldSetting>();
- for (Fieldable field : (List<Fieldable>) document.getDocument().getFields()) {
+ for (Fieldable field : document.getDocument().getFields()) {
FieldSetting fieldSetting = fieldSettingsByFieldName.get(field.name());
if (fieldSetting == null) {
fieldSetting = new FieldSetting();
@@ -514,7 +513,7 @@
Map<Fieldable, LinkedList<Token>> tokensByField = new LinkedHashMap<Fieldable, LinkedList<Token>>(20);
// tokenize indexed fields.
- for (Iterator<Fieldable> it = (Iterator<Fieldable>) document.getDocument().getFields().iterator(); it.hasNext();) {
+ for (Iterator<Fieldable> it = document.getDocument().getFields().iterator(); it.hasNext();) {
Fieldable field = it.next();
@@ -526,7 +525,6 @@
tokensByField.put(field, tokens);
if (field.isTokenized()) {
- int termCounter = 0;
final TokenStream tokenStream;
// todo readerValue(), binaryValue()
if (field.tokenStreamValue() != null) {
Index: contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTerm.java
===================================================================
--- contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTerm.java (revision 916146)
+++ contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTerm.java (working copy)
@@ -16,13 +16,11 @@
* limitations under the License.
*/
-import org.apache.lucene.index.Term;
-
import java.io.Serializable;
import java.util.Comparator;
-import java.util.Collections;
-import java.util.Arrays;
+import org.apache.lucene.index.Term;
+
/**
* A term in the inverted index, coupled to the documents it occurs in.
*
Index: contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTermEnum.java
===================================================================
--- contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTermEnum.java (revision 916146)
+++ contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTermEnum.java (working copy)
@@ -19,14 +19,10 @@
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermEnum;
-import java.io.IOException;
-import java.util.Arrays;
-
/**
* A {@link org.apache.lucene.index.TermEnum} navigating an {@link org.apache.lucene.store.instantiated.InstantiatedIndexReader}.
*/
-public class InstantiatedTermEnum
- extends TermEnum {
+public class InstantiatedTermEnum extends TermEnum {
private final InstantiatedIndexReader reader;
Index: contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java
===================================================================
--- contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java (revision 916146)
+++ contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java (working copy)
@@ -22,8 +22,6 @@
import java.util.Iterator;
import java.util.List;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
@@ -42,11 +40,12 @@
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.AttributeImpl;
+import org.apache.lucene.util.LuceneTestCase;
/**
* Asserts equality of content and behaviour of two index readers.
*/
-public class TestIndicesEquals extends TestCase {
+public class TestIndicesEquals extends LuceneTestCase {
// public void test2() throws Exception {
// FSDirectory fsdir = FSDirectory.open(new File("/tmp/fatcorpus"));
@@ -61,7 +60,7 @@
RAMDirectory dir = new RAMDirectory();
// create dir data
- IndexWriter indexWriter = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter indexWriter = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
for (int i = 0; i < 20; i++) {
Document document = new Document();
assembleDocument(document, i);
@@ -85,7 +84,7 @@
InstantiatedIndex ii = new InstantiatedIndex();
// create dir data
- IndexWriter indexWriter = new IndexWriter(dir, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter indexWriter = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
for (int i = 0; i < 500; i++) {
Document document = new Document();
assembleDocument(document, i);
@@ -94,7 +93,7 @@
indexWriter.close();
// test ii writer
- InstantiatedIndexWriter instantiatedIndexWriter = ii.indexWriterFactory(new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true);
+ InstantiatedIndexWriter instantiatedIndexWriter = ii.indexWriterFactory(new StandardAnalyzer(TEST_VERSION_CURRENT), true);
for (int i = 0; i < 500; i++) {
Document document = new Document();
assembleDocument(document, i);
Index: contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestSerialization.java
===================================================================
--- contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestSerialization.java (revision 916146)
+++ contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestSerialization.java (working copy)
@@ -17,10 +17,9 @@
*/
-import junit.framework.TestCase;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
@@ -30,13 +29,13 @@
import java.io.ByteArrayOutputStream;
import java.io.ObjectOutputStream;
-public class TestSerialization extends TestCase {
+public class TestSerialization extends LuceneTestCase {
public void test() throws Exception {
Directory dir = new RAMDirectory();
- IndexWriter iw = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter iw = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
Document doc = new Document();
doc.add(new Field("foo", "bar rab abr bra rba", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("moo", "bar rab abr bra rba", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
Index: contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestUnoptimizedReaderOnConstructor.java
===================================================================
--- contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestUnoptimizedReaderOnConstructor.java (revision 916146)
+++ contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestUnoptimizedReaderOnConstructor.java (working copy)
@@ -15,16 +15,12 @@
*
*/
-import junit.framework.TestCase;
-
import java.io.IOException;
-import java.util.Map;
-
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -32,21 +28,21 @@
/**
* @since 2009-mar-30 13:15:49
*/
-public class TestUnoptimizedReaderOnConstructor extends TestCase {
+public class TestUnoptimizedReaderOnConstructor extends LuceneTestCase {
public void test() throws Exception {
Directory dir = new RAMDirectory();
- IndexWriter iw = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter iw = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
addDocument(iw, "Hello, world!");
addDocument(iw, "All work and no play makes jack a dull boy");
iw.close();
- iw = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
+ iw = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
addDocument(iw, "Hello, tellus!");
addDocument(iw, "All work and no play makes danny a dull boy");
iw.close();
- iw = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
+ iw = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.UNLIMITED);
addDocument(iw, "Hello, earth!");
addDocument(iw, "All work and no play makes wendy a dull girl");
iw.close();
@@ -54,9 +50,8 @@
IndexReader unoptimizedReader = IndexReader.open(dir, false);
unoptimizedReader.deleteDocument(2);
- InstantiatedIndex ii;
try {
- ii = new InstantiatedIndex(unoptimizedReader);
+ new InstantiatedIndex(unoptimizedReader);
} catch (Exception e) {
fail("No exceptions when loading an unoptimized reader!");
}
Index: contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
===================================================================
--- contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java (revision 916146)
+++ contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java (working copy)
@@ -793,41 +793,41 @@
return new TermEnum() {
- private int i = ix; // index into info.sortedTerms
- private int j = jx; // index into sortedFields
+ private int srtTermsIdx = ix; // index into info.sortedTerms
+ private int srtFldsIdx = jx; // index into sortedFields
@Override
public boolean next() {
if (DEBUG) System.err.println("TermEnum.next");
- if (j >= sortedFields.length) return false;
- Info info = getInfo(j);
- if (++i < info.sortedTerms.length) return true;
+ if (srtFldsIdx >= sortedFields.length) return false;
+ Info info = getInfo(srtFldsIdx);
+ if (++srtTermsIdx < info.sortedTerms.length) return true;
// move to successor
- j++;
- i = 0;
- if (j >= sortedFields.length) return false;
- getInfo(j).sortTerms();
+ srtFldsIdx++;
+ srtTermsIdx = 0;
+ if (srtFldsIdx >= sortedFields.length) return false;
+ getInfo(srtFldsIdx).sortTerms();
return true;
}
@Override
public Term term() {
- if (DEBUG) System.err.println("TermEnum.term: " + i);
- if (j >= sortedFields.length) return null;
- Info info = getInfo(j);
- if (i >= info.sortedTerms.length) return null;
+ if (DEBUG) System.err.println("TermEnum.term: " + srtTermsIdx);
+ if (srtFldsIdx >= sortedFields.length) return null;
+ Info info = getInfo(srtFldsIdx);
+ if (srtTermsIdx >= info.sortedTerms.length) return null;
// if (DEBUG) System.err.println("TermEnum.term: " + i + ", " + info.sortedTerms[i].getKey());
- return createTerm(info, j, info.sortedTerms[i].getKey());
+ return createTerm(info, srtFldsIdx, info.sortedTerms[srtTermsIdx].getKey());
}
@Override
public int docFreq() {
if (DEBUG) System.err.println("TermEnum.docFreq");
- if (j >= sortedFields.length) return 0;
- Info info = getInfo(j);
- if (i >= info.sortedTerms.length) return 0;
- return numPositions(info.getPositions(i));
+ if (srtFldsIdx >= sortedFields.length) return 0;
+ Info info = getInfo(srtFldsIdx);
+ if (srtTermsIdx >= info.sortedTerms.length) return 0;
+ return numPositions(info.getPositions(srtTermsIdx));
}
@Override
Index: contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
===================================================================
--- contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java (revision 916146)
+++ contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java (working copy)
@@ -31,7 +31,6 @@
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;
-import java.util.Set;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.Analyzer;
@@ -52,7 +51,6 @@
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.index.TermDocs;
-import org.apache.lucene.util.Version;
/**
Verifies that Lucene MemoryIndex and RAMDirectory have the same behaviour,
@@ -202,7 +200,6 @@
public class MemoryIndexTest extends BaseTokenStreamTestCase {
private Analyzer analyzer;
- private boolean fastMode = false;
private final boolean verbose = false;
@@ -271,16 +268,14 @@
}
}
- boolean toLowerCase = true;
// boolean toLowerCase = false;
// Set stopWords = null;
- Set<?> stopWords = StopAnalyzer.ENGLISH_STOP_WORDS_SET;
Analyzer[] analyzers = new Analyzer[] {
- new SimpleAnalyzer(Version.LUCENE_CURRENT),
- new StopAnalyzer(Version.LUCENE_CURRENT),
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
-// new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ new SimpleAnalyzer(TEST_VERSION_CURRENT),
+ new StopAnalyzer(TEST_VERSION_CURRENT),
+ new StandardAnalyzer(TEST_VERSION_CURRENT),
+// new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
// new PatternAnalyzer(PatternAnalyzer.NON_WORD_PATTERN, false, null),
// new PatternAnalyzer(PatternAnalyzer.NON_WORD_PATTERN, true, stopWords),
// new SnowballAnalyzer("English", StopAnalyzer.ENGLISH_STOP_WORDS),
@@ -465,7 +460,8 @@
}
}
- private int getMemorySize(Object index) {
+ // for debugging purposes
+ int getMemorySize(Object index) {
if (index instanceof Directory) {
try {
Directory dir = (Directory) index;
@@ -486,7 +482,7 @@
}
private Query parseQuery(String expression) throws ParseException {
- QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, FIELD_NAME, analyzer);
+ QueryParser parser = new QueryParser(TEST_VERSION_CURRENT, FIELD_NAME, analyzer);
// parser.setPhraseSlop(0);
return parser.parse(expression);
}
@@ -559,7 +555,7 @@
System.arraycopy(output, 0, buffer, 0, len);
return buffer;
} finally {
- if (input != null) input.close();
+ input.close();
}
}
Index: contrib/misc/src/java/org/apache/lucene/index/BalancedSegmentMergePolicy.java
===================================================================
--- contrib/misc/src/java/org/apache/lucene/index/BalancedSegmentMergePolicy.java (revision 916146)
+++ contrib/misc/src/java/org/apache/lucene/index/BalancedSegmentMergePolicy.java (working copy)
@@ -59,7 +59,7 @@
protected long size(SegmentInfo info) throws IOException {
long byteSize = info.sizeInBytes();
float delRatio = (info.docCount <= 0 ? 0.0f : ((float)info.getDelCount() / (float)info.docCount));
- return (info.docCount <= 0 ? byteSize : (long)((float)byteSize * (1.0f - delRatio)));
+ return (info.docCount <= 0 ? byteSize : (long)(byteSize * (1.0f - delRatio)));
}
public void setPartialExpunge(boolean doPartialExpunge) {
Index: contrib/misc/src/java/org/apache/lucene/index/MultiPassIndexSplitter.java
===================================================================
--- contrib/misc/src/java/org/apache/lucene/index/MultiPassIndexSplitter.java (revision 916146)
+++ contrib/misc/src/java/org/apache/lucene/index/MultiPassIndexSplitter.java (working copy)
@@ -223,7 +223,7 @@
@Override
public boolean next() throws IOException {
boolean res;
- while ((res = super.next())) {
+ while ((res = super.next()) == true) {
if (!dels.get(doc())) {
break;
}
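
The `== true` added above looks redundant, but it is a common way to signal that the assignment inside the loop condition is intentional, which keeps checkers such as Eclipse's "possible accidental boolean assignment" diagnostic quiet; behavior is unchanged. A self-contained illustration (hypothetical class):

    import java.util.Arrays;
    import java.util.Iterator;

    public class AssignInCondition {
      public static void main(String[] args) {
        Iterator<String> it = Arrays.asList("a", "b", "c").iterator();
        boolean res;
        // Identical in effect to `while (res = it.hasNext())`, but the
        // comparison makes the embedded assignment unmistakably deliberate.
        while ((res = it.hasNext()) == true) {
          System.out.println(it.next() + " (hasNext was " + res + ")");
        }
      }
    }
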
Index: contrib/misc/src/java/org/apache/lucene/misc/ChainedFilter.java
===================================================================
--- contrib/misc/src/java/org/apache/lucene/misc/ChainedFilter.java (revision 916146)
+++ contrib/misc/src/java/org/apache/lucene/misc/ChainedFilter.java (working copy)
@@ -25,7 +25,6 @@
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.OpenBitSet;
import org.apache.lucene.util.OpenBitSetDISI;
-import org.apache.lucene.util.SortedVIntList;
/**
*
Index: contrib/misc/src/java/org/apache/lucene/misc/SweetSpotSimilarity.java
===================================================================
--- contrib/misc/src/java/org/apache/lucene/misc/SweetSpotSimilarity.java (revision 916146)
+++ contrib/misc/src/java/org/apache/lucene/misc/SweetSpotSimilarity.java (working copy)
@@ -189,7 +189,7 @@
(
(
s *
- (float)(Math.abs(numTerms - l) + Math.abs(numTerms - h) - (h-l))
+ (Math.abs(numTerms - l) + Math.abs(numTerms - h) - (h-l))
)
+ 1.0f
)
@@ -248,7 +248,7 @@
final float max = tf_hyper_max;
final double base = tf_hyper_base;
final float xoffset = tf_hyper_xoffset;
- final double x = (double)(freq - xoffset);
+ final double x = (freq - xoffset);
final float result = min +
(float)(
Index: contrib/misc/src/java/org/apache/lucene/queryParser/analyzing/AnalyzingQueryParser.java
===================================================================
--- contrib/misc/src/java/org/apache/lucene/queryParser/analyzing/AnalyzingQueryParser.java (revision 916146)
+++ contrib/misc/src/java/org/apache/lucene/queryParser/analyzing/AnalyzingQueryParser.java (working copy)
@@ -141,7 +141,7 @@
if (tlist.size() == 0) {
return null;
} else if (tlist.size() == 1) {
- if (wlist != null && wlist.size() == 1) {
+ if (wlist.size() == 1) {
/* if wlist contains one wildcard, it must be at the end, because:
* 1) wildcards are not allowed in 1st position of a term by QueryParser
* 2) if wildcard was *not* in end, there would be *two* or more tokens */
Index: contrib/misc/src/java/org/apache/lucene/queryParser/precedence/ParseException.java
===================================================================
--- contrib/misc/src/java/org/apache/lucene/queryParser/precedence/ParseException.java (revision 916146)
+++ contrib/misc/src/java/org/apache/lucene/queryParser/precedence/ParseException.java (working copy)
@@ -96,6 +96,7 @@
* of the final stack trace, and hence the correct error message
* gets displayed.
*/
+ @Override
public String getMessage() {
if (!specialConstructor) {
return super.getMessage();
Index: contrib/misc/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParser.java
===================================================================
--- contrib/misc/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParser.java (revision 916146)
+++ contrib/misc/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParser.java (working copy)
@@ -87,8 +87,8 @@
// make it possible to call setDefaultOperator() without accessing
// the nested class:
- public static final Operator AND_OPERATOR = Operator.AND;
- public static final Operator OR_OPERATOR = Operator.OR;
+ public static final Operator AND_OPERATOR = Operator.OP_AND;
+ public static final Operator OR_OPERATOR = Operator.OP_OR;
/** The actual operator that parser uses to combine query terms */
private Operator operator = OR_OPERATOR;
@@ -102,7 +102,7 @@
int fuzzyPrefixLength = FuzzyQuery.defaultPrefixLength;
Locale locale = Locale.getDefault();
- static enum Operator { OR, AND }
+ static enum Operator { OP_OR, OP_AND }
/** Constructs a query parser.
* @param f the default field for query terms.
@@ -617,7 +617,6 @@
break;
default:
jj_la1[1] = jj_gen;
- ;
}
{if (true) return ret;}
throw new Error("Missing return statement in function");
@@ -650,7 +649,6 @@
break;
default:
jj_la1[3] = jj_gen;
- ;
}
{if (true) return ret;}
throw new Error("Missing return statement in function");
@@ -681,7 +679,6 @@
case RANGEIN_START:
case RANGEEX_START:
case NUMBER:
- ;
break;
default:
jj_la1[4] = jj_gen;
@@ -694,7 +691,6 @@
break;
default:
jj_la1[5] = jj_gen;
- ;
}
modifier = Modifier();
q = andExpression(field);
@@ -719,7 +715,6 @@
while (true) {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case AND:
- ;
break;
default:
jj_la1[6] = jj_gen;
@@ -746,7 +741,6 @@
jj_consume_token(COLON);
field=discardEscapeChar(fieldToken.image);
} else {
- ;
}
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case QUOTED:
@@ -769,7 +763,6 @@
break;
default:
jj_la1[7] = jj_gen;
- ;
}
break;
default:
@@ -826,7 +819,6 @@
break;
default:
jj_la1[10] = jj_gen;
- ;
}
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case CARAT:
@@ -839,12 +831,10 @@
break;
default:
jj_la1[11] = jj_gen;
- ;
}
break;
default:
jj_la1[12] = jj_gen;
- ;
}
String termImage=discardEscapeChar(term.image);
if (wildcard) {
@@ -886,7 +876,6 @@
break;
default:
jj_la1[14] = jj_gen;
- ;
}
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case RANGEIN_GOOP:
@@ -908,7 +897,6 @@
break;
default:
jj_la1[16] = jj_gen;
- ;
}
if (goop1.kind == RANGEIN_QUOTED) {
goop1.image = goop1.image.substring(1, goop1.image.length()-1);
@@ -942,7 +930,6 @@
break;
default:
jj_la1[18] = jj_gen;
- ;
}
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case RANGEEX_GOOP:
@@ -964,7 +951,6 @@
break;
default:
jj_la1[20] = jj_gen;
- ;
}
if (goop1.kind == RANGEEX_QUOTED) {
goop1.image = goop1.image.substring(1, goop1.image.length()-1);
@@ -987,7 +973,6 @@
break;
default:
jj_la1[21] = jj_gen;
- ;
}
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case CARAT:
@@ -996,7 +981,6 @@
break;
default:
jj_la1[22] = jj_gen;
- ;
}
int s = phraseSlop;
Index: contrib/misc/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParserTokenManager.java
===================================================================
--- contrib/misc/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParserTokenManager.java (revision 916146)
+++ contrib/misc/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParserTokenManager.java (working copy)
@@ -1,30 +1,8 @@
/* Generated By:JavaCC: Do not edit this line. PrecedenceQueryParserTokenManager.java */
package org.apache.lucene.queryParser.precedence;
-import java.io.IOException;
-import java.io.StringReader;
-import java.text.DateFormat;
-import java.util.ArrayList;
-import java.util.Date;
-import java.util.List;
-import java.util.Locale;
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.*;
-import org.apache.lucene.document.DateTools;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.BooleanClause;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.FuzzyQuery;
-import org.apache.lucene.search.MultiPhraseQuery;
-import org.apache.lucene.search.PhraseQuery;
-import org.apache.lucene.search.PrefixQuery;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.TermRangeQuery;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.WildcardQuery;
-import org.apache.lucene.util.AttributeSource;
/** Token Manager. */
+@SuppressWarnings("unused")
public class PrecedenceQueryParserTokenManager implements PrecedenceQueryParserConstants
{
@@ -359,7 +337,7 @@
}
else
{
- int hiByte = (int)(curChar >> 8);
+ int hiByte = (curChar >> 8);
int i1 = hiByte >> 6;
long l1 = 1L << (hiByte & 077);
int i2 = (curChar & 0xff) >> 6;
@@ -551,7 +529,7 @@
}
else
{
- int hiByte = (int)(curChar >> 8);
+ int hiByte = (curChar >> 8);
int i1 = hiByte >> 6;
long l1 = 1L << (hiByte & 077);
int i2 = (curChar & 0xff) >> 6;
@@ -646,7 +624,7 @@
}
else
{
- int hiByte = (int)(curChar >> 8);
+ int hiByte = (curChar >> 8);
int i1 = hiByte >> 6;
long l1 = 1L << (hiByte & 077);
int i2 = (curChar & 0xff) >> 6;
@@ -809,7 +787,7 @@
}
else
{
- int hiByte = (int)(curChar >> 8);
+ int hiByte = (curChar >> 8);
int i1 = hiByte >> 6;
long l1 = 1L << (hiByte & 077);
int i2 = (curChar & 0xff) >> 6;
Index: contrib/misc/src/java/org/apache/lucene/queryParser/precedence/Token.java
===================================================================
--- contrib/misc/src/java/org/apache/lucene/queryParser/precedence/Token.java (revision 916146)
+++ contrib/misc/src/java/org/apache/lucene/queryParser/precedence/Token.java (working copy)
@@ -90,6 +90,7 @@
/**
* Returns the image.
*/
+ @Override
public String toString()
{
return image;
Index: contrib/misc/src/java/org/apache/lucene/queryParser/precedence/TokenMgrError.java
===================================================================
--- contrib/misc/src/java/org/apache/lucene/queryParser/precedence/TokenMgrError.java (revision 916146)
+++ contrib/misc/src/java/org/apache/lucene/queryParser/precedence/TokenMgrError.java (working copy)
@@ -115,7 +115,8 @@
*
* from this method for such cases in the release version of your parser.
*/
- public String getMessage() {
+ @Override
+ public String getMessage() {
return super.getMessage();
}
Index: contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java
===================================================================
--- contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java (revision 916146)
+++ contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java (working copy)
@@ -20,8 +20,6 @@
import java.io.IOException;
import java.util.Arrays;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -34,12 +32,13 @@
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
/**
* Tests changing of field norms with a custom similarity and with fake norms.
*/
-public class TestFieldNormModifier extends TestCase {
+public class TestFieldNormModifier extends LuceneTestCase {
+
public TestFieldNormModifier(String name) {
super(name);
}
@@ -57,8 +56,9 @@
};
@Override
- public void setUp() throws Exception {
- IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, MaxFieldLength.UNLIMITED);
+ protected void setUp() throws Exception {
+ super.setUp();
+ IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, MaxFieldLength.UNLIMITED);
for (int i = 0; i < NUM_DOCS; i++) {
Document d = new Document();
Index: contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java
===================================================================
--- contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java (revision 916146)
+++ contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java (working copy)
@@ -23,7 +23,6 @@
import org.apache.lucene.index.IndexWriter.MaxFieldLength;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.util._TestUtil;
public class TestIndexSplitter extends LuceneTestCase {
@@ -36,7 +35,7 @@
_TestUtil.rmDir(destDir);
destDir.mkdirs();
FSDirectory fsDir = FSDirectory.open(dir);
- IndexWriter iw = new IndexWriter(fsDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, MaxFieldLength.UNLIMITED);
+ IndexWriter iw = new IndexWriter(fsDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, MaxFieldLength.UNLIMITED);
for (int x=0; x < 100; x++) {
Document doc = TestIndexWriterReader.createDocument(x, "index", 5);
iw.addDocument(doc);
Index: contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java
===================================================================
--- contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java (revision 916146)
+++ contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java (working copy)
@@ -22,18 +22,17 @@
import org.apache.lucene.index.IndexWriter.MaxFieldLength;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
-import junit.framework.TestCase;
-
-public class TestMultiPassIndexSplitter extends TestCase {
+public class TestMultiPassIndexSplitter extends LuceneTestCase {
IndexReader input;
int NUM_DOCS = 11;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
+ super.setUp();
RAMDirectory dir = new RAMDirectory();
- IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
MaxFieldLength.LIMITED);
Document doc;
for (int i = 0; i < NUM_DOCS; i++) {
Index: contrib/misc/src/test/org/apache/lucene/index/TestTermVectorAccessor.java
===================================================================
--- contrib/misc/src/test/org/apache/lucene/index/TestTermVectorAccessor.java (revision 916146)
+++ contrib/misc/src/test/org/apache/lucene/index/TestTermVectorAccessor.java (working copy)
@@ -1,12 +1,11 @@
package org.apache.lucene.index;
-import junit.framework.TestCase;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
import java.util.Collections;
/*
@@ -24,13 +23,12 @@
*
*/
-public class TestTermVectorAccessor extends TestCase {
-
+public class TestTermVectorAccessor extends LuceneTestCase {
public void test() throws Exception {
Directory dir = new RAMDirectory();
- IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(Version.LUCENE_CURRENT, Collections.emptySet()), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(TEST_VERSION_CURRENT, Collections.emptySet()), true, IndexWriter.MaxFieldLength.UNLIMITED);
Document doc;
Index: contrib/misc/src/test/org/apache/lucene/misc/ChainedFilterTest.java
===================================================================
--- contrib/misc/src/test/org/apache/lucene/misc/ChainedFilterTest.java (revision 916146)
+++ contrib/misc/src/test/org/apache/lucene/misc/ChainedFilterTest.java (working copy)
@@ -20,8 +20,6 @@
import java.util.Calendar;
import java.util.GregorianCalendar;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
@@ -43,9 +41,9 @@
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
-public class ChainedFilterTest extends TestCase {
+public class ChainedFilterTest extends LuceneTestCase {
public static final int MAX = 500;
private RAMDirectory directory;
@@ -57,10 +55,11 @@
private QueryWrapperFilter sueFilter;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
+ super.setUp();
directory = new RAMDirectory();
IndexWriter writer =
- new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
Calendar cal = new GregorianCalendar();
cal.clear();
@@ -188,7 +187,7 @@
public void testWithCachingFilter() throws Exception {
Directory dir = new RAMDirectory();
- Analyzer analyzer = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
IndexWriter writer = new IndexWriter(dir, analyzer, true, MaxFieldLength.LIMITED);
writer.close();
Index: contrib/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java
===================================================================
--- contrib/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java (revision 916146)
+++ contrib/misc/src/test/org/apache/lucene/misc/SweetSpotSimilarityTest.java (working copy)
@@ -18,30 +18,10 @@
package org.apache.lucene.misc;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.Similarity;
-import org.apache.lucene.search.DefaultSimilarity;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.PhraseQuery;
-import org.apache.lucene.search.DisjunctionMaxQuery;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.BooleanClause;
-import org.apache.lucene.search.BooleanClause.Occur;
-
-import junit.framework.Test;
import junit.framework.TestCase;
-import junit.framework.TestSuite;
-import java.io.File;
-import java.math.BigDecimal;
-import java.util.Random;
-import java.util.Date;
-import java.util.List;
-import java.util.Arrays;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.Iterator;
+import org.apache.lucene.search.DefaultSimilarity;
+import org.apache.lucene.search.Similarity;
/**
* Test of the SweetSpotSimilarity
Index: contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java
===================================================================
--- contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java (revision 916146)
+++ contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java (working copy)
@@ -19,8 +19,6 @@
import java.io.IOException;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -37,12 +35,13 @@
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
/**
* Tests changing the norms after changing the similarity
*/
-public class TestLengthNormModifier extends TestCase {
+public class TestLengthNormModifier extends LuceneTestCase {
+
public TestLengthNormModifier(String name) {
super(name);
}
@@ -60,8 +59,9 @@
};
@Override
- public void setUp() throws Exception {
- IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, MaxFieldLength.UNLIMITED);
+ protected void setUp() throws Exception {
+ super.setUp();
+ IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, MaxFieldLength.UNLIMITED);
for (int i = 0; i < NUM_DOCS; i++) {
Document d = new Document();
Index: contrib/misc/src/test/org/apache/lucene/queryParser/analyzing/TestAnalyzingQueryParser.java
===================================================================
--- contrib/misc/src/test/org/apache/lucene/queryParser/analyzing/TestAnalyzingQueryParser.java (revision 916146)
+++ contrib/misc/src/test/org/apache/lucene/queryParser/analyzing/TestAnalyzingQueryParser.java (working copy)
@@ -19,8 +19,6 @@
import java.io.Reader;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.ASCIIFoldingFilter;
import org.apache.lucene.analysis.LowerCaseFilter;
@@ -28,12 +26,12 @@
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.queryParser.ParseException;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
/**
* @version $Revision$, $Date$
*/
-public class TestAnalyzingQueryParser extends TestCase {
+public class TestAnalyzingQueryParser extends LuceneTestCase {
private Analyzer a;
@@ -47,7 +45,8 @@
private String[] fuzzyExpected;
@Override
- public void setUp() {
+ protected void setUp() throws Exception {
+ super.setUp();
wildcardInput = new String[] { "übersetzung über*ung",
"Mötley Cr\u00fce Mötl?* Crü?", "Renée Zellweger Ren?? Zellw?ger" };
wildcardExpected = new String[] { "ubersetzung uber*ung", "motley crue motl?* cru?",
@@ -99,7 +98,7 @@
}
private String parseWithAnalyzingQueryParser(String s, Analyzer a) throws ParseException {
- AnalyzingQueryParser qp = new AnalyzingQueryParser(Version.LUCENE_CURRENT, "field", a);
+ AnalyzingQueryParser qp = new AnalyzingQueryParser(TEST_VERSION_CURRENT, "field", a);
org.apache.lucene.search.Query q = qp.parse(s);
return q.toString("field");
}
@@ -112,10 +111,10 @@
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
- TokenStream result = new StandardTokenizer(Version.LUCENE_CURRENT, reader);
+ TokenStream result = new StandardTokenizer(LuceneTestCase.TEST_VERSION_CURRENT, reader);
result = new StandardFilter(result);
result = new ASCIIFoldingFilter(result);
- result = new LowerCaseFilter(result);
+ result = new LowerCaseFilter(LuceneTestCase.TEST_VERSION_CURRENT, result);
return result;
}
}
Index: contrib/misc/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java
===================================================================
--- contrib/misc/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java (revision 916146)
+++ contrib/misc/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java (working copy)
@@ -19,8 +19,6 @@
import java.util.HashSet;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
@@ -33,11 +31,11 @@
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
-public class TestComplexPhraseQuery extends TestCase {
+public class TestComplexPhraseQuery extends LuceneTestCase {
- Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
DocData docsContent[] = { new DocData("john smith", "1"),
new DocData("johathon smith", "2"),
@@ -72,7 +70,7 @@
}
private void checkBadQuery(String qString) {
- QueryParser qp = new ComplexPhraseQueryParser(Version.LUCENE_CURRENT, defaultFieldName, analyzer);
+ QueryParser qp = new ComplexPhraseQueryParser(TEST_VERSION_CURRENT, defaultFieldName, analyzer);
Throwable expected = null;
try {
qp.parse(qString);
@@ -85,7 +83,7 @@
private void checkMatches(String qString, String expectedVals)
throws Exception {
- QueryParser qp = new ComplexPhraseQueryParser(Version.LUCENE_CURRENT, defaultFieldName, analyzer);
+ QueryParser qp = new ComplexPhraseQueryParser(TEST_VERSION_CURRENT, defaultFieldName, analyzer);
qp.setFuzzyPrefixLength(1); // usually a good idea
Query q = qp.parse(qString);
@@ -113,6 +111,7 @@
@Override
protected void setUp() throws Exception {
+ super.setUp();
RAMDirectory rd = new RAMDirectory();
IndexWriter w = new IndexWriter(rd, analyzer, MaxFieldLength.UNLIMITED);
for (int i = 0; i < docsContent.length; i++) {
@@ -130,6 +129,7 @@
@Override
protected void tearDown() throws Exception {
searcher.close();
+ super.tearDown();
}
static class DocData {
Index: contrib/misc/src/test/org/apache/lucene/queryParser/ext/TestExtendableQueryParser.java
===================================================================
--- contrib/misc/src/test/org/apache/lucene/queryParser/ext/TestExtendableQueryParser.java (revision 916146)
+++ contrib/misc/src/test/org/apache/lucene/queryParser/ext/TestExtendableQueryParser.java (working copy)
@@ -26,7 +26,6 @@
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.util.Version;
/**
* Testcase for the class {@link ExtendableQueryParser}
@@ -47,10 +46,10 @@
public QueryParser getParser(Analyzer a, Extensions extensions)
throws Exception {
if (a == null)
- a = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+ a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
QueryParser qp = extensions == null ? new ExtendableQueryParser(
- Version.LUCENE_CURRENT, "field", a) : new ExtendableQueryParser(
- Version.LUCENE_CURRENT, "field", a, extensions);
+ TEST_VERSION_CURRENT, "field", a) : new ExtendableQueryParser(
+ TEST_VERSION_CURRENT, "field", a, extensions);
qp.setDefaultOperator(QueryParser.OR_OPERATOR);
return qp;
}
Index: contrib/misc/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java
===================================================================
--- contrib/misc/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java (revision 916146)
+++ contrib/misc/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java (working copy)
@@ -36,7 +36,6 @@
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.util.LocalizedTestCase;
-import org.apache.lucene.util.Version;
import java.io.IOException;
import java.io.Reader;
@@ -100,7 +99,7 @@
/** Filters LowerCaseTokenizer with StopFilter. */
@Override
public final TokenStream tokenStream(String fieldName, Reader reader) {
- return new QPTestFilter(new LowerCaseTokenizer(Version.LUCENE_CURRENT, reader));
+ return new QPTestFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader));
}
}
@@ -123,14 +122,14 @@
private int originalMaxClauses;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
originalMaxClauses = BooleanQuery.getMaxClauseCount();
}
public PrecedenceQueryParser getParser(Analyzer a) throws Exception {
if (a == null)
- a = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+ a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
PrecedenceQueryParser qp = new PrecedenceQueryParser("field", a);
qp.setDefaultOperator(PrecedenceQueryParser.OR_OPERATOR);
return qp;
@@ -175,7 +174,7 @@
public Query getQueryDOA(String query, Analyzer a)
throws Exception {
if (a == null)
- a = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+ a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
PrecedenceQueryParser qp = new PrecedenceQueryParser("field", a);
qp.setDefaultOperator(PrecedenceQueryParser.AND_OPERATOR);
return qp.parse(query);
@@ -241,7 +240,7 @@
assertQueryEquals("+title:(dog OR cat) -author:\"bob dole\"", null,
"+(title:dog title:cat) -author:\"bob dole\"");
- PrecedenceQueryParser qp = new PrecedenceQueryParser("field", new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ PrecedenceQueryParser qp = new PrecedenceQueryParser("field", new StandardAnalyzer(TEST_VERSION_CURRENT));
// make sure OR is the default:
assertEquals(PrecedenceQueryParser.OR_OPERATOR, qp.getDefaultOperator());
qp.setDefaultOperator(PrecedenceQueryParser.AND_OPERATOR);
@@ -255,7 +254,7 @@
}
public void testPunct() throws Exception {
- Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
assertQueryEquals("a&b", a, "a&b");
assertQueryEquals("a&&b", a, "a&&b");
assertQueryEquals(".NET", a, ".NET");
@@ -275,7 +274,7 @@
assertQueryEquals("term 1.0 1 2", null, "term");
assertQueryEquals("term term1 term2", null, "term term term");
- Analyzer a = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ Analyzer a = new StandardAnalyzer(TEST_VERSION_CURRENT);
assertQueryEquals("3", a, "3");
assertQueryEquals("term 1.0 1 2", a, "term 1.0 1 2");
assertQueryEquals("term term1 term2", a, "term term1 term2");
@@ -413,7 +412,7 @@
}
public void testEscaped() throws Exception {
- Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
/*assertQueryEquals("\\[brackets", a, "\\[brackets");
assertQueryEquals("\\[brackets", null, "brackets");
@@ -518,7 +517,7 @@
public void testBoost()
throws Exception {
- StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT, Collections.singleton("on"));
+ StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(TEST_VERSION_CURRENT, Collections.singleton("on"));
PrecedenceQueryParser qp = new PrecedenceQueryParser("field", oneStopAnalyzer);
Query q = qp.parse("on^1.0");
assertNotNull(q);
@@ -531,7 +530,7 @@
q = qp.parse("\"on\"^1.0");
assertNotNull(q);
- q = getParser(new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT)).parse("the^3");
+ q = getParser(new StandardAnalyzer(TEST_VERSION_CURRENT)).parse("the^3");
assertNotNull(q);
}
@@ -545,7 +544,7 @@
public void testCustomQueryParserWildcard() {
try {
- new QPTestParser("contents", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("a?t");
+ new QPTestParser("contents", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("a?t");
} catch (ParseException expected) {
return;
}
@@ -554,7 +553,7 @@
public void testCustomQueryParserFuzzy() throws Exception {
try {
- new QPTestParser("contents", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("xunit~");
+ new QPTestParser("contents", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("xunit~");
} catch (ParseException expected) {
return;
}
@@ -564,7 +563,7 @@
public void testBooleanQuery() throws Exception {
BooleanQuery.setMaxClauseCount(2);
try {
- getParser(new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("one two three");
+ getParser(new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("one two three");
fail("ParseException expected due to too many boolean clauses");
} catch (ParseException expected) {
// too many boolean clauses, so ParseException is expected
@@ -578,7 +577,7 @@
// failing tests disabled since PrecedenceQueryParser
// is currently unmaintained
public void _testPrecedence() throws Exception {
- PrecedenceQueryParser parser = getParser(new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ PrecedenceQueryParser parser = getParser(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
Query query1 = parser.parse("A AND B OR C AND D");
Query query2 = parser.parse("(A AND B) OR (C AND D)");
assertEquals(query1, query2);
@@ -606,8 +605,9 @@
@Override
- public void tearDown() {
+ protected void tearDown() throws Exception {
BooleanQuery.setMaxClauseCount(originalMaxClauses);
+ super.tearDown();
}
}
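
TestPrecedenceQueryParser mutates JVM-wide state through BooleanQuery.setMaxClauseCount, which is why the tearDown fix above restores the saved value before calling super.tearDown(): the framework's final checks should see the global setting already undone. A sketch of the save/restore pattern, with an illustrative test body:

    import org.apache.lucene.search.BooleanQuery;
    import org.apache.lucene.util.LuceneTestCase;

    public class ExampleStaticStateTest extends LuceneTestCase {
      private int originalMaxClauses;

      @Override
      protected void setUp() throws Exception {
        super.setUp();
        originalMaxClauses = BooleanQuery.getMaxClauseCount(); // remember global state
      }

      public void testWithTinyClauseLimit() {
        BooleanQuery.setMaxClauseCount(2); // JVM-wide change, visible to other tests
      }

      @Override
      protected void tearDown() throws Exception {
        BooleanQuery.setMaxClauseCount(originalMaxClauses); // undo it first
        super.tearDown();                                   // then hand back to the framework
      }
    }
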
Index: contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java
===================================================================
--- contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java (revision 916146)
+++ contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java (working copy)
@@ -19,8 +19,6 @@
import java.io.IOException;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -28,18 +26,17 @@
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
-public class BooleanFilterTest extends TestCase
-{
+public class BooleanFilterTest extends LuceneTestCase {
private RAMDirectory directory;
private IndexReader reader;
@Override
- protected void setUp() throws Exception
- {
+ protected void setUp() throws Exception {
+ super.setUp();
directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
//Add series of docs with filterable fields : access rights, prices, dates and "in-stock" flags
addDoc(writer, "admin guest", "010", "20040101","Y");
Index: contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java
===================================================================
--- contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java (revision 916146)
+++ contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java (working copy)
@@ -20,8 +20,6 @@
import java.io.IOException;
import java.util.HashSet;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -30,9 +28,9 @@
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
-public class DuplicateFilterTest extends TestCase
-{
+public class DuplicateFilterTest extends LuceneTestCase {
private static final String KEY_FIELD = "url";
private RAMDirectory directory;
private IndexReader reader;
@@ -40,10 +38,10 @@
private IndexSearcher searcher;
@Override
- protected void setUp() throws Exception
- {
+ protected void setUp() throws Exception {
+ super.setUp();
directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(directory, new StandardAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
//Add series of docs with filterable fields : url, text and dates flags
addDoc(writer, "http://lucene.apache.org", "lucene 1.4.3 available", "20040101");
@@ -62,11 +60,11 @@
}
@Override
- protected void tearDown() throws Exception
- {
+ protected void tearDown() throws Exception {
reader.close();
searcher.close();
directory.close();
+ super.tearDown();
}
private void addDoc(IndexWriter writer, String url, String text, String date) throws IOException
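
DuplicateFilterTest uses the same fixture life cycle as the neighboring contrib tests: build a RAMDirectory index, open a reader and searcher over it, and close all three in tearDown before deferring to the superclass. A compressed, self-contained sketch of that cycle under the same pre-3.1 API these files compile against (document contents are placeholders):

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.LuceneTestCase;

    public class ExampleFixtureTest extends LuceneTestCase {
      public void testFixtureLifeCycle() throws Exception {
        RAMDirectory directory = new RAMDirectory();
        IndexWriter writer = new IndexWriter(directory,
            new StandardAnalyzer(TEST_VERSION_CURRENT), true,
            IndexWriter.MaxFieldLength.UNLIMITED);
        Document doc = new Document();
        doc.add(new Field("url", "http://lucene.apache.org",
            Field.Store.YES, Field.Index.NOT_ANALYZED));
        writer.addDocument(doc);
        writer.close();                 // commit the single-doc index

        IndexReader reader = IndexReader.open(directory, true);
        IndexSearcher searcher = new IndexSearcher(reader);
        assertEquals(1, reader.numDocs());
        reader.close();                 // close in the same order tearDown uses
        searcher.close();
        directory.close();
      }
    }
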
Index: contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java
===================================================================
--- contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java (revision 916146)
+++ contrib/queries/src/test/org/apache/lucene/search/FuzzyLikeThisQueryTest.java (working copy)
@@ -20,8 +20,6 @@
import java.io.IOException;
import java.util.HashSet;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
@@ -30,17 +28,16 @@
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexWriter.MaxFieldLength;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
-public class FuzzyLikeThisQueryTest extends TestCase
-{
+public class FuzzyLikeThisQueryTest extends LuceneTestCase {
private RAMDirectory directory;
private IndexSearcher searcher;
- private Analyzer analyzer=new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ private Analyzer analyzer=new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
@Override
- protected void setUp() throws Exception
- {
+ protected void setUp() throws Exception {
+ super.setUp();
directory = new RAMDirectory();
IndexWriter writer = new IndexWriter(directory, analyzer,true, MaxFieldLength.UNLIMITED);
@@ -115,7 +112,7 @@
}
public void testFuzzyLikeThisQueryEquals() {
- Analyzer analyzer = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer analyzer = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
FuzzyLikeThisQuery fltq1 = new FuzzyLikeThisQuery(10, analyzer);
fltq1.addTerms("javi", "subject", 0.5f, 2);
FuzzyLikeThisQuery fltq2 = new FuzzyLikeThisQuery(10, analyzer);
Index: contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java
===================================================================
--- contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java (revision 916146)
+++ contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java (working copy)
@@ -19,8 +19,6 @@
import java.util.HashSet;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -29,11 +27,11 @@
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexWriter.MaxFieldLength;
import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.OpenBitSet;
-import org.apache.lucene.util.Version;
-public class TermsFilterTest extends TestCase
-{
+public class TermsFilterTest extends LuceneTestCase {
+
public void testCachability() throws Exception
{
TermsFilter a=new TermsFilter();
@@ -56,7 +54,7 @@
{
String fieldName="field1";
RAMDirectory rd=new RAMDirectory();
- IndexWriter w=new IndexWriter(rd,new WhitespaceAnalyzer(Version.LUCENE_CURRENT),MaxFieldLength.UNLIMITED);
+ IndexWriter w=new IndexWriter(rd,new WhitespaceAnalyzer(TEST_VERSION_CURRENT),MaxFieldLength.UNLIMITED);
for (int i = 0; i < 100; i++)
{
Document doc=new Document();
Index: contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java
===================================================================
--- contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java (revision 916146)
+++ contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java (working copy)
@@ -43,8 +43,9 @@
@Override
protected void setUp() throws Exception {
+ super.setUp();
directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
+ IndexWriter writer = new IndexWriter(directory, new StandardAnalyzer(TEST_VERSION_CURRENT),
true, MaxFieldLength.UNLIMITED);
// Add series of docs with specific information for MoreLikeThis
@@ -62,6 +63,7 @@
reader.close();
searcher.close();
directory.close();
+ super.tearDown();
}
private void addDoc(IndexWriter writer, String text) throws IOException {
@@ -96,7 +98,7 @@
for (int i = 0; i < clauses.size(); i++) {
BooleanClause clause = clauses.get(i);
TermQuery tq = (TermQuery) clause.getQuery();
- Float termBoost = (Float) originalValues.get(tq.getTerm().text());
+ Float termBoost = originalValues.get(tq.getTerm().text());
assertNotNull("Expected term " + tq.getTerm().text(), termBoost);
float totalBoost = termBoost.floatValue() * boostFactor;
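
The (Float) cast dropped above is redundant because originalValues is evidently declared with generic type arguments, so Map.get already returns Float; the compiler then rejects mismatched puts up front instead of deferring to a runtime ClassCastException. A small sketch of the raw-versus-parameterized difference (names are hypothetical):

    import java.util.HashMap;
    import java.util.Map;

    public class GenericsCastExample {
      public static void main(String[] args) {
        // Raw map: get() returns Object, so every read needs a cast.
        Map rawBoosts = new HashMap();
        rawBoosts.put("term", Float.valueOf(2.0f));
        Float viaCast = (Float) rawBoosts.get("term");

        // Parameterized map: get() returns Float directly; no cast.
        Map<String, Float> boosts = new HashMap<String, Float>();
        boosts.put("term", Float.valueOf(2.0f));
        Float direct = boosts.get("term");

        System.out.println(viaCast.equals(direct)); // prints: true
      }
    }
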
Index: contrib/queryparser/src/java/org/apache/lucene/queryParser/core/config/QueryConfigHandler.java
===================================================================
--- contrib/queryparser/src/java/org/apache/lucene/queryParser/core/config/QueryConfigHandler.java (revision 916146)
+++ contrib/queryparser/src/java/org/apache/lucene/queryParser/core/config/QueryConfigHandler.java (working copy)
@@ -45,7 +45,7 @@
*/
public abstract class QueryConfigHandler extends AttributeSource {
- private LinkedList listeners = new LinkedList();;
+ private LinkedList listeners = new LinkedList();
/**
* Returns an implementation of
Index: contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/MultiFieldQueryParserWrapper.java
===================================================================
--- contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/MultiFieldQueryParserWrapper.java (revision 916146)
+++ contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/MultiFieldQueryParserWrapper.java (working copy)
@@ -77,7 +77,7 @@
@SuppressWarnings("unchecked")
public MultiFieldQueryParserWrapper(String[] fields, Analyzer analyzer, Map boosts) {
this(fields, analyzer);
- StandardQueryParser qpHelper = (StandardQueryParser) getQueryParserHelper();
+ StandardQueryParser qpHelper = getQueryParserHelper();
qpHelper.setMultiFields(fields);
qpHelper.setFieldsBoost(boosts);
@@ -113,7 +113,7 @@
public MultiFieldQueryParserWrapper(String[] fields, Analyzer analyzer) {
super(null, analyzer);
- StandardQueryParser qpHelper = (StandardQueryParser) getQueryParserHelper();
+ StandardQueryParser qpHelper = getQueryParserHelper();
qpHelper.setAnalyzer(analyzer);
qpHelper.setMultiFields(fields);
Index: contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/QueryParserWrapper.java
===================================================================
--- contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/QueryParserWrapper.java (revision 916146)
+++ contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/QueryParserWrapper.java (working copy)
@@ -345,7 +345,7 @@
try {
QueryNode queryTree = this.syntaxParser.parse(query, getField());
queryTree = this.processorPipeline.process(queryTree);
- return (Query) this.builder.build(queryTree);
+ return this.builder.build(queryTree);
} catch (QueryNodeException e) {
throw new ParseException("parse exception");
Index: contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/BoostAttributeImpl.java
===================================================================
--- contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/BoostAttributeImpl.java (revision 916146)
+++ contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/BoostAttributeImpl.java (working copy)
@@ -62,7 +62,7 @@
@Override
public boolean equals(Object other) {
- if (other instanceof BoostAttributeImpl && other != null
+ if (other != null && other instanceof BoostAttributeImpl
&& ((BoostAttributeImpl) other).boost == this.boost) {
return true;
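
The reordering above is behavior-preserving, and the null guard is in fact redundant in either order: the language defines null instanceof T as false, so the instanceof test alone accepts exactly the same inputs. The same holds for the four attribute-implementation equals() fixes that follow. A minimal demonstration:

    public class InstanceofNullExample {
      public static void main(String[] args) {
        Object other = null;
        // instanceof already handles null: prints "false" without throwing.
        System.out.println(other instanceof String);
        // So the guarded form is equivalent to the bare instanceof test.
        System.out.println(other != null && other instanceof String);
      }
    }
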
Index: contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/DefaultPhraseSlopAttributeImpl.java
===================================================================
--- contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/DefaultPhraseSlopAttributeImpl.java (revision 916146)
+++ contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/DefaultPhraseSlopAttributeImpl.java (working copy)
@@ -61,8 +61,7 @@
@Override
public boolean equals(Object other) {
- if (other instanceof DefaultPhraseSlopAttributeImpl
- && other != null
+ if (other != null && other instanceof DefaultPhraseSlopAttributeImpl
&& ((DefaultPhraseSlopAttributeImpl) other).defaultPhraseSlop == this.defaultPhraseSlop) {
return true;
Index: contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldBoostMapAttributeImpl.java
===================================================================
--- contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldBoostMapAttributeImpl.java (revision 916146)
+++ contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldBoostMapAttributeImpl.java (working copy)
@@ -66,7 +66,7 @@
@Override
public boolean equals(Object other) {
- if (other instanceof FieldBoostMapAttributeImpl && other != null
+ if (other != null && other instanceof FieldBoostMapAttributeImpl
&& ((FieldBoostMapAttributeImpl) other).boosts.equals(this.boosts) ) {
return true;
Index: contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionMapAttributeImpl.java
===================================================================
--- contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionMapAttributeImpl.java (revision 916146)
+++ contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionMapAttributeImpl.java (working copy)
@@ -63,7 +63,7 @@
@Override
public boolean equals(Object other) {
- if (other instanceof FieldDateResolutionMapAttributeImpl && other != null
+ if (other != null && other instanceof FieldDateResolutionMapAttributeImpl
&& ((FieldDateResolutionMapAttributeImpl) other).dateRes.equals(this.dateRes) ) {
return true;
Index: contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FuzzyAttributeImpl.java
===================================================================
--- contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FuzzyAttributeImpl.java (revision 916146)
+++ contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FuzzyAttributeImpl.java (working copy)
@@ -72,7 +72,7 @@
@Override
public boolean equals(Object other) {
- if (other instanceof FuzzyAttributeImpl && other != null
+ if (other != null && other instanceof FuzzyAttributeImpl
&& ((FuzzyAttributeImpl) other).prefixLength == this.prefixLength) {
return true;
Index: contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/PositionIncrementsAttributeImpl.java
===================================================================
--- contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/PositionIncrementsAttributeImpl.java (revision 916146)
+++ contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/PositionIncrementsAttributeImpl.java (working copy)
@@ -60,8 +60,7 @@
@Override
public boolean equals(Object other) {
- if (other instanceof PositionIncrementsAttributeImpl
- && other != null
+ if (other != null && other instanceof PositionIncrementsAttributeImpl
&& ((PositionIncrementsAttributeImpl) other).positionIncrementsEnabled == this.positionIncrementsEnabled) {
return true;
Index: contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/parser/StandardSyntaxParserTokenManager.java
===================================================================
--- contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/parser/StandardSyntaxParserTokenManager.java (revision 916146)
+++ contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/parser/StandardSyntaxParserTokenManager.java (working copy)
@@ -19,6 +19,7 @@
/** Token Manager. */
+@SuppressWarnings("unused")
public class StandardSyntaxParserTokenManager implements StandardSyntaxParserConstants
{
@@ -304,7 +305,7 @@
}
else
{
- int hiByte = (int)(curChar >> 8);
+ int hiByte = (curChar >> 8);
int i1 = hiByte >> 6;
long l1 = 1L << (hiByte & 077);
int i2 = (curChar & 0xff) >> 6;
@@ -508,7 +509,7 @@
}
else
{
- int hiByte = (int)(curChar >> 8);
+ int hiByte = (curChar >> 8);
int i1 = hiByte >> 6;
long l1 = 1L << (hiByte & 077);
int i2 = (curChar & 0xff) >> 6;
@@ -615,7 +616,7 @@
}
else
{
- int hiByte = (int)(curChar >> 8);
+ int hiByte = (curChar >> 8);
int i1 = hiByte >> 6;
long l1 = 1L << (hiByte & 077);
int i2 = (curChar & 0xff) >> 6;
@@ -786,7 +787,7 @@
}
else
{
- int hiByte = (int)(curChar >> 8);
+ int hiByte = (curChar >> 8);
int i1 = hiByte >> 6;
long l1 = 1L << (hiByte & 077);
int i2 = (curChar & 0xff) >> 6;
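
The four identical cast removals in this JavaCC-generated token manager are no-ops at the bytecode level: a char operand of a shift undergoes unary numeric promotion to int before the shift, so curChar >> 8 is already an int and the explicit (int) cast added nothing. The class-level @SuppressWarnings("unused") takes the usual route for generated sources, silencing warnings rather than hand-editing generated code. A tiny check of the promotion rule:

    public class ShiftPromotionExample {
      public static void main(String[] args) {
        char curChar = '\u4e2d';            // any char value
        // Unary numeric promotion widens the char to int before shifting,
        // so the expression already has type int; the cast was redundant.
        int hiByte = curChar >> 8;
        int sameHiByte = (int) (curChar >> 8);
        System.out.println(hiByte == sameHiByte); // prints: true
      }
    }
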
Index: contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/parser/Token.java
===================================================================
--- contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/parser/Token.java (revision 916146)
+++ contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/parser/Token.java (working copy)
@@ -97,6 +97,7 @@
/**
* Returns the image.
*/
+ @Override
public String toString()
{
return image;
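
Adding @Override to Token.toString() here (and to TokenMgrError.getMessage() in the next file) is purely declarative: it asks the compiler to verify that the method really overrides a superclass member, so a typo such as toSting() fails to compile instead of silently becoming an unrelated method. An illustration with a made-up class:

    public class OverrideExample {
      private final String image = "token";

      @Override
      public String toString() { // compiler-checked: really overrides Object.toString()
        return image;
      }

      // @Override
      // public String toSting() { return image; } // with @Override this typo
      //                                           // would be a compile error
    }
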
Index: contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/parser/TokenMgrError.java
===================================================================
--- contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/parser/TokenMgrError.java (revision 916146)
+++ contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/parser/TokenMgrError.java (working copy)
@@ -121,6 +121,7 @@
*
* from this method for such cases in the release version of your parser.
*/
+ @Override
public String getMessage() {
return super.getMessage();
}
Index: contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiAnalyzerQPHelper.java
===================================================================
--- contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiAnalyzerQPHelper.java (revision 916146)
+++ contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiAnalyzerQPHelper.java (working copy)
@@ -17,12 +17,10 @@
* limitations under the License.
*/
-import java.io.IOException;
import java.io.Reader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseFilter;
-import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -31,10 +29,8 @@
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.queryParser.core.QueryNodeException;
-import org.apache.lucene.queryParser.standard.StandardQueryParser;
import org.apache.lucene.queryParser.standard.config.DefaultOperatorAttribute.Operator;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
/**
* This test case is a copy of the core Lucene query parser test, it was adapted
@@ -156,9 +152,9 @@
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
- TokenStream result = new StandardTokenizer(Version.LUCENE_CURRENT, reader);
+ TokenStream result = new StandardTokenizer(TEST_VERSION_CURRENT, reader);
result = new TestFilter(result);
- result = new LowerCaseFilter(Version.LUCENE_CURRENT, result);
+ result = new LowerCaseFilter(TEST_VERSION_CURRENT, result);
return result;
}
}
@@ -226,9 +222,9 @@
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
- TokenStream result = new StandardTokenizer(Version.LUCENE_CURRENT, reader);
+ TokenStream result = new StandardTokenizer(TEST_VERSION_CURRENT, reader);
result = new TestPosIncrementFilter(result);
- result = new LowerCaseFilter(Version.LUCENE_CURRENT, result);
+ result = new LowerCaseFilter(TEST_VERSION_CURRENT, result);
return result;
}
}
Index: contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiAnalyzerWrapper.java
===================================================================
--- contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiAnalyzerWrapper.java (revision 916146)
+++ contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiAnalyzerWrapper.java (working copy)
@@ -17,12 +17,10 @@
* limitations under the License.
*/
-import java.io.IOException;
import java.io.Reader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseFilter;
-import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardTokenizer;
@@ -31,9 +29,7 @@
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.queryParser.ParseException;
-import org.apache.lucene.queryParser.standard.QueryParserWrapper;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
/**
* This test case is a copy of the core Lucene query parser test, it was adapted
@@ -150,9 +146,9 @@
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
- TokenStream result = new StandardTokenizer(Version.LUCENE_CURRENT, reader);
+ TokenStream result = new StandardTokenizer(TEST_VERSION_CURRENT, reader);
result = new TestFilter(result);
- result = new LowerCaseFilter(Version.LUCENE_CURRENT, result);
+ result = new LowerCaseFilter(TEST_VERSION_CURRENT, result);
return result;
}
}
@@ -220,9 +216,9 @@
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
- TokenStream result = new StandardTokenizer(Version.LUCENE_CURRENT, reader);
+ TokenStream result = new StandardTokenizer(TEST_VERSION_CURRENT, reader);
result = new TestPosIncrementFilter(result);
- result = new LowerCaseFilter(Version.LUCENE_CURRENT, result);
+ result = new LowerCaseFilter(TEST_VERSION_CURRENT, result);
return result;
}
}
Index: contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java
===================================================================
--- contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java (revision 916146)
+++ contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java (working copy)
@@ -22,14 +22,12 @@
import java.util.Map;
import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.queryParser.core.QueryNodeException;
-import org.apache.lucene.queryParser.standard.config.StandardQueryConfigHandler;
import org.apache.lucene.queryParser.standard.config.DefaultOperatorAttribute.Operator;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.IndexSearcher;
@@ -83,7 +81,7 @@
String[] fields = { "b", "t" };
StandardQueryParser mfqp = new StandardQueryParser();
mfqp.setMultiFields(fields);
- mfqp.setAnalyzer(new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ mfqp.setAnalyzer(new StandardAnalyzer(TEST_VERSION_CURRENT));
Query q = mfqp.parse("one", null);
assertEquals("b:one t:one", q.toString());
@@ -153,7 +151,7 @@
StandardQueryParser mfqp = new StandardQueryParser();
mfqp.setMultiFields(fields);
mfqp.setFieldsBoost(boosts);
- mfqp.setAnalyzer(new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ mfqp.setAnalyzer(new StandardAnalyzer(TEST_VERSION_CURRENT));
// Check for simple
Query q = mfqp.parse("one", null);
@@ -181,24 +179,24 @@
public void testStaticMethod1() throws QueryNodeException {
String[] fields = { "b", "t" };
String[] queries = { "one", "two" };
- Query q = QueryParserUtil.parse(queries, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ Query q = QueryParserUtil.parse(queries, fields, new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("b:one t:two", q.toString());
String[] queries2 = { "+one", "+two" };
- q = QueryParserUtil.parse(queries2, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = QueryParserUtil.parse(queries2, fields, new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("(+b:one) (+t:two)", q.toString());
String[] queries3 = { "one", "+two" };
- q = QueryParserUtil.parse(queries3, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = QueryParserUtil.parse(queries3, fields, new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("b:one (+t:two)", q.toString());
String[] queries4 = { "one +more", "+two" };
- q = QueryParserUtil.parse(queries4, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = QueryParserUtil.parse(queries4, fields, new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("(b:one +b:more) (+t:two)", q.toString());
String[] queries5 = { "blah" };
try {
- q = QueryParserUtil.parse(queries5, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = QueryParserUtil.parse(queries5, fields, new StandardAnalyzer(TEST_VERSION_CURRENT));
fail();
} catch (IllegalArgumentException e) {
// expected exception, array length differs
@@ -222,15 +220,15 @@
BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
BooleanClause.Occur.MUST_NOT };
Query q = QueryParserUtil.parse("one", fields, flags,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("+b:one -t:one", q.toString());
- q = QueryParserUtil.parse("one two", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = QueryParserUtil.parse("one two", fields, flags, new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
try {
BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
- q = QueryParserUtil.parse("blah", fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = QueryParserUtil.parse("blah", fields, flags2, new StandardAnalyzer(TEST_VERSION_CURRENT));
fail();
} catch (IllegalArgumentException e) {
// expected exception, array length differs
@@ -243,19 +241,19 @@
BooleanClause.Occur.MUST_NOT };
StandardQueryParser parser = new StandardQueryParser();
parser.setMultiFields(fields);
- parser.setAnalyzer(new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ parser.setAnalyzer(new StandardAnalyzer(TEST_VERSION_CURRENT));
Query q = QueryParserUtil.parse("one", fields, flags,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));// , fields, flags, new
+ new StandardAnalyzer(TEST_VERSION_CURRENT));// , fields, flags, new
// StandardAnalyzer());
assertEquals("+b:one -t:one", q.toString());
- q = QueryParserUtil.parse("one two", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = QueryParserUtil.parse("one two", fields, flags, new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
try {
BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
- q = QueryParserUtil.parse("blah", fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = QueryParserUtil.parse("blah", fields, flags2, new StandardAnalyzer(TEST_VERSION_CURRENT));
fail();
} catch (IllegalArgumentException e) {
// expected exception, array length differs
@@ -268,13 +266,13 @@
BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
BooleanClause.Occur.MUST_NOT, BooleanClause.Occur.SHOULD };
Query q = QueryParserUtil.parse(queries, fields, flags,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("+f1:one -f2:two f3:three", q.toString());
try {
BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
q = QueryParserUtil
- .parse(queries, fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ .parse(queries, fields, flags2, new StandardAnalyzer(TEST_VERSION_CURRENT));
fail();
} catch (IllegalArgumentException e) {
// expected exception, array length differs
@@ -287,13 +285,13 @@
BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
BooleanClause.Occur.MUST_NOT };
Query q = QueryParserUtil.parse(queries, fields, flags,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("+b:one -t:two", q.toString());
try {
BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
q = QueryParserUtil
- .parse(queries, fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ .parse(queries, fields, flags2, new StandardAnalyzer(TEST_VERSION_CURRENT));
fail();
} catch (IllegalArgumentException e) {
// expected exception, array length differs
@@ -319,7 +317,7 @@
}
public void testStopWordSearching() throws Exception {
- Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
Directory ramDir = new RAMDirectory();
IndexWriter iw = new IndexWriter(ramDir, analyzer, true,
IndexWriter.MaxFieldLength.LIMITED);
@@ -345,7 +343,7 @@
* Return empty tokens for field "f1".
*/
private static class AnalyzerReturningNull extends Analyzer {
- StandardAnalyzer stdAnalyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ StandardAnalyzer stdAnalyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
public AnalyzerReturningNull() {
}
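
Several hunks in this file keep the JUnit 3 idiom for asserting that a call throws: invoke it, fail() if control falls through, and catch the expected type. A self-contained sketch of the idiom (the helper method is hypothetical, standing in for QueryParserUtil.parse):

    import org.apache.lucene.util.LuceneTestCase;

    public class ExpectedExceptionIdiomTest extends LuceneTestCase {
      public void testMismatchedArrayLengths() {
        String[] queries = { "blah" };       // one query ...
        String[] fields = { "b", "t" };      // ... but two fields
        try {
          checkLengths(queries, fields);     // hypothetical helper under test
          fail("expected IllegalArgumentException for differing array lengths");
        } catch (IllegalArgumentException e) {
          // expected exception, array length differs (mirrors the comments above)
        }
      }

      private static void checkLengths(String[] queries, String[] fields) {
        if (queries.length != fields.length)
          throw new IllegalArgumentException("queries.length != fields.length");
      }
    }
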
Index: contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQueryParserWrapper.java
===================================================================
--- contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQueryParserWrapper.java (revision 916146)
+++ contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQueryParserWrapper.java (working copy)
@@ -22,7 +22,6 @@
import java.util.Map;
import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
@@ -78,7 +77,7 @@
public void testSimple() throws Exception {
String[] fields = { "b", "t" };
MultiFieldQueryParserWrapper mfqp = new MultiFieldQueryParserWrapper(
- fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ fields, new StandardAnalyzer(TEST_VERSION_CURRENT));
Query q = mfqp.parse("one");
assertEquals("b:one t:one", q.toString());
@@ -146,7 +145,7 @@
boosts.put("t", Float.valueOf(10));
String[] fields = { "b", "t" };
MultiFieldQueryParserWrapper mfqp = new MultiFieldQueryParserWrapper(
- fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), boosts);
+ fields, new StandardAnalyzer(TEST_VERSION_CURRENT), boosts);
// Check for simple
Query q = mfqp.parse("one");
@@ -175,28 +174,28 @@
String[] fields = { "b", "t" };
String[] queries = { "one", "two" };
Query q = MultiFieldQueryParserWrapper.parse(queries, fields,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("b:one t:two", q.toString());
String[] queries2 = { "+one", "+two" };
q = MultiFieldQueryParserWrapper.parse(queries2, fields,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("(+b:one) (+t:two)", q.toString());
String[] queries3 = { "one", "+two" };
q = MultiFieldQueryParserWrapper.parse(queries3, fields,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("b:one (+t:two)", q.toString());
String[] queries4 = { "one +more", "+two" };
q = MultiFieldQueryParserWrapper.parse(queries4, fields,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("(b:one +b:more) (+t:two)", q.toString());
String[] queries5 = { "blah" };
try {
q = MultiFieldQueryParserWrapper.parse(queries5, fields,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
fail();
} catch (IllegalArgumentException e) {
// expected exception, array length differs
@@ -220,17 +219,17 @@
BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
BooleanClause.Occur.MUST_NOT };
Query q = MultiFieldQueryParserWrapper.parse("one", fields, flags,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("+b:one -t:one", q.toString());
q = MultiFieldQueryParserWrapper.parse("one two", fields, flags,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
try {
BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
q = MultiFieldQueryParserWrapper.parse("blah", fields, flags2,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
fail();
} catch (IllegalArgumentException e) {
// expected exception, array length differs
@@ -243,21 +242,19 @@
// MultiFieldQueryParserWrapper.PROHIBITED_FIELD};
BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
BooleanClause.Occur.MUST_NOT };
- MultiFieldQueryParserWrapper parser = new MultiFieldQueryParserWrapper(
- fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
Query q = MultiFieldQueryParserWrapper.parse("one", fields, flags,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));// , fields, flags, new StandardAnalyzer());
+ new StandardAnalyzer(TEST_VERSION_CURRENT));// , fields, flags, new StandardAnalyzer());
assertEquals("+b:one -t:one", q.toString());
q = MultiFieldQueryParserWrapper.parse("one two", fields, flags,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
try {
BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
q = MultiFieldQueryParserWrapper.parse("blah", fields, flags2,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
fail();
} catch (IllegalArgumentException e) {
// expected exception, array length differs
@@ -270,13 +267,13 @@
BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
BooleanClause.Occur.MUST_NOT, BooleanClause.Occur.SHOULD };
Query q = MultiFieldQueryParserWrapper.parse(queries, fields, flags,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("+f1:one -f2:two f3:three", q.toString());
try {
BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
q = MultiFieldQueryParserWrapper.parse(queries, fields, flags2,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
fail();
} catch (IllegalArgumentException e) {
// expected exception, array length differs
@@ -289,13 +286,13 @@
BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
BooleanClause.Occur.MUST_NOT };
Query q = MultiFieldQueryParserWrapper.parse(queries, fields, flags,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
assertEquals("+b:one -t:two", q.toString());
try {
BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
q = MultiFieldQueryParserWrapper.parse(queries, fields, flags2,
- new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
fail();
} catch (IllegalArgumentException e) {
// expected exception, array length differs
@@ -319,7 +316,7 @@
}
public void testStopWordSearching() throws Exception {
- Analyzer analyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ Analyzer analyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
Directory ramDir = new RAMDirectory();
IndexWriter iw = new IndexWriter(ramDir, analyzer, true,
IndexWriter.MaxFieldLength.LIMITED);
@@ -343,7 +340,7 @@
* Return empty tokens for field "f1".
*/
private static class AnalyzerReturningNull extends Analyzer {
- StandardAnalyzer stdAnalyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ StandardAnalyzer stdAnalyzer = new StandardAnalyzer(TEST_VERSION_CURRENT);
public AnalyzerReturningNull() {
}
Index: contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java
===================================================================
--- contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java (revision 916146)
+++ contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java (working copy)
@@ -38,7 +38,6 @@
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.analysis.StopAnalyzer;
import org.apache.lucene.analysis.StopFilter;
-import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
@@ -78,7 +77,6 @@
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.util.LocalizedTestCase;
-import org.apache.lucene.util.Version;
/**
* This test case is a copy of the core Lucene query parser test, it was adapted
@@ -144,7 +142,7 @@
/** Filters LowerCaseTokenizer with StopFilter. */
@Override
public final TokenStream tokenStream(String fieldName, Reader reader) {
- return new QPTestFilter(new LowerCaseTokenizer(Version.LUCENE_CURRENT, reader));
+ return new QPTestFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader));
}
}
@@ -197,14 +195,14 @@
private int originalMaxClauses;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
originalMaxClauses = BooleanQuery.getMaxClauseCount();
}
public StandardQueryParser getParser(Analyzer a) throws Exception {
if (a == null)
- a = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+ a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
StandardQueryParser qp = new StandardQueryParser();
qp.setAnalyzer(a);
@@ -294,7 +292,7 @@
public Query getQueryDOA(String query, Analyzer a) throws Exception {
if (a == null)
- a = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+ a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
StandardQueryParser qp = new StandardQueryParser();
qp.setAnalyzer(a);
qp.setDefaultOperator(Operator.AND);
@@ -314,7 +312,7 @@
}
public void testConstantScoreAutoRewrite() throws Exception {
- StandardQueryParser qp = new StandardQueryParser(new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ StandardQueryParser qp = new StandardQueryParser(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
Query q = qp.parse("foo*bar", "field");
assertTrue(q instanceof WildcardQuery);
assertEquals(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT, ((MultiTermQuery) q).getRewriteMethod());
@@ -339,9 +337,9 @@
public void testSimple() throws Exception {
assertQueryEquals("\"term germ\"~2", null, "\"term germ\"~2");
assertQueryEquals("term term term", null, "term term term");
- assertQueryEquals("t�rm term term", new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ assertQueryEquals("t�rm term term", new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
"t�rm term term");
- assertQueryEquals("�mlaut", new WhitespaceAnalyzer(Version.LUCENE_CURRENT), "�mlaut");
+ assertQueryEquals("�mlaut", new WhitespaceAnalyzer(TEST_VERSION_CURRENT), "�mlaut");
assertQueryEquals("\"\"", new KeywordAnalyzer(), "");
assertQueryEquals("foo:\"\"", new KeywordAnalyzer(), "foo:");
@@ -398,7 +396,7 @@
}
public void testPunct() throws Exception {
- Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
assertQueryEquals("a&b", a, "a&b");
assertQueryEquals("a&&b", a, "a&&b");
assertQueryEquals(".NET", a, ".NET");
@@ -419,7 +417,7 @@
assertQueryEquals("term 1.0 1 2", null, "term");
assertQueryEquals("term term1 term2", null, "term term term");
- Analyzer a = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ Analyzer a = new StandardAnalyzer(TEST_VERSION_CURRENT);
assertQueryEquals("3", a, "3");
assertQueryEquals("term 1.0 1 2", a, "term 1.0 1 2");
assertQueryEquals("term term1 term2", a, "term term1 term2");
@@ -573,7 +571,7 @@
public void testFarsiRangeCollating() throws Exception {
RAMDirectory ramDir = new RAMDirectory();
- IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES,
@@ -583,7 +581,7 @@
IndexSearcher is = new IndexSearcher(ramDir, true);
StandardQueryParser qp = new StandardQueryParser();
- qp.setAnalyzer(new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ qp.setAnalyzer(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
// Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
// RuleBasedCollator. However, the Arabic Locale seems to order the
@@ -737,7 +735,7 @@
}
public void testEscaped() throws Exception {
- Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
/*
* assertQueryEquals("\\[brackets", a, "\\[brackets");
@@ -836,7 +834,7 @@
}
public void testQueryStringEscaping() throws Exception {
- Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
assertEscapedQueryEquals("a-b:c", a, "a\\-b\\:c");
assertEscapedQueryEquals("a+b:c", a, "a\\+b\\:c");
@@ -905,7 +903,7 @@
}
public void testBoost() throws Exception {
- StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(Version.LUCENE_CURRENT, Collections.singleton("on"));
+ StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(TEST_VERSION_CURRENT, Collections.singleton("on"));
StandardQueryParser qp = new StandardQueryParser();
qp.setAnalyzer(oneStopAnalyzer);
@@ -921,7 +919,7 @@
assertNotNull(q);
StandardQueryParser qp2 = new StandardQueryParser();
- qp2.setAnalyzer(new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ qp2.setAnalyzer(new StandardAnalyzer(TEST_VERSION_CURRENT));
q = qp2.parse("the^3", "field");
// "the" is a stop word so the result is an empty query:
@@ -951,7 +949,7 @@
public void testCustomQueryParserWildcard() {
try {
- new QPTestParser(new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("a?t", "contents");
+ new QPTestParser(new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("a?t", "contents");
fail("Wildcard queries should not be allowed");
} catch (QueryNodeException expected) {
// expected exception
@@ -960,7 +958,7 @@
public void testCustomQueryParserFuzzy() throws Exception {
try {
- new QPTestParser(new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("xunit~", "contents");
+ new QPTestParser(new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("xunit~", "contents");
fail("Fuzzy queries should not be allowed");
} catch (QueryNodeException expected) {
// expected exception
@@ -971,7 +969,7 @@
BooleanQuery.setMaxClauseCount(2);
try {
StandardQueryParser qp = new StandardQueryParser();
- qp.setAnalyzer(new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ qp.setAnalyzer(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
qp.parse("one two three", "field");
fail("ParseException expected due to too many boolean clauses");
@@ -985,7 +983,7 @@
*/
public void testPrecedence() throws Exception {
StandardQueryParser qp = new StandardQueryParser();
- qp.setAnalyzer(new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ qp.setAnalyzer(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
Query query1 = qp.parse("A AND B OR C AND D", "field");
Query query2 = qp.parse("+A +B +C +D", "field");
@@ -996,7 +994,7 @@
public void testLocalDateFormat() throws IOException, QueryNodeException {
RAMDirectory ramDir = new RAMDirectory();
- IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
addDateDoc("a", 2005, 12, 2, 10, 15, 33, iw);
addDateDoc("b", 2005, 12, 4, 22, 15, 00, iw);
@@ -1077,7 +1075,7 @@
public void testStopwords() throws Exception {
StandardQueryParser qp = new StandardQueryParser();
qp.setAnalyzer(
- new StopAnalyzer(Version.LUCENE_CURRENT, StopFilter.makeStopSet(Version.LUCENE_CURRENT, "the", "foo" )));
+ new StopAnalyzer(TEST_VERSION_CURRENT, StopFilter.makeStopSet(TEST_VERSION_CURRENT, "the", "foo" )));
Query result = qp.parse("a:the OR a:foo", "a");
assertNotNull("result is null and it shouldn't be", result);
@@ -1100,7 +1098,7 @@
public void testPositionIncrement() throws Exception {
StandardQueryParser qp = new StandardQueryParser();
qp.setAnalyzer(
- new StopAnalyzer(Version.LUCENE_CURRENT, StopFilter.makeStopSet(Version.LUCENE_CURRENT, "the", "in", "are", "this" )));
+ new StopAnalyzer(TEST_VERSION_CURRENT, StopFilter.makeStopSet(TEST_VERSION_CURRENT, "the", "in", "are", "this" )));
qp.setEnablePositionIncrements(true);
@@ -1121,7 +1119,7 @@
public void testMatchAllDocs() throws Exception {
StandardQueryParser qp = new StandardQueryParser();
- qp.setAnalyzer(new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ qp.setAnalyzer(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
assertEquals(new MatchAllDocsQuery(), qp.parse("*:*", "field"));
assertEquals(new MatchAllDocsQuery(), qp.parse("(*:*)", "field"));
@@ -1133,7 +1131,7 @@
private void assertHits(int expected, String query, IndexSearcher is)
throws IOException, QueryNodeException {
StandardQueryParser qp = new StandardQueryParser();
- qp.setAnalyzer(new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ qp.setAnalyzer(new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
qp.setLocale(Locale.ENGLISH);
Query q = qp.parse(query, "date");
@@ -1153,9 +1151,9 @@
}
@Override
- public void tearDown() throws Exception {
- super.tearDown();
+ protected void tearDown() throws Exception {
BooleanQuery.setMaxClauseCount(originalMaxClauses);
+ super.tearDown();
}
private class CannedTokenStream extends TokenStream {
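
The tearDown rewrite above makes two independent fixes: super.tearDown() now runs after the max-clause count is restored, and the hook drops from public back to protected. The visibility change compiles because, as these hunks imply, LuceneTestCase declares setUp/tearDown protected, and Java only forbids an override from reducing the overridden method's visibility; a subclass may match or widen it. A compact sketch of that rule:

    public class VisibilityExample {
      static class Base {
        protected void setUp() {}
      }

      static class WideningChild extends Base {
        @Override
        public void setUp() {}     // legal: an override may widen visibility
      }

      static class MatchingChild extends Base {
        @Override
        protected void setUp() {}  // legal: matching the parent's visibility
        // private void setUp() {} // illegal: cannot reduce visibility
      }
    }
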
Index: contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQueryParserWrapper.java
===================================================================
--- contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQueryParserWrapper.java (revision 916146)
+++ contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQueryParserWrapper.java (working copy)
@@ -36,7 +36,6 @@
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.analysis.StopAnalyzer;
import org.apache.lucene.analysis.StopFilter;
-import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
@@ -73,7 +72,6 @@
import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LocalizedTestCase;
-import org.apache.lucene.util.Version;
/**
* This test case is a copy of the core Lucene query parser test, it was adapted
@@ -139,7 +137,7 @@
/** Filters LowerCaseTokenizer with StopFilter. */
@Override
public final TokenStream tokenStream(String fieldName, Reader reader) {
- return new QPTestFilter(new LowerCaseTokenizer(Version.LUCENE_CURRENT, reader));
+ return new QPTestFilter(new LowerCaseTokenizer(TEST_VERSION_CURRENT, reader));
}
}
@@ -210,14 +208,14 @@
private int originalMaxClauses;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
originalMaxClauses = BooleanQuery.getMaxClauseCount();
}
public QueryParserWrapper getParser(Analyzer a) throws Exception {
if (a == null)
- a = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+ a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
QueryParserWrapper qp = new QueryParserWrapper("field", a);
qp.setDefaultOperator(QueryParserWrapper.OR_OPERATOR);
return qp;
@@ -302,7 +300,7 @@
public Query getQueryDOA(String query, Analyzer a) throws Exception {
if (a == null)
- a = new SimpleAnalyzer(Version.LUCENE_CURRENT);
+ a = new SimpleAnalyzer(TEST_VERSION_CURRENT);
QueryParserWrapper qp = new QueryParserWrapper("field", a);
qp.setDefaultOperator(QueryParserWrapper.AND_OPERATOR);
return qp.parse(query);
@@ -329,9 +327,9 @@
public void testSimple() throws Exception {
assertQueryEquals("\"term germ\"~2", null, "\"term germ\"~2");
assertQueryEquals("term term term", null, "term term term");
- assertQueryEquals("t�rm term term", new WhitespaceAnalyzer(Version.LUCENE_CURRENT),
+ assertQueryEquals("t�rm term term", new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
"t�rm term term");
- assertQueryEquals("�mlaut", new WhitespaceAnalyzer(Version.LUCENE_CURRENT), "�mlaut");
+ assertQueryEquals("�mlaut", new WhitespaceAnalyzer(TEST_VERSION_CURRENT), "�mlaut");
assertQueryEquals("\"\"", new KeywordAnalyzer(), "");
assertQueryEquals("foo:\"\"", new KeywordAnalyzer(), "foo:");
@@ -386,7 +384,7 @@
"+(title:dog title:cat) -author:\"bob dole\"");
QueryParserWrapper qp = new QueryParserWrapper("field",
- new StandardAnalyzer(Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
// make sure OR is the default:
assertEquals(QueryParserWrapper.OR_OPERATOR, qp.getDefaultOperator());
qp.setDefaultOperator(QueryParserWrapper.AND_OPERATOR);
@@ -396,7 +394,7 @@
}
public void testPunct() throws Exception {
- Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
assertQueryEquals("a&b", a, "a&b");
assertQueryEquals("a&&b", a, "a&&b");
assertQueryEquals(".NET", a, ".NET");
@@ -417,7 +415,7 @@
assertQueryEquals("term 1.0 1 2", null, "term");
assertQueryEquals("term term1 term2", null, "term term term");
- Analyzer a = new StandardAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new StandardAnalyzer(TEST_VERSION_CURRENT);
assertQueryEquals("3", a, "3");
assertQueryEquals("term 1.0 1 2", a, "term 1.0 1 2");
assertQueryEquals("term term1 term2", a, "term term1 term2");
@@ -552,7 +550,7 @@
assertEquals(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT, ((TermRangeQuery)getQuery("[ a TO z]", null)).getRewriteMethod());
QueryParserWrapper qp = new QueryParserWrapper("field",
- new SimpleAnalyzer(Version.LUCENE_CURRENT));
+ new SimpleAnalyzer(TEST_VERSION_CURRENT));
qp.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
assertEquals(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE,((TermRangeQuery)qp.parse("[ a TO z]")).getRewriteMethod());
@@ -571,7 +569,7 @@
public void testFarsiRangeCollating() throws Exception {
RAMDirectory ramDir = new RAMDirectory();
- IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("content", "\u0633\u0627\u0628", Field.Store.YES,
@@ -581,7 +579,7 @@
IndexSearcher is = new IndexSearcher(ramDir, true);
QueryParserWrapper qp = new QueryParserWrapper("content",
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
// Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
// RuleBasedCollator. However, the Arabic Locale seems to order the Farsi
@@ -683,7 +681,7 @@
final String monthField = "month";
final String hourField = "hour";
QueryParserWrapper qp = new QueryParserWrapper("field",
- new SimpleAnalyzer(Version.LUCENE_CURRENT));
+ new SimpleAnalyzer(TEST_VERSION_CURRENT));
// Don't set any date resolution and verify if DateField is used
assertDateRangeQueryEquals(qp, defaultField, startDate, endDate,
@@ -727,7 +725,7 @@
}
public void testEscaped() throws Exception {
- Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
/*
* assertQueryEquals("\\[brackets", a, "\\[brackets");
@@ -824,7 +822,7 @@
}
public void testQueryStringEscaping() throws Exception {
- Analyzer a = new WhitespaceAnalyzer(Version.LUCENE_CURRENT);
+ Analyzer a = new WhitespaceAnalyzer(TEST_VERSION_CURRENT);
assertEscapedQueryEquals("a-b:c", a, "a\\-b\\:c");
assertEscapedQueryEquals("a+b:c", a, "a\\+b\\:c");
@@ -893,7 +891,7 @@
}
public void testBoost() throws Exception {
- StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(Version.LUCENE_CURRENT, Collections.singleton("on"));
+ StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(TEST_VERSION_CURRENT, Collections.singleton("on"));
QueryParserWrapper qp = new QueryParserWrapper("field", oneStopAnalyzer);
Query q = qp.parse("on^1.0");
assertNotNull(q);
@@ -907,7 +905,7 @@
assertNotNull(q);
QueryParserWrapper qp2 = new QueryParserWrapper("field",
- new StandardAnalyzer(Version.LUCENE_CURRENT));
+ new StandardAnalyzer(TEST_VERSION_CURRENT));
q = qp2.parse("the^3");
// "the" is a stop word so the result is an empty query:
assertNotNull(q);
@@ -935,7 +933,7 @@
public void testCustomQueryParserWildcard() {
try {
- new QPTestParser("contents", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("a?t");
+ new QPTestParser("contents", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("a?t");
fail("Wildcard queries should not be allowed");
} catch (ParseException expected) {
// expected exception
@@ -944,7 +942,7 @@
public void testCustomQueryParserFuzzy() throws Exception {
try {
- new QPTestParser("contents", new WhitespaceAnalyzer(Version.LUCENE_CURRENT)).parse("xunit~");
+ new QPTestParser("contents", new WhitespaceAnalyzer(TEST_VERSION_CURRENT)).parse("xunit~");
fail("Fuzzy queries should not be allowed");
} catch (ParseException expected) {
// expected exception
@@ -955,7 +953,7 @@
BooleanQuery.setMaxClauseCount(2);
try {
QueryParserWrapper qp = new QueryParserWrapper("field",
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
qp.parse("one two three");
fail("ParseException expected due to too many boolean clauses");
} catch (ParseException expected) {
@@ -968,7 +966,7 @@
*/
public void testPrecedence() throws Exception {
QueryParserWrapper qp = new QueryParserWrapper("field",
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
Query query1 = qp.parse("A AND B OR C AND D");
Query query2 = qp.parse("+A +B +C +D");
@@ -978,7 +976,7 @@
public void testLocalDateFormat() throws IOException, ParseException {
RAMDirectory ramDir = new RAMDirectory();
- IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter iw = new IndexWriter(ramDir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
addDateDoc("a", 2005, 12, 2, 10, 15, 33, iw);
addDateDoc("b", 2005, 12, 4, 22, 15, 00, iw);
@@ -1057,7 +1055,7 @@
}
public void testStopwords() throws Exception {
- QueryParserWrapper qp = new QueryParserWrapper("a", new StopAnalyzer(Version.LUCENE_CURRENT, StopFilter.makeStopSet(Version.LUCENE_CURRENT, "the", "foo")));
+ QueryParserWrapper qp = new QueryParserWrapper("a", new StopAnalyzer(TEST_VERSION_CURRENT, StopFilter.makeStopSet(TEST_VERSION_CURRENT, "the", "foo")));
Query result = qp.parse("a:the OR a:foo");
assertNotNull("result is null and it shouldn't be", result);
assertTrue("result is not a BooleanQuery", result instanceof BooleanQuery);
@@ -1076,7 +1074,7 @@
}
public void testPositionIncrement() throws Exception {
- QueryParserWrapper qp = new QueryParserWrapper("a", new StopAnalyzer(Version.LUCENE_CURRENT, StopFilter.makeStopSet(Version.LUCENE_CURRENT, "the", "in", "are", "this")));
+ QueryParserWrapper qp = new QueryParserWrapper("a", new StopAnalyzer(TEST_VERSION_CURRENT, StopFilter.makeStopSet(TEST_VERSION_CURRENT, "the", "in", "are", "this")));
qp.setEnablePositionIncrements(true);
String qtxt = "\"the words in poisitions pos02578 are stopped in this phrasequery\"";
// 0 2 5 7 8
@@ -1095,7 +1093,7 @@
public void testMatchAllDocs() throws Exception {
QueryParserWrapper qp = new QueryParserWrapper("field",
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
assertEquals(new MatchAllDocsQuery(), qp.parse("*:*"));
assertEquals(new MatchAllDocsQuery(), qp.parse("(*:*)"));
BooleanQuery bq = (BooleanQuery) qp.parse("+*:* -*:*");
@@ -1106,7 +1104,7 @@
private void assertHits(int expected, String query, IndexSearcher is)
throws ParseException, IOException {
QueryParserWrapper qp = new QueryParserWrapper("date",
- new WhitespaceAnalyzer(Version.LUCENE_CURRENT));
+ new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
qp.setLocale(Locale.ENGLISH);
Query q = qp.parse(query);
ScoreDoc[] hits = is.search(q, null, 1000).scoreDocs;
@@ -1125,9 +1123,9 @@
}
@Override
- public void tearDown() throws Exception {
- super.tearDown();
+ protected void tearDown() throws Exception {
BooleanQuery.setMaxClauseCount(originalMaxClauses);
+ super.tearDown();
}
}
Index: contrib/regex/src/java/org/apache/lucene/search/regex/SpanRegexQuery.java
===================================================================
--- contrib/regex/src/java/org/apache/lucene/search/regex/SpanRegexQuery.java (revision 916146)
+++ contrib/regex/src/java/org/apache/lucene/search/regex/SpanRegexQuery.java (working copy)
@@ -19,6 +19,7 @@
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BooleanClause;
@@ -51,7 +52,7 @@
public Query rewrite(IndexReader reader) throws IOException {
RegexQuery orig = new RegexQuery(term);
orig.setRegexImplementation(regexImpl);
- orig.setRewriteMethod(RegexQuery.SCORING_BOOLEAN_QUERY_REWRITE);
+ orig.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
BooleanQuery bq = (BooleanQuery) orig.rewrite(reader);
BooleanClause[] clauses = bq.getClauses();
Index: contrib/regex/src/test/org/apache/lucene/search/regex/TestRegexQuery.java
===================================================================
--- contrib/regex/src/test/org/apache/lucene/search/regex/TestRegexQuery.java (revision 916146)
+++ contrib/regex/src/test/org/apache/lucene/search/regex/TestRegexQuery.java (working copy)
@@ -17,7 +17,6 @@
* limitations under the License.
*/
-import junit.framework.TestCase;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
@@ -29,18 +28,19 @@
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanQuery;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
-public class TestRegexQuery extends TestCase {
+public class TestRegexQuery extends LuceneTestCase {
private IndexSearcher searcher;
private final String FN = "field";
@Override
- public void setUp() {
+ protected void setUp() throws Exception {
+ super.setUp();
RAMDirectory directory = new RAMDirectory();
try {
- IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field(FN, "the quick brown fox jumps over the lazy dog", Field.Store.NO, Field.Index.ANALYZED));
@@ -54,12 +54,9 @@
}
@Override
- public void tearDown() {
- try {
- searcher.close();
- } catch (Exception e) {
- fail(e.toString());
- }
+ protected void tearDown() throws Exception {
+ searcher.close();
+ super.tearDown();
}
private Term newTerm(String value) { return new Term(FN, value); }
Index: contrib/regex/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java
===================================================================
--- contrib/regex/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java (revision 916146)
+++ contrib/regex/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java (working copy)
@@ -19,8 +19,6 @@
import java.io.IOException;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
@@ -36,16 +34,17 @@
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
-public class TestSpanRegexQuery extends TestCase {
+public class TestSpanRegexQuery extends LuceneTestCase {
+
Directory indexStoreA = new RAMDirectory();
Directory indexStoreB = new RAMDirectory();
public void testSpanRegex() throws Exception {
RAMDirectory directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
Document doc = new Document();
// doc.add(new Field("field", "the quick brown fox jumps over the lazy dog",
// Field.Store.NO, Field.Index.ANALYZED));
@@ -110,14 +109,14 @@
Field.Index.ANALYZED_NO_NORMS));
// creating first index writer
- IndexWriter writerA = new IndexWriter(indexStoreA, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
+ IndexWriter writerA = new IndexWriter(indexStoreA, new StandardAnalyzer(TEST_VERSION_CURRENT),
true, IndexWriter.MaxFieldLength.LIMITED);
writerA.addDocument(lDoc);
writerA.optimize();
writerA.close();
// creating second index writer
- IndexWriter writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
+ IndexWriter writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(TEST_VERSION_CURRENT),
true, IndexWriter.MaxFieldLength.LIMITED);
writerB.addDocument(lDoc2);
writerB.optimize();
Index: contrib/remote/src/test/org/apache/lucene/search/RemoteCachingWrapperFilterHelper.java
===================================================================
--- contrib/remote/src/test/org/apache/lucene/search/RemoteCachingWrapperFilterHelper.java (revision 916146)
+++ contrib/remote/src/test/org/apache/lucene/search/RemoteCachingWrapperFilterHelper.java (working copy)
@@ -18,9 +18,8 @@
*/
import java.io.IOException;
-import java.util.BitSet;
-import junit.framework.TestCase;
+import junit.framework.Assert;
import org.apache.lucene.index.IndexReader;
@@ -45,11 +44,11 @@
public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
Filter cachedFilter = FilterManager.getInstance().getFilter(filter);
- TestCase.assertNotNull("Filter should not be null", cachedFilter);
+ Assert.assertNotNull("Filter should not be null", cachedFilter);
if (!shouldHaveCache) {
- TestCase.assertSame("First time filter should be the same ", filter, cachedFilter);
+ Assert.assertSame("First time filter should be the same ", filter, cachedFilter);
} else {
- TestCase.assertNotSame("We should have a cached version of the filter", filter, cachedFilter);
+ Assert.assertNotSame("We should have a cached version of the filter", filter, cachedFilter);
}
if (filter instanceof CachingWrapperFilterHelper) {
Index: contrib/remote/src/test/org/apache/lucene/search/TestRemoteCachingWrapperFilter.java
===================================================================
--- contrib/remote/src/test/org/apache/lucene/search/TestRemoteCachingWrapperFilter.java (revision 916146)
+++ contrib/remote/src/test/org/apache/lucene/search/TestRemoteCachingWrapperFilter.java (working copy)
@@ -21,7 +21,6 @@
import java.rmi.registry.LocateRegistry;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.analysis.SimpleAnalyzer;
@@ -58,7 +57,7 @@
private static void startServer() throws Exception {
// construct an index
RAMDirectory indexStore = new RAMDirectory();
- IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(Version.LUCENE_CURRENT), true,
+ IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("test", "test text", Field.Store.YES, Field.Index.ANALYZED));
Index: contrib/remote/src/test/org/apache/lucene/search/TestRemoteSearchable.java
===================================================================
--- contrib/remote/src/test/org/apache/lucene/search/TestRemoteSearchable.java (revision 916146)
+++ contrib/remote/src/test/org/apache/lucene/search/TestRemoteSearchable.java (working copy)
@@ -18,7 +18,6 @@
*/
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.*;
@@ -59,7 +58,7 @@
private static void startServer() throws Exception {
// construct an index
RAMDirectory indexStore = new RAMDirectory();
- IndexWriter writer = new IndexWriter(indexStore,new SimpleAnalyzer(Version.LUCENE_CURRENT),true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter(indexStore,new SimpleAnalyzer(TEST_VERSION_CURRENT),true, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("test", "test text", Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("other", "other test text", Field.Store.YES, Field.Index.ANALYZED));
@@ -86,9 +85,9 @@
assertTrue("document is null and it shouldn't be", document != null);
assertEquals("test text", document.get("test"));
assertTrue("document.getFields() Size: " + document.getFields().size() + " is not: " + 2, document.getFields().size() == 2);
- Set ftl = new HashSet();
+ Set<String> ftl = new HashSet<String>();
ftl.add("other");
- FieldSelector fs = new SetBasedFieldSelector(ftl, Collections.EMPTY_SET);
+ FieldSelector fs = new SetBasedFieldSelector(ftl, Collections.<String>emptySet());
document = searcher.doc(0, fs);
assertTrue("document is null and it shouldn't be", document != null);
assertTrue("document.getFields() Size: " + document.getFields().size() + " is not: " + 1, document.getFields().size() == 1);
Index: contrib/remote/src/test/org/apache/lucene/search/TestRemoteSort.java
===================================================================
--- contrib/remote/src/test/org/apache/lucene/search/TestRemoteSort.java (revision 916146)
+++ contrib/remote/src/test/org/apache/lucene/search/TestRemoteSort.java (working copy)
@@ -38,7 +38,6 @@
import org.apache.lucene.index.Term;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
import org.apache.lucene.util._TestUtil;
/**
@@ -110,7 +109,7 @@
private Searcher getIndex (boolean even, boolean odd)
throws IOException {
RAMDirectory indexStore = new RAMDirectory ();
- IndexWriter writer = new IndexWriter (indexStore, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter writer = new IndexWriter (indexStore, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
writer.setMergeFactor(1000);
for (int i=0; i<data.length; ++i) {
@@ ... @@
- public Comparable value(int slot) {
+ public Comparable<?> value(int slot) {
return Integer.valueOf(slotValues[slot]);
}
}
@@ -248,9 +247,9 @@
public void testNormalizedScores() throws Exception {
// capture relevancy scores
- HashMap scoresX = getScores (full.search (queryX, null, 1000).scoreDocs, full);
- HashMap scoresY = getScores (full.search (queryY, null, 1000).scoreDocs, full);
- HashMap scoresA = getScores (full.search (queryA, null, 1000).scoreDocs, full);
+ HashMap<String,Float> scoresX = getScores (full.search (queryX, null, 1000).scoreDocs, full);
+ HashMap<String,Float> scoresY = getScores (full.search (queryY, null, 1000).scoreDocs, full);
+ HashMap<String,Float> scoresA = getScores (full.search (queryA, null, 1000).scoreDocs, full);
// we'll test searching locally, remote and multi
MultiSearcher remote = new MultiSearcher (new Searchable[] { getRemote() });
@@ -387,9 +386,9 @@
assertEquals (expectedResult, buff.toString());
}
- private HashMap getScores (ScoreDoc[] hits, Searcher searcher)
+ private HashMap<String,Float> getScores (ScoreDoc[] hits, Searcher searcher)
throws IOException {
- HashMap scoreMap = new HashMap();
+ HashMap<String,Float> scoreMap = new HashMap<String,Float>();
int n = hits.length;
for (int i=0; i<n; ++i) {
@@ ... @@
- private void assertSameValues (HashMap m1, HashMap m2) {
+ private void assertSameValues (HashMap<?, ?> m1, HashMap<?, ?> m2) {
int n = m1.size();
int m = m2.size();
assertEquals (n, m);
- Iterator iter = m1.keySet().iterator();
+ Iterator<?> iter = m1.keySet().iterator();
while (iter.hasNext()) {
Object key = iter.next();
Object o1 = m1.get(key);
Index: contrib/spatial/src/java/org/apache/lucene/spatial/tier/DistanceFieldComparatorSource.java
===================================================================
--- contrib/spatial/src/java/org/apache/lucene/spatial/tier/DistanceFieldComparatorSource.java (revision 916146)
+++ contrib/spatial/src/java/org/apache/lucene/spatial/tier/DistanceFieldComparatorSource.java (working copy)
@@ -54,20 +54,17 @@
@Override
public FieldComparator newComparator(String fieldname, int numHits,
int sortPos, boolean reversed) throws IOException {
- dsdlc = new DistanceScoreDocLookupComparator(distanceFilter, numHits);
+ dsdlc = new DistanceScoreDocLookupComparator(numHits);
return dsdlc;
}
private class DistanceScoreDocLookupComparator extends FieldComparator {
- private DistanceFilter distanceFilter;
private double[] values;
private double bottom;
private int offset =0;
- public DistanceScoreDocLookupComparator(DistanceFilter distanceFilter,
- int numHits) {
- this.distanceFilter = distanceFilter;
+ public DistanceScoreDocLookupComparator(int numHits) {
values = new double[numHits];
return;
}
Index: contrib/spatial/src/java/org/apache/lucene/spatial/tier/DistanceHandler.java
===================================================================
--- contrib/spatial/src/java/org/apache/lucene/spatial/tier/DistanceHandler.java (revision 916146)
+++ contrib/spatial/src/java/org/apache/lucene/spatial/tier/DistanceHandler.java (working copy)
@@ -31,7 +31,7 @@
*/
public class DistanceHandler {
- public enum Precision {EXACT, TWOFEET, TWENTYFEET, TWOHUNDREDFEET};
+ public enum Precision {EXACT, TWOFEET, TWENTYFEET, TWOHUNDREDFEET}
private Map distances;
private Map distanceLookupCache;
Index: contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java
===================================================================
--- contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java (revision 916146)
+++ contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java (working copy)
@@ -21,8 +21,6 @@
import java.util.List;
import java.util.Map;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -49,18 +47,11 @@
import org.apache.lucene.spatial.tier.projections.SinusoidalProjector;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.NumericUtils;
-import org.apache.lucene.util.Version;
-/**
- *
- */
-public class TestCartesian extends TestCase{
+public class TestCartesian extends LuceneTestCase {
- /**
- * @param args
- */
-
private Directory directory;
private IndexSearcher searcher;
// reston va
@@ -76,10 +67,11 @@
@Override
- protected void setUp() throws IOException {
+ protected void setUp() throws Exception {
+ super.setUp();
directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
setUpPlotter( 2, 15);
@@ -495,8 +487,8 @@
// As the radius filter has performed the distance calculations
// already, pass in the filter to reuse the results.
//
- DistanceFieldComparatorSource dsort = new DistanceFieldComparatorSource(dq.distanceFilter);
- Sort sort = new Sort(new SortField("foo", dsort));
+ //DistanceFieldComparatorSource dsort = new DistanceFieldComparatorSource(dq.distanceFilter);
+ //Sort sort = new Sort(new SortField("foo", dsort));
// Perform the search, using the term query, the serial chain filter, and the
// distance sort
Index: contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestDistance.java
===================================================================
--- contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestDistance.java (revision 916146)
+++ contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestDistance.java (working copy)
@@ -18,8 +18,6 @@
import java.io.IOException;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -28,16 +26,11 @@
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.NumericUtils;
-import org.apache.lucene.util.Version;
import org.apache.lucene.store.RAMDirectory;
-
-/**
- *
- */
-public class TestDistance extends TestCase{
-
+public class TestDistance extends LuceneTestCase {
private RAMDirectory directory;
// reston va
@@ -48,16 +41,18 @@
private IndexWriter writer;
@Override
- protected void setUp() throws IOException {
+ protected void setUp() throws Exception {
+ super.setUp();
directory = new RAMDirectory();
- writer = new IndexWriter(directory, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ writer = new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
addData(writer);
}
@Override
- protected void tearDown() throws IOException {
+ protected void tearDown() throws Exception {
writer.close();
+ super.tearDown();
}
private void addPoint(IndexWriter writer, String name, double lat, double lng) throws IOException{
Index: contrib/spellchecker/src/java/org/apache/lucene/search/spell/JaroWinklerDistance.java
===================================================================
--- contrib/spellchecker/src/java/org/apache/lucene/search/spell/JaroWinklerDistance.java (revision 916146)
+++ contrib/spellchecker/src/java/org/apache/lucene/search/spell/JaroWinklerDistance.java (working copy)
@@ -82,7 +82,7 @@
public float getDistance(String s1, String s2) {
int[] mtp = matches(s1, s2);
- float m = (float) mtp[0];
+ float m = mtp[0];
if (m == 0) {
return 0f;
}
Index: contrib/spellchecker/src/java/org/apache/lucene/search/spell/NGramDistance.java
===================================================================
--- contrib/spellchecker/src/java/org/apache/lucene/search/spell/NGramDistance.java (revision 916146)
+++ contrib/spellchecker/src/java/org/apache/lucene/search/spell/NGramDistance.java (working copy)
@@ -138,7 +138,7 @@
// our last action in the above loop was to switch d and p, so p now
// actually has the most recent cost counts
- return 1.0f - ((float) p[sl] / Math.max(tl, sl));
+ return 1.0f - (p[sl] / Math.max(tl, sl));
}
}
Index: contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java
===================================================================
--- contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java (revision 916146)
+++ contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java (working copy)
@@ -20,8 +20,6 @@
import java.io.IOException;
import java.util.Iterator;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -29,28 +27,27 @@
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.Version;
+import org.apache.lucene.util.LuceneTestCase;
/**
* Test case for LuceneDictionary.
* It first creates a simple index and then a couple of instances of LuceneDictionary
* on different fields and checks if all the right text comes back.
- *
*/
-public class TestLuceneDictionary extends TestCase {
+public class TestLuceneDictionary extends LuceneTestCase {
private Directory store = new RAMDirectory();
private IndexReader indexReader = null;
private LuceneDictionary ld;
- private Iterator it;
+ private Iterator<String> it;
@Override
- public void setUp() throws Exception {
- IndexWriter writer = new IndexWriter(store, new WhitespaceAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
-
+ protected void setUp() throws Exception {
+ super.setUp();
+ IndexWriter writer = new IndexWriter(store, new WhitespaceAnalyzer(LuceneTestCase.TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
Document doc;
doc = new Document();
Index: contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestSpellChecker.java
===================================================================
--- contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestSpellChecker.java (revision 916146)
+++ contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestSpellChecker.java (working copy)
@@ -38,13 +38,9 @@
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.English;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util.Version;
-
/**
* Spell checker test case
- *
- *
*/
public class TestSpellChecker extends LuceneTestCase {
private SpellCheckerMock spellChecker;
@@ -58,7 +54,7 @@
//create a user index
userindex = new RAMDirectory();
- IndexWriter writer = new IndexWriter(userindex, new SimpleAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter writer = new IndexWriter(userindex, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.UNLIMITED);
for (int i = 0; i < 1000; i++) {
Document doc = new Document();
@@ -335,16 +331,17 @@
assertEquals(0, searcher.getIndexReader().getRefCount());
}
}
-
- private void showSearchersOpen() {
- int count = 0;
- for (IndexSearcher searcher : searchers) {
- if(searcher.getIndexReader().getRefCount() > 0)
- ++count;
- }
- System.out.println(count);
- }
+ // For debug
+// private void showSearchersOpen() {
+// int count = 0;
+// for (IndexSearcher searcher : searchers) {
+// if(searcher.getIndexReader().getRefCount() > 0)
+// ++count;
+// }
+// System.out.println(count);
+// }
+
private class SpellCheckWorker implements Runnable {
private final IndexReader reader;
Index: contrib/surround/src/java/org/apache/lucene/queryParser/surround/parser/ParseException.java
===================================================================
--- contrib/surround/src/java/org/apache/lucene/queryParser/surround/parser/ParseException.java (revision 916146)
+++ contrib/surround/src/java/org/apache/lucene/queryParser/surround/parser/ParseException.java (working copy)
@@ -96,6 +96,7 @@
* of the final stack trace, and hence the correct error message
* gets displayed.
*/
+ @Override
public String getMessage() {
if (!specialConstructor) {
return super.getMessage();
Index: contrib/surround/src/java/org/apache/lucene/queryParser/surround/parser/QueryParser.java
===================================================================
--- contrib/surround/src/java/org/apache/lucene/queryParser/surround/parser/QueryParser.java (revision 916146)
+++ contrib/surround/src/java/org/apache/lucene/queryParser/surround/parser/QueryParser.java (working copy)
@@ -6,8 +6,6 @@
import java.io.StringReader;
-import org.apache.lucene.analysis.TokenStream;
-
import org.apache.lucene.queryParser.surround.query.SrndQuery;
import org.apache.lucene.queryParser.surround.query.FieldsQuery;
import org.apache.lucene.queryParser.surround.query.OrQuery;
@@ -56,22 +54,22 @@
}
protected SrndQuery getFieldsQuery(
- SrndQuery q, ArrayList fieldNames) {
+ SrndQuery q, ArrayList<String> fieldNames) {
/* FIXME: check acceptable subquery: at least one subquery should not be
* a fields query.
*/
return new FieldsQuery(q, fieldNames, fieldOperator);
}
- protected SrndQuery getOrQuery(List queries, boolean infix, Token orToken) {
+ protected SrndQuery getOrQuery(List<SrndQuery> queries, boolean infix, Token orToken) {
return new OrQuery(queries, infix, orToken.image);
}
- protected SrndQuery getAndQuery(List queries, boolean infix, Token andToken) {
+ protected SrndQuery getAndQuery(List<SrndQuery> queries, boolean infix, Token andToken) {
return new AndQuery( queries, infix, andToken.image);
}
- protected SrndQuery getNotQuery(List queries, Token notToken) {
+ protected SrndQuery getNotQuery(List<SrndQuery> queries, Token notToken) {
return new NotQuery( queries, notToken.image);
}
@@ -91,7 +89,7 @@
}
protected SrndQuery getDistanceQuery(
- List queries,
+ List<SrndQuery> queries,
boolean infix,
Token dToken,
boolean ordered) throws ParseException {
@@ -144,20 +142,19 @@
final public SrndQuery FieldsQuery() throws ParseException {
SrndQuery q;
- ArrayList fieldNames;
+ ArrayList<String> fieldNames;
fieldNames = OptionalFields();
q = OrQuery();
{if (true) return (fieldNames == null) ? q : getFieldsQuery(q, fieldNames);}
throw new Error("Missing return statement in function");
}
- final public ArrayList OptionalFields() throws ParseException {
+ final public ArrayList<String> OptionalFields() throws ParseException {
Token fieldName;
- ArrayList fieldNames = null;
+ ArrayList<String> fieldNames = null;
label_1:
while (true) {
if (jj_2_1(2)) {
- ;
} else {
break label_1;
}
@@ -165,7 +162,7 @@
fieldName = jj_consume_token(TERM);
jj_consume_token(COLON);
if (fieldNames == null) {
- fieldNames = new ArrayList();
+ fieldNames = new ArrayList<String>();
}
fieldNames.add(fieldName.image);
}
@@ -175,14 +172,13 @@
final public SrndQuery OrQuery() throws ParseException {
SrndQuery q;
- ArrayList queries = null;
+ ArrayList<SrndQuery> queries = null;
Token oprt = null;
q = AndQuery();
label_2:
while (true) {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case OR:
- ;
break;
default:
jj_la1[0] = jj_gen;
@@ -191,7 +187,7 @@
oprt = jj_consume_token(OR);
/* keep only last used operator */
if (queries == null) {
- queries = new ArrayList();
+ queries = new ArrayList<SrndQuery>();
queries.add(q);
}
q = AndQuery();
@@ -203,14 +199,13 @@
final public SrndQuery AndQuery() throws ParseException {
SrndQuery q;
- ArrayList queries = null;
+ ArrayList<SrndQuery> queries = null;
Token oprt = null;
q = NotQuery();
label_3:
while (true) {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case AND:
- ;
break;
default:
jj_la1[1] = jj_gen;
@@ -219,7 +214,7 @@
oprt = jj_consume_token(AND);
/* keep only last used operator */
if (queries == null) {
- queries = new ArrayList();
+ queries = new ArrayList<SrndQuery>();
queries.add(q);
}
q = NotQuery();
@@ -231,14 +226,13 @@
final public SrndQuery NotQuery() throws ParseException {
SrndQuery q;
- ArrayList queries = null;
+ ArrayList<SrndQuery> queries = null;
Token oprt = null;
q = NQuery();
label_4:
while (true) {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case NOT:
- ;
break;
default:
jj_la1[2] = jj_gen;
@@ -247,7 +241,7 @@
oprt = jj_consume_token(NOT);
/* keep only last used operator */
if (queries == null) {
- queries = new ArrayList();
+ queries = new ArrayList<SrndQuery>();
queries.add(q);
}
q = NQuery();
@@ -259,21 +253,20 @@
final public SrndQuery NQuery() throws ParseException {
SrndQuery q;
- ArrayList queries;
+ ArrayList<SrndQuery> queries;
Token dt;
q = WQuery();
label_5:
while (true) {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case N:
- ;
break;
default:
jj_la1[3] = jj_gen;
break label_5;
}
dt = jj_consume_token(N);
- queries = new ArrayList();
+ queries = new ArrayList<SrndQuery>();
queries.add(q); /* left associative */
q = WQuery();
@@ -286,21 +279,20 @@
final public SrndQuery WQuery() throws ParseException {
SrndQuery q;
- ArrayList queries;
+ ArrayList<SrndQuery> queries;
Token wt;
q = PrimaryQuery();
label_6:
while (true) {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case W:
- ;
break;
default:
jj_la1[4] = jj_gen;
break label_6;
}
wt = jj_consume_token(W);
- queries = new ArrayList();
+ queries = new ArrayList<SrndQuery>();
queries.add(q); /* left associative */
q = PrimaryQuery();
@@ -345,7 +337,7 @@
final public SrndQuery PrefixOperatorQuery() throws ParseException {
Token oprt;
- List queries;
+ List<SrndQuery> queries;
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case OR:
oprt = jj_consume_token(OR);
@@ -379,9 +371,9 @@
throw new Error("Missing return statement in function");
}
- final public List FieldsQueryList() throws ParseException {
+ final public List<SrndQuery> FieldsQueryList() throws ParseException {
SrndQuery q;
- ArrayList queries = new ArrayList();
+ ArrayList<SrndQuery> queries = new ArrayList<SrndQuery>();
jj_consume_token(LPAREN);
q = FieldsQuery();
queries.add(q);
@@ -392,7 +384,6 @@
queries.add(q);
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case COMMA:
- ;
break;
default:
jj_la1[7] = jj_gen;
@@ -453,7 +444,6 @@
while (true) {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case CARAT:
- ;
break;
default:
jj_la1[9] = jj_gen;
@@ -639,8 +629,8 @@
for (int i = 0; i < jj_endpos; i++) {
jj_expentry[i] = jj_lasttokens[i];
}
- jj_entries_loop: for (java.util.Iterator it = jj_expentries.iterator(); it.hasNext();) {
- int[] oldentry = (int[])(it.next());
+ jj_entries_loop: for (java.util.Iterator<int[]> it = jj_expentries.iterator(); it.hasNext();) {
+ int[] oldentry = (it.next());
if (oldentry.length == jj_expentry.length) {
for (int i = 0; i < jj_expentry.length; i++) {
if (oldentry[i] != jj_expentry[i]) {
Index: contrib/surround/src/java/org/apache/lucene/queryParser/surround/parser/QueryParserTokenManager.java
===================================================================
--- contrib/surround/src/java/org/apache/lucene/queryParser/surround/parser/QueryParserTokenManager.java (revision 916146)
+++ contrib/surround/src/java/org/apache/lucene/queryParser/surround/parser/QueryParserTokenManager.java (working copy)
@@ -1,20 +1,8 @@
/* Generated By:JavaCC: Do not edit this line. QueryParserTokenManager.java */
package org.apache.lucene.queryParser.surround.parser;
-import java.util.ArrayList;
-import java.util.List;
-import java.io.StringReader;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.queryParser.surround.query.SrndQuery;
-import org.apache.lucene.queryParser.surround.query.FieldsQuery;
-import org.apache.lucene.queryParser.surround.query.OrQuery;
-import org.apache.lucene.queryParser.surround.query.AndQuery;
-import org.apache.lucene.queryParser.surround.query.NotQuery;
-import org.apache.lucene.queryParser.surround.query.DistanceQuery;
-import org.apache.lucene.queryParser.surround.query.SrndTermQuery;
-import org.apache.lucene.queryParser.surround.query.SrndPrefixQuery;
-import org.apache.lucene.queryParser.surround.query.SrndTruncQuery;
/** Token Manager. */
+@SuppressWarnings("unused")
public class QueryParserTokenManager implements QueryParserConstants
{
@@ -348,7 +336,7 @@
}
else
{
- int hiByte = (int)(curChar >> 8);
+ int hiByte = (curChar >> 8);
int i1 = hiByte >> 6;
long l1 = 1L << (hiByte & 077);
int i2 = (curChar & 0xff) >> 6;
@@ -468,7 +456,7 @@
}
else
{
- int hiByte = (int)(curChar >> 8);
+ int hiByte = (curChar >> 8);
int i1 = hiByte >> 6;
long l1 = 1L << (hiByte & 077);
int i2 = (curChar & 0xff) >> 6;
Index: contrib/surround/src/java/org/apache/lucene/queryParser/surround/parser/Token.java
===================================================================
--- contrib/surround/src/java/org/apache/lucene/queryParser/surround/parser/Token.java (revision 916146)
+++ contrib/surround/src/java/org/apache/lucene/queryParser/surround/parser/Token.java (working copy)
@@ -90,6 +90,7 @@
/**
* Returns the image.
*/
+ @Override
public String toString()
{
return image;
Index: contrib/surround/src/java/org/apache/lucene/queryParser/surround/parser/TokenMgrError.java
===================================================================
--- contrib/surround/src/java/org/apache/lucene/queryParser/surround/parser/TokenMgrError.java (revision 916146)
+++ contrib/surround/src/java/org/apache/lucene/queryParser/surround/parser/TokenMgrError.java (working copy)
@@ -115,7 +115,8 @@
*
* from this method for such cases in the release version of your parser.
*/
- public String getMessage() {
+ @Override
+ public String getMessage() {
return super.getMessage();
}
Index: contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/DistanceQuery.java
===================================================================
--- contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/DistanceQuery.java (revision 916146)
+++ contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/DistanceQuery.java (working copy)
@@ -46,7 +46,7 @@
public boolean subQueriesOrdered() {return ordered;}
public String distanceSubQueryNotAllowed() {
- Iterator sqi = getSubQueriesIterator();
+ Iterator<?> sqi = getSubQueriesIterator();
while (sqi.hasNext()) {
Object leq = sqi.next();
if (leq instanceof DistanceSubQuery) {
@@ -94,7 +94,7 @@
float boost,
BasicQueryFactory qf) throws IOException {
SpanQuery[] spanNearClauses = new SpanQuery[getNrSubQueries()];
- Iterator sqi = getSubQueriesIterator();
+ Iterator<?> sqi = getSubQueriesIterator();
int qi = 0;
while (sqi.hasNext()) {
SpanNearClauseFactory sncf = new SpanNearClauseFactory(reader, fieldName, qf);
Index: contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/FieldsQuery.java
===================================================================
--- contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/FieldsQuery.java (revision 916146)
+++ contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/FieldsQuery.java (working copy)
@@ -57,7 +57,6 @@
qc = (SrndQuery) q.clone();
queries.add( new FieldsQuery( qc, fni.next(), fieldOp));
}
- boolean infix = true;
OrQuery oq = new OrQuery(queries,
true /* infix OR for field names */,
OrOperatorName);
Index: contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/SimpleTerm.java
===================================================================
--- contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/SimpleTerm.java (revision 916146)
+++ contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/SimpleTerm.java (working copy)
@@ -44,7 +44,7 @@
return this.toStringUnquoted().compareTo( ost.toStringUnquoted());
}
- protected void suffixToString(StringBuilder r) {;} /* override for prefix query */
+ protected void suffixToString(StringBuilder r) {} /* override for prefix query */
@Override
public String toString() {
Index: contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/SpanNearClauseFactory.java
===================================================================
--- contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/SpanNearClauseFactory.java (revision 916146)
+++ contrib/surround/src/java/org/apache/lucene/queryParser/surround/query/SpanNearClauseFactory.java (working copy)
@@ -52,23 +52,17 @@
- SpanNotQuery: treat similar to subquery SpanNearQuery. (ok?)
*/
+import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
-import java.util.Comparator;
-import java.util.Arrays;
-
-import java.io.IOException;
-
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermEnum;
-
import org.apache.lucene.search.Query;
-
-import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanOrQuery;
+import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
Index: contrib/surround/src/test/org/apache/lucene/queryParser/surround/query/BooleanQueryTst.java
===================================================================
--- contrib/surround/src/test/org/apache/lucene/queryParser/surround/query/BooleanQueryTst.java (revision 916146)
+++ contrib/surround/src/test/org/apache/lucene/queryParser/surround/query/BooleanQueryTst.java (working copy)
@@ -28,6 +28,7 @@
import org.apache.lucene.queryParser.surround.parser.QueryParser;
+import junit.framework.Assert;
import junit.framework.TestCase;
public class BooleanQueryTst {
@@ -87,8 +88,8 @@
float score = scorer.score();
docNr += docBase;
/* System.out.println(docNr + " '" + dBase.getDocs()[docNr] + "': " + score); */
- TestCase.assertTrue(queryText + ": positive score", score > 0.0);
- TestCase.assertTrue(queryText + ": too many hits", totalMatched < expectedDocNrs.length);
+ Assert.assertTrue(queryText + ": positive score", score > 0.0);
+ Assert.assertTrue(queryText + ": too many hits", totalMatched < expectedDocNrs.length);
int i;
for (i = 0; i < expectedDocNrs.length; i++) {
if ((! encountered[i]) && (expectedDocNrs[i] == docNr)) {
@@ -97,13 +98,13 @@
}
}
if (i == expectedDocNrs.length) {
- TestCase.assertTrue(queryText + ": doc nr for hit not expected: " + docNr, false);
+ Assert.assertTrue(queryText + ": doc nr for hit not expected: " + docNr, false);
}
totalMatched++;
}
void checkNrHits() {
- TestCase.assertEquals(queryText + ": nr of hits", expectedDocNrs.length, totalMatched);
+ Assert.assertEquals(queryText + ": nr of hits", expectedDocNrs.length, totalMatched);
}
}
Index: contrib/swing/src/test/org/apache/lucene/swing/models/BaseListModel.java
===================================================================
--- contrib/swing/src/test/org/apache/lucene/swing/models/BaseListModel.java (revision 916146)
+++ contrib/swing/src/test/org/apache/lucene/swing/models/BaseListModel.java (working copy)
@@ -24,9 +24,9 @@
public class BaseListModel extends AbstractListModel {
- private List data = new ArrayList();
+ private List<Object> data = new ArrayList<Object>();
- public BaseListModel(Iterator iterator) {
+ public BaseListModel(Iterator<?> iterator) {
while (iterator.hasNext()) {
data.add(iterator.next());
}
Index: contrib/swing/src/test/org/apache/lucene/swing/models/BaseTableModel.java
===================================================================
--- contrib/swing/src/test/org/apache/lucene/swing/models/BaseTableModel.java (revision 916146)
+++ contrib/swing/src/test/org/apache/lucene/swing/models/BaseTableModel.java (working copy)
@@ -24,10 +24,10 @@
public class BaseTableModel extends AbstractTableModel {
- private List columnNames = new ArrayList();
- private List rows = new ArrayList();
+ private List<String> columnNames = new ArrayList<String>();
+ private List<Object> rows = new ArrayList<Object>();
- public BaseTableModel(Iterator data) {
+ public BaseTableModel(Iterator<?> data) {
columnNames.add("Name");
columnNames.add("Type");
columnNames.add("Phone");
@@ -37,7 +37,7 @@
columnNames.add("Zip");
while (data.hasNext()) {
- Object nextRow = (Object) data.next();
+ Object nextRow = data.next();
rows.add(nextRow);
}
}
@@ -66,7 +66,7 @@
}
@Override
- public Class getColumnClass(int columnIndex) {
+ public Class<?> getColumnClass(int columnIndex) {
return String.class;
}
Index: contrib/swing/src/test/org/apache/lucene/swing/models/DataStore.java
===================================================================
--- contrib/swing/src/test/org/apache/lucene/swing/models/DataStore.java (revision 916146)
+++ contrib/swing/src/test/org/apache/lucene/swing/models/DataStore.java (working copy)
@@ -30,7 +30,7 @@
private static final String STEAK_CATEGORY = "Steak";
private static int id = 0;
- static Collection restaurants = new ArrayList();
+ static Collection<RestaurantInfo> restaurants = new ArrayList<RestaurantInfo>();
static RestaurantInfo pinos = new RestaurantInfo();
static RestaurantInfo canolis = new RestaurantInfo();
static RestaurantInfo picadillo = new RestaurantInfo();
@@ -47,7 +47,7 @@
static RestaurantInfo outback4 = new RestaurantInfo();
- public static Iterator getRestaurants(){
+ public static Iterator<RestaurantInfo> getRestaurants(){
return restaurants.iterator();
}
Index: contrib/swing/src/test/org/apache/lucene/swing/models/TestBasicList.java
===================================================================
--- contrib/swing/src/test/org/apache/lucene/swing/models/TestBasicList.java (revision 916146)
+++ contrib/swing/src/test/org/apache/lucene/swing/models/TestBasicList.java (working copy)
@@ -28,11 +28,11 @@
public class TestBasicList extends TestCase {
private ListModel baseListModel;
private ListSearcher listSearcher;
- private List list;
+ private List<RestaurantInfo> list;
@Override
protected void setUp() throws Exception {
- list = new ArrayList();
+ list = new ArrayList<RestaurantInfo>();
list.add(DataStore.canolis);
list.add(DataStore.chris);
Index: contrib/swing/src/test/org/apache/lucene/swing/models/TestBasicTable.java
===================================================================
--- contrib/swing/src/test/org/apache/lucene/swing/models/TestBasicTable.java (revision 916146)
+++ contrib/swing/src/test/org/apache/lucene/swing/models/TestBasicTable.java (working copy)
@@ -27,11 +27,11 @@
public class TestBasicTable extends TestCase {
private TableModel baseTableModel;
private TableSearcher tableSearcher;
- private List list;
+ private List<RestaurantInfo> list;
@Override
protected void setUp() throws Exception {
- list = new ArrayList();
+ list = new ArrayList<RestaurantInfo>();
list.add(DataStore.canolis);
list.add(DataStore.chris);
Index: contrib/wikipedia/src/java/org/apache/lucene/wikipedia/analysis/WikipediaTokenizer.java
===================================================================
--- contrib/wikipedia/src/java/org/apache/lucene/wikipedia/analysis/WikipediaTokenizer.java (revision 916146)
+++ contrib/wikipedia/src/java/org/apache/lucene/wikipedia/analysis/WikipediaTokenizer.java (working copy)
@@ -17,7 +17,6 @@
package org.apache.lucene.wikipedia.analysis;
-import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
Index: contrib/wikipedia/src/java/org/apache/lucene/wikipedia/analysis/WikipediaTokenizerImpl.java
===================================================================
--- contrib/wikipedia/src/java/org/apache/lucene/wikipedia/analysis/WikipediaTokenizerImpl.java (revision 916146)
+++ contrib/wikipedia/src/java/org/apache/lucene/wikipedia/analysis/WikipediaTokenizerImpl.java (working copy)
@@ -21,6 +21,7 @@
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+@SuppressWarnings("unused")
/**
* This class is a scanner generated by
Index: contrib/wikipedia/src/java/org/apache/lucene/wikipedia/analysis/WikipediaTokenizerImpl.jflex
===================================================================
--- contrib/wikipedia/src/java/org/apache/lucene/wikipedia/analysis/WikipediaTokenizerImpl.jflex (revision 916146)
+++ contrib/wikipedia/src/java/org/apache/lucene/wikipedia/analysis/WikipediaTokenizerImpl.jflex (working copy)
@@ -19,6 +19,7 @@
import org.apache.lucene.analysis.Token;
+@SuppressWarnings("unused")
%%
%class WikipediaTokenizerImpl
Index: contrib/wikipedia/src/test/org/apache/lucene/wikipedia/analysis/WikipediaTokenizerTest.java
===================================================================
--- contrib/wikipedia/src/test/org/apache/lucene/wikipedia/analysis/WikipediaTokenizerTest.java (revision 916146)
+++ contrib/wikipedia/src/test/org/apache/lucene/wikipedia/analysis/WikipediaTokenizerTest.java (working copy)
@@ -67,7 +67,7 @@
"'''same [[Category:foo]] goes for this '''''and2 [[Category:foo]] and this" +
" [http://foo.boo.com/test/test/ Test Test] [http://foo.boo.com/test/test/test.html Test Test]" +
" [http://foo.boo.com/test/test/test.html?g=b&c=d Test Test] [Citation] martian code ";
- Map tcm = new HashMap();//map tokens to types
+ Map<String,String> tcm = new HashMap<String,String>();//map tokens to types
tcm.put("link", WikipediaTokenizer.INTERNAL_LINK);
tcm.put("display", WikipediaTokenizer.INTERNAL_LINK);
tcm.put("info", WikipediaTokenizer.INTERNAL_LINK);
@@ -144,7 +144,7 @@
while (tf.incrementToken()) {
String tokText = termAtt.term();
//System.out.println("Text: " + tokText + " Type: " + token.type());
- String expectedType = (String) tcm.get(tokText);
+ String expectedType = tcm.get(tokText);
assertTrue("expectedType is null and it shouldn't be for: " + tf.toString(), expectedType != null);
assertTrue(typeAtt.type() + " is not equal to " + expectedType + " for " + tf.toString(), typeAtt.type().equals(expectedType) == true);
count++;
@@ -264,7 +264,7 @@
}
public void testLucene1133() throws Exception {
- Set untoks = new HashSet();
+ Set<String> untoks = new HashSet<String>();
untoks.add(WikipediaTokenizer.CATEGORY);
untoks.add(WikipediaTokenizer.ITALICS);
//should be exactly the same, regardless of untoks
@@ -342,7 +342,7 @@
}
public void testBoth() throws Exception {
- Set untoks = new HashSet();
+ Set<String> untoks = new HashSet<String>();
untoks.add(WikipediaTokenizer.CATEGORY);
untoks.add(WikipediaTokenizer.ITALICS);
String test = "[[Category:a b c d]] [[Category:e f g]] [[link here]] [[link there]] ''italics here'' something ''more italics'' [[Category:h i j]]";
Index: contrib/wordnet/src/java/org/apache/lucene/wordnet/AnalyzerUtil.java
===================================================================
--- contrib/wordnet/src/java/org/apache/lucene/wordnet/AnalyzerUtil.java (revision 916146)
+++ contrib/wordnet/src/java/org/apache/lucene/wordnet/AnalyzerUtil.java (working copy)
@@ -46,7 +46,7 @@
*/
public class AnalyzerUtil {
- private AnalyzerUtil() {};
+ private AnalyzerUtil() {}
/**
* Returns a simple analyzer wrapper that logs all tokens produced by the
@@ -367,7 +367,7 @@
public void setValue(int value) { this.value = value; }
@Override
public String toString() { return String.valueOf(value); }
- };
+ }
@@ -400,7 +400,7 @@
// TODO: don't split on floating point numbers, e.g. 3.1415 (digit before or after '.')
/** Divides text into sentences; Includes inverted spanish exclamation and question mark */
- private static final Pattern SENTENCES = Pattern.compile("[!\\.\\?\\xA1\\xBF]+");
+// private static final Pattern SENTENCES = Pattern.compile("[!\\.\\?\\xA1\\xBF]+");
/**
* Returns at most the first N sentences of the given text. Delimiting
Index: contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java
===================================================================
--- contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java (revision 916146)
+++ contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java (working copy)
@@ -131,7 +131,7 @@
while ( it.hasNext())
{
// [2a] add to level words in
- String word = (String) it.next();
+ String word = it.next();
TermQuery tq = new TermQuery( new Term( field, word));
tmp.add( tq, BooleanClause.Occur.SHOULD);
Index: contrib/wordnet/src/java/org/apache/lucene/wordnet/SynonymMap.java
===================================================================
--- contrib/wordnet/src/java/org/apache/lucene/wordnet/SynonymMap.java (revision 916146)
+++ contrib/wordnet/src/java/org/apache/lucene/wordnet/SynonymMap.java (working copy)
@@ -68,10 +68,12 @@
* xxxx:[]
*
*
- * @see <a href="...">prologdb
- * man page</a>
- * @see <a href="...">Dave's synonym demo site</a>
+ * See also:
+ * <a href="...">prologdb
+ * man page</a>
+ * <a href="...">Dave's synonym demo site</a>
*/
public class SynonymMap {
@@ -389,7 +391,7 @@
System.arraycopy(output, 0, buffer, 0, len);
return buffer;
} finally {
- if (input != null) input.close();
+ input.close();
}
}
Index: contrib/wordnet/src/test/org/apache/lucene/wordnet/TestSynonymTokenFilter.java
===================================================================
--- contrib/wordnet/src/test/org/apache/lucene/wordnet/TestSynonymTokenFilter.java (revision 916146)
+++ contrib/wordnet/src/test/org/apache/lucene/wordnet/TestSynonymTokenFilter.java (working copy)
@@ -21,7 +21,6 @@
import java.io.FileInputStream;
import java.io.IOException;
import java.io.Reader;
-import java.io.StringReader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseFilter;
@@ -29,7 +28,6 @@
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
-import org.apache.lucene.util.Version;
public class TestSynonymTokenFilter extends BaseTokenStreamTestCase {
File dataDir = new File(System.getProperty("dataDir", "./bin"));
@@ -96,8 +94,8 @@
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
- TokenStream ts = new WhitespaceTokenizer(reader);
- ts = new LowerCaseFilter(Version.LUCENE_CURRENT, ts);
+ TokenStream ts = new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader);
+ ts = new LowerCaseFilter(TEST_VERSION_CURRENT, ts);
ts = new SynonymTokenFilter(ts, synonyms, maxSynonyms);
return ts;
}
@@ -105,7 +103,7 @@
private class SavedStreams {
Tokenizer source;
TokenStream result;
- };
+ }
@Override
public TokenStream reusableTokenStream(String fieldName, Reader reader)
@@ -113,8 +111,8 @@
SavedStreams streams = (SavedStreams) getPreviousTokenStream();
if (streams == null) {
streams = new SavedStreams();
- streams.source = new WhitespaceTokenizer(reader);
- streams.result = new LowerCaseFilter(Version.LUCENE_CURRENT, streams.source);
+ streams.source = new WhitespaceTokenizer(TEST_VERSION_CURRENT, reader);
+ streams.result = new LowerCaseFilter(TEST_VERSION_CURRENT, streams.source);
streams.result = new SynonymTokenFilter(streams.result, synonyms, maxSynonyms);
setPreviousTokenStream(streams);
} else {
Index: contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/QueryTemplateManager.java
===================================================================
--- contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/QueryTemplateManager.java (revision 916146)
+++ contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/QueryTemplateManager.java (working copy)
@@ -11,7 +11,6 @@
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.transform.Result;
-import javax.xml.transform.Source;
import javax.xml.transform.Templates;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerConfigurationException;
Index: contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/BoostingTermBuilder.java
===================================================================
--- contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/BoostingTermBuilder.java (revision 916146)
+++ contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/BoostingTermBuilder.java (working copy)
@@ -2,7 +2,6 @@
import org.apache.lucene.index.Term;
import org.apache.lucene.search.spans.SpanQuery;
-import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.search.payloads.PayloadTermQuery;
import org.apache.lucene.search.payloads.AveragePayloadFunction;
import org.apache.lucene.xmlparser.DOMUtils;
Index: contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/DuplicateFilterBuilder.java
===================================================================
--- contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/DuplicateFilterBuilder.java (revision 916146)
+++ contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/DuplicateFilterBuilder.java (working copy)
@@ -3,17 +3,12 @@
*/
package org.apache.lucene.xmlparser.builders;
-import org.apache.lucene.search.BooleanClause;
-import org.apache.lucene.search.BooleanFilter;
import org.apache.lucene.search.DuplicateFilter;
import org.apache.lucene.search.Filter;
-import org.apache.lucene.search.FilterClause;
import org.apache.lucene.xmlparser.DOMUtils;
import org.apache.lucene.xmlparser.FilterBuilder;
import org.apache.lucene.xmlparser.ParserException;
import org.w3c.dom.Element;
-import org.w3c.dom.Node;
-import org.w3c.dom.NodeList;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
Index: contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestQueryTemplateManager.java
===================================================================
--- contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestQueryTemplateManager.java (revision 916146)
+++ contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestQueryTemplateManager.java (working copy)
@@ -7,8 +7,6 @@
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.transform.TransformerException;
-import junit.framework.TestCase;
-
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Field;
@@ -16,6 +14,7 @@
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
import org.w3c.dom.Document;
import org.xml.sax.SAXException;
@@ -39,10 +38,10 @@
* This class illustrates how form input (such as from a web page or Swing gui) can be
* turned into Lucene queries using a choice of XSL templates for different styles of queries.
*/
-public class TestQueryTemplateManager extends TestCase {
+public class TestQueryTemplateManager extends LuceneTestCase {
CoreParser builder;
- Analyzer analyzer=new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+ Analyzer analyzer=new StandardAnalyzer(TEST_VERSION_CURRENT);
private IndexSearcher searcher;
//A collection of documents' field values for use in our tests
@@ -160,5 +159,6 @@
@Override
protected void tearDown() throws Exception {
searcher.close();
+ super.tearDown();
}
}
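The tearDown change above restores the super call so LuceneTestCase can run its own cleanup after the test releases its resources. A minimal sketch of the pattern, with a hypothetical test class and searcher field:

    public class MyTemplateTest extends LuceneTestCase {
      private IndexSearcher searcher;
      @Override
      protected void tearDown() throws Exception {
        searcher.close();  // release test-local resources first
        super.tearDown();  // then let LuceneTestCase finish its own checks
      }
    }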
Index: src/demo/org/apache/lucene/demo/IndexFiles.java
===================================================================
--- src/demo/org/apache/lucene/demo/IndexFiles.java (revision 916146)
+++ src/demo/org/apache/lucene/demo/IndexFiles.java (working copy)
@@ -91,7 +91,6 @@
// at least on windows, some temporary files raise this exception with an "access denied" message
// checking if the file can be read doesn't help
catch (FileNotFoundException fnfe) {
- ;
}
}
}
Index: src/demo/org/apache/lucene/demo/html/Entities.java
===================================================================
--- src/demo/org/apache/lucene/demo/html/Entities.java (revision 916146)
+++ src/demo/org/apache/lucene/demo/html/Entities.java (working copy)
@@ -38,7 +38,7 @@
new Character((char)Integer.parseInt(entity.substring(start), radix));
return c.toString();
} else {
- String s = (String)decoder.get(entity);
+ String s = decoder.get(entity);
if (s != null)
return s;
else return "";
@@ -50,7 +50,7 @@
StringBuffer buffer = new StringBuffer(length * 2);
for (int i = 0; i < length; i++) {
char c = s.charAt(i);
- int j = (int)c;
+ int j = c;
if (j < 0x100 && encoder[j] != null) {
buffer.append(encoder[j]); // have a named encoding
buffer.append(';');
Index: src/demo/org/apache/lucene/demo/html/HTMLParser.java
===================================================================
--- src/demo/org/apache/lucene/demo/html/HTMLParser.java (revision 916146)
+++ src/demo/org/apache/lucene/demo/html/HTMLParser.java (working copy)
@@ -178,7 +178,6 @@
case Entity:
case Space:
case Punct:
- ;
break;
default:
jj_la1[0] = jj_gen;
@@ -244,7 +243,6 @@
while (true) {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case ArgName:
- ;
break;
default:
jj_la1[2] = jj_gen;
@@ -284,12 +282,10 @@
break;
default:
jj_la1[3] = jj_gen;
- ;
}
break;
default:
jj_la1[4] = jj_gen;
- ;
}
}
jj_consume_token(TagEnd);
@@ -353,7 +349,6 @@
case ArgValue:
case ArgQuote1:
case ArgQuote2:
- ;
break;
default:
jj_la1[8] = jj_gen;
@@ -390,7 +385,6 @@
while (true) {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case CommentText1:
- ;
break;
default:
jj_la1[10] = jj_gen;
@@ -406,7 +400,6 @@
while (true) {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case CommentText2:
- ;
break;
default:
jj_la1[11] = jj_gen;
@@ -429,7 +422,6 @@
while (true) {
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case ScriptText:
- ;
break;
default:
jj_la1[13] = jj_gen;
Index: src/demo/org/apache/lucene/demo/html/HTMLParserTokenManager.java
===================================================================
--- src/demo/org/apache/lucene/demo/html/HTMLParserTokenManager.java (revision 916146)
+++ src/demo/org/apache/lucene/demo/html/HTMLParserTokenManager.java (working copy)
@@ -1,11 +1,9 @@
/* Generated By:JavaCC: Do not edit this line. HTMLParserTokenManager.java */
package org.apache.lucene.demo.html;
-import java.io.*;
-import java.util.Properties;
/** Token Manager. */
-public class HTMLParserTokenManager implements HTMLParserConstants
-{
+@SuppressWarnings("unused")
+public class HTMLParserTokenManager implements HTMLParserConstants {
/** Debug output. */
public java.io.PrintStream debugStream = System.out;
@@ -546,7 +544,7 @@
}
else if (curChar < 128)
{
- long l = 1L << (curChar & 077);
+ long l = 1L << (curChar & 077);
do
{
switch(jjstateSet[--i])
Index: src/demo/org/apache/lucene/demo/html/ParseException.java
===================================================================
--- src/demo/org/apache/lucene/demo/html/ParseException.java (revision 916146)
+++ src/demo/org/apache/lucene/demo/html/ParseException.java (working copy)
@@ -96,6 +96,7 @@
* of the final stack trace, and hence the correct error message
* gets displayed.
*/
+ @Override
public String getMessage() {
if (!specialConstructor) {
return super.getMessage();
Index: src/demo/org/apache/lucene/demo/html/Token.java
===================================================================
--- src/demo/org/apache/lucene/demo/html/Token.java (revision 916146)
+++ src/demo/org/apache/lucene/demo/html/Token.java (working copy)
@@ -90,6 +90,7 @@
/**
* Returns the image.
*/
+ @Override
public String toString()
{
return image;
Index: src/demo/org/apache/lucene/demo/html/TokenMgrError.java
===================================================================
--- src/demo/org/apache/lucene/demo/html/TokenMgrError.java (revision 916146)
+++ src/demo/org/apache/lucene/demo/html/TokenMgrError.java (working copy)
@@ -115,7 +115,8 @@
*
* from this method for such cases in the release version of your parser.
*/
- public String getMessage() {
+ @Override
+ public String getMessage() {
return super.getMessage();
}
Index: src/java/org/apache/lucene/analysis/CharArrayMap.java
===================================================================
--- src/java/org/apache/lucene/analysis/CharArrayMap.java (revision 916146)
+++ src/java/org/apache/lucene/analysis/CharArrayMap.java (working copy)
@@ -488,7 +488,7 @@
@Override
public String toString() {
return new StringBuilder().append(keys[pos]).append('=')
- .append(((Object) values[pos] == (Object) CharArrayMap.this) ? "(this Map)" : values[pos])
+ .append((values[pos] == CharArrayMap.this) ? "(this Map)" : values[pos])
.toString();
}
}
Index: src/java/org/apache/lucene/analysis/CharArraySet.java
===================================================================
--- src/java/org/apache/lucene/analysis/CharArraySet.java (revision 916146)
+++ src/java/org/apache/lucene/analysis/CharArraySet.java (working copy)
@@ -17,7 +17,6 @@
* limitations under the License.
*/
-import java.util.Arrays;
import java.util.AbstractSet;
import java.util.Collection;
import java.util.Iterator;
Index: src/java/org/apache/lucene/analysis/NumericTokenStream.java
===================================================================
--- src/java/org/apache/lucene/analysis/NumericTokenStream.java (revision 916146)
+++ src/java/org/apache/lucene/analysis/NumericTokenStream.java (working copy)
@@ -22,8 +22,6 @@
import org.apache.lucene.document.NumericField; // for javadocs
import org.apache.lucene.search.NumericRangeQuery; // for javadocs
import org.apache.lucene.search.NumericRangeFilter; // for javadocs
-import org.apache.lucene.search.SortField; // for javadocs
-import org.apache.lucene.search.FieldCache; // javadocs
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
@@ -162,7 +160,7 @@
* new Field(name, new NumericTokenStream(precisionStep).setIntValue(value))
*/
public NumericTokenStream setIntValue(final int value) {
- this.value = (long) value;
+ this.value = value;
valSize = 32;
shift = 0;
return this;
@@ -188,7 +186,7 @@
* new Field(name, new NumericTokenStream(precisionStep).setFloatValue(value))
*/
public NumericTokenStream setFloatValue(final float value) {
- this.value = (long) NumericUtils.floatToSortableInt(value);
+ this.value = NumericUtils.floatToSortableInt(value);
valSize = 32;
shift = 0;
return this;
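The setIntValue/setFloatValue javadocs above all describe the same indexing idiom; a minimal sketch, assuming a Document named doc and an int precisionStep are in scope:

    // Index an int through a NumericTokenStream, per the javadoc pattern above.
    doc.add(new Field("price",
        new NumericTokenStream(precisionStep).setIntValue(42)));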
Index: src/java/org/apache/lucene/analysis/StopAnalyzer.java
===================================================================
--- src/java/org/apache/lucene/analysis/StopAnalyzer.java (revision 916146)
+++ src/java/org/apache/lucene/analysis/StopAnalyzer.java (working copy)
@@ -24,7 +24,6 @@
import java.util.Set;
import java.util.List;
-import org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents; // javadoc @link
import org.apache.lucene.util.Version;
/** Filters {@link LetterTokenizer} with {@link LowerCaseFilter} and {@link StopFilter}.
@@ -91,9 +90,12 @@
}
/**
- * Creates {@link TokenStreamComponents} used to tokenize all the text in the provided {@link Reader}.
- *
- * @return {@link TokenStreamComponents} built from a {@link LowerCaseTokenizer} filtered with
+ * Creates
+ * {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * used to tokenize all the text in the provided {@link Reader}.
+ *
+ * @return {@link org.apache.lucene.analysis.ReusableAnalyzerBase.TokenStreamComponents}
+ * built from a {@link LowerCaseTokenizer} filtered with
* {@link StopFilter}
*/
@Override
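For context, a minimal sketch of an Analyzer written against this createComponents contract, assuming matchVersion and a stopword set are in scope (the anonymous subclass is illustrative, not part of the patch):

    Analyzer analyzer = new ReusableAnalyzerBase() {
      @Override
      protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
        // tokenize on non-letters and lowercase, then drop stop words
        Tokenizer source = new LowerCaseTokenizer(matchVersion, reader);
        return new TokenStreamComponents(source,
            new StopFilter(matchVersion, source, stopwords));
      }
    };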
Index: src/java/org/apache/lucene/analysis/TeeSinkTokenFilter.java
===================================================================
--- src/java/org/apache/lucene/analysis/TeeSinkTokenFilter.java (revision 916146)
+++ src/java/org/apache/lucene/analysis/TeeSinkTokenFilter.java (working copy)
@@ -124,7 +124,7 @@
* This instance is exhausted after this, but all sinks are instant available.
*/
public void consumeAllTokens() throws IOException {
- while (incrementToken());
+ while (incrementToken()) {}
}
@Override
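consumeAllTokens() drains the filter so that every registered sink can later replay the buffered tokens; a minimal usage sketch, assuming matchVersion and reader are in scope:

    TeeSinkTokenFilter tee =
        new TeeSinkTokenFilter(new WhitespaceTokenizer(matchVersion, reader));
    TeeSinkTokenFilter.SinkTokenStream sink = tee.newSinkTokenStream();
    tee.consumeAllTokens();  // exhausts the source; sink now serves the cached tokens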
Index: src/java/org/apache/lucene/analysis/Token.java
===================================================================
--- src/java/org/apache/lucene/analysis/Token.java (revision 916146)
+++ src/java/org/apache/lucene/analysis/Token.java (working copy)
@@ -525,7 +525,7 @@
Token t = (Token)super.clone();
// Do a deep clone
if (termBuffer != null) {
- t.termBuffer = (char[]) termBuffer.clone();
+ t.termBuffer = termBuffer.clone();
}
if (payload != null) {
t.payload = (Payload) payload.clone();
Index: src/java/org/apache/lucene/analysis/standard/StandardAnalyzer.java
===================================================================
--- src/java/org/apache/lucene/analysis/standard/StandardAnalyzer.java (revision 916146)
+++ src/java/org/apache/lucene/analysis/standard/StandardAnalyzer.java (working copy)
@@ -47,7 +47,7 @@
/**
* Specifies whether deprecated acronyms should be replaced with HOST type.
- * See {@linkplain https://issues.apache.org/jira/browse/LUCENE-1068}
+ * See {@linkplain "https://issues.apache.org/jira/browse/LUCENE-1068"}
*/
private final boolean replaceInvalidAcronym;
Index: src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java
===================================================================
--- src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java (revision 916146)
+++ src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java (working copy)
@@ -20,7 +20,6 @@
import java.io.IOException;
import java.io.Reader;
-import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
Index: src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java
===================================================================
--- src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java (revision 916146)
+++ src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java (working copy)
@@ -34,6 +34,7 @@
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+@SuppressWarnings("unused")
/**
* This class is a scanner generated by
Index: src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex
===================================================================
--- src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex (revision 916146)
+++ src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex (working copy)
@@ -32,6 +32,7 @@
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+@SuppressWarnings("unused")
%%
%class StandardTokenizerImpl
Index: src/java/org/apache/lucene/analysis/tokenattributes/TermAttributeImpl.java
===================================================================
--- src/java/org/apache/lucene/analysis/tokenattributes/TermAttributeImpl.java (revision 916146)
+++ src/java/org/apache/lucene/analysis/tokenattributes/TermAttributeImpl.java (working copy)
@@ -183,7 +183,7 @@
TermAttributeImpl t = (TermAttributeImpl)super.clone();
// Do a deep clone
if (termBuffer != null) {
- t.termBuffer = (char[]) termBuffer.clone();
+ t.termBuffer = termBuffer.clone();
}
return t;
}
Index: src/java/org/apache/lucene/document/AbstractField.java
===================================================================
--- src/java/org/apache/lucene/document/AbstractField.java (revision 916146)
+++ src/java/org/apache/lucene/document/AbstractField.java (working copy)
@@ -18,6 +18,7 @@
import org.apache.lucene.search.PhraseQuery; // for javadocs
import org.apache.lucene.search.spans.SpanQuery; // for javadocs
import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.index.FieldInvertState;
import org.apache.lucene.util.StringHelper; // for javadocs
@@ -85,7 +86,7 @@
* the range of that encoding.
*
* @see org.apache.lucene.document.Document#setBoost(float)
- * @see org.apache.lucene.search.Similarity#computeNorm(String, org.apache.lucene.index.FieldInvertState)
+ * @see org.apache.lucene.search.Similarity#computeNorm(String, FieldInvertState)
* @see org.apache.lucene.search.Similarity#encodeNormValue(float)
*/
public void setBoost(float boost) {
Index: src/java/org/apache/lucene/document/NumericField.java
===================================================================
--- src/java/org/apache/lucene/document/NumericField.java (revision 916146)
+++ src/java/org/apache/lucene/document/NumericField.java (working copy)
@@ -140,7 +140,7 @@
*/
public final class NumericField extends AbstractField {
- private final NumericTokenStream tokenStream;
+ private final NumericTokenStream numericTS;
/**
* Creates a field for numeric values using the default precisionStep
@@ -195,12 +195,12 @@
public NumericField(String name, int precisionStep, Field.Store store, boolean index) {
super(name, store, index ? Field.Index.ANALYZED_NO_NORMS : Field.Index.NO, Field.TermVector.NO);
setOmitTermFreqAndPositions(true);
- tokenStream = new NumericTokenStream(precisionStep);
+ numericTS = new NumericTokenStream(precisionStep);
}
/** Returns a {@link NumericTokenStream} for indexing the numeric value. */
public TokenStream tokenStreamValue() {
- return isIndexed() ? tokenStream : null;
+ return isIndexed() ? numericTS : null;
}
/** Returns always null for numeric fields */
@@ -231,7 +231,7 @@
* document.add(new NumericField(name, precisionStep).setLongValue(value))
*/
public NumericField setLongValue(final long value) {
- tokenStream.setLongValue(value);
+ numericTS.setLongValue(value);
fieldsData = Long.valueOf(value);
return this;
}
@@ -243,7 +243,7 @@
* document.add(new NumericField(name, precisionStep).setIntValue(value))
*/
public NumericField setIntValue(final int value) {
- tokenStream.setIntValue(value);
+ numericTS.setIntValue(value);
fieldsData = Integer.valueOf(value);
return this;
}
@@ -255,7 +255,7 @@
* document.add(new NumericField(name, precisionStep).setDoubleValue(value))
*/
public NumericField setDoubleValue(final double value) {
- tokenStream.setDoubleValue(value);
+ numericTS.setDoubleValue(value);
fieldsData = Double.valueOf(value);
return this;
}
@@ -267,7 +267,7 @@
* document.add(new NumericField(name, precisionStep).setFloatValue(value))
*/
public NumericField setFloatValue(final float value) {
- tokenStream.setFloatValue(value);
+ numericTS.setFloatValue(value);
fieldsData = Float.valueOf(value);
return this;
}
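All four setters above share one idiom; a minimal sketch, assuming a Document named doc and an int precisionStep:

    doc.add(new NumericField("price", precisionStep).setLongValue(1395L));
    doc.add(new NumericField("weight", precisionStep).setDoubleValue(2.5));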
Index: src/java/org/apache/lucene/index/CheckIndex.java
===================================================================
--- src/java/org/apache/lucene/index/CheckIndex.java (revision 916146)
+++ src/java/org/apache/lucene/index/CheckIndex.java (working copy)
@@ -899,7 +899,7 @@
System.out.println("");
final int exitCode;
- if (result != null && result.clean == true)
+ if (result.clean == true)
exitCode = 0;
else
exitCode = 1;
Index: src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java
===================================================================
--- src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java (revision 916146)
+++ src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java (working copy)
@@ -350,13 +350,13 @@
protected class MergeThread extends Thread {
- IndexWriter writer;
+ IndexWriter tWriter;
MergePolicy.OneMerge startMerge;
MergePolicy.OneMerge runningMerge;
private volatile boolean done;
public MergeThread(IndexWriter writer, MergePolicy.OneMerge startMerge) throws IOException {
- this.writer = writer;
+ this.tWriter = writer;
this.startMerge = startMerge;
}
@@ -408,9 +408,9 @@
// Subsequent times through the loop we do any new
// merge that writer says is necessary:
- merge = writer.getNextMerge();
+ merge = tWriter.getNextMerge();
if (merge != null) {
- writer.mergeInit(merge);
+ tWriter.mergeInit(merge);
updateMergeThreads();
if (verbose())
message(" merge thread: do another merge " + merge.segString(dir));
Index: src/java/org/apache/lucene/index/DocFieldConsumers.java
===================================================================
--- src/java/org/apache/lucene/index/DocFieldConsumers.java (revision 916146)
+++ src/java/org/apache/lucene/index/DocFieldConsumers.java (working copy)
@@ -132,21 +132,21 @@
class PerDoc extends DocumentsWriter.DocWriter {
- DocumentsWriter.DocWriter one;
- DocumentsWriter.DocWriter two;
+ DocumentsWriter.DocWriter writerOne;
+ DocumentsWriter.DocWriter writerTwo;
@Override
public long sizeInBytes() {
- return one.sizeInBytes() + two.sizeInBytes();
+ return writerOne.sizeInBytes() + writerTwo.sizeInBytes();
}
@Override
public void finish() throws IOException {
try {
try {
- one.finish();
+ writerOne.finish();
} finally {
- two.finish();
+ writerTwo.finish();
}
} finally {
freePerDoc(this);
@@ -157,9 +157,9 @@
public void abort() {
try {
try {
- one.abort();
+ writerOne.abort();
} finally {
- two.abort();
+ writerTwo.abort();
}
} finally {
freePerDoc(this);
Index: src/java/org/apache/lucene/index/DocFieldConsumersPerThread.java
===================================================================
--- src/java/org/apache/lucene/index/DocFieldConsumersPerThread.java (revision 916146)
+++ src/java/org/apache/lucene/index/DocFieldConsumersPerThread.java (working copy)
@@ -62,8 +62,8 @@
both.docID = docState.docID;
assert oneDoc.docID == docState.docID;
assert twoDoc.docID == docState.docID;
- both.one = oneDoc;
- both.two = twoDoc;
+ both.writerOne = oneDoc;
+ both.writerTwo = twoDoc;
return both;
}
}
Index: src/java/org/apache/lucene/index/FieldsReader.java
===================================================================
--- src/java/org/apache/lucene/index/FieldsReader.java (revision 916146)
+++ src/java/org/apache/lucene/index/FieldsReader.java (working copy)
@@ -352,9 +352,9 @@
final byte[] b = new byte[toRead];
fieldsStream.readBytes(b, 0, b.length);
if (compressed) {
- doc.add(new Field(fi.name, uncompress(b), Field.Store.YES));
+ doc.add(new Field(fi.name, uncompress(b)));
} else {
- doc.add(new Field(fi.name, b, Field.Store.YES));
+ doc.add(new Field(fi.name, b));
}
} else {
Field.Store store = Field.Store.YES;
@@ -400,7 +400,7 @@
sizebytes[1] = (byte) (bytesize>>>16);
sizebytes[2] = (byte) (bytesize>>> 8);
sizebytes[3] = (byte) bytesize ;
- doc.add(new Field(fi.name, sizebytes, Field.Store.YES));
+ doc.add(new Field(fi.name, sizebytes));
return size;
}
Index: src/java/org/apache/lucene/index/IndexReader.java
===================================================================
--- src/java/org/apache/lucene/index/IndexReader.java (revision 916146)
+++ src/java/org/apache/lucene/index/IndexReader.java (working copy)
@@ -66,8 +66,8 @@
make changes with the resulting IndexReader.
- NOTE : {@link
- IndexReader} instances are completely thread
+
+ NOTE : {@link
+ IndexReader} instances are completely thread
safe, meaning multiple threads can call any of its methods,
concurrently. If your application requires external
synchronization, you should not synchronize on the
@@ -428,8 +428,6 @@
* mutable state obeys "copy on write" semantics to ensure
* the changes are not seen by other readers.
*
- * @throws CorruptIndexException if the index is corrupt
- * @throws IOException if there is a low-level IO error
*/
@Override
public synchronized Object clone() {
Index: src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexWriter.java (revision 916146)
+++ src/java/org/apache/lucene/index/IndexWriter.java (working copy)
@@ -137,8 +137,8 @@
last commit. You can also just call {@link #rollback()}
directly.
- NOTE : {@link
- IndexWriter} instances are completely thread
+
+ NOTE : {@link
+ IndexWriter} instances are completely thread
safe, meaning multiple threads can call any of its
methods, concurrently. If your application requires
external synchronization, you should not
Index: src/java/org/apache/lucene/index/MultiReader.java
===================================================================
--- src/java/org/apache/lucene/index/MultiReader.java (revision 916146)
+++ src/java/org/apache/lucene/index/MultiReader.java (working copy)
@@ -48,7 +48,6 @@
* left to the subreaders.
* Note that all subreaders are closed if this Multireader is closed.
* @param subReaders set of (sub)readers
- * @throws IOException
*/
public MultiReader(IndexReader... subReaders) {
initialize(subReaders, true);
@@ -61,7 +60,6 @@
* @param closeSubReaders indicates whether the subreaders should be closed
* when this MultiReader is closed
* @param subReaders set of (sub)readers
- * @throws IOException
*/
public MultiReader(IndexReader[] subReaders, boolean closeSubReaders) {
initialize(subReaders, closeSubReaders);
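A minimal sketch of the two constructors documented above, assuming r1 and r2 are open IndexReaders:

    IndexReader closesSubs = new MultiReader(r1, r2);  // subreaders closed with this reader
    IndexReader keepsSubs = new MultiReader(new IndexReader[] { r1, r2 }, false);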
Index: src/java/org/apache/lucene/index/Payload.java
===================================================================
--- src/java/org/apache/lucene/index/Payload.java (revision 916146)
+++ src/java/org/apache/lucene/index/Payload.java (working copy)
@@ -164,7 +164,7 @@
// Only copy the part of data that belongs to this Payload
if (offset == 0 && length == data.length) {
// It is the whole thing, so just clone it.
- clone.data = (byte[]) data.clone();
+ clone.data = data.clone();
}
else {
// Just get the part
Index: src/java/org/apache/lucene/index/ReusableStringReader.java
===================================================================
--- src/java/org/apache/lucene/index/ReusableStringReader.java (revision 916146)
+++ src/java/org/apache/lucene/index/ReusableStringReader.java (working copy)
@@ -53,6 +53,6 @@
}
}
@Override
- public void close() {};
+ public void close() {}
}
Index: src/java/org/apache/lucene/index/SegmentInfo.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentInfo.java (revision 916146)
+++ src/java/org/apache/lucene/index/SegmentInfo.java (working copy)
@@ -318,7 +318,7 @@
si.hasSingleNormFile = hasSingleNormFile;
si.diagnostics = new HashMap<String,String>(diagnostics);
if (normGen != null) {
- si.normGen = (long[]) normGen.clone();
+ si.normGen = normGen.clone();
}
si.docStoreOffset = docStoreOffset;
si.docStoreSegment = docStoreSegment;
Index: src/java/org/apache/lucene/index/SegmentInfos.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentInfos.java (revision 916146)
+++ src/java/org/apache/lucene/index/SegmentInfos.java (working copy)
@@ -705,9 +705,7 @@
message("fallback to prior segment file '" + prevSegmentFileName + "'");
try {
Object v = doBody(prevSegmentFileName);
- if (exc != null) {
- message("success on fallback " + prevSegmentFileName);
- }
+ message("success on fallback " + prevSegmentFileName);
return v;
} catch (IOException err2) {
message("secondary Exception on '" + prevSegmentFileName + "': " + err2 + "'; will retry");
Index: src/java/org/apache/lucene/index/StoredFieldsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/StoredFieldsWriter.java (revision 916146)
+++ src/java/org/apache/lucene/index/StoredFieldsWriter.java (working copy)
@@ -63,7 +63,6 @@
if (fieldsWriter == null) {
final String docStoreSegment = docWriter.getDocStoreSegment();
if (docStoreSegment != null) {
- assert docStoreSegment != null;
fieldsWriter = new FieldsWriter(docWriter.directory,
docStoreSegment,
fieldInfos);
Index: src/java/org/apache/lucene/index/TermVectorsReader.java
===================================================================
--- src/java/org/apache/lucene/index/TermVectorsReader.java (revision 916146)
+++ src/java/org/apache/lucene/index/TermVectorsReader.java (working copy)
@@ -207,7 +207,7 @@
// make all effort to close up. Keep the first exception
// and throw it as a new one.
IOException keep = null;
- if (tvx != null) try { tvx.close(); } catch (IOException e) { if (keep == null) keep = e; }
+ if (tvx != null) try { tvx.close(); } catch (IOException e) { keep = e; }
if (tvd != null) try { tvd.close(); } catch (IOException e) { if (keep == null) keep = e; }
if (tvf != null) try { tvf.close(); } catch (IOException e) { if (keep == null) keep = e; }
if (keep != null) throw (IOException) keep.fillInStackTrace();
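The simplification above is safe because keep is still null when the first close runs; the idiom being kept is roughly the following, where first and second stand in for the tvx/tvd/tvf streams:

    IOException keep = null;
    try { first.close(); } catch (IOException e) { keep = e; }  // first failure always wins
    try { second.close(); } catch (IOException e) { if (keep == null) keep = e; }
    if (keep != null) throw keep;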
Index: src/java/org/apache/lucene/index/TermVectorsTermsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/TermVectorsTermsWriter.java (revision 916146)
+++ src/java/org/apache/lucene/index/TermVectorsTermsWriter.java (working copy)
@@ -149,8 +149,6 @@
if (docStoreSegment == null)
return;
- assert docStoreSegment != null;
-
// If we hit an exception while init'ing the term
// vector output files, we must abort this segment
// because those files will be in an unknown
@@ -193,8 +191,8 @@
tvd.writeVLong(pos-lastPos);
lastPos = pos;
}
- perDoc.tvf.writeTo(tvf);
- perDoc.tvf.reset();
+ perDoc.perDocTvf.writeTo(tvf);
+ perDoc.perDocTvf.reset();
perDoc.numVectorFields = 0;
}
@@ -247,14 +245,14 @@
// TODO: use something more memory efficient; for small
// docs the 1024 buffer size of RAMOutputStream wastes alot
- RAMOutputStream tvf = new RAMOutputStream();
+ RAMOutputStream perDocTvf = new RAMOutputStream();
int numVectorFields;
int[] fieldNumbers = new int[1];
long[] fieldPointers = new long[1];
void reset() {
- tvf.reset();
+ perDocTvf.reset();
numVectorFields = 0;
}
@@ -272,13 +270,13 @@
fieldPointers = ArrayUtil.grow(fieldPointers);
}
fieldNumbers[numVectorFields] = fieldNumber;
- fieldPointers[numVectorFields] = tvf.getFilePointer();
+ fieldPointers[numVectorFields] = perDocTvf.getFilePointer();
numVectorFields++;
}
@Override
public long sizeInBytes() {
- return tvf.sizeInBytes();
+ return perDocTvf.sizeInBytes();
}
@Override
Index: src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java
===================================================================
--- src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java (revision 916146)
+++ src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java (working copy)
@@ -74,8 +74,8 @@
perThread.doc = termsWriter.getPerDoc();
perThread.doc.docID = docState.docID;
assert perThread.doc.numVectorFields == 0;
- assert 0 == perThread.doc.tvf.length();
- assert 0 == perThread.doc.tvf.getFilePointer();
+ assert 0 == perThread.doc.perDocTvf.length();
+ assert 0 == perThread.doc.perDocTvf.getFilePointer();
} else {
assert perThread.doc.docID == docState.docID;
@@ -114,7 +114,7 @@
if (numPostings > maxNumPostings)
maxNumPostings = numPostings;
- final IndexOutput tvf = perThread.doc.tvf;
+ final IndexOutput tvf = perThread.doc.perDocTvf;
// This is called once, after inverting all occurrences
// of a given field in the doc. At this point we flush
@@ -216,7 +216,7 @@
p.freq = 1;
if (doVectorOffsets) {
- int startOffset = fieldState.offset + offsetAttribute.startOffset();;
+ int startOffset = fieldState.offset + offsetAttribute.startOffset();
int endOffset = fieldState.offset + offsetAttribute.endOffset();
termsHashPerField.writeVInt(1, startOffset);
@@ -239,7 +239,7 @@
p.freq++;
if (doVectorOffsets) {
- int startOffset = fieldState.offset + offsetAttribute.startOffset();;
+ int startOffset = fieldState.offset + offsetAttribute.startOffset();
int endOffset = fieldState.offset + offsetAttribute.endOffset();
termsHashPerField.writeVInt(1, startOffset - p.lastOffset);
Index: src/java/org/apache/lucene/index/TermVectorsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/TermVectorsWriter.java (revision 916146)
+++ src/java/org/apache/lucene/index/TermVectorsWriter.java (working copy)
@@ -199,7 +199,7 @@
try {
tvx.close();
} catch (IOException e) {
- if (keep == null) keep = e;
+ keep = e;
}
if (tvd != null)
try {
Index: src/java/org/apache/lucene/index/TermsHash.java
===================================================================
--- src/java/org/apache/lucene/index/TermsHash.java (revision 916146)
+++ src/java/org/apache/lucene/index/TermsHash.java (working copy)
@@ -61,7 +61,7 @@
// targets 25-50% fill factor; approximate this
// as 3X # pointers
bytesPerPosting = consumer.bytesPerPosting() + 4*DocumentsWriter.POINTER_NUM_BYTE;
- postingsFreeChunk = (int) (DocumentsWriter.BYTE_BLOCK_SIZE / bytesPerPosting);
+ postingsFreeChunk = (DocumentsWriter.BYTE_BLOCK_SIZE / bytesPerPosting);
}
@Override
Index: src/java/org/apache/lucene/index/TermsHashPerField.java
===================================================================
--- src/java/org/apache/lucene/index/TermsHashPerField.java (revision 916146)
+++ src/java/org/apache/lucene/index/TermsHashPerField.java (working copy)
@@ -351,7 +351,7 @@
// term text into textStart address
// Get the text of this term.
- final char[] tokenText = termAtt.termBuffer();;
+ final char[] tokenText = termAtt.termBuffer();
final int tokenTextLen = termAtt.termLength();
// Compute hashcode & replace any invalid UTF16 sequences
Index: src/java/org/apache/lucene/queryParser/FastCharStream.java
===================================================================
--- src/java/org/apache/lucene/queryParser/FastCharStream.java (revision 916146)
+++ src/java/org/apache/lucene/queryParser/FastCharStream.java (working copy)
@@ -103,9 +103,12 @@
}
}
+ @Deprecated
public final int getColumn() {
return bufferStart + bufferPosition;
}
+
+ @Deprecated
public final int getLine() {
return 1;
}
Index: src/java/org/apache/lucene/queryParser/ParseException.java
===================================================================
--- src/java/org/apache/lucene/queryParser/ParseException.java (revision 916146)
+++ src/java/org/apache/lucene/queryParser/ParseException.java (working copy)
@@ -96,6 +96,7 @@
* of the final stack trace, and hence the correct error message
* gets displayed.
*/
+ @Override
public String getMessage() {
if (!specialConstructor) {
return super.getMessage();
Index: src/java/org/apache/lucene/queryParser/QueryParser.java
===================================================================
--- src/java/org/apache/lucene/queryParser/QueryParser.java (revision 916146)
+++ src/java/org/apache/lucene/queryParser/QueryParser.java (working copy)
@@ -1122,7 +1122,6 @@
break;
default:
jj_la1[1] = jj_gen;
- ;
}
{if (true) return ret;}
throw new Error("Missing return statement in function");
@@ -1155,7 +1154,6 @@
break;
default:
jj_la1[3] = jj_gen;
- ;
}
{if (true) return ret;}
throw new Error("Missing return statement in function");
@@ -1196,7 +1194,6 @@
case RANGEIN_START:
case RANGEEX_START:
case NUMBER:
- ;
break;
default:
jj_la1[4] = jj_gen;
@@ -1236,7 +1233,6 @@
throw new ParseException();
}
} else {
- ;
}
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case STAR:
@@ -1260,7 +1256,6 @@
break;
default:
jj_la1[6] = jj_gen;
- ;
}
break;
default:
@@ -1322,7 +1317,6 @@
break;
default:
jj_la1[9] = jj_gen;
- ;
}
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case CARAT:
@@ -1335,12 +1329,10 @@
break;
default:
jj_la1[10] = jj_gen;
- ;
}
break;
default:
jj_la1[11] = jj_gen;
- ;
}
String termImage=discardEscapeChar(term.image);
if (wildcard) {
@@ -1382,7 +1374,6 @@
break;
default:
jj_la1[13] = jj_gen;
- ;
}
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case RANGEIN_GOOP:
@@ -1404,7 +1395,6 @@
break;
default:
jj_la1[15] = jj_gen;
- ;
}
if (goop1.kind == RANGEIN_QUOTED) {
goop1.image = goop1.image.substring(1, goop1.image.length()-1);
@@ -1434,7 +1424,6 @@
break;
default:
jj_la1[17] = jj_gen;
- ;
}
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case RANGEEX_GOOP:
@@ -1456,7 +1445,6 @@
break;
default:
jj_la1[19] = jj_gen;
- ;
}
if (goop1.kind == RANGEEX_QUOTED) {
goop1.image = goop1.image.substring(1, goop1.image.length()-1);
@@ -1475,7 +1463,6 @@
break;
default:
jj_la1[20] = jj_gen;
- ;
}
switch ((jj_ntk==-1)?jj_ntk():jj_ntk) {
case CARAT:
@@ -1484,7 +1471,6 @@
break;
default:
jj_la1[21] = jj_gen;
- ;
}
int s = phraseSlop;
Index: src/java/org/apache/lucene/queryParser/QueryParserTokenManager.java
===================================================================
--- src/java/org/apache/lucene/queryParser/QueryParserTokenManager.java (revision 916146)
+++ src/java/org/apache/lucene/queryParser/QueryParserTokenManager.java (working copy)
@@ -1,42 +1,9 @@
/* Generated By:JavaCC: Do not edit this line. QueryParserTokenManager.java */
package org.apache.lucene.queryParser;
-import java.io.IOException;
-import java.io.StringReader;
-import java.text.Collator;
-import java.text.DateFormat;
-import java.util.ArrayList;
-import java.util.Calendar;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.Vector;
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.CachingTokenFilter;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.TermAttribute;
-import org.apache.lucene.document.DateField;
-import org.apache.lucene.document.DateTools;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.search.BooleanClause;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.FuzzyQuery;
-import org.apache.lucene.search.MultiTermQuery;
-import org.apache.lucene.search.MatchAllDocsQuery;
-import org.apache.lucene.search.MultiPhraseQuery;
-import org.apache.lucene.search.PhraseQuery;
-import org.apache.lucene.search.PrefixQuery;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.TermRangeQuery;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.WildcardQuery;
-import org.apache.lucene.util.Version;
/** Token Manager. */
-public class QueryParserTokenManager implements QueryParserConstants
-{
+@SuppressWarnings("unused")
+public class QueryParserTokenManager implements QueryParserConstants {
/** Debug output. */
public java.io.PrintStream debugStream = System.out;
@@ -428,7 +395,7 @@
}
else
{
- int hiByte = (int)(curChar >> 8);
+ int hiByte = (curChar >> 8);
int i1 = hiByte >> 6;
long l1 = 1L << (hiByte & 077);
int i2 = (curChar & 0xff) >> 6;
@@ -674,7 +641,7 @@
}
else
{
- int hiByte = (int)(curChar >> 8);
+ int hiByte = (curChar >> 8);
int i1 = hiByte >> 6;
long l1 = 1L << (hiByte & 077);
int i2 = (curChar & 0xff) >> 6;
@@ -781,7 +748,7 @@
}
else
{
- int hiByte = (int)(curChar >> 8);
+ int hiByte = (curChar >> 8);
int i1 = hiByte >> 6;
long l1 = 1L << (hiByte & 077);
int i2 = (curChar & 0xff) >> 6;
@@ -952,7 +919,7 @@
}
else
{
- int hiByte = (int)(curChar >> 8);
+ int hiByte = (curChar >> 8);
int i1 = hiByte >> 6;
long l1 = 1L << (hiByte & 077);
int i2 = (curChar & 0xff) >> 6;
Index: src/java/org/apache/lucene/queryParser/Token.java
===================================================================
--- src/java/org/apache/lucene/queryParser/Token.java (revision 916146)
+++ src/java/org/apache/lucene/queryParser/Token.java (working copy)
@@ -90,6 +90,7 @@
/**
* Returns the image.
*/
+ @Override
public String toString()
{
return image;
Index: src/java/org/apache/lucene/queryParser/TokenMgrError.java
===================================================================
--- src/java/org/apache/lucene/queryParser/TokenMgrError.java (revision 916146)
+++ src/java/org/apache/lucene/queryParser/TokenMgrError.java (working copy)
@@ -115,7 +115,8 @@
*
* from this method for such cases in the release version of your parser.
*/
- public String getMessage() {
+ @Override
+ public String getMessage() {
return super.getMessage();
}
Index: src/java/org/apache/lucene/search/DisjunctionMaxScorer.java
===================================================================
--- src/java/org/apache/lucene/search/DisjunctionMaxScorer.java (revision 916146)
+++ src/java/org/apache/lucene/search/DisjunctionMaxScorer.java (working copy)
@@ -84,7 +84,7 @@
return doc;
}
- /** Determine the current document score. Initially invalid, until {@link #next()} is called the first time.
+ /** Determine the current document score. Initially invalid, until {@link #nextDoc()} is called the first time.
* @return the score of the current generated document
*/
@Override
Index: src/java/org/apache/lucene/search/DisjunctionSumScorer.java
===================================================================
--- src/java/org/apache/lucene/search/DisjunctionSumScorer.java (revision 916146)
+++ src/java/org/apache/lucene/search/DisjunctionSumScorer.java (working copy)
@@ -188,7 +188,7 @@
}
/** Returns the score of the current document matching the query.
- * Initially invalid, until {@link #next()} is called the first time.
+ * Initially invalid, until {@link #nextDoc()} is called the first time.
*/
@Override
public float score() throws IOException { return currentScore; }
@@ -199,7 +199,7 @@
}
/** Returns the number of subscorers matching the current document.
- * Initially invalid, until {@link #next()} is called the first time.
+ * Initially invalid, until {@link #nextDoc()} is called the first time.
*/
public int nrMatchers() {
return nrMatchers;
Index: src/java/org/apache/lucene/search/DocIdSetIterator.java
===================================================================
--- src/java/org/apache/lucene/search/DocIdSetIterator.java (revision 916146)
+++ src/java/org/apache/lucene/search/DocIdSetIterator.java (working copy)
@@ -28,8 +28,6 @@
*/
public abstract class DocIdSetIterator {
- private int doc = -1;
-
/**
* When returned by {@link #nextDoc()}, {@link #advance(int)} and
* {@link #docID()} it means there are no more docs in the iterator.
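These javadoc fixes all point at the same iteration contract; a minimal consumption loop, assuming a Scorer named scorer (Scorer extends DocIdSetIterator):

    int doc;
    while ((doc = scorer.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
      float score = scorer.score();  // valid only once nextDoc() has positioned the iterator
      // ... collect (doc, score)
    }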
Index: src/java/org/apache/lucene/search/FieldCache.java
===================================================================
--- src/java/org/apache/lucene/search/FieldCache.java (revision 916146)
+++ src/java/org/apache/lucene/search/FieldCache.java (working copy)
@@ -501,7 +501,7 @@
public static abstract class CacheEntry {
public abstract Object getReaderKey();
public abstract String getFieldName();
- public abstract Class getCacheType();
+ public abstract Class<?> getCacheType();
public abstract Object getCustom();
public abstract Object getValue();
private String size = null;
Index: src/java/org/apache/lucene/search/FieldCacheImpl.java
===================================================================
--- src/java/org/apache/lucene/search/FieldCacheImpl.java (revision 916146)
+++ src/java/org/apache/lucene/search/FieldCacheImpl.java (working copy)
@@ -296,7 +296,7 @@
}
return retArray;
}
- };
+ }
// inherit javadocs
public short[] getShorts (IndexReader reader, String field) throws IOException {
@@ -343,7 +343,7 @@
}
return retArray;
}
- };
+ }
// inherit javadocs
public int[] getInts (IndexReader reader, String field) throws IOException {
@@ -398,7 +398,7 @@
retArray = new int[reader.maxDoc()];
return retArray;
}
- };
+ }
// inherit javadocs
@@ -456,7 +456,7 @@
retArray = new float[reader.maxDoc()];
return retArray;
}
- };
+ }
public long[] getLongs(IndexReader reader, String field) throws IOException {
@@ -510,7 +510,7 @@
retArray = new long[reader.maxDoc()];
return retArray;
}
- };
+ }
// inherit javadocs
public double[] getDoubles(IndexReader reader, String field)
@@ -566,7 +566,7 @@
retArray = new double[reader.maxDoc()];
return retArray;
}
- };
+ }
// inherit javadocs
public String[] getStrings(IndexReader reader, String field)
@@ -602,7 +602,7 @@
}
return retArray;
}
- };
+ }
// inherit javadocs
public StringIndex getStringIndex(IndexReader reader, String field)
@@ -666,7 +666,7 @@
StringIndex value = new StringIndex (retArray, mterms);
return value;
}
- };
+ }
private volatile PrintStream infoStream;
Index: src/java/org/apache/lucene/search/FieldComparator.java
===================================================================
--- src/java/org/apache/lucene/search/FieldComparator.java (revision 916146)
+++ src/java/org/apache/lucene/search/FieldComparator.java (working copy)
@@ -160,7 +160,7 @@
* @param slot the value
* @return value in this slot upgraded to Comparable
*/
- public abstract Comparable value(int slot);
+ public abstract Comparable<?> value(int slot);
/** Parses field's values as byte (using {@link
* FieldCache#getBytes} and sorts by ascending value */
@@ -203,7 +203,7 @@
}
@Override
- public Comparable value(int slot) {
+ public Comparable<?> value(int slot) {
return Byte.valueOf(values[slot]);
}
}
@@ -249,7 +249,7 @@
}
@Override
- public Comparable value(int slot) {
+ public Comparable<?> value(int slot) {
return Integer.valueOf(docIDs[slot]);
}
}
@@ -310,7 +310,7 @@
}
@Override
- public Comparable value(int slot) {
+ public Comparable<?> value(int slot) {
return Double.valueOf(values[slot]);
}
}
@@ -375,7 +375,7 @@
}
@Override
- public Comparable value(int slot) {
+ public Comparable<?> value(int slot) {
return Float.valueOf(values[slot]);
}
}
@@ -444,7 +444,7 @@
}
@Override
- public Comparable value(int slot) {
+ public Comparable<?> value(int slot) {
return Integer.valueOf(values[slot]);
}
}
@@ -509,7 +509,7 @@
}
@Override
- public Comparable value(int slot) {
+ public Comparable<?> value(int slot) {
return Long.valueOf(values[slot]);
}
}
@@ -564,7 +564,7 @@
}
@Override
- public Comparable value(int slot) {
+ public Comparable<?> value(int slot) {
return Float.valueOf(scores[slot]);
}
}
@@ -610,7 +610,7 @@
}
@Override
- public Comparable value(int slot) {
+ public Comparable<?> value(int slot) {
return Short.valueOf(values[slot]);
}
}
@@ -676,7 +676,7 @@
}
@Override
- public Comparable value(int slot) {
+ public Comparable<?> value(int slot) {
return values[slot];
}
}
@@ -825,7 +825,7 @@
}
@Override
- public Comparable value(int slot) {
+ public Comparable<?> value(int slot) {
return values[slot];
}
@@ -904,7 +904,7 @@
}
@Override
- public Comparable value(int slot) {
+ public Comparable<?> value(int slot) {
return values[slot];
}
}
Index: src/java/org/apache/lucene/search/FieldValueHitQueue.java
===================================================================
--- src/java/org/apache/lucene/search/FieldValueHitQueue.java (revision 916146)
+++ src/java/org/apache/lucene/search/FieldValueHitQueue.java (working copy)
@@ -203,7 +203,7 @@
*/
FieldDoc fillFields(final Entry entry) {
final int n = comparators.length;
- final Comparable[] fields = new Comparable[n];
+ final Comparable<?>[] fields = new Comparable[n];
for (int i = 0; i < n; ++i) {
fields[i] = comparators[i].value(entry.slot);
}
Index: src/java/org/apache/lucene/search/FilteredDocIdSetIterator.java
===================================================================
--- src/java/org/apache/lucene/search/FilteredDocIdSetIterator.java (revision 916146)
+++ src/java/org/apache/lucene/search/FilteredDocIdSetIterator.java (working copy)
@@ -45,7 +45,7 @@
* Validation method to determine whether a docid should be in the result set.
* @param doc docid to be tested
* @return true if input docid should be in the result set, false otherwise.
- * @see #FilteredDocIdSetIterator(DocIdSetIterator).
+ * @see #FilteredDocIdSetIterator(DocIdSetIterator)
*/
abstract protected boolean match(int doc) throws IOException;
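match(int) is the single extension point; a hypothetical subclass that keeps only even docIDs, assuming an existing DocIdSetIterator named inner:

    DocIdSetIterator evens = new FilteredDocIdSetIterator(inner) {
      @Override
      protected boolean match(int doc) {
        return (doc & 1) == 0;  // accept even docIDs only
      }
    };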
Index: src/java/org/apache/lucene/search/IndexSearcher.java
===================================================================
--- src/java/org/apache/lucene/search/IndexSearcher.java (revision 916146)
+++ src/java/org/apache/lucene/search/IndexSearcher.java (working copy)
@@ -36,8 +36,8 @@
* or {@link #search(Query,Filter,int)} methods. For performance reasons it is
* recommended to open only one IndexSearcher and use it for all of your searches.
*
- * NOTE : {@link
- * IndexSearcher} instances are completely
+ *
+ * NOTE : {@link
+ * IndexSearcher} instances are completely
* thread safe, meaning multiple threads can call any of its
* methods, concurrently. If your application requires
* external synchronization, you should not
@@ -55,9 +55,9 @@
/** Creates a searcher searching the index in the named
* directory, with readOnly=true
+ * @param path directory where IndexReader will be opened
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
- * @param path directory where IndexReader will be opened
*/
public IndexSearcher(Directory path) throws CorruptIndexException, IOException {
this(IndexReader.open(path, true), true);
@@ -68,11 +68,11 @@
* gives much better concurrent performance, unless you
* intend to do write operations (delete documents or
* change norms) with the underlying IndexReader.
- * @throws CorruptIndexException if the index is corrupt
- * @throws IOException if there is a low-level IO error
* @param path directory where IndexReader will be opened
* @param readOnly if true, the underlying IndexReader
* will be opened readOnly
+ * @throws CorruptIndexException if the index is corrupt
+ * @throws IOException if there is a low-level IO error
*/
public IndexSearcher(Directory path, boolean readOnly) throws CorruptIndexException, IOException {
this(IndexReader.open(path, readOnly), true);
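Per the reordered javadoc, readOnly is the concurrency lever; a minimal sketch, assuming a Directory named dir:

    IndexSearcher searcher = new IndexSearcher(dir, true);  // readOnly=true for concurrent searching
    try {
      // ... searcher.search(query, 10), etc.
    } finally {
      searcher.close();
    }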
Index: src/java/org/apache/lucene/search/ReqExclScorer.java
===================================================================
--- src/java/org/apache/lucene/search/ReqExclScorer.java (revision 916146)
+++ src/java/org/apache/lucene/search/ReqExclScorer.java (working copy)
@@ -95,7 +95,7 @@
}
/** Returns the score of the current document matching the query.
- * Initially invalid, until {@link #next()} is called the first time.
+ * Initially invalid, until {@link #nextDoc()} is called the first time.
* @return The score of the required scorer.
*/
@Override
Index: src/java/org/apache/lucene/search/ReqOptSumScorer.java
===================================================================
--- src/java/org/apache/lucene/search/ReqOptSumScorer.java (revision 916146)
+++ src/java/org/apache/lucene/search/ReqOptSumScorer.java (working copy)
@@ -59,7 +59,7 @@
}
/** Returns the score of the current document matching the query.
- * Initially invalid, until {@link #next()} is called the first time.
+ * Initially invalid, until {@link #nextDoc()} is called the first time.
* @return The score of the required scorer, eventually increased by the score
* of the optional scorer when it also matches the current document.
*/
Index: src/java/org/apache/lucene/search/Similarity.java
===================================================================
--- src/java/org/apache/lucene/search/Similarity.java (revision 916146)
+++ src/java/org/apache/lucene/search/Similarity.java (working copy)
@@ -402,7 +402,7 @@
*
* The sum of squared weights (of the query terms) is
* computed by the query {@link org.apache.lucene.search.Weight} object.
- * For example, a {@link org.apache.lucene.search.BooleanQuery boolean query}
+ * For example, a {@link org.apache.lucene.search.BooleanQuery}
* computes this value as:
*
*
@@ -609,7 +609,7 @@
* @return the calculated float norm
*/
public float computeNorm(String field, FieldInvertState state) {
- return (float) (state.getBoost() * lengthNorm(field, state.getLength()));
+ return (state.getBoost() * lengthNorm(field, state.getLength()));
}
/** Computes the normalization value for a field given the total number of
Index: src/java/org/apache/lucene/search/function/ByteFieldSource.java
===================================================================
--- src/java/org/apache/lucene/search/function/ByteFieldSource.java (revision 916146)
+++ src/java/org/apache/lucene/search/function/ByteFieldSource.java (working copy)
@@ -41,7 +41,7 @@
* best to switch your application to pass only atomic
* (single segment) readers to this API. Alternatively, for
* a short-term fix, you could wrap your ValueSource using
- * {@link MultiValueSource}, which costs more CPU per lookup
+ * MultiValueSource, which costs more CPU per lookup
* but will not consume double the FieldCache RAM.
*/
public class ByteFieldSource extends FieldCacheSource {
@@ -76,7 +76,7 @@
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#floatVal(int) */
@Override
public float floatVal(int doc) {
- return (float) arr[doc];
+ return arr[doc];
}
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#intVal(int) */
@Override
Index: src/java/org/apache/lucene/search/function/CustomScoreProvider.java
===================================================================
--- src/java/org/apache/lucene/search/function/CustomScoreProvider.java (revision 916146)
+++ src/java/org/apache/lucene/search/function/CustomScoreProvider.java (working copy)
@@ -20,7 +20,6 @@
import java.io.IOException;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.ComplexExplanation;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.FieldCache; // for javadocs
Index: src/java/org/apache/lucene/search/function/CustomScoreQuery.java
===================================================================
--- src/java/org/apache/lucene/search/function/CustomScoreQuery.java (revision 916146)
+++ src/java/org/apache/lucene/search/function/CustomScoreQuery.java (working copy)
@@ -294,7 +294,6 @@
private final float qWeight;
private Scorer subQueryScorer;
private Scorer[] valSrcScorers;
- private IndexReader reader;
private final CustomScoreProvider provider;
private float vScores[]; // reused in score() to avoid allocating this array for each doc
@@ -305,7 +304,6 @@
this.qWeight = w.getValue();
this.subQueryScorer = subQueryScorer;
this.valSrcScorers = valSrcScorers;
- this.reader = reader;
this.vScores = new float[valSrcScorers.length];
this.provider = CustomScoreQuery.this.getCustomScoreProvider(reader);
}
Index: src/java/org/apache/lucene/search/function/DocValues.java
===================================================================
--- src/java/org/apache/lucene/search/function/DocValues.java (revision 916146)
+++ src/java/org/apache/lucene/search/function/DocValues.java (working copy)
@@ -70,7 +70,7 @@
* @param doc document whose double value is requested.
*/
public double doubleVal(int doc) {
- return (double) floatVal(doc);
+ return floatVal(doc);
}
/**
Index: src/java/org/apache/lucene/search/function/FloatFieldSource.java
===================================================================
--- src/java/org/apache/lucene/search/function/FloatFieldSource.java (revision 916146)
+++ src/java/org/apache/lucene/search/function/FloatFieldSource.java (working copy)
@@ -41,7 +41,7 @@
* best to switch your application to pass only atomic
* (single segment) readers to this API. Alternatively, for
* a short-term fix, you could wrap your ValueSource using
- * {@link MultiValueSource}, which costs more CPU per lookup
+ * MultiValueSource, which costs more CPU per lookup
* but will not consume double the FieldCache RAM.
*/
public class FloatFieldSource extends FieldCacheSource {
Index: src/java/org/apache/lucene/search/function/IntFieldSource.java
===================================================================
--- src/java/org/apache/lucene/search/function/IntFieldSource.java (revision 916146)
+++ src/java/org/apache/lucene/search/function/IntFieldSource.java (working copy)
@@ -41,7 +41,7 @@
* best to switch your application to pass only atomic
* (single segment) readers to this API. Alternatively, for
* a short-term fix, you could wrap your ValueSource using
- * {@link MultiValueSource}, which costs more CPU per lookup
+ * MultiValueSource, which costs more CPU per lookup
* but will not consume double the FieldCache RAM.
*/
public class IntFieldSource extends FieldCacheSource {
@@ -76,7 +76,7 @@
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#floatVal(int) */
@Override
public float floatVal(int doc) {
- return (float) arr[doc];
+ return arr[doc];
}
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#intVal(int) */
@Override
Index: src/java/org/apache/lucene/search/function/OrdFieldSource.java
===================================================================
--- src/java/org/apache/lucene/search/function/OrdFieldSource.java (revision 916146)
+++ src/java/org/apache/lucene/search/function/OrdFieldSource.java (working copy)
@@ -74,7 +74,7 @@
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#floatVal(int) */
@Override
public float floatVal(int doc) {
- return (float)arr[doc];
+ return arr[doc];
}
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#strVal(int) */
@Override
Index: src/java/org/apache/lucene/search/function/ReverseOrdFieldSource.java
===================================================================
--- src/java/org/apache/lucene/search/function/ReverseOrdFieldSource.java (revision 916146)
+++ src/java/org/apache/lucene/search/function/ReverseOrdFieldSource.java (working copy)
@@ -79,7 +79,7 @@
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#floatVal(int) */
@Override
public float floatVal(int doc) {
- return (float)(end - arr[doc]);
+ return (end - arr[doc]);
}
/* (non-Javadoc) @see org.apache.lucene.search.function.DocValues#intVal(int) */
@Override
Index: src/java/org/apache/lucene/search/function/ShortFieldSource.java
===================================================================
--- src/java/org/apache/lucene/search/function/ShortFieldSource.java (revision 916146)
+++ src/java/org/apache/lucene/search/function/ShortFieldSource.java (working copy)
@@ -41,7 +41,7 @@
* best to switch your application to pass only atomic
* (single segment) readers to this API. Alternatively, for
* a short-term fix, you could wrap your ValueSource using
- * {@link MultiValueSource}, which costs more CPU per lookup
+ * MultiValueSource, which costs more CPU per lookup
* but will not consume double the FieldCache RAM.
*/
public class ShortFieldSource extends FieldCacheSource {
@@ -76,7 +76,7 @@
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#floatVal(int) */
@Override
public float floatVal(int doc) {
- return (float) arr[doc];
+ return arr[doc];
}
/*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#intVal(int) */
@Override
Index: src/java/org/apache/lucene/search/function/ValueSourceQuery.java
===================================================================
--- src/java/org/apache/lucene/search/function/ValueSourceQuery.java (revision 916146)
+++ src/java/org/apache/lucene/search/function/ValueSourceQuery.java (working copy)
@@ -125,7 +125,6 @@
* be used. (assuming field is indexed for this doc, with a single token.)
*/
private class ValueSourceScorer extends Scorer {
- private final ValueSourceWeight weight;
private final float qWeight;
private final DocValues vals;
private final TermDocs termDocs;
@@ -134,8 +133,7 @@
// constructor
private ValueSourceScorer(Similarity similarity, IndexReader reader, ValueSourceWeight w) throws IOException {
super(similarity);
- this.weight = w;
- this.qWeight = w.getValue();
+ qWeight = w.getValue();
// this is when/where the values are first created.
vals = valSrc.getValues(reader);
termDocs = reader.termDocs(null);
Index: src/java/org/apache/lucene/store/IndexOutput.java
===================================================================
--- src/java/org/apache/lucene/store/IndexOutput.java (revision 916146)
+++ src/java/org/apache/lucene/store/IndexOutput.java (working copy)
@@ -119,7 +119,7 @@
throws IOException {
final int end = start + length;
for (int i = start; i < end; i++) {
- final int code = (int)s.charAt(i);
+ final int code = s.charAt(i);
if (code >= 0x01 && code <= 0x7F)
writeByte((byte)code);
else if (((code >= 0x80) && (code <= 0x7FF)) || code == 0) {
@@ -145,7 +145,7 @@
throws IOException {
final int end = start + length;
for (int i = start; i < end; i++) {
- final int code = (int)s[i];
+ final int code = s[i];
if (code >= 0x01 && code <= 0x7F)
writeByte((byte)code);
else if (((code >= 0x80) && (code <= 0x7FF)) || code == 0) {
@@ -210,7 +210,7 @@
* undefined. Otherwise the file is truncated.
* @param length file length
*/
- public void setLength(long length) throws IOException {};
+ public void setLength(long length) throws IOException {}
public void writeStringStringMap(Map map) throws IOException {
if (map == null) {
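The (int) casts dropped in the two hunks above are no-ops, since char widens to int implicitly; the encoding itself is unchanged. A minimal sketch of the per-character byte counts visible in these hunks (the three-byte fallback is assumed from the standard modified-UTF-8 scheme, as that branch lies outside the hunks):

    class ModifiedUtf8Sketch {
      // U+0001..U+007F -> 1 byte; U+0080..U+07FF and U+0000 -> 2 bytes
      // (NUL is encoded in 2 bytes so the stream never contains a raw 0x00).
      static int bytesFor(char c) {
        int code = c; // implicit widening, no cast needed
        if (code >= 0x01 && code <= 0x7F) return 1;
        if ((code >= 0x80 && code <= 0x7FF) || code == 0) return 2;
        return 3;     // assumed: remaining chars take 3 bytes
      }
    }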
Index: src/java/org/apache/lucene/store/LockObtainFailedException.java
===================================================================
--- src/java/org/apache/lucene/store/LockObtainFailedException.java (revision 916146)
+++ src/java/org/apache/lucene/store/LockObtainFailedException.java (working copy)
@@ -24,7 +24,7 @@
* could not be acquired. This
* happens when a writer tries to open an index
* that another writer already has open.
- * @see Lock#obtain(long).
+ * @see Lock#obtain(long)
*/
public class LockObtainFailedException extends IOException {
public LockObtainFailedException(String message) {
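As a hedged usage sketch of when this exception surfaces; the directory and analyzer setup here are illustrative, not part of the patch:

    import java.io.IOException;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.LockObtainFailedException;
    import org.apache.lucene.util.Version;

    class LockSketch {
      static IndexWriter openOrNull(Directory dir) throws IOException {
        try {
          return new IndexWriter(dir, new StandardAnalyzer(Version.LUCENE_30),
                                 IndexWriter.MaxFieldLength.LIMITED);
        } catch (LockObtainFailedException e) {
          return null; // another writer already holds write.lock for this directory
        }
      }
    }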
Index: src/java/org/apache/lucene/store/LockReleaseFailedException.java
===================================================================
--- src/java/org/apache/lucene/store/LockReleaseFailedException.java (revision 916146)
+++ src/java/org/apache/lucene/store/LockReleaseFailedException.java (working copy)
@@ -22,7 +22,7 @@
/**
* This exception is thrown when the write.lock
* could not be released.
- * @see Lock#release().
+ * @see Lock#release()
*/
public class LockReleaseFailedException extends IOException {
public LockReleaseFailedException(String message) {
Index: src/java/org/apache/lucene/store/MMapDirectory.java
===================================================================
--- src/java/org/apache/lucene/store/MMapDirectory.java (revision 916146)
+++ src/java/org/apache/lucene/store/MMapDirectory.java (working copy)
@@ -375,7 +375,7 @@
RuntimeException newException = new RuntimeException(ioe);
newException.initCause(ioe);
throw newException;
- };
+ }
return clone;
}
@@ -404,7 +404,7 @@
File f = new File(getDirectory(), name);
RandomAccessFile raf = new RandomAccessFile(f, "r");
try {
- return (raf.length() <= (long) maxBBuf)
+ return (raf.length() <= maxBBuf)
? (IndexInput) new MMapIndexInput(raf)
: (IndexInput) new MultiMMapIndexInput(raf, maxBBuf);
} finally {
Index: src/java/org/apache/lucene/store/NoLockFactory.java
===================================================================
--- src/java/org/apache/lucene/store/NoLockFactory.java (revision 916146)
+++ src/java/org/apache/lucene/store/NoLockFactory.java (working copy)
@@ -52,8 +52,8 @@
}
@Override
- public void clearLock(String lockName) {};
-};
+ public void clearLock(String lockName) {}
+}
class NoLock extends Lock {
@Override
Index: src/java/org/apache/lucene/store/SimpleFSLockFactory.java
===================================================================
--- src/java/org/apache/lucene/store/SimpleFSLockFactory.java (revision 916146)
+++ src/java/org/apache/lucene/store/SimpleFSLockFactory.java (working copy)
@@ -100,7 +100,7 @@
}
}
}
-};
+}
class SimpleFSLock extends Lock {
Index: src/java/org/apache/lucene/store/SingleInstanceLockFactory.java
===================================================================
--- src/java/org/apache/lucene/store/SingleInstanceLockFactory.java (revision 916146)
+++ src/java/org/apache/lucene/store/SingleInstanceLockFactory.java (working copy)
@@ -51,7 +51,7 @@
}
}
}
-};
+}
class SingleInstanceLock extends Lock {
Index: src/java/org/apache/lucene/util/AverageGuessMemoryModel.java
===================================================================
--- src/java/org/apache/lucene/util/AverageGuessMemoryModel.java (revision 916146)
+++ src/java/org/apache/lucene/util/AverageGuessMemoryModel.java (working copy)
@@ -26,7 +26,7 @@
*/
public class AverageGuessMemoryModel extends MemoryModel {
// best guess primitive sizes
- private final Map sizes = new IdentityHashMap() {
+  private final Map<Class<?>,Integer> sizes = new IdentityHashMap<Class<?>,Integer>() {
{
put(boolean.class, Integer.valueOf(1));
put(byte.class, Integer.valueOf(1));
@@ -63,7 +63,7 @@
* @see org.apache.lucene.util.MemoryModel#getPrimitiveSize(java.lang.Class)
*/
@Override
- public int getPrimitiveSize(Class clazz) {
+  public int getPrimitiveSize(Class<?> clazz) {
return sizes.get(clazz).intValue();
}
Index: src/java/org/apache/lucene/util/IndexableBinaryStringTools.java
===================================================================
--- src/java/org/apache/lucene/util/IndexableBinaryStringTools.java (revision 916146)
+++ src/java/org/apache/lucene/util/IndexableBinaryStringTools.java (working copy)
@@ -104,7 +104,7 @@
public static int getEncodedLength(byte[] inputArray, int inputOffset,
int inputLength) {
// Use long for intermediaries to protect against overflow
- return (int)(((long)inputLength * 8L + 14L) / 15L) + 1;
+ return (int)((inputLength * 8L + 14L) / 15L) + 1;
}
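Dropping the (long) cast on inputLength stays overflow-safe because the long literal 8L forces the multiplication into long arithmetic. A worked instance of the formula:

    class EncodedLengthSketch {
      public static void main(String[] args) {
        // 15 input bytes carry 120 payload bits; the encoding packs 15 bits
        // per char and reserves one trailing char:
        int encodedChars = (int) ((15 * 8L + 14L) / 15L) + 1; // (134 / 15) + 1 == 9
        assert encodedChars == 9;
      }
    }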
Index: src/java/org/apache/lucene/util/MemoryModel.java
===================================================================
--- src/java/org/apache/lucene/util/MemoryModel.java (revision 916146)
+++ src/java/org/apache/lucene/util/MemoryModel.java (working copy)
@@ -38,7 +38,7 @@
* short, double, int
* @return the size in bytes of given primitive Class
*/
- public abstract int getPrimitiveSize(Class clazz);
+  public abstract int getPrimitiveSize(Class<?> clazz);
/**
* @return size of reference
Index: src/java/org/apache/lucene/util/NumericUtils.java
===================================================================
--- src/java/org/apache/lucene/util/NumericUtils.java (revision 916146)
+++ src/java/org/apache/lucene/util/NumericUtils.java (working copy)
@@ -210,10 +210,10 @@
if (ch>0x7f) {
throw new NumberFormatException(
"Invalid prefixCoded numerical value representation (char "+
- Integer.toHexString((int)ch)+" at position "+i+" is invalid)"
+ Integer.toHexString(ch)+" at position "+i+" is invalid)"
);
}
- sortableBits |= (long)ch;
+ sortableBits |= ch;
}
return (sortableBits << shift) ^ 0x8000000000000000L;
}
@@ -237,10 +237,10 @@
if (ch>0x7f) {
throw new NumberFormatException(
"Invalid prefixCoded numerical value representation (char "+
- Integer.toHexString((int)ch)+" at position "+i+" is invalid)"
+ Integer.toHexString(ch)+" at position "+i+" is invalid)"
);
}
- sortableBits |= (int)ch;
+ sortableBits |= ch;
}
return (sortableBits << shift) ^ 0x80000000;
}
@@ -346,7 +346,7 @@
public static void splitIntRange(final IntRangeBuilder builder,
final int precisionStep, final int minBound, final int maxBound
) {
- splitRange(builder, 32, precisionStep, (long)minBound, (long)maxBound);
+ splitRange(builder, 32, precisionStep, minBound, maxBound);
}
/** This helper does the splitting for both 32 and 64 bit. */
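The casts removed in this file all rely on implicit widening: a char operand of |= is promoted to the type of the left-hand side, and splitRange's long parameters widen the int bounds at the call site. A minimal check:

    class PromotionSketch {
      public static void main(String[] args) {
        long sortableBits = 0L;
        char ch = 0x41;
        sortableBits |= ch; // identical to sortableBits |= (long) ch
        assert sortableBits == 0x41L;
      }
    }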
Index: src/java/org/apache/lucene/util/OpenBitSet.java
===================================================================
--- src/java/org/apache/lucene/util/OpenBitSet.java (revision 916146)
+++ src/java/org/apache/lucene/util/OpenBitSet.java (working copy)
@@ -642,7 +642,7 @@
public Object clone() {
try {
OpenBitSet obs = (OpenBitSet)super.clone();
- obs.bits = (long[]) obs.bits.clone(); // hopefully an array clone is as fast(er) than arraycopy
+      obs.bits = obs.bits.clone(); // hopefully an array clone is as fast as (or faster than) arraycopy
return obs;
} catch (CloneNotSupportedException e) {
throw new RuntimeException(e);
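The (long[]) cast removed above is redundant because, since Java 5, T[].clone() is declared to return T[]:

    class ArrayCloneSketch {
      public static void main(String[] args) {
        long[] bits = new long[4];
        long[] copy = bits.clone(); // covariant return type: no cast needed
        assert copy.length == bits.length && copy != bits;
      }
    }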
Index: src/java/org/apache/lucene/util/Parameter.java
===================================================================
--- src/java/org/apache/lucene/util/Parameter.java (revision 916146)
+++ src/java/org/apache/lucene/util/Parameter.java (working copy)
@@ -30,16 +30,11 @@
*/
@Deprecated
@SuppressWarnings("serial")
-public abstract class Parameter implements Serializable
-{
+public abstract class Parameter implements Serializable {
static Map allParameters = new HashMap();
private String name;
- private Parameter() {
- // typesafe enum pattern, no public constructor
- }
-
protected Parameter(String name) {
// typesafe enum pattern, no public constructor
this.name = name;
Index: src/java/org/apache/lucene/util/RamUsageEstimator.java
===================================================================
--- src/java/org/apache/lucene/util/RamUsageEstimator.java (revision 916146)
+++ src/java/org/apache/lucene/util/RamUsageEstimator.java (working copy)
@@ -124,7 +124,7 @@
// add to seen
seen.put(obj, null);
- Class clazz = obj.getClass();
+    Class<?> clazz = obj.getClass();
if (clazz.isArray()) {
return sizeOfArray(obj);
}
@@ -167,7 +167,7 @@
return 0;
}
long size = arraySize;
- Class arrayElementClazz = obj.getClass().getComponentType();
+    Class<?> arrayElementClazz = obj.getClass().getComponentType();
if (arrayElementClazz.isPrimitive()) {
size += len * memoryModel.getPrimitiveSize(arrayElementClazz);
} else {
Index: src/java/org/apache/lucene/util/UnicodeUtil.java
===================================================================
--- src/java/org/apache/lucene/util/UnicodeUtil.java (revision 916146)
+++ src/java/org/apache/lucene/util/UnicodeUtil.java (working copy)
@@ -118,7 +118,7 @@
while(true) {
- final int code = (int) source[i++];
+ final int code = source[i++];
if (upto+4 > out.length) {
out = result.result = ArrayUtil.grow(out, upto+4);
@@ -139,7 +139,7 @@
// surrogate pair
// confirm valid high surrogate
if (code < 0xDC00 && source[i] != 0xffff) {
- int utf32 = (int) source[i];
+ int utf32 = source[i];
// confirm valid low surrogate and write pair
if (utf32 >= 0xDC00 && utf32 <= 0xDFFF) {
utf32 = ((code - 0xD7C0) << 10) + (utf32 & 0x3FF);
@@ -174,7 +174,7 @@
while(i < end) {
- final int code = (int) source[i++];
+ final int code = source[i++];
if (upto+4 > out.length) {
out = result.result = ArrayUtil.grow(out, upto+4);
@@ -192,7 +192,7 @@
// surrogate pair
// confirm valid high surrogate
if (code < 0xDC00 && i < end && source[i] != 0xffff) {
- int utf32 = (int) source[i];
+ int utf32 = source[i];
// confirm valid low surrogate and write pair
if (utf32 >= 0xDC00 && utf32 <= 0xDFFF) {
utf32 = ((code - 0xD7C0) << 10) + (utf32 & 0x3FF);
@@ -225,7 +225,7 @@
int upto = 0;
for(int i=offset;i<end;i++) {
-      final int code = (int) s.charAt(i);
+      final int code = s.charAt(i);
       if (upto+4 > out.length) {
out = result.result = ArrayUtil.grow(out, upto+4);
@@ -243,7 +243,7 @@
// surrogate pair
// confirm valid high surrogate
if (code < 0xDC00 && (i < end-1)) {
- int utf32 = (int) s.charAt(i+1);
+ int utf32 = s.charAt(i+1);
// confirm valid low surrogate and write pair
if (utf32 >= 0xDC00 && utf32 <= 0xDFFF) {
utf32 = ((code - 0xD7C0) << 10) + (utf32 & 0x3FF);
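A worked check of the surrogate arithmetic used throughout this file: the first legal pair (high 0xD800, low 0xDC00) must decode to U+10000, which is exactly what the expression above yields:

    class SurrogateSketch {
      public static void main(String[] args) {
        int code = 0xD800, low = 0xDC00;
        int utf32 = ((code - 0xD7C0) << 10) + (low & 0x3FF);
        // (0xD800 - 0xD7C0) == 0x40; 0x40 << 10 == 0x10000; low 10 bits == 0
        assert utf32 == 0x10000;
      }
    }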
Index: src/test/org/apache/lucene/TestMergeSchedulerExternal.java
===================================================================
--- src/test/org/apache/lucene/TestMergeSchedulerExternal.java (revision 916146)
+++ src/test/org/apache/lucene/TestMergeSchedulerExternal.java (working copy)
@@ -20,7 +20,6 @@
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.store.MockRAMDirectory;
-import org.apache.lucene.store.Directory;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.MergePolicy;
import org.apache.lucene.index.ConcurrentMergeScheduler;
@@ -38,14 +37,6 @@
volatile boolean mergeThreadCreated;
volatile boolean excCalled;
- private class MyMergeException extends RuntimeException {
- Directory dir;
- public MyMergeException(Throwable exc, Directory dir) {
- super(exc);
- this.dir = dir;
- }
- }
-
private class MyMergeScheduler extends ConcurrentMergeScheduler {
private class MyMergeThread extends ConcurrentMergeScheduler.MergeThread {
@@ -99,7 +90,7 @@
MyMergeScheduler ms = new MyMergeScheduler();
writer.setMergeScheduler(ms);
writer.setMaxBufferedDocs(2);
- writer.setRAMBufferSizeMB(writer.DISABLE_AUTO_FLUSH);
+ writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
for(int i=0;i<20;i++)
writer.addDocument(doc);
Index: src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java (revision 916146)
+++ src/test/org/apache/lucene/analysis/TestASCIIFoldingFilter.java (working copy)
@@ -1892,7 +1892,7 @@
ASCIIFoldingFilter filter = new ASCIIFoldingFilter(stream);
TermAttribute termAtt = filter.getAttribute(TermAttribute.class);
Iterator expectedIter = expectedOutputTokens.iterator();
- while (expectedIter.hasNext()) {;
+ while (expectedIter.hasNext()) {
assertTermEquals(expectedIter.next(), filter, termAtt);
}
assertFalse(filter.incrementToken());
Index: src/test/org/apache/lucene/analysis/TestAnalyzers.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestAnalyzers.java (revision 916146)
+++ src/test/org/apache/lucene/analysis/TestAnalyzers.java (working copy)
@@ -107,6 +107,7 @@
// LUCENE-1150: Just a compile time test, to ensure the
// StandardAnalyzer constants remain publicly accessible
+ @SuppressWarnings("unused")
public void _testStandardConstants() {
int x = StandardTokenizer.ALPHANUM;
x = StandardTokenizer.APOSTROPHE;
Index: src/test/org/apache/lucene/analysis/TestCharArrayMap.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestCharArrayMap.java (revision 916146)
+++ src/test/org/apache/lucene/analysis/TestCharArrayMap.java (working copy)
@@ -76,7 +76,8 @@
int n=0;
for (Object o : cs) {
assertTrue(cm.containsKey(o));
- assertTrue(cm.containsKey((char[]) o));
+ char[] co = (char[]) o;
+ assertTrue(cm.containsKey(co, 0, co.length));
n++;
}
assertEquals(hm.size(), n);
Index: src/test/org/apache/lucene/analysis/TestCharArraySet.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestCharArraySet.java (revision 916146)
+++ src/test/org/apache/lucene/analysis/TestCharArraySet.java (working copy)
@@ -341,6 +341,7 @@
setCaseSensitive.addAll(Arrays.asList(TEST_STOP_WORDS));
setCaseSensitive.add(Integer.valueOf(1));
+    // This should use the deprecated methods, because it checks backwards compatibility.
CharArraySet copy = CharArraySet.copy(setIngoreCase);
CharArraySet copyCaseSens = CharArraySet.copy(setCaseSensitive);
@@ -474,8 +475,8 @@
for (String stopword : TEST_STOP_WORDS) {
assertFalse(CharArraySet.EMPTY_SET.contains(stopword));
}
+ assertFalse(CharArraySet.EMPTY_SET.contains("foo"));
assertFalse(CharArraySet.EMPTY_SET.contains((Object) "foo"));
- assertFalse(CharArraySet.EMPTY_SET.contains((Object) "foo".toCharArray()));
assertFalse(CharArraySet.EMPTY_SET.contains("foo".toCharArray(),0,3));
}
Index: src/test/org/apache/lucene/analysis/TestCharTokenizers.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestCharTokenizers.java (revision 916146)
+++ src/test/org/apache/lucene/analysis/TestCharTokenizers.java (working copy)
@@ -172,6 +172,7 @@
return Character.isLetter(c);
}
+ @Deprecated
@Override
protected boolean isTokenChar(char c) {
return Character.isLetter(c);
@@ -183,6 +184,7 @@
super(matchVersion, input);
}
+ @Deprecated
@Override
protected char normalize(char c) {
return c;
@@ -200,6 +202,7 @@
super(matchVersion, input);
}
+ @Deprecated
@Override
protected char normalize(char c) {
return c;
@@ -215,6 +218,7 @@
return Character.isLetter(c);
}
+ @Deprecated
@Override
protected boolean isTokenChar(char c) {
return Character.isLetter(c);
Index: src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java (revision 916146)
+++ src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java (working copy)
@@ -38,7 +38,7 @@
private IndexSearcher searcher;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
directory = new RAMDirectory();
IndexWriter writer = new IndexWriter(directory,
Index: src/test/org/apache/lucene/analysis/TestMappingCharFilter.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestMappingCharFilter.java (revision 916146)
+++ src/test/org/apache/lucene/analysis/TestMappingCharFilter.java (working copy)
@@ -24,7 +24,7 @@
NormalizeCharMap normMap;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
normMap = new NormalizeCharMap();
Index: src/test/org/apache/lucene/analysis/TestToken.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestToken.java (revision 916146)
+++ src/test/org/apache/lucene/analysis/TestToken.java (working copy)
@@ -35,7 +35,6 @@
Token t = new Token();
char[] content = "hello".toCharArray();
t.setTermBuffer(content, 0, content.length);
- char[] buf = t.termBuffer();
assertNotSame(t.termBuffer(), content);
assertEquals("hello", t.term());
assertEquals("word", t.type());
Index: src/test/org/apache/lucene/collation/CollationTestBase.java
===================================================================
--- src/test/org/apache/lucene/collation/CollationTestBase.java (revision 916146)
+++ src/test/org/apache/lucene/collation/CollationTestBase.java (working copy)
@@ -40,10 +40,7 @@
import org.apache.lucene.util.LuceneTestCase;
import java.io.IOException;
-import java.nio.CharBuffer;
-import java.nio.ByteBuffer;
-
public class CollationTestBase extends LuceneTestCase {
protected String firstRangeBeginningOriginal = "\u062F";
@@ -60,13 +57,11 @@
* @return The encoded collation key for the original String
*/
protected String encodeCollationKey(byte[] keyBits) {
- ByteBuffer begBuf = ByteBuffer.wrap(keyBits);
// Ensure that the backing char[] array is large enough to hold the encoded
// Binary String
- char[] encodedBegArray
- = new char[IndexableBinaryStringTools.getEncodedLength(begBuf)];
- CharBuffer encodedBegBuf = CharBuffer.wrap(encodedBegArray);
- IndexableBinaryStringTools.encode(begBuf, encodedBegBuf);
+ int encodedLength = IndexableBinaryStringTools.getEncodedLength(keyBits, 0, keyBits.length);
+ char[] encodedBegArray = new char[encodedLength];
+ IndexableBinaryStringTools.encode(keyBits, 0, keyBits.length, encodedBegArray, 0, encodedLength);
return new String(encodedBegArray);
}
Index: src/test/org/apache/lucene/collation/TestCollationKeyFilter.java
===================================================================
--- src/test/org/apache/lucene/collation/TestCollationKeyFilter.java (revision 916146)
+++ src/test/org/apache/lucene/collation/TestCollationKeyFilter.java (working copy)
@@ -46,16 +46,16 @@
public class TestAnalyzer extends Analyzer {
- private Collator collator;
+ private Collator _collator;
TestAnalyzer(Collator collator) {
- this.collator = collator;
+ _collator = collator;
}
@Override
public TokenStream tokenStream(String fieldName, Reader reader) {
TokenStream result = new KeywordTokenizer(reader);
- result = new CollationKeyFilter(result, collator);
+ result = new CollationKeyFilter(result, _collator);
return result;
}
}
Index: src/test/org/apache/lucene/document/TestBinaryDocument.java
===================================================================
--- src/test/org/apache/lucene/document/TestBinaryDocument.java (revision 916146)
+++ src/test/org/apache/lucene/document/TestBinaryDocument.java (working copy)
@@ -36,7 +36,7 @@
public void testBinaryFieldInIndex()
throws Exception
{
- Fieldable binaryFldStored = new Field("binaryStored", binaryValStored.getBytes(), Field.Store.YES);
+ Fieldable binaryFldStored = new Field("binaryStored", binaryValStored.getBytes());
Fieldable stringFldStored = new Field("stringStored", binaryValStored, Field.Store.YES, Field.Index.NO, Field.TermVector.NO);
try {
@@ -45,7 +45,6 @@
fail();
}
catch (IllegalArgumentException iae) {
- ;
}
Document doc = new Document();
@@ -87,8 +86,8 @@
public void testCompressionTools()
throws Exception
{
- Fieldable binaryFldCompressed = new Field("binaryCompressed", CompressionTools.compress(binaryValCompressed.getBytes()), Field.Store.YES);
- Fieldable stringFldCompressed = new Field("stringCompressed", CompressionTools.compressString(binaryValCompressed), Field.Store.YES);
+ Fieldable binaryFldCompressed = new Field("binaryCompressed", CompressionTools.compress(binaryValCompressed.getBytes()));
+ Fieldable stringFldCompressed = new Field("stringCompressed", CompressionTools.compressString(binaryValCompressed));
Document doc = new Document();
Index: src/test/org/apache/lucene/document/TestDocument.java
===================================================================
--- src/test/org/apache/lucene/document/TestDocument.java (revision 916146)
+++ src/test/org/apache/lucene/document/TestDocument.java (working copy)
@@ -42,8 +42,8 @@
{
Document doc = new Document();
Fieldable stringFld = new Field("string", binaryVal, Field.Store.YES, Field.Index.NO);
- Fieldable binaryFld = new Field("binary", binaryVal.getBytes(), Field.Store.YES);
- Fieldable binaryFld2 = new Field("binary", binaryVal2.getBytes(), Field.Store.YES);
+ Fieldable binaryFld = new Field("binary", binaryVal.getBytes());
+ Fieldable binaryFld2 = new Field("binary", binaryVal2.getBytes());
doc.add(stringFld);
doc.add(binaryFld);
@@ -259,8 +259,7 @@
}
public void testFieldSetValueChangeBinary() {
- Field field1 = new Field("field1", new byte[0],
- Field.Store.YES);
+ Field field1 = new Field("field1", new byte[0]);
Field field2 = new Field("field2", "",
Field.Store.YES, Field.Index.ANALYZED);
try {
Index: src/test/org/apache/lucene/index/DocHelper.java
===================================================================
--- src/test/org/apache/lucene/index/DocHelper.java (revision 916146)
+++ src/test/org/apache/lucene/index/DocHelper.java (working copy)
@@ -154,7 +154,7 @@
LAZY_FIELD_BINARY_BYTES = "These are some binary field bytes".getBytes("UTF8");
} catch (UnsupportedEncodingException e) {
}
- lazyFieldBinary = new Field(LAZY_FIELD_BINARY_KEY, LAZY_FIELD_BINARY_BYTES, Field.Store.YES);
+ lazyFieldBinary = new Field(LAZY_FIELD_BINARY_KEY, LAZY_FIELD_BINARY_BYTES);
fields[fields.length - 2] = lazyFieldBinary;
LARGE_LAZY_FIELD_TEXT = buffer.toString();
largeLazyField = new Field(LARGE_LAZY_FIELD_KEY, LARGE_LAZY_FIELD_TEXT, Field.Store.YES, Field.Index.ANALYZED);
Index: src/test/org/apache/lucene/index/TestAtomicUpdate.java
===================================================================
--- src/test/org/apache/lucene/index/TestAtomicUpdate.java (revision 916146)
+++ src/test/org/apache/lucene/index/TestAtomicUpdate.java (working copy)
@@ -85,8 +85,6 @@
private static class IndexerThread extends TimedThread {
IndexWriter writer;
- public int count;
-
public IndexerThread(IndexWriter writer, TimedThread[] threads) {
super(threads);
this.writer = writer;
Index: src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
===================================================================
--- src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (revision 916146)
+++ src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (working copy)
@@ -78,11 +78,9 @@
first */
public void unzip(String zipName, String destDirName) throws IOException {
- Enumeration entries;
- ZipFile zipFile;
- zipFile = new ZipFile(zipName + ".zip");
+ ZipFile zipFile = new ZipFile(zipName + ".zip");
- entries = zipFile.entries();
+    Enumeration<? extends ZipEntry> entries = zipFile.entries();
String dirName = fullDir(destDirName);
@@ -92,7 +90,7 @@
fileDir.mkdir();
while (entries.hasMoreElements()) {
- ZipEntry entry = (ZipEntry) entries.nextElement();
+ ZipEntry entry = entries.nextElement();
InputStream in = zipFile.getInputStream(entry);
OutputStream out = new BufferedOutputStream(new FileOutputStream(new File(fileDir, entry.getName())));
Index: src/test/org/apache/lucene/index/TestCompoundFile.java
===================================================================
--- src/test/org/apache/lucene/index/TestCompoundFile.java (revision 916146)
+++ src/test/org/apache/lucene/index/TestCompoundFile.java (working copy)
@@ -55,7 +55,7 @@
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
File file = new File(System.getProperty("tempDir"), "testIndex");
_TestUtil.rmDir(file);
@@ -64,7 +64,7 @@
}
@Override
- public void tearDown() throws Exception {
+ protected void tearDown() throws Exception {
dir.close();
super.tearDown();
}
@@ -329,14 +329,14 @@
IndexInput in = fsdir.openInput(file);
// This read primes the buffer in IndexInput
- byte b = in.readByte();
+ in.readByte();
// Close the file
in.close();
// ERROR: this call should fail, but succeeds because the buffer
// is still filled
- b = in.readByte();
+ in.readByte();
// ERROR: this call should fail, but succeeds for some reason as well
in.seek(1099);
@@ -344,7 +344,7 @@
try {
// OK: this call correctly fails. We are now past the 1024 internal
// buffer, so an actual IO is attempted, which fails
- b = in.readByte();
+ in.readByte();
fail("expected readByte() to throw exception");
} catch (IOException e) {
// expected exception
@@ -587,7 +587,7 @@
// Open two files
try {
- IndexInput e1 = cr.openInput("bogus");
+ cr.openInput("bogus");
fail("File not found");
} catch (IOException e) {
@@ -608,7 +608,7 @@
is.readBytes(b, 0, 10);
try {
- byte test = is.readByte();
+ is.readByte();
fail("Single byte read past end of file");
} catch (IOException e) {
/* success */
Index: src/test/org/apache/lucene/index/TestDirectoryReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestDirectoryReader.java (revision 916146)
+++ src/test/org/apache/lucene/index/TestDirectoryReader.java (working copy)
@@ -122,13 +122,6 @@
assertEquals( 1, reader.numDocs() );
}
-
- public void _testTermVectors() {
- MultiReader reader = new MultiReader(readers);
- assertTrue(reader != null);
- }
-
-
public void testIsCurrent() throws IOException {
RAMDirectory ramDir1=new RAMDirectory();
addDoc(ramDir1, "test foo", true);
Index: src/test/org/apache/lucene/index/TestDoc.java
===================================================================
--- src/test/org/apache/lucene/index/TestDoc.java (revision 916146)
+++ src/test/org/apache/lucene/index/TestDoc.java (working copy)
@@ -55,7 +55,7 @@
* a few text files created in the current working directory.
*/
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
workDir = new File(System.getProperty("tempDir"),"TestDoc");
workDir.mkdirs();
Index: src/test/org/apache/lucene/index/TestFieldsReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestFieldsReader.java (revision 916146)
+++ src/test/org/apache/lucene/index/TestFieldsReader.java (working copy)
@@ -60,7 +60,6 @@
assertTrue(dir != null);
assertTrue(fieldInfos != null);
FieldsReader reader = new FieldsReader(dir, TEST_SEGMENT_NAME, fieldInfos);
- assertTrue(reader != null);
assertTrue(reader.size() == 1);
Document doc = reader.doc(0, null);
assertTrue(doc != null);
@@ -98,7 +97,6 @@
assertTrue(dir != null);
assertTrue(fieldInfos != null);
FieldsReader reader = new FieldsReader(dir, TEST_SEGMENT_NAME, fieldInfos);
- assertTrue(reader != null);
assertTrue(reader.size() == 1);
Set loadFieldNames = new HashSet();
loadFieldNames.add(DocHelper.TEXT_FIELD_1_KEY);
@@ -148,7 +146,6 @@
assertTrue(dir != null);
assertTrue(fieldInfos != null);
FieldsReader reader = new FieldsReader(dir, TEST_SEGMENT_NAME, fieldInfos);
- assertTrue(reader != null);
assertTrue(reader.size() == 1);
Set loadFieldNames = new HashSet();
loadFieldNames.add(DocHelper.TEXT_FIELD_1_KEY);
@@ -177,7 +174,6 @@
assertTrue(dir != null);
assertTrue(fieldInfos != null);
FieldsReader reader = new FieldsReader(dir, TEST_SEGMENT_NAME, fieldInfos);
- assertTrue(reader != null);
assertTrue(reader.size() == 1);
LoadFirstFieldSelector fieldSelector = new LoadFirstFieldSelector();
Document doc = reader.doc(0, fieldSelector);
@@ -227,13 +223,13 @@
for (int i = 0; i < length; i++) {
reader = new FieldsReader(tmpDir, TEST_SEGMENT_NAME, fieldInfos);
- assertTrue(reader != null);
assertTrue(reader.size() == 1);
Document doc;
doc = reader.doc(0, null);//Load all of them
assertTrue("doc is null and it shouldn't be", doc != null);
Fieldable field = doc.getFieldable(DocHelper.LARGE_LAZY_FIELD_KEY);
+ assertTrue("field is null and it shouldn't be", field != null);
assertTrue("field is lazy", field.isLazy() == false);
String value;
long start;
@@ -243,7 +239,6 @@
value = field.stringValue();
finish = System.currentTimeMillis();
assertTrue("value is null and it shouldn't be", value != null);
- assertTrue("field is null and it shouldn't be", field != null);
regularTime += (finish - start);
reader.close();
reader = null;
Index: src/test/org/apache/lucene/index/TestIndexReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexReader.java (revision 916146)
+++ src/test/org/apache/lucene/index/TestIndexReader.java (working copy)
@@ -366,7 +366,7 @@
writer.close();
writer = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
- doc.add(new Field("bin1", bin, Field.Store.YES));
+ doc.add(new Field("bin1", bin));
doc.add(new Field("junk", "junk text", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
@@ -511,11 +511,8 @@
// Make sure you can set norms & commit even if a reader
// is open against the index:
- public void testWritingNorms() throws IOException
- {
+ public void testWritingNorms() throws IOException {
String tempDir = "target/test";
- if (tempDir == null)
- throw new IOException("tempDir undefined, cannot run test");
File indexDir = new File(tempDir, "lucenetestnormwriter");
Directory dir = FSDirectory.open(indexDir);
Index: src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java (revision 916146)
+++ src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java (working copy)
@@ -247,11 +247,6 @@
iw.close();
}
- private void modifyNormsForF1(Directory dir) throws IOException {
- IndexReader ir = IndexReader.open(dir, false);
- modifyNormsForF1(ir);
- }
-
private void modifyNormsForF1(IndexReader ir) throws IOException {
int n = ir.maxDoc();
// System.out.println("modifyNormsForF1 maxDoc: "+n);
Index: src/test/org/apache/lucene/index/TestIndexWriter.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriter.java (revision 916146)
+++ src/test/org/apache/lucene/index/TestIndexWriter.java (working copy)
@@ -3493,14 +3493,14 @@
final TermAttribute termAtt = addAttribute(TermAttribute.class);
final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
- final Iterator tokens = Arrays.asList(new String[]{"a","b","c"}).iterator();
+ final Iterator terms = Arrays.asList(new String[]{"a","b","c"}).iterator();
boolean first = true;
@Override
public boolean incrementToken() {
- if (!tokens.hasNext()) return false;
+ if (!terms.hasNext()) return false;
clearAttributes();
- termAtt.setTermBuffer( tokens.next());
+ termAtt.setTermBuffer( terms.next());
posIncrAtt.setPositionIncrement(first ? 0 : 1);
first = false;
return true;
@@ -3784,7 +3784,7 @@
c.joinThreads();
- assertEquals(100+NUM_COPY*(3*NUM_ITER/4)*c.NUM_THREADS*c.NUM_INIT_DOCS, c.writer2.numDocs());
+ assertEquals(100+NUM_COPY*(3*NUM_ITER/4)*RunAddIndexesThreads.NUM_THREADS*RunAddIndexesThreads.NUM_INIT_DOCS, c.writer2.numDocs());
c.close(true);
@@ -3793,7 +3793,7 @@
_TestUtil.checkIndex(c.dir2);
IndexReader reader = IndexReader.open(c.dir2, true);
- assertEquals(100+NUM_COPY*(3*NUM_ITER/4)*c.NUM_THREADS*c.NUM_INIT_DOCS, reader.numDocs());
+ assertEquals(100+NUM_COPY*(3*NUM_ITER/4)*RunAddIndexesThreads.NUM_THREADS*RunAddIndexesThreads.NUM_INIT_DOCS, reader.numDocs());
reader.close();
c.closeDir();
@@ -3969,7 +3969,7 @@
b[i] = (byte) (i+77);
Document doc = new Document();
- Field f = new Field("binary", b, 10, 17, Field.Store.YES);
+ Field f = new Field("binary", b, 10, 17);
byte[] bx = f.getBinaryValue();
assertTrue(bx != null);
assertEquals(50, bx.length);
@@ -4516,7 +4516,7 @@
b[i] = (byte) (i+77);
Document doc = new Document();
- Field f = new Field("binary", b, 10, 17, Field.Store.YES);
+ Field f = new Field("binary", b, 10, 17);
f.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc1field1")));
Field f2 = new Field("string", "value", Field.Store.YES,Field.Index.ANALYZED);
f2.setTokenStream(new WhitespaceTokenizer(TEST_VERSION_CURRENT, new StringReader("doc1field2")));
@@ -4688,7 +4688,7 @@
for(int iter=0;iter<2;iter++) {
Directory dir = new MockRAMDirectory();
- IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
+ IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
Document doc = new Document();
doc.add(new Field("field", "go", Field.Store.NO, Field.Index.ANALYZED));
w.addDocument(doc);
Index: src/test/org/apache/lucene/index/TestIndexWriterLockRelease.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriterLockRelease.java (revision 916146)
+++ src/test/org/apache/lucene/index/TestIndexWriterLockRelease.java (working copy)
@@ -34,7 +34,7 @@
private java.io.File __test_dir;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
if (this.__test_dir == null) {
String tmp_dir = System.getProperty("java.io.tmpdir", "tmp");
@@ -52,8 +52,7 @@
}
@Override
- public void tearDown() throws Exception {
- super.tearDown();
+ protected void tearDown() throws Exception {
if (this.__test_dir != null) {
File[] files = this.__test_dir.listFiles();
@@ -69,16 +68,16 @@
throw new IOException("unable to remove test directory \"" + this.__test_dir.getPath() + "\" (please remove by hand)");
}
}
+ super.tearDown();
}
public void testIndexWriterLockRelease() throws IOException {
- IndexWriter im;
FSDirectory dir = FSDirectory.open(this.__test_dir);
try {
- im = new IndexWriter(dir, new org.apache.lucene.analysis.standard.StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ new IndexWriter(dir, new org.apache.lucene.analysis.standard.StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
} catch (FileNotFoundException e) {
try {
- im = new IndexWriter(dir, new org.apache.lucene.analysis.standard.StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
+ new IndexWriter(dir, new org.apache.lucene.analysis.standard.StandardAnalyzer(TEST_VERSION_CURRENT), false, IndexWriter.MaxFieldLength.LIMITED);
} catch (FileNotFoundException e1) {
}
} finally {
Index: src/test/org/apache/lucene/index/TestIndexWriterReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriterReader.java (revision 916146)
+++ src/test/org/apache/lucene/index/TestIndexWriterReader.java (working copy)
@@ -20,7 +20,6 @@
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.Collections;
-import java.util.LinkedList;
import java.util.List;
import java.util.Random;
@@ -302,70 +301,6 @@
mainDir.close();
}
- private class DeleteThreads {
- final static int NUM_THREADS = 5;
- final Thread[] threads = new Thread[NUM_THREADS];
- IndexWriter mainWriter;
- List deletedTerms = new ArrayList();
- LinkedList toDeleteTerms = new LinkedList();
- Random random;
- final List failures = new ArrayList();
-
- public DeleteThreads(IndexWriter mainWriter) throws IOException {
- this.mainWriter = mainWriter;
- IndexReader reader = mainWriter.getReader();
- int maxDoc = reader.maxDoc();
- random = newRandom();
- int iter = random.nextInt(maxDoc);
- for (int x=0; x < iter; x++) {
- int doc = random.nextInt(iter);
- String id = reader.document(doc).get("id");
- toDeleteTerms.add(new Term("id", id));
- }
- }
-
- Term getDeleteTerm() {
- synchronized (toDeleteTerms) {
- return toDeleteTerms.removeFirst();
- }
- }
-
- void launchThreads(final int numIter) {
- for (int i = 0; i < NUM_THREADS; i++) {
- threads[i] = new Thread() {
- @Override
- public void run() {
- try {
- Term term = getDeleteTerm();
- mainWriter.deleteDocuments(term);
- synchronized (deletedTerms) {
- deletedTerms.add(term);
- }
- } catch (Throwable t) {
- handle(t);
- }
- }
- };
- }
- }
-
- void handle(Throwable t) {
- t.printStackTrace(System.out);
- synchronized (failures) {
- failures.add(t);
- }
- }
-
- void joinThreads() {
- for (int i = 0; i < NUM_THREADS; i++)
- try {
- threads[i].join();
- } catch (InterruptedException ie) {
- throw new ThreadInterruptedException(ie);
- }
- }
- }
-
private class AddDirectoriesThreads {
Directory addDir;
final static int NUM_THREADS = 5;
@@ -558,16 +493,15 @@
return doc;
}
- /**
+ /*
* Delete a document by term and return the doc id
*
- * @return
- *
* public static int deleteDocument(Term term, IndexWriter writer) throws
* IOException { IndexReader reader = writer.getReader(); TermDocs td =
* reader.termDocs(term); int doc = -1; //if (td.next()) { // doc = td.doc();
* //} //writer.deleteDocuments(term); td.close(); return doc; }
*/
+
public static void createIndex(Directory dir1, String indexName,
boolean multiSegment) throws IOException {
IndexWriter w = new IndexWriter(dir1, new WhitespaceAnalyzer(TEST_VERSION_CURRENT),
Index: src/test/org/apache/lucene/index/TestMultiReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestMultiReader.java (revision 916146)
+++ src/test/org/apache/lucene/index/TestMultiReader.java (working copy)
@@ -40,7 +40,6 @@
assertTrue(dir != null);
assertTrue(sis != null);
- assertTrue(reader != null);
return reader;
}
Index: src/test/org/apache/lucene/index/TestNorms.java
===================================================================
--- src/test/org/apache/lucene/index/TestNorms.java (revision 916146)
+++ src/test/org/apache/lucene/index/TestNorms.java (working copy)
@@ -17,7 +17,8 @@
* limitations under the License.
*/
-import org.apache.lucene.util.LuceneTestCase;
+import java.io.IOException;
+import java.util.ArrayList;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
@@ -29,11 +30,8 @@
import org.apache.lucene.search.Similarity;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-
/**
* Test that norms info is preserved during index life - including
* separate norms, addDocument, addIndexesNoOptimize, optimize.
@@ -158,8 +156,8 @@
int n = ir.maxDoc();
for (int i = 0; i < n; i+=3) { // modify for every third doc
int k = (i*3) % modifiedNorms.size();
- float origNorm = ((Float)modifiedNorms.get(i)).floatValue();
- float newNorm = ((Float)modifiedNorms.get(k)).floatValue();
+ float origNorm = modifiedNorms.get(i).floatValue();
+ float newNorm = modifiedNorms.get(k).floatValue();
//System.out.println("Modifying: for "+i+" from "+origNorm+" to "+newNorm);
//System.out.println(" and: for "+k+" from "+newNorm+" to "+origNorm);
modifiedNorms.set(i, Float.valueOf(newNorm));
Index: src/test/org/apache/lucene/index/TestParallelTermEnum.java
===================================================================
--- src/test/org/apache/lucene/index/TestParallelTermEnum.java (revision 916146)
+++ src/test/org/apache/lucene/index/TestParallelTermEnum.java (working copy)
@@ -67,10 +67,9 @@
@Override
protected void tearDown() throws Exception {
- super.tearDown();
-
ir1.close();
ir2.close();
+ super.tearDown();
}
public void test1() throws IOException {
Index: src/test/org/apache/lucene/index/TestPayloads.java
===================================================================
--- src/test/org/apache/lucene/index/TestPayloads.java (revision 916146)
+++ src/test/org/apache/lucene/index/TestPayloads.java (working copy)
@@ -445,11 +445,8 @@
boolean hasNext = input.incrementToken();
if (hasNext) {
if (offset + length <= data.length) {
- Payload p = null;
- if (p == null) {
- p = new Payload();
- payloadAtt.setPayload(p);
- }
+ Payload p = new Payload();
+ payloadAtt.setPayload(p);
p.setData(data, offset, length);
offset += length;
} else {
Index: src/test/org/apache/lucene/index/TestSegmentReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestSegmentReader.java (revision 916146)
+++ src/test/org/apache/lucene/index/TestSegmentReader.java (working copy)
@@ -134,8 +134,8 @@
TermPositions positions = reader.termPositions();
+ assertTrue(positions != null);
positions.seek(new Term(DocHelper.TEXT_FIELD_1_KEY, "field"));
- assertTrue(positions != null);
assertTrue(positions.doc() == 0);
assertTrue(positions.nextPosition() >= 0);
}
Index: src/test/org/apache/lucene/index/TestSegmentTermDocs.java
===================================================================
--- src/test/org/apache/lucene/index/TestSegmentTermDocs.java (revision 916146)
+++ src/test/org/apache/lucene/index/TestSegmentTermDocs.java (working copy)
@@ -57,7 +57,6 @@
assertTrue(reader != null);
assertEquals(indexDivisor, reader.getTermInfosIndexDivisor());
SegmentTermDocs segTermDocs = new SegmentTermDocs(reader);
- assertTrue(segTermDocs != null);
segTermDocs.seek(new Term(DocHelper.TEXT_FIELD_2_KEY, "field"));
if (segTermDocs.next() == true)
{
@@ -79,7 +78,6 @@
SegmentReader reader = SegmentReader.get(true, info, indexDivisor);
assertTrue(reader != null);
SegmentTermDocs segTermDocs = new SegmentTermDocs(reader);
- assertTrue(segTermDocs != null);
segTermDocs.seek(new Term("textField2", "bad"));
assertTrue(segTermDocs.next() == false);
reader.close();
@@ -89,7 +87,6 @@
SegmentReader reader = SegmentReader.get(true, info, indexDivisor);
assertTrue(reader != null);
SegmentTermDocs segTermDocs = new SegmentTermDocs(reader);
- assertTrue(segTermDocs != null);
segTermDocs.seek(new Term("junk", "bad"));
assertTrue(segTermDocs.next() == false);
reader.close();
Index: src/test/org/apache/lucene/index/TestStressIndexing.java
===================================================================
--- src/test/org/apache/lucene/index/TestStressIndexing.java (revision 916146)
+++ src/test/org/apache/lucene/index/TestStressIndexing.java (working copy)
@@ -70,7 +70,6 @@
private class IndexerThread extends TimedThread {
IndexWriter writer;
- public int count;
int nextID;
public IndexerThread(IndexWriter writer, TimedThread[] threads) {
Index: src/test/org/apache/lucene/index/TestStressIndexing2.java
===================================================================
--- src/test/org/apache/lucene/index/TestStressIndexing2.java (revision 916146)
+++ src/test/org/apache/lucene/index/TestStressIndexing2.java (working copy)
@@ -26,7 +26,7 @@
import java.util.*;
import java.io.IOException;
-import junit.framework.TestCase;
+import junit.framework.Assert;
public class TestStressIndexing2 extends LuceneTestCase {
static int maxFields=4;
@@ -644,7 +644,7 @@
}
} catch (Throwable e) {
e.printStackTrace();
- TestCase.fail(e.toString());
+ Assert.fail(e.toString());
}
synchronized (this) {
Index: src/test/org/apache/lucene/index/TestTermVectorsReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestTermVectorsReader.java (revision 916146)
+++ src/test/org/apache/lucene/index/TestTermVectorsReader.java (working copy)
@@ -166,7 +166,6 @@
public void testReader() throws IOException {
TermVectorsReader reader = new TermVectorsReader(dir, seg, fieldInfos);
- assertTrue(reader != null);
for (int j = 0; j < 5; j++) {
TermFreqVector vector = reader.get(j, testFields[0]);
assertTrue(vector != null);
@@ -183,7 +182,6 @@
public void testPositionReader() throws IOException {
TermVectorsReader reader = new TermVectorsReader(dir, seg, fieldInfos);
- assertTrue(reader != null);
TermPositionVector vector;
String[] terms;
vector = (TermPositionVector) reader.get(0, testFields[0]);
@@ -226,7 +224,6 @@
public void testOffsetReader() throws IOException {
TermVectorsReader reader = new TermVectorsReader(dir, seg, fieldInfos);
- assertTrue(reader != null);
TermPositionVector vector = (TermPositionVector) reader.get(0, testFields[0]);
assertTrue(vector != null);
String[] terms = vector.getTerms();
@@ -255,7 +252,6 @@
public void testMapper() throws IOException {
TermVectorsReader reader = new TermVectorsReader(dir, seg, fieldInfos);
- assertTrue(reader != null);
SortedTermVectorMapper mapper = new SortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
reader.get(0, mapper);
SortedSet set = mapper.getTermVectorEntrySet();
@@ -376,7 +372,6 @@
public void testBadParams() {
try {
TermVectorsReader reader = new TermVectorsReader(dir, seg, fieldInfos);
- assertTrue(reader != null);
//Bad document number, good field number
reader.get(50, testFields[0]);
fail();
@@ -385,7 +380,6 @@
}
try {
TermVectorsReader reader = new TermVectorsReader(dir, seg, fieldInfos);
- assertTrue(reader != null);
//Bad document number, no field
reader.get(50);
fail();
@@ -394,7 +388,6 @@
}
try {
TermVectorsReader reader = new TermVectorsReader(dir, seg, fieldInfos);
- assertTrue(reader != null);
//good document number, bad field number
TermFreqVector vector = reader.get(0, "f50");
assertTrue(vector == null);
Index: src/test/org/apache/lucene/queryParser/TestQueryParser.java
===================================================================
--- src/test/org/apache/lucene/queryParser/TestQueryParser.java (revision 916146)
+++ src/test/org/apache/lucene/queryParser/TestQueryParser.java (working copy)
@@ -150,7 +150,7 @@
private int originalMaxClauses;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
originalMaxClauses = BooleanQuery.getMaxClauseCount();
}
@@ -1017,9 +1017,9 @@
}
@Override
- public void tearDown() throws Exception {
+ protected void tearDown() throws Exception {
+ BooleanQuery.setMaxClauseCount(originalMaxClauses);
super.tearDown();
- BooleanQuery.setMaxClauseCount(originalMaxClauses);
}
// LUCENE-2002: make sure defaults for StandardAnalyzer's
Index: src/test/org/apache/lucene/search/CachingWrapperFilterHelper.java
===================================================================
--- src/test/org/apache/lucene/search/CachingWrapperFilterHelper.java (revision 916146)
+++ src/test/org/apache/lucene/search/CachingWrapperFilterHelper.java (working copy)
@@ -20,8 +20,7 @@
import java.io.IOException;
import java.util.WeakHashMap;
-import junit.framework.TestCase;
-
+import junit.framework.Assert;
import org.apache.lucene.index.IndexReader;
/**
@@ -51,9 +50,9 @@
synchronized (cache) { // check cache
DocIdSet cached = cache.get(reader);
if (shouldHaveCache) {
- TestCase.assertNotNull("Cache should have data ", cached);
+ Assert.assertNotNull("Cache should have data ", cached);
} else {
- TestCase.assertNull("Cache should be null " + cached , cached);
+ Assert.assertNull("Cache should be null " + cached , cached);
}
if (cached != null) {
return cached;
@@ -77,7 +76,7 @@
@Override
public boolean equals(Object o) {
if (!(o instanceof CachingWrapperFilterHelper)) return false;
- return this.filter.equals((CachingWrapperFilterHelper)o);
+ return this.filter.equals(o);
}
@Override
Index: src/test/org/apache/lucene/search/CheckHits.java
===================================================================
--- src/test/org/apache/lucene/search/CheckHits.java (revision 916146)
+++ src/test/org/apache/lucene/search/CheckHits.java (working copy)
@@ -76,7 +76,7 @@
* @param searcher the searcher to test the query against
* @param defaultFieldName used for displaying the query in assertion messages
* @param results a list of documentIds that must match the query
- * @see Searcher#search(Query,HitCollector)
+ * @see Searcher#search(Query,Collector)
* @see #checkHits
*/
public static void checkHitCollector(Query query, String defaultFieldName,
@@ -149,7 +149,7 @@
* @param searcher the searcher to test the query against
* @param defaultFieldName used for displaying the query in assertion messages
* @param results a list of documentIds that must match the query
- * @see Searcher#search(Query)
+ * @see Searcher#search(Query, int)
* @see #checkHitCollector
*/
public static void checkHits(
@@ -159,7 +159,7 @@
int[] results)
throws IOException {
- ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
+ ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs;
Set correct = new TreeSet();
for (int i = 0; i < results.length; i++) {
Index: src/test/org/apache/lucene/search/JustCompileSearch.java
===================================================================
--- src/test/org/apache/lucene/search/JustCompileSearch.java (revision 916146)
+++ src/test/org/apache/lucene/search/JustCompileSearch.java (working copy)
@@ -245,7 +245,7 @@
}
@Override
- public Comparable value(int slot) {
+    public Comparable<?> value(int slot) {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
Index: src/test/org/apache/lucene/search/TestBoolean2.java
===================================================================
--- src/test/org/apache/lucene/search/TestBoolean2.java (revision 916146)
+++ src/test/org/apache/lucene/search/TestBoolean2.java (working copy)
@@ -47,7 +47,7 @@
private int mulFactor;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
RAMDirectory directory = new RAMDirectory();
IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
@@ -93,9 +93,10 @@
}
@Override
- public void tearDown() throws Exception {
+ protected void tearDown() throws Exception {
reader.close();
dir2.close();
+ super.tearDown();
}
private String[] docFields = {
Index: src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java
===================================================================
--- src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java (revision 916146)
+++ src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java (working copy)
@@ -18,7 +18,7 @@
*/
-import junit.framework.TestCase;
+import junit.framework.Assert;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
@@ -42,7 +42,7 @@
public IndexSearcher s;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
@@ -338,7 +338,7 @@
// The constrained query
// should be a superset to the unconstrained query.
if (top2.totalHits > top1.totalHits) {
- TestCase.fail("Constrained results not a subset:\n"
+ Assert.fail("Constrained results not a subset:\n"
+ CheckHits.topdocsString(top1,0,0)
+ CheckHits.topdocsString(top2,0,0)
+ "for query:" + q2.toString());
@@ -355,7 +355,7 @@
float otherScore = top1.scoreDocs[other].score;
// check if scores match
if (Math.abs(otherScore-score)>1.0e-6f) {
- TestCase.fail("Doc " + id + " scores don't match\n"
+ Assert.fail("Doc " + id + " scores don't match\n"
+ CheckHits.topdocsString(top1,0,0)
+ CheckHits.topdocsString(top2,0,0)
+ "for query:" + q2.toString());
@@ -364,7 +364,7 @@
}
// check if subset
- if (!found) TestCase.fail("Doc " + id + " not found\n"
+ if (!found) Assert.fail("Doc " + id + " not found\n"
+ CheckHits.topdocsString(top1,0,0)
+ CheckHits.topdocsString(top2,0,0)
+ "for query:" + q2.toString());
Index: src/test/org/apache/lucene/search/TestComplexExplanations.java
===================================================================
--- src/test/org/apache/lucene/search/TestComplexExplanations.java (revision 916146)
+++ src/test/org/apache/lucene/search/TestComplexExplanations.java (working copy)
@@ -32,7 +32,7 @@
* nice with boosts of 0.0
*/
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
searcher.setSimilarity(createQnorm1Similarity());
}
Index: src/test/org/apache/lucene/search/TestCustomSearcherSort.java
===================================================================
--- src/test/org/apache/lucene/search/TestCustomSearcherSort.java (revision 916146)
+++ src/test/org/apache/lucene/search/TestCustomSearcherSort.java (working copy)
@@ -93,7 +93,7 @@
* Create index and query for test cases.
*/
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
index = getIndex();
query = new TermQuery( new Term("content", "test"));
Index: src/test/org/apache/lucene/search/TestDateSort.java
===================================================================
--- src/test/org/apache/lucene/search/TestDateSort.java (revision 916146)
+++ src/test/org/apache/lucene/search/TestDateSort.java (working copy)
@@ -46,7 +46,7 @@
private static Directory directory;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
// Create an index writer.
directory = new RAMDirectory();
Index: src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
===================================================================
--- src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java (revision 916146)
+++ src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java (working copy)
@@ -74,7 +74,7 @@
public IndexSearcher s;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
index = new RAMDirectory();
Index: src/test/org/apache/lucene/search/TestDocIdSet.java
===================================================================
--- src/test/org/apache/lucene/search/TestDocIdSet.java (revision 916146)
+++ src/test/org/apache/lucene/search/TestDocIdSet.java (working copy)
@@ -35,7 +35,6 @@
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
public class TestDocIdSet extends LuceneTestCase {
public void testFilteredDocIdSet() throws Exception {
@@ -95,8 +94,8 @@
int[] answer = new int[]{4,6,8};
boolean same = Arrays.equals(answer, docs);
if (!same) {
- System.out.println("answer: "+_TestUtil.arrayToString(answer));
- System.out.println("gotten: "+_TestUtil.arrayToString(docs));
+ System.out.println("answer: " + Arrays.toString(answer));
+ System.out.println("gotten: " + Arrays.toString(docs));
fail();
}
}
Index: src/test/org/apache/lucene/search/TestElevationComparator.java
===================================================================
--- src/test/org/apache/lucene/search/TestElevationComparator.java (revision 916146)
+++ src/test/org/apache/lucene/search/TestElevationComparator.java (working copy)
@@ -21,6 +21,7 @@
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.*;
+import org.apache.lucene.search.FieldValueHitQueue.Entry;
import org.apache.lucene.store.*;
import org.apache.lucene.util.LuceneTestCase;
import java.io.IOException;
@@ -70,7 +71,7 @@
new SortField(null, SortField.SCORE, reversed)
);
-    TopDocsCollector topCollector = TopFieldCollector.create(sort, 50, false, true, true, true);
+    TopDocsCollector<Entry> topCollector = TopFieldCollector.create(sort, 50, false, true, true, true);
searcher.search(newq, null, topCollector);
TopDocs topDocs = topCollector.topDocs(0, 10);
@@ -171,7 +172,7 @@
}
@Override
- public Comparable value(int slot) {
+ public Comparable<?> value(int slot) {
return Integer.valueOf(values[slot]);
}
};
Index: src/test/org/apache/lucene/search/TestExplanations.java
===================================================================
--- src/test/org/apache/lucene/search/TestExplanations.java (revision 916146)
+++ src/test/org/apache/lucene/search/TestExplanations.java (working copy)
@@ -54,13 +54,13 @@
new QueryParser(TEST_VERSION_CURRENT, FIELD, new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
@Override
- public void tearDown() throws Exception {
+ protected void tearDown() throws Exception {
+ searcher.close();
super.tearDown();
- searcher.close();
}
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
RAMDirectory directory = new RAMDirectory();
IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true,
Index: src/test/org/apache/lucene/search/TestFieldCache.java
===================================================================
--- src/test/org/apache/lucene/search/TestFieldCache.java (revision 916146)
+++ src/test/org/apache/lucene/search/TestFieldCache.java (working copy)
@@ -65,8 +65,8 @@
FieldCache cache = FieldCache.DEFAULT;
ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
cache.setInfoStream(new PrintStream(bos));
- double [] doubles = cache.getDoubles(reader, "theDouble");
- float [] floats = cache.getFloats(reader, "theDouble");
+ cache.getDoubles(reader, "theDouble");
+ cache.getFloats(reader, "theDouble");
assertTrue(bos.toString().indexOf("WARNING") != -1);
} finally {
FieldCache.DEFAULT.purgeAllCaches();
Index: src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java
===================================================================
--- src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java (revision 916146)
+++ src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java (working copy)
@@ -479,7 +479,7 @@
int numDocs = reader.numDocs();
Float minIdO = Float.valueOf(minId + .5f);
- Float medIdO = Float.valueOf(minIdO.floatValue() + ((float) (maxId-minId))/2.0f);
+ Float medIdO = Float.valueOf(minIdO.floatValue() + ((maxId-minId))/2.0f);
ScoreDoc[] result;
Query q = new TermQuery(new Term("body","body"));
@@ -507,7 +507,7 @@
int numDocs = reader.numDocs();
Double minIdO = Double.valueOf(minId + .5);
- Double medIdO = Double.valueOf(minIdO.floatValue() + ((double) (maxId-minId))/2.0);
+ Double medIdO = Double.valueOf(minIdO.floatValue() + ((maxId-minId))/2.0);
ScoreDoc[] result;
Query q = new TermQuery(new Term("body","body"));
Index: src/test/org/apache/lucene/search/TestFilteredQuery.java
===================================================================
--- src/test/org/apache/lucene/search/TestFilteredQuery.java (revision 916146)
+++ src/test/org/apache/lucene/search/TestFilteredQuery.java (working copy)
@@ -45,7 +45,7 @@
private Filter filter;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
directory = new RAMDirectory();
IndexWriter writer = new IndexWriter (directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
@@ -92,8 +92,7 @@
}
@Override
- public void tearDown()
- throws Exception {
+ protected void tearDown() throws Exception {
searcher.close();
directory.close();
super.tearDown();
Index: src/test/org/apache/lucene/search/TestMultiTermConstantScore.java
===================================================================
--- src/test/org/apache/lucene/search/TestMultiTermConstantScore.java (revision 916146)
+++ src/test/org/apache/lucene/search/TestMultiTermConstantScore.java (working copy)
@@ -56,7 +56,7 @@
}
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
String[] data = new String[] { "A 1 2 3 4 5 6", "Z 4 5 6", null,
Index: src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java
===================================================================
--- src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java (revision 916146)
+++ src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java (working copy)
@@ -38,7 +38,7 @@
}
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
IndexWriter writer
= new IndexWriter(directory, new SimpleAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
Index: src/test/org/apache/lucene/search/TestPhraseQuery.java
===================================================================
--- src/test/org/apache/lucene/search/TestPhraseQuery.java (revision 916146)
+++ src/test/org/apache/lucene/search/TestPhraseQuery.java (working copy)
@@ -86,10 +86,10 @@
}
@Override
- public void tearDown() throws Exception {
- super.tearDown();
+ protected void tearDown() throws Exception {
searcher.close();
directory.close();
+ super.tearDown();
}
public void testNotCloseEnough() throws Exception {
@@ -474,7 +474,7 @@
query.add(new Term("palindrome", "three"));
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("just sloppy enough", 1, hits.length);
- float score2 = hits[0].score;
+ //float score2 = hits[0].score;
//System.out.println("palindrome: two three: "+score2);
QueryUtils.check(query,searcher);
@@ -488,7 +488,7 @@
query.add(new Term("palindrome", "two"));
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("just sloppy enough", 1, hits.length);
- float score3 = hits[0].score;
+ //float score3 = hits[0].score;
//System.out.println("palindrome: three two: "+score3);
QueryUtils.check(query,searcher);
@@ -536,7 +536,7 @@
query.add(new Term("palindrome", "three"));
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("just sloppy enough", 1, hits.length);
- float score2 = hits[0].score;
+ //float score2 = hits[0].score;
//System.out.println("palindrome: one two three: "+score2);
QueryUtils.check(query,searcher);
@@ -551,7 +551,7 @@
query.add(new Term("palindrome", "one"));
hits = searcher.search(query, null, 1000).scoreDocs;
assertEquals("just sloppy enough", 1, hits.length);
- float score3 = hits[0].score;
+ //float score3 = hits[0].score;
//System.out.println("palindrome: three two one: "+score3);
QueryUtils.check(query,searcher);
Index: src/test/org/apache/lucene/search/TestQueryTermVector.java
===================================================================
--- src/test/org/apache/lucene/search/TestQueryTermVector.java (revision 916146)
+++ src/test/org/apache/lucene/search/TestQueryTermVector.java (working copy)
@@ -33,7 +33,6 @@
String [] gold = {"again", "bar", "foo", "go"};
int [] goldFreqs = {1, 2, 3, 3};
QueryTermVector result = new QueryTermVector(queryTerm);
- assertTrue(result != null);
String [] terms = result.getTerms();
assertTrue(terms.length == 4);
int [] freq = result.getTermFrequencies();
@@ -43,7 +42,6 @@
assertTrue(result.getTerms().length == 0);
result = new QueryTermVector("foo bar foo again foo bar go go go", new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
- assertTrue(result != null);
terms = result.getTerms();
assertTrue(terms.length == 4);
freq = result.getTermFrequencies();
Index: src/test/org/apache/lucene/search/TestScorerPerf.java
===================================================================
--- src/test/org/apache/lucene/search/TestScorerPerf.java (revision 916146)
+++ src/test/org/apache/lucene/search/TestScorerPerf.java (working copy)
@@ -143,7 +143,7 @@
@Override
public DocIdSet getDocIdSet(IndexReader reader) {
return new DocIdBitSet(rnd);
- };
+ }
});
bq.add(q, BooleanClause.Occur.MUST);
if (validate) {
Index: src/test/org/apache/lucene/search/TestSort.java
===================================================================
--- src/test/org/apache/lucene/search/TestSort.java (revision 916146)
+++ src/test/org/apache/lucene/search/TestSort.java (working copy)
@@ -37,6 +37,7 @@
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.FieldValueHitQueue.Entry;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.DocIdBitSet;
@@ -207,7 +208,7 @@
}
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
full = getFullIndex();
searchX = getXIndex();
@@ -446,7 +447,7 @@
}
@Override
- public Comparable value(int slot) {
+ public Comparable<?> value(int slot) {
return Integer.valueOf(slotValues[slot]);
}
}
@@ -747,7 +748,7 @@
Sort[] sort = new Sort[] { new Sort(SortField.FIELD_DOC), new Sort() };
for (int i = 0; i < sort.length; i++) {
Query q = new MatchAllDocsQuery();
- TopDocsCollector tdc = TopFieldCollector.create(sort[i], 10, false,
+ TopDocsCollector<Entry> tdc = TopFieldCollector.create(sort[i], 10, false,
false, false, true);
full.search(q, tdc);
@@ -766,7 +767,7 @@
Sort[] sort = new Sort[] {new Sort(SortField.FIELD_DOC), new Sort() };
for (int i = 0; i < sort.length; i++) {
Query q = new MatchAllDocsQuery();
- TopDocsCollector tdc = TopFieldCollector.create(sort[i], 10, true, false,
+ TopDocsCollector<Entry> tdc = TopFieldCollector.create(sort[i], 10, true, false,
false, true);
full.search(q, tdc);
@@ -786,7 +787,7 @@
Sort[] sort = new Sort[] {new Sort(SortField.FIELD_DOC), new Sort() };
for (int i = 0; i < sort.length; i++) {
Query q = new MatchAllDocsQuery();
- TopDocsCollector tdc = TopFieldCollector.create(sort[i], 10, true, true,
+ TopDocsCollector<Entry> tdc = TopFieldCollector.create(sort[i], 10, true, true,
false, true);
full.search(q, tdc);
@@ -806,7 +807,7 @@
Sort[] sort = new Sort[] {new Sort(SortField.FIELD_DOC), new Sort() };
for (int i = 0; i < sort.length; i++) {
Query q = new MatchAllDocsQuery();
- TopDocsCollector tdc = TopFieldCollector.create(sort[i], 10, true, true,
+ TopDocsCollector<Entry> tdc = TopFieldCollector.create(sort[i], 10, true, true,
true, true);
full.search(q, tdc);
@@ -854,7 +855,7 @@
bq.setMinimumNumberShouldMatch(1);
for (int i = 0; i < sort.length; i++) {
for (int j = 0; j < tfcOptions.length; j++) {
- TopDocsCollector tdc = TopFieldCollector.create(sort[i], 10,
+ TopDocsCollector<Entry> tdc = TopFieldCollector.create(sort[i], 10,
tfcOptions[j][0], tfcOptions[j][1], tfcOptions[j][2], false);
assertTrue(tdc.getClass().getName().endsWith("$"+actualTFCClasses[j]));
@@ -873,7 +874,7 @@
// Two Sort criteria to instantiate the multi/single comparators.
Sort[] sort = new Sort[] {new Sort(SortField.FIELD_DOC), new Sort() };
for (int i = 0; i < sort.length; i++) {
- TopDocsCollector tdc = TopFieldCollector.create(sort[i], 10, true, true, true, true);
+ TopDocsCollector<Entry> tdc = TopFieldCollector.create(sort[i], 10, true, true, true, true);
TopDocs td = tdc.topDocs();
assertEquals(0, td.totalHits);
assertTrue(Float.isNaN(td.getMaxScore()));
Index: src/test/org/apache/lucene/search/TestTermRangeFilter.java
===================================================================
--- src/test/org/apache/lucene/search/TestTermRangeFilter.java (revision 916146)
+++ src/test/org/apache/lucene/search/TestTermRangeFilter.java (working copy)
@@ -400,8 +400,6 @@
Query q = new TermQuery(new Term("body","body"));
Collator collator = Collator.getInstance(new Locale("da", "dk"));
- Query query = new TermRangeQuery
- ("content", "H\u00D8T", "MAND", false, false, collator);
// Unicode order would not include "H\u00C5T" in [ "H\u00D8T", "MAND" ],
// but Danish collation does.
Index: src/test/org/apache/lucene/search/TestTermRangeQuery.java
===================================================================
--- src/test/org/apache/lucene/search/TestTermRangeQuery.java (revision 916146)
+++ src/test/org/apache/lucene/search/TestTermRangeQuery.java (working copy)
@@ -43,7 +43,7 @@
private RAMDirectory dir;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
dir = new RAMDirectory();
}
Index: src/test/org/apache/lucene/search/TestTermVectors.java
===================================================================
--- src/test/org/apache/lucene/search/TestTermVectors.java (revision 916146)
+++ src/test/org/apache/lucene/search/TestTermVectors.java (working copy)
@@ -39,7 +39,7 @@
}
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
@@ -233,7 +233,6 @@
try {
IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(TEST_VERSION_CURRENT), true,
IndexWriter.MaxFieldLength.LIMITED);
- assertTrue(writer != null);
writer.addDocument(testDoc1);
writer.addDocument(testDoc2);
writer.addDocument(testDoc3);
Index: src/test/org/apache/lucene/search/TestThreadSafe.java
===================================================================
--- src/test/org/apache/lucene/search/TestThreadSafe.java (revision 916146)
+++ src/test/org/apache/lucene/search/TestThreadSafe.java (working copy)
@@ -16,7 +16,7 @@
* limitations under the License.
*/
-import junit.framework.TestCase;
+import junit.framework.Assert;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
@@ -65,7 +65,7 @@
}
} catch (Throwable th) {
failure=th.toString();
- TestCase.fail(failure);
+ Assert.fail(failure);
}
}
@@ -135,7 +135,7 @@
tarr[i].join();
}
if (failure!=null) {
- TestCase.fail(failure);
+ Assert.fail(failure);
}
}
Index: src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
===================================================================
--- src/test/org/apache/lucene/search/TestTimeLimitingCollector.java (revision 916146)
+++ src/test/org/apache/lucene/search/TestTimeLimitingCollector.java (working copy)
@@ -97,7 +97,7 @@
}
@Override
- public void tearDown() throws Exception {
+ protected void tearDown() throws Exception {
searcher.close();
super.tearDown();
}
Index: src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java
===================================================================
--- src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java (revision 916146)
+++ src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java (working copy)
@@ -84,7 +84,6 @@
}
// must have static class otherwise serialization tests fail
- @SuppressWarnings({"SerializableHasSerializationMethods", "serial"})
private static class CustomAddQuery extends CustomScoreQuery {
// constructor
CustomAddQuery(Query q, ValueSourceQuery qValSrc) {
@@ -120,7 +119,6 @@
}
// must have static class otherwise serialization tests fail
- @SuppressWarnings({"SerializableHasSerializationMethods", "serial"})
private static class CustomMulAddQuery extends CustomScoreQuery {
// constructor
CustomMulAddQuery(Query q, ValueSourceQuery qValSrc1, ValueSourceQuery qValSrc2) {
@@ -179,7 +177,7 @@
@Override
public float customScore(int doc, float subScore, float valSrcScore) throws IOException {
assertTrue(doc <= reader.maxDoc());
- return (float) values[doc];
+ return values[doc];
}
};
}
@@ -224,7 +222,6 @@
original = new CustomScoreQuery(q);
rewritten = (CustomScoreQuery) original.rewrite(s.getIndexReader());
assertTrue("rewritten query should not be identical, as TermRangeQuery rewrites", original != rewritten);
- assertTrue("rewritten query should be a CustomScoreQuery", rewritten instanceof CustomScoreQuery);
assertTrue("no hits for query", s.search(rewritten,1).totalHits > 0);
assertEquals(s.search(q,1).totalHits, s.search(original,1).totalHits);
assertEquals(s.search(q,1).totalHits, s.search(rewritten,1).totalHits);
Index: src/test/org/apache/lucene/search/function/TestFieldScoreQuery.java
===================================================================
--- src/test/org/apache/lucene/search/function/TestFieldScoreQuery.java (revision 916146)
+++ src/test/org/apache/lucene/search/function/TestFieldScoreQuery.java (working copy)
@@ -39,7 +39,6 @@
*
* The exact score tests use TopDocs top to verify the exact score.
*/
-@SuppressWarnings({"UseOfSystemOutOrSystemErr"})
public class TestFieldScoreQuery extends FunctionTestSetup {
/* @override constructor */
Index: src/test/org/apache/lucene/search/function/TestOrdValues.java
===================================================================
--- src/test/org/apache/lucene/search/function/TestOrdValues.java (revision 916146)
+++ src/test/org/apache/lucene/search/function/TestOrdValues.java (working copy)
@@ -34,7 +34,6 @@
*
* The exact score tests use TopDocs top to verify the exact score.
*/
-@SuppressWarnings({"UseOfSystemOutOrSystemErr"})
public class TestOrdValues extends FunctionTestSetup {
/* @override constructor */
Index: src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java
===================================================================
--- src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java (revision 916146)
+++ src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java (working copy)
@@ -188,10 +188,10 @@
TopDocs hits;
query = newPhraseQuery("field", "nine hundred ninety nine", true);
hits = searcher.search(query, null, 100);
+ assertTrue("hits is null and it shouldn't be", hits != null);
ScoreDoc doc = hits.scoreDocs[0];
// System.out.println("Doc: " + doc.toString());
// System.out.println("Explain: " + searcher.explain(query, doc.doc));
- assertTrue("hits is null and it shouldn't be", hits != null);
assertTrue("there should only be one hit", hits.totalHits == 1);
// should have score = 3 because adjacent terms have payloads of 2,4
assertTrue(doc.score + " does not equal: " + 3, doc.score == 3);
Index: src/test/org/apache/lucene/search/spans/TestBasics.java
===================================================================
--- src/test/org/apache/lucene/search/spans/TestBasics.java (revision 916146)
+++ src/test/org/apache/lucene/search/spans/TestBasics.java (working copy)
@@ -52,7 +52,7 @@
private IndexSearcher searcher;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
RAMDirectory directory = new RAMDirectory();
IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(TEST_VERSION_CURRENT), true,
Index: src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java
===================================================================
--- src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java (revision 916146)
+++ src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java (working copy)
@@ -50,7 +50,7 @@
protected IndexSearcher searcher;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
RAMDirectory directory = new RAMDirectory();
IndexWriter writer= new IndexWriter(directory,
@@ -116,9 +116,9 @@
}
@Override
- public void tearDown() throws Exception {
+ protected void tearDown() throws Exception {
+ searcher.close();
super.tearDown();
- searcher.close();
}
protected void check(SpanQuery q, int[] docs) throws Exception {
Index: src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
===================================================================
--- src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java (revision 916146)
+++ src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java (working copy)
@@ -39,13 +39,13 @@
new QueryParser(TEST_VERSION_CURRENT, FIELD, new WhitespaceAnalyzer(TEST_VERSION_CURRENT));
@Override
- public void tearDown() throws Exception {
+ protected void tearDown() throws Exception {
+ searcher.close();
super.tearDown();
- searcher.close();
}
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
RAMDirectory directory = new RAMDirectory();
IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
Index: src/test/org/apache/lucene/search/spans/TestSpans.java
===================================================================
--- src/test/org/apache/lucene/search/spans/TestSpans.java (revision 916146)
+++ src/test/org/apache/lucene/search/spans/TestSpans.java (working copy)
@@ -46,7 +46,7 @@
public static final String field = "field";
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
RAMDirectory directory = new RAMDirectory();
IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(TEST_VERSION_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
Index: src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java
===================================================================
--- src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java (revision 916146)
+++ src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java (working copy)
@@ -38,7 +38,7 @@
public class TestSpansAdvanced extends LuceneTestCase {
// location to the index
- protected Directory mDirectory;;
+ protected Directory mDirectory;
protected IndexSearcher searcher;
@@ -52,7 +52,6 @@
@Override
protected void setUp() throws Exception {
super.setUp();
- super.setUp();
// create test index
mDirectory = new RAMDirectory();
@@ -67,10 +66,10 @@
@Override
protected void tearDown() throws Exception {
- super.tearDown();
searcher.close();
mDirectory.close();
mDirectory = null;
+ super.tearDown();
}
/**
Index: src/test/org/apache/lucene/store/TestBufferedIndexInput.java
===================================================================
--- src/test/org/apache/lucene/store/TestBufferedIndexInput.java (revision 916146)
+++ src/test/org/apache/lucene/store/TestBufferedIndexInput.java (working copy)
@@ -307,7 +307,7 @@
//int count = 0;
for (final IndexInput ip : allIndexInputs) {
BufferedIndexInput bii = (BufferedIndexInput) ip;
- int bufferSize = 1024+(int) Math.abs(rand.nextInt() % 32768);
+ int bufferSize = 1024+Math.abs(rand.nextInt() % 32768);
bii.setBufferSize(bufferSize);
//count++;
}
@@ -317,7 +317,7 @@
@Override
public IndexInput openInput(String name, int bufferSize) throws IOException {
// Make random changes to buffer size
- bufferSize = 1+(int) Math.abs(rand.nextInt() % 10);
+ bufferSize = 1+Math.abs(rand.nextInt() % 10);
IndexInput f = dir.openInput(name, bufferSize);
allIndexInputs.add(f);
return f;
Index: src/test/org/apache/lucene/store/TestLockFactory.java
===================================================================
--- src/test/org/apache/lucene/store/TestLockFactory.java (revision 916146)
+++ src/test/org/apache/lucene/store/TestLockFactory.java (working copy)
@@ -30,7 +30,6 @@
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
-import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
@@ -331,27 +330,23 @@
e.printStackTrace(System.out);
break;
}
- if (searcher != null) {
- ScoreDoc[] hits = null;
- try {
- hits = searcher.search(query, null, 1000).scoreDocs;
- } catch (IOException e) {
- hitException = true;
- System.out.println("Stress Test Index Searcher: search hit unexpected exception: " + e.toString());
- e.printStackTrace(System.out);
- break;
- }
- // System.out.println(hits.length() + " total results");
- try {
- searcher.close();
- } catch (IOException e) {
- hitException = true;
- System.out.println("Stress Test Index Searcher: close hit unexpected exception: " + e.toString());
- e.printStackTrace(System.out);
- break;
- }
- searcher = null;
+ try {
+ searcher.search(query, null, 1000);
+ } catch (IOException e) {
+ hitException = true;
+ System.out.println("Stress Test Index Searcher: search hit unexpected exception: " + e.toString());
+ e.printStackTrace(System.out);
+ break;
}
+ // System.out.println(hits.length() + " total results");
+ try {
+ searcher.close();
+ } catch (IOException e) {
+ hitException = true;
+ System.out.println("Stress Test Index Searcher: close hit unexpected exception: " + e.toString());
+ e.printStackTrace(System.out);
+ break;
+ }
}
}
}
Index: src/test/org/apache/lucene/store/TestRAMDirectory.java
===================================================================
--- src/test/org/apache/lucene/store/TestRAMDirectory.java (revision 916146)
+++ src/test/org/apache/lucene/store/TestRAMDirectory.java (working copy)
@@ -46,7 +46,7 @@
// setup the index
@Override
- public void setUp () throws Exception {
+ protected void setUp () throws Exception {
super.setUp();
String tempDir = System.getProperty("java.io.tmpdir");
if (tempDir == null)
@@ -152,12 +152,12 @@
}
@Override
- public void tearDown() throws Exception {
- super.tearDown();
+ protected void tearDown() throws Exception {
// cleanup
if (indexDir != null && indexDir.exists()) {
rmDir (indexDir);
}
+ super.tearDown();
}
// LUCENE-1196
Index: src/test/org/apache/lucene/store/TestWindowsMMap.java
===================================================================
--- src/test/org/apache/lucene/store/TestWindowsMMap.java (revision 916146)
+++ src/test/org/apache/lucene/store/TestWindowsMMap.java (working copy)
@@ -35,7 +35,7 @@
private Random random;
@Override
- public void setUp() throws Exception {
+ protected void setUp() throws Exception {
super.setUp();
random = newRandom();
}
Index: src/test/org/apache/lucene/util/LuceneTestCase.java
===================================================================
--- src/test/org/apache/lucene/util/LuceneTestCase.java (revision 916146)
+++ src/test/org/apache/lucene/util/LuceneTestCase.java (working copy)
@@ -51,10 +51,7 @@
*
* @see #assertSaneFieldCaches
*
- * @deprecated Replaced by {@link #LuceneTestCaseJ4}
- *
*/
-@Deprecated
public abstract class LuceneTestCase extends TestCase {
public static final Version TEST_VERSION_CURRENT = LuceneTestCaseJ4.TEST_VERSION_CURRENT;
@@ -213,7 +210,7 @@
/**
* Convinience method for logging an array. Wraps the array in an iterator and delegates
- * @see dumpIterator(String,Iterator,PrintStream)
+ * @see #dumpIterator(String,Iterator,PrintStream)
*/
public static void dumpArray(String label, Object[] objs,
PrintStream stream) {
Index: src/test/org/apache/lucene/util/LuceneTestCaseJ4.java
===================================================================
--- src/test/org/apache/lucene/util/LuceneTestCaseJ4.java (revision 916146)
+++ src/test/org/apache/lucene/util/LuceneTestCaseJ4.java (working copy)
@@ -55,17 +55,16 @@
* @Before - replaces teardown
* @Test - any public method with this annotation is a test case, regardless
* of its name
- *
- *
+ *
+ *
* See Junit4 documentation for a complete list of features at
* http://junit.org/junit/javadoc/4.7/
- *
+ *
* Import from org.junit rather than junit.framework.
- *
+ *
* You should be able to use this class anywhere you used LuceneTestCase
* if you annotate your derived class correctly with the annotations above
- * @see assertSaneFieldCaches
- *
+ * @see #assertSaneFieldCaches(String)
*/
@@ -225,7 +224,7 @@
* @param iter Each next() is toString()ed and logged on it's own line. If iter is null this is logged differnetly then an empty iterator.
* @param stream Stream to log messages to.
*/
- public static void dumpIterator(String label, Iterator iter,
+ public static void dumpIterator(String label, Iterator<?> iter,
PrintStream stream) {
stream.println("*** BEGIN " + label + " ***");
if (null == iter) {
@@ -241,11 +240,11 @@
/**
* Convinience method for logging an array. Wraps the array in an iterator and delegates
*
- * @see dumpIterator(String,Iterator,PrintStream)
+ * @see #dumpIterator(String,Iterator,PrintStream)
*/
public static void dumpArray(String label, Object[] objs,
PrintStream stream) {
- Iterator iter = (null == objs) ? null : Arrays.asList(objs).iterator();
+ Iterator<?> iter = (null == objs) ? null : Arrays.asList(objs).iterator();
dumpIterator(label, iter, stream);
}
Index: src/test/org/apache/lucene/util/TestBitVector.java
===================================================================
--- src/test/org/apache/lucene/util/TestBitVector.java (revision 916146)
+++ src/test/org/apache/lucene/util/TestBitVector.java (working copy)
@@ -159,7 +159,6 @@
/**
* Test r/w when size/count cause switching between bit-set and d-gaps file formats.
- * @throws Exception
*/
public void testDgaps() throws IOException {
doTestDgaps(1,0,1);
Index: src/test/org/apache/lucene/util/TestCharacterUtils.java
===================================================================
--- src/test/org/apache/lucene/util/TestCharacterUtils.java (revision 916146)
+++ src/test/org/apache/lucene/util/TestCharacterUtils.java (working copy)
@@ -37,9 +37,9 @@
CharacterUtils java4 = CharacterUtils.getInstance(Version.LUCENE_30);
char[] cpAt3 = "Abc\ud801\udc1c".toCharArray();
char[] highSurrogateAt3 = "Abc\ud801".toCharArray();
- assertEquals((int) 'A', java4.codePointAt(cpAt3, 0));
- assertEquals((int) '\ud801', java4.codePointAt(cpAt3, 3));
- assertEquals((int) '\ud801', java4.codePointAt(highSurrogateAt3, 3));
+ assertEquals('A', java4.codePointAt(cpAt3, 0));
+ assertEquals('\ud801', java4.codePointAt(cpAt3, 3));
+ assertEquals('\ud801', java4.codePointAt(highSurrogateAt3, 3));
try {
java4.codePointAt(highSurrogateAt3, 4);
fail("array index out of bounds");
@@ -47,10 +47,10 @@
}
CharacterUtils java5 = CharacterUtils.getInstance(TEST_VERSION_CURRENT);
- assertEquals((int) 'A', java5.codePointAt(cpAt3, 0));
+ assertEquals('A', java5.codePointAt(cpAt3, 0));
assertEquals(Character.toCodePoint('\ud801', '\udc1c'), java5.codePointAt(
cpAt3, 3));
- assertEquals((int) '\ud801', java5.codePointAt(highSurrogateAt3, 3));
+ assertEquals('\ud801', java5.codePointAt(highSurrogateAt3, 3));
try {
java5.codePointAt(highSurrogateAt3, 4);
fail("array index out of bounds");
@@ -63,9 +63,9 @@
CharacterUtils java4 = CharacterUtils.getInstance(Version.LUCENE_30);
String cpAt3 = "Abc\ud801\udc1c";
String highSurrogateAt3 = "Abc\ud801";
- assertEquals((int) 'A', java4.codePointAt(cpAt3, 0));
- assertEquals((int) '\ud801', java4.codePointAt(cpAt3, 3));
- assertEquals((int) '\ud801', java4.codePointAt(highSurrogateAt3, 3));
+ assertEquals('A', java4.codePointAt(cpAt3, 0));
+ assertEquals('\ud801', java4.codePointAt(cpAt3, 3));
+ assertEquals('\ud801', java4.codePointAt(highSurrogateAt3, 3));
try {
java4.codePointAt(highSurrogateAt3, 4);
fail("string index out of bounds");
@@ -73,10 +73,10 @@
}
CharacterUtils java5 = CharacterUtils.getInstance(TEST_VERSION_CURRENT);
- assertEquals((int) 'A', java5.codePointAt(cpAt3, 0));
+ assertEquals('A', java5.codePointAt(cpAt3, 0));
assertEquals(Character.toCodePoint('\ud801', '\udc1c'), java5.codePointAt(
cpAt3, 3));
- assertEquals((int) '\ud801', java5.codePointAt(highSurrogateAt3, 3));
+ assertEquals('\ud801', java5.codePointAt(highSurrogateAt3, 3));
try {
java5.codePointAt(highSurrogateAt3, 4);
fail("string index out of bounds");
@@ -90,15 +90,15 @@
CharacterUtils java4 = CharacterUtils.getInstance(Version.LUCENE_30);
char[] cpAt3 = "Abc\ud801\udc1c".toCharArray();
char[] highSurrogateAt3 = "Abc\ud801".toCharArray();
- assertEquals((int) 'A', java4.codePointAt(cpAt3, 0, 2));
- assertEquals((int) '\ud801', java4.codePointAt(cpAt3, 3, 5));
- assertEquals((int) '\ud801', java4.codePointAt(highSurrogateAt3, 3, 4));
+ assertEquals('A', java4.codePointAt(cpAt3, 0, 2));
+ assertEquals('\ud801', java4.codePointAt(cpAt3, 3, 5));
+ assertEquals('\ud801', java4.codePointAt(highSurrogateAt3, 3, 4));
CharacterUtils java5 = CharacterUtils.getInstance(TEST_VERSION_CURRENT);
- assertEquals((int) 'A', java5.codePointAt(cpAt3, 0, 2));
+ assertEquals('A', java5.codePointAt(cpAt3, 0, 2));
assertEquals(Character.toCodePoint('\ud801', '\udc1c'), java5.codePointAt(
cpAt3, 3, 5));
- assertEquals((int) '\ud801', java5.codePointAt(highSurrogateAt3, 3, 4));
+ assertEquals('\ud801', java5.codePointAt(highSurrogateAt3, 3, 4));
}
Index: src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java
===================================================================
--- src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java (revision 916146)
+++ src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java (working copy)
@@ -78,30 +78,24 @@
}
@Override
- public void tearDown() throws Exception {
- super.tearDown();
+ protected void tearDown() throws Exception {
readerA.close();
readerB.close();
readerX.close();
+ super.tearDown();
}
public void testSanity() throws IOException {
FieldCache cache = FieldCache.DEFAULT;
cache.purgeAllCaches();
- double [] doubles;
- int [] ints;
+ cache.getDoubles(readerA, "theDouble");
+ cache.getDoubles(readerA, "theDouble", FieldCache.DEFAULT_DOUBLE_PARSER);
+ cache.getDoubles(readerB, "theDouble", FieldCache.DEFAULT_DOUBLE_PARSER);
- doubles = cache.getDoubles(readerA, "theDouble");
- doubles = cache.getDoubles(readerA, "theDouble",
- FieldCache.DEFAULT_DOUBLE_PARSER);
- doubles = cache.getDoubles(readerB, "theDouble",
- FieldCache.DEFAULT_DOUBLE_PARSER);
+ cache.getInts(readerX, "theInt");
+ cache.getInts(readerX, "theInt", FieldCache.DEFAULT_INT_PARSER);
- ints = cache.getInts(readerX, "theInt");
- ints = cache.getInts(readerX, "theInt",
- FieldCache.DEFAULT_INT_PARSER);
-
// // //
Insanity[] insanity =
@@ -118,16 +112,10 @@
FieldCache cache = FieldCache.DEFAULT;
cache.purgeAllCaches();
- int [] ints;
- String [] strings;
- byte [] bytes;
+ cache.getInts(readerX, "theInt", FieldCache.DEFAULT_INT_PARSER);
+ cache.getStrings(readerX, "theInt");
+ cache.getBytes(readerX, "theByte");
- ints = cache.getInts(readerX, "theInt", FieldCache.DEFAULT_INT_PARSER);
- strings = cache.getStrings(readerX, "theInt");
-
- // this one is ok
- bytes = cache.getBytes(readerX, "theByte");
-
// // //
Insanity[] insanity =
@@ -148,17 +136,13 @@
FieldCache cache = FieldCache.DEFAULT;
cache.purgeAllCaches();
- String [] strings;
- byte [] bytes;
+ cache.getStrings(readerA, "theString");
+ cache.getStrings(readerB, "theString");
+ cache.getStrings(readerX, "theString");
- strings = cache.getStrings(readerA, "theString");
- strings = cache.getStrings(readerB, "theString");
- strings = cache.getStrings(readerX, "theString");
+ cache.getBytes(readerX, "theByte");
- // this one is ok
- bytes = cache.getBytes(readerX, "theByte");
-
// // //
Insanity[] insanity =
Index: src/test/org/apache/lucene/util/TestIndexableBinaryStringTools.java
===================================================================
--- src/test/org/apache/lucene/util/TestIndexableBinaryStringTools.java (revision 916146)
+++ src/test/org/apache/lucene/util/TestIndexableBinaryStringTools.java (working copy)
@@ -338,7 +338,7 @@
public String binaryDump(byte[] binary, int numBytes) {
StringBuilder buf = new StringBuilder();
for (int byteNum = 0 ; byteNum < numBytes ; ++byteNum) {
- String hex = Integer.toHexString((int)binary[byteNum] & 0xFF);
+ String hex = Integer.toHexString(binary[byteNum] & 0xFF);
if (hex.length() == 1) {
buf.append('0');
}
@@ -359,7 +359,7 @@
public String charArrayDump(char[] charArray, int numBytes) {
StringBuilder buf = new StringBuilder();
for (int charNum = 0 ; charNum < numBytes ; ++charNum) {
- String hex = Integer.toHexString((int)charArray[charNum]);
+ String hex = Integer.toHexString(charArray[charNum]);
for (int digit = 0 ; digit < 4 - hex.length() ; ++digit) {
buf.append('0');
}
Index: src/test/org/apache/lucene/util/TestRamUsageEstimator.java
===================================================================
--- src/test/org/apache/lucene/util/TestRamUsageEstimator.java (revision 916146)
+++ src/test/org/apache/lucene/util/TestRamUsageEstimator.java (working copy)
@@ -22,23 +22,17 @@
public class TestRamUsageEstimator extends TestCase {
public void testBasic() {
- String string = new String("test str");
RamUsageEstimator rue = new RamUsageEstimator();
- long size = rue.estimateRamUsage(string);
- //System.out.println("size:" + size);
+ rue.estimateRamUsage("test str");
- string = new String("test strin");
- size = rue.estimateRamUsage(string);
- //System.out.println("size:" + size);
+ rue.estimateRamUsage("test strin");
Holder holder = new Holder();
holder.holder = new Holder("string2", 5000L);
- size = rue.estimateRamUsage(holder);
- //System.out.println("size:" + size);
+ rue.estimateRamUsage(holder);
String[] strings = new String[]{new String("test strin"), new String("hollow"), new String("catchmaster")};
- size = rue.estimateRamUsage(strings);
- //System.out.println("size:" + size);
+ rue.estimateRamUsage(strings);
}
private static final class Holder {
Index: src/test/org/apache/lucene/util/TestStringIntern.java
===================================================================
--- src/test/org/apache/lucene/util/TestStringIntern.java (revision 916146)
+++ src/test/org/apache/lucene/util/TestStringIntern.java (working copy)
@@ -17,8 +17,7 @@
package org.apache.lucene.util;
-import junit.framework.TestCase;
-
+import junit.framework.Assert;
import java.util.Random;
public class TestStringIntern extends LuceneTestCase {
@@ -48,7 +47,6 @@
int nThreads = 20;
// final int iter=100000;
final int iter=1000000;
- final boolean newStrings=true;
// try native intern
// StringHelper.interner = new StringInterner();
@@ -64,20 +62,20 @@
for (int j=0; j<iter; j++) {