Property changes on: .
___________________________________________________________________
Modified: svn:mergeinfo
Merged /lucene/java/branches/lucene_2_9:r829134
Index: CHANGES.txt
===================================================================
--- CHANGES.txt (revision 829134)
+++ CHANGES.txt (working copy)
@@ -137,6 +137,11 @@
* LUCENE-1183: Optimize Levenshtein Distance computation in
FuzzyQuery. (Cédrik Lime via Mike McCandless)
+ * LUCENE-2002: Add required Version matchVersion argument when
+   constructing QueryParser or MultiFieldQueryParser and, as of 2.9,
+   default enablePositionIncrements to true to match
+   StandardAnalyzer's 2.9 default (Uwe Schindler, Mike McCandless)
+
Documentation
Build
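In practical terms, callers now pass a Version up front. A minimal before/after sketch of the new calling convention (the demo class, field name, and query text are invented for illustration; only the constructor signature comes from this patch):

    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.queryParser.QueryParser;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.util.Version;

    public class Lucene2002Demo {
      public static void main(String[] args) throws Exception {
        // Illustrative sketch, not part of this patch.
        // Before: new QueryParser("body", analyzer)
        // After: matchVersion is required; for LUCENE_29 and later the
        // parser also defaults enablePositionIncrements to true.
        QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "body",
                                         new WhitespaceAnalyzer());
        Query q = qp.parse("hello world");
        System.out.println(q.toString("body"));
      }
    }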
Index: src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java
===================================================================
--- src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java (revision 829134)
+++ src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java (working copy)
@@ -31,6 +31,7 @@
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.search.Query;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.util.Version;
/**
* Test QueryParser's ability to deal with Analyzers that return more
@@ -44,7 +45,7 @@
public void testMultiAnalyzer() throws ParseException {
- QueryParser qp = new QueryParser("", new MultiAnalyzer());
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "", new MultiAnalyzer());
// trivial, no multiple tokens:
assertEquals("foo", qp.parse("foo").toString());
@@ -117,7 +118,7 @@
}
public void testPosIncrementAnalyzer() throws ParseException {
- QueryParser qp = new QueryParser("", new PosIncrementAnalyzer());
+ QueryParser qp = new QueryParser(Version.LUCENE_24, "", new PosIncrementAnalyzer());
assertEquals("quick brown", qp.parse("the quick brown").toString());
assertEquals("\"quick brown\"", qp.parse("\"the quick brown\"").toString());
assertEquals("quick brown fox", qp.parse("the quick brown fox").toString());
@@ -134,7 +135,7 @@
}
public TokenStream tokenStream(String fieldName, Reader reader) {
- TokenStream result = new StandardTokenizer(reader);
+ TokenStream result = new StandardTokenizer(Version.LUCENE_CURRENT, reader);
result = new TestFilter(result);
result = new LowerCaseFilter(result);
return result;
@@ -200,7 +201,7 @@
}
public TokenStream tokenStream(String fieldName, Reader reader) {
- TokenStream result = new StandardTokenizer(reader);
+ TokenStream result = new StandardTokenizer(Version.LUCENE_CURRENT, reader);
result = new TestPosIncrementFilter(result);
result = new LowerCaseFilter(result);
return result;
@@ -238,7 +239,7 @@
private final static class DumbQueryParser extends QueryParser {
public DumbQueryParser(String f, Analyzer a) {
- super(f, a);
+ super(Version.LUCENE_CURRENT, f, a);
}
/** expose super's version */
Index: src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java
===================================================================
--- src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java (revision 829134)
+++ src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java (working copy)
@@ -36,6 +36,7 @@
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
+import org.apache.lucene.util.Version;
/**
* Tests QueryParser.
@@ -59,18 +60,18 @@
String[] fields = {"b", "t"};
Occur occur[] = {Occur.SHOULD, Occur.SHOULD};
TestQueryParser.QPTestAnalyzer a = new TestQueryParser.QPTestAnalyzer();
- MultiFieldQueryParser mfqp = new MultiFieldQueryParser(fields, a);
+ MultiFieldQueryParser mfqp = new MultiFieldQueryParser(Version.LUCENE_CURRENT, fields, a);
Query q = mfqp.parse(qtxt);
assertEquals(expectedRes, q.toString());
- q = MultiFieldQueryParser.parse(qtxt, fields, occur, a);
+ q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, qtxt, fields, occur, a);
assertEquals(expectedRes, q.toString());
}
public void testSimple() throws Exception {
String[] fields = {"b", "t"};
- MultiFieldQueryParser mfqp = new MultiFieldQueryParser(fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ MultiFieldQueryParser mfqp = new MultiFieldQueryParser(Version.LUCENE_CURRENT, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
Query q = mfqp.parse("one");
assertEquals("b:one t:one", q.toString());
@@ -133,7 +134,7 @@
boosts.put("b", Float.valueOf(5));
boosts.put("t", Float.valueOf(10));
String[] fields = {"b", "t"};
- MultiFieldQueryParser mfqp = new MultiFieldQueryParser(fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), boosts);
+ MultiFieldQueryParser mfqp = new MultiFieldQueryParser(Version.LUCENE_CURRENT, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), boosts);
//Check for simple
@@ -159,24 +160,24 @@
public void testStaticMethod1() throws ParseException {
String[] fields = {"b", "t"};
String[] queries = {"one", "two"};
- Query q = MultiFieldQueryParser.parse(queries, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ Query q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
assertEquals("b:one t:two", q.toString());
String[] queries2 = {"+one", "+two"};
- q = MultiFieldQueryParser.parse(queries2, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries2, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
assertEquals("(+b:one) (+t:two)", q.toString());
String[] queries3 = {"one", "+two"};
- q = MultiFieldQueryParser.parse(queries3, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries3, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
assertEquals("b:one (+t:two)", q.toString());
String[] queries4 = {"one +more", "+two"};
- q = MultiFieldQueryParser.parse(queries4, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries4, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
assertEquals("(b:one +b:more) (+t:two)", q.toString());
String[] queries5 = {"blah"};
try {
- q = MultiFieldQueryParser.parse(queries5, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries5, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
fail();
} catch(IllegalArgumentException e) {
// expected exception, array length differs
@@ -186,11 +187,11 @@
TestQueryParser.QPTestAnalyzer stopA = new TestQueryParser.QPTestAnalyzer();
String[] queries6 = {"((+stop))", "+((stop))"};
- q = MultiFieldQueryParser.parse(queries6, fields, stopA);
+ q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries6, fields, stopA);
assertEquals("", q.toString());
String[] queries7 = {"one ((+stop)) +more", "+((stop)) +two"};
- q = MultiFieldQueryParser.parse(queries7, fields, stopA);
+ q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries7, fields, stopA);
assertEquals("(b:one +b:more) (+t:two)", q.toString());
}
@@ -198,15 +199,15 @@
public void testStaticMethod2() throws ParseException {
String[] fields = {"b", "t"};
BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT};
- Query q = MultiFieldQueryParser.parse("one", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ Query q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "one", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
assertEquals("+b:one -t:one", q.toString());
- q = MultiFieldQueryParser.parse("one two", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "one two", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
try {
BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST};
- q = MultiFieldQueryParser.parse("blah", fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "blah", fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
fail();
} catch(IllegalArgumentException e) {
// expected exception, array length differs
@@ -217,17 +218,17 @@
String[] fields = {"b", "t"};
//int[] flags = {MultiFieldQueryParser.REQUIRED_FIELD, MultiFieldQueryParser.PROHIBITED_FIELD};
BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT};
- MultiFieldQueryParser parser = new MultiFieldQueryParser(fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ MultiFieldQueryParser parser = new MultiFieldQueryParser(Version.LUCENE_CURRENT, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
- Query q = MultiFieldQueryParser.parse("one", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));//, fields, flags, new StandardAnalyzer());
+ Query q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "one", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));//, fields, flags, new StandardAnalyzer());
assertEquals("+b:one -t:one", q.toString());
- q = MultiFieldQueryParser.parse("one two", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "one two", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
try {
BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST};
- q = MultiFieldQueryParser.parse("blah", fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "blah", fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
fail();
} catch(IllegalArgumentException e) {
// expected exception, array length differs
@@ -239,12 +240,12 @@
String[] fields = {"f1", "f2", "f3"};
BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST,
BooleanClause.Occur.MUST_NOT, BooleanClause.Occur.SHOULD};
- Query q = MultiFieldQueryParser.parse(queries, fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ Query q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries, fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
assertEquals("+f1:one -f2:two f3:three", q.toString());
try {
BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST};
- q = MultiFieldQueryParser.parse(queries, fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries, fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
fail();
} catch(IllegalArgumentException e) {
// expected exception, array length differs
@@ -255,12 +256,12 @@
String[] queries = {"one", "two"};
String[] fields = {"b", "t"};
BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT};
- Query q = MultiFieldQueryParser.parse(queries, fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ Query q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries, fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
assertEquals("+b:one -t:two", q.toString());
try {
BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST};
- q = MultiFieldQueryParser.parse(queries, fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries, fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
fail();
} catch(IllegalArgumentException e) {
// expected exception, array length differs
@@ -269,7 +270,7 @@
public void testAnalyzerReturningNull() throws ParseException {
String[] fields = new String[] { "f1", "f2", "f3" };
- MultiFieldQueryParser parser = new MultiFieldQueryParser(fields, new AnalyzerReturningNull());
+ MultiFieldQueryParser parser = new MultiFieldQueryParser(Version.LUCENE_CURRENT, fields, new AnalyzerReturningNull());
Query q = parser.parse("bla AND blo");
assertEquals("+(f2:bla f3:bla) +(f2:blo f3:blo)", q.toString());
// the following queries are not affected as their terms are not analyzed anyway:
@@ -291,7 +292,7 @@
iw.close();
MultiFieldQueryParser mfqp =
- new MultiFieldQueryParser(new String[] {"body"}, analyzer);
+ new MultiFieldQueryParser(Version.LUCENE_CURRENT, new String[] {"body"}, analyzer);
mfqp.setDefaultOperator(QueryParser.Operator.AND);
Query q = mfqp.parse("the footest");
IndexSearcher is = new IndexSearcher(ramDir, true);
Index: src/test/org/apache/lucene/queryParser/TestQueryParser.java
===================================================================
--- src/test/org/apache/lucene/queryParser/TestQueryParser.java (revision 829134)
+++ src/test/org/apache/lucene/queryParser/TestQueryParser.java (working copy)
@@ -47,6 +47,7 @@
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.FuzzyQuery;
@@ -60,7 +61,10 @@
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.util.LocalizedTestCase;
+import org.apache.lucene.util.Version;
/**
* Tests QueryParser.
@@ -127,7 +131,7 @@
public static class QPTestParser extends QueryParser {
public QPTestParser(String f, Analyzer a) {
- super(f, a);
+ super(Version.LUCENE_CURRENT, f, a);
}
protected Query getFuzzyQuery(String field, String termStr, float minSimilarity) throws ParseException {
@@ -149,7 +153,7 @@
public QueryParser getParser(Analyzer a) throws Exception {
if (a == null)
a = new SimpleAnalyzer();
- QueryParser qp = new QueryParser("field", a);
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", a);
qp.setDefaultOperator(QueryParser.OR_OPERATOR);
return qp;
}
@@ -219,7 +223,7 @@
throws Exception {
if (a == null)
a = new SimpleAnalyzer();
- QueryParser qp = new QueryParser("field", a);
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", a);
qp.setDefaultOperator(QueryParser.AND_OPERATOR);
return qp.parse(query);
}
@@ -291,7 +295,7 @@
assertQueryEquals("+title:(dog OR cat) -author:\"bob dole\"", null,
"+(title:dog title:cat) -author:\"bob dole\"");
- QueryParser qp = new QueryParser("field", new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
// make sure OR is the default:
assertEquals(QueryParser.OR_OPERATOR, qp.getDefaultOperator());
qp.setDefaultOperator(QueryParser.AND_OPERATOR);
@@ -446,7 +450,7 @@
assertQueryEquals("[ a TO z]", null, "[a TO z]");
assertEquals(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT, ((TermRangeQuery)getQuery("[ a TO z]", null)).getRewriteMethod());
- QueryParser qp = new QueryParser("field", new SimpleAnalyzer());
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new SimpleAnalyzer());
qp.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
assertEquals(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE,((TermRangeQuery)qp.parse("[ a TO z]")).getRewriteMethod());
@@ -472,7 +476,7 @@
iw.close();
IndexSearcher is = new IndexSearcher(ramDir, true);
- QueryParser qp = new QueryParser("content", new WhitespaceAnalyzer());
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "content", new WhitespaceAnalyzer());
// Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
// RuleBasedCollator. However, the Arabic Locale seems to order the Farsi
@@ -569,7 +573,7 @@
final String defaultField = "default";
final String monthField = "month";
final String hourField = "hour";
- QueryParser qp = new QueryParser("field", new SimpleAnalyzer());
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new SimpleAnalyzer());
// Don't set any date resolution and verify if DateField is used
assertDateRangeQueryEquals(qp, defaultField, startDate, endDate,
@@ -792,7 +796,7 @@
Set stopWords = new HashSet(1);
stopWords.add("on");
StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT, stopWords);
- QueryParser qp = new QueryParser("field", oneStopAnalyzer);
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", oneStopAnalyzer);
Query q = qp.parse("on^1.0");
assertNotNull(q);
q = qp.parse("\"hello\"^2.0");
@@ -804,7 +808,7 @@
q = qp.parse("\"on\"^1.0");
assertNotNull(q);
- QueryParser qp2 = new QueryParser("field", new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ QueryParser qp2 = new QueryParser(Version.LUCENE_CURRENT, "field", new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
q = qp2.parse("the^3");
// "the" is a stop word so the result is an empty query:
assertNotNull(q);
@@ -852,7 +856,7 @@
public void testBooleanQuery() throws Exception {
BooleanQuery.setMaxClauseCount(2);
try {
- QueryParser qp = new QueryParser("field", new WhitespaceAnalyzer());
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new WhitespaceAnalyzer());
qp.parse("one two three");
fail("ParseException expected due to too many boolean clauses");
} catch (ParseException expected) {
@@ -864,7 +868,7 @@
* This test differs from TestPrecedenceQueryParser
*/
public void testPrecedence() throws Exception {
- QueryParser qp = new QueryParser("field", new WhitespaceAnalyzer());
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new WhitespaceAnalyzer());
Query query1 = qp.parse("A AND B OR C AND D");
Query query2 = qp.parse("+A +B +C +D");
assertEquals(query1, query2);
@@ -888,7 +892,7 @@
public void testStarParsing() throws Exception {
final int[] type = new int[1];
- QueryParser qp = new QueryParser("field", new WhitespaceAnalyzer()) {
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new WhitespaceAnalyzer()) {
protected Query getWildcardQuery(String field, String termStr) throws ParseException {
// override error checking of superclass
type[0]=1;
@@ -944,7 +948,7 @@
}
public void testStopwords() throws Exception {
- QueryParser qp = new QueryParser("a", new StopAnalyzer(StopFilter.makeStopSet("the", "foo"), true));
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "a", new StopAnalyzer(Version.LUCENE_CURRENT, StopFilter.makeStopSet("the", "foo")));
Query result = qp.parse("a:the OR a:foo");
assertNotNull("result is null and it shouldn't be", result);
assertTrue("result is not a BooleanQuery", result instanceof BooleanQuery);
@@ -960,7 +964,7 @@
}
public void testPositionIncrement() throws Exception {
- QueryParser qp = new QueryParser("a", new StopAnalyzer(StopFilter.makeStopSet("the", "in", "are", "this"), true));
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "a", new StopAnalyzer(Version.LUCENE_CURRENT, StopFilter.makeStopSet("the", "in", "are", "this")));
qp.setEnablePositionIncrements(true);
String qtxt = "\"the words in poisitions pos02578 are stopped in this phrasequery\"";
// 0 2 5 7 8
@@ -977,7 +981,7 @@
}
public void testMatchAllDocs() throws Exception {
- QueryParser qp = new QueryParser("field", new WhitespaceAnalyzer());
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new WhitespaceAnalyzer());
assertEquals(new MatchAllDocsQuery(), qp.parse("*:*"));
assertEquals(new MatchAllDocsQuery(), qp.parse("(*:*)"));
BooleanQuery bq = (BooleanQuery)qp.parse("+*:* -*:*");
@@ -986,7 +990,7 @@
}
private void assertHits(int expected, String query, IndexSearcher is) throws ParseException, IOException {
- QueryParser qp = new QueryParser("date", new WhitespaceAnalyzer());
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "date", new WhitespaceAnalyzer());
qp.setLocale(Locale.ENGLISH);
Query q = qp.parse(query);
ScoreDoc[] hits = is.search(q, null, 1000).scoreDocs;
@@ -1008,4 +1012,49 @@
BooleanQuery.setMaxClauseCount(originalMaxClauses);
}
+ // LUCENE-2002: make sure defaults for StandardAnalyzer's
+ // enableStopPositionIncr & QueryParser's enablePosIncr
+ // "match"
+ public void testPositionIncrements() throws Exception {
+ Directory dir = new MockRAMDirectory();
+ Analyzer a = new StandardAnalyzer(Version.LUCENE_CURRENT);
+ IndexWriter w = new IndexWriter(dir, a, IndexWriter.MaxFieldLength.UNLIMITED);
+ Document doc = new Document();
+ doc.add(new Field("f", "the wizard of ozzy", Field.Store.NO, Field.Index.ANALYZED));
+ w.addDocument(doc);
+ IndexReader r = w.getReader();
+ w.close();
+ IndexSearcher s = new IndexSearcher(r);
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "f", a);
+ Query q = qp.parse("\"wizard of ozzy\"");
+ assertEquals(1, s.search(q, 1).totalHits);
+ r.close();
+ dir.close();
+ }
+
+ // LUCENE-2002: when we run javacc to regen QueryParser,
+ // we also run a replaceregexp step to fix 2 of the public
+ // ctors (change them to protected):
+ //
+ // protected QueryParser(CharStream stream)
+ //
+ // protected QueryParser(QueryParserTokenManager tm)
+ //
+ // This test is here as a safety, in case that ant step
+ // doesn't work for some reason.
+ public void testProtectedCtors() throws Exception {
+ try {
+ QueryParser.class.getConstructor(new Class[] {CharStream.class});
+ fail("please switch public QueryParser(CharStream) to be protected");
+ } catch (NoSuchMethodException nsme) {
+ // expected
+ }
+ try {
+ QueryParser.class.getConstructor(new Class[] {QueryParserTokenManager.class});
+ fail("please switch public QueryParser(QueryParserTokenManager) to be protected");
+ } catch (NoSuchMethodException nsme) {
+ // expected
+ }
+ }
+
}
Index: src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java (revision 829134)
+++ src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java (working copy)
@@ -5,6 +5,7 @@
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
+import org.apache.lucene.util.Version;
import java.io.StringReader;
@@ -108,15 +109,22 @@
}
public void testDomainNames() throws Exception {
- // Don't reuse a because we alter its state (setReplaceInvalidAcronym)
+ // Current lucene should not show the bug
StandardAnalyzer a2 = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT);
+
// domain names
assertAnalyzesTo(a2, "www.nutch.org", new String[]{"www.nutch.org"});
//Notice the trailing . See https://issues.apache.org/jira/browse/LUCENE-1068.
// the following should be recognized as HOST:
assertAnalyzesTo(a2, "www.nutch.org.", new String[]{ "www.nutch.org" }, new String[] { " In other words, all the query's terms must appear, but it doesn't matter in
* what fields they appear. In other words, all the query's terms must appear, but it doesn't matter in
* what fields they appear. NOTE: there is a new QueryParser in contrib, which matches
* the same syntax as this class, but is more modular,
* enabling substantial customization to how a query is created.
+ *
+ *
+ * NOTE: You must specify the required {@link Version}
+ * compatibility when creating QueryParser:
+ * NOTE: there is a new QueryParser in contrib, which matches
* the same syntax as this class, but is more modular,
* enabling substantial customization to how a query is created.
+ *
+ *
+ * NOTE: You must specify the required {@link Version}
+ * compatibility when creating QueryParser:
+ * Many applications have specific tokenizer needs. If this tokenizer does
* not suit your application, please consider copying this source code
* directory to your project and maintaining your own grammar-based tokenizer.
+ *
+ *
+ * You must specify the required {@link Version}
+ * compatibility when creating StandardAnalyzer:
+ * You must specify the required {@link Version}
+ * compatibility when creating StopAnalyzer:
+ * NOTE: This class uses the same {@link Version}
+ * dependent settings as {@link StandardAnalyzer}.
@@ -103,11 +104,13 @@
}
}
+ private final Version matchVersion;
+
/**
* Create a new SmartChineseAnalyzer, using the default stopword list.
*/
- public SmartChineseAnalyzer() {
- this(true);
+ public SmartChineseAnalyzer(Version matchVersion) {
+ this(matchVersion, true);
}
/**
@@ -121,9 +124,10 @@
*
* @param useDefaultStopWords true to use the default stopword list.
*/
- public SmartChineseAnalyzer(boolean useDefaultStopWords) {
+ public SmartChineseAnalyzer(Version matchVersion, boolean useDefaultStopWords) {
stopWords = useDefaultStopWords ? DefaultSetHolder.DEFAULT_STOP_SET
- : Collections.EMPTY_SET;
+ : Collections.EMPTY_SET;
+ this.matchVersion = matchVersion;
}
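A short usage sketch of the new smartcn constructor surface (the demo class is invented; the two constructors are the ones shown in the hunk above):

    import org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer;
    import org.apache.lucene.util.Version;

    public class SmartcnCtorDemo {
      public static void main(String[] args) {
        // Illustrative sketch, not part of this patch.
        // Default stopword list, Version-dependent StopFilter behavior:
        SmartChineseAnalyzer withStops = new SmartChineseAnalyzer(Version.LUCENE_CURRENT);
        // No stopwords at all:
        SmartChineseAnalyzer noStops = new SmartChineseAnalyzer(Version.LUCENE_CURRENT, false);
      }
    }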
Index: src/java/org/apache/lucene/queryParser/QueryParser.java
===================================================================
--- src/java/org/apache/lucene/queryParser/QueryParser.java (revision 829134)
+++ src/java/org/apache/lucene/queryParser/QueryParser.java (working copy)
* <p>NOTE: there is a new QueryParser in contrib, which matches
* the same syntax as this class, but is more modular,
* enabling substantial customization to how a query is created.
+ *
+ * <a name="version"/>
+ * <p><b>NOTE</b>: You must specify the required {@link Version}
+ * compatibility when creating QueryParser:
*/
public class QueryParser implements QueryParserConstants {
@@ -123,7 +132,7 @@
boolean lowercaseExpandedTerms = true;
MultiTermQuery.RewriteMethod multiTermRewriteMethod = MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT;
boolean allowLeadingWildcard = false;
- boolean enablePositionIncrements = false;
+ boolean enablePositionIncrements = true;
Analyzer analyzer;
String field;
@@ -147,13 +156,19 @@
static public enum Operator { OR, AND }
/** Constructs a query parser.
+ * @param matchVersion Lucene version to match. See <a href="#version">above</a>.
* @param f the default field for query terms.
* @param a used to find terms in the query text.
*/
- public QueryParser(String f, Analyzer a) {
+ public QueryParser(Version matchVersion, String f, Analyzer a) {
this(new FastCharStream(new StringReader("")));
analyzer = a;
field = f;
+ if (matchVersion.onOrAfter(Version.LUCENE_29)) {
+ enablePositionIncrements = true;
+ } else {
+ enablePositionIncrements = false;
+ }
}
/** Parses a query string, returning a {@link org.apache.lucene.search.Query}.
@@ -1077,7 +1092,7 @@
System.out.println("Usage: java org.apache.lucene.queryParser.QueryParser ");
System.exit(0);
}
- QueryParser qp = new QueryParser("field",
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field",
new org.apache.lucene.analysis.SimpleAnalyzer());
Query q = qp.parse(args[0]);
System.out.println(q.toString("field"));
@@ -1513,12 +1528,6 @@
finally { jj_save(0, xla); }
}
- private boolean jj_3R_3() {
- if (jj_scan_token(STAR)) return true;
- if (jj_scan_token(COLON)) return true;
- return false;
- }
-
private boolean jj_3R_2() {
if (jj_scan_token(TERM)) return true;
if (jj_scan_token(COLON)) return true;
@@ -1535,6 +1544,12 @@
return false;
}
+ private boolean jj_3R_3() {
+ if (jj_scan_token(STAR)) return true;
+ if (jj_scan_token(COLON)) return true;
+ return false;
+ }
+
/** Generated Token Manager. */
public QueryParserTokenManager token_source;
/** Current token. */
@@ -1563,7 +1578,7 @@
private int jj_gc = 0;
/** Constructor with user supplied CharStream. */
- public QueryParser(CharStream stream) {
+ protected QueryParser(CharStream stream) {
token_source = new QueryParserTokenManager(stream);
token = new Token();
jj_ntk = -1;
@@ -1583,7 +1598,7 @@
}
/** Constructor with generated Token Manager. */
- public QueryParser(QueryParserTokenManager tm) {
+ protected QueryParser(QueryParserTokenManager tm) {
token_source = tm;
token = new Token();
jj_ntk = -1;
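To make the version-dependent default concrete, a small sketch (getEnablePositionIncrements is an existing QueryParser accessor; the demo class is invented):

    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.queryParser.QueryParser;
    import org.apache.lucene.util.Version;

    public class PosIncrDefaultDemo {
      public static void main(String[] args) {
        // Illustrative sketch, not part of this patch.
        // matchVersion >= LUCENE_29 turns position increments on by default...
        QueryParser qp29 = new QueryParser(Version.LUCENE_29, "f", new WhitespaceAnalyzer());
        System.out.println(qp29.getEnablePositionIncrements()); // true
        // ...while older versions keep the pre-2.9 default of false.
        QueryParser qp24 = new QueryParser(Version.LUCENE_24, "f", new WhitespaceAnalyzer());
        System.out.println(qp24.getEnablePositionIncrements()); // false
      }
    }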
Index: src/java/org/apache/lucene/queryParser/QueryParser.jj
===================================================================
--- src/java/org/apache/lucene/queryParser/QueryParser.jj (revision 829134)
+++ src/java/org/apache/lucene/queryParser/QueryParser.jj (working copy)
@@ -57,6 +57,7 @@
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.WildcardQuery;
+import org.apache.lucene.util.Version;
/**
* This class is generated by JavaCC. The most important method is
@@ -123,6 +124,14 @@
* <p>NOTE: there is a new QueryParser in contrib, which matches
* the same syntax as this class, but is more modular,
* enabling substantial customization to how a query is created.
+ *
+ * <a name="version"/>
+ * <p><b>NOTE</b>: You must specify the required {@link Version}
+ * compatibility when creating QueryParser:
*/
public class QueryParser {
@@ -147,7 +156,7 @@
boolean lowercaseExpandedTerms = true;
MultiTermQuery.RewriteMethod multiTermRewriteMethod = MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT;
boolean allowLeadingWildcard = false;
- boolean enablePositionIncrements = false;
+ boolean enablePositionIncrements = true;
Analyzer analyzer;
String field;
@@ -171,13 +180,19 @@
static public enum Operator { OR, AND }
/** Constructs a query parser.
+ * @param matchVersion Lucene version to match. See <a href="#version">above</a>.
* @param f the default field for query terms.
* @param a used to find terms in the query text.
*/
- public QueryParser(String f, Analyzer a) {
+ public QueryParser(Version matchVersion, String f, Analyzer a) {
this(new FastCharStream(new StringReader("")));
analyzer = a;
field = f;
+ if (matchVersion.onOrAfter(Version.LUCENE_29)) {
+ enablePositionIncrements = true;
+ } else {
+ enablePositionIncrements = false;
+ }
}
/** Parses a query string, returning a {@link org.apache.lucene.search.Query}.
@@ -1101,7 +1116,7 @@
System.out.println("Usage: java org.apache.lucene.queryParser.QueryParser ");
System.exit(0);
}
- QueryParser qp = new QueryParser("field",
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field",
new org.apache.lucene.analysis.SimpleAnalyzer());
Query q = qp.parse(args[0]);
System.out.println(q.toString("field"));
Index: src/java/org/apache/lucene/queryParser/QueryParserTokenManager.java
===================================================================
--- src/java/org/apache/lucene/queryParser/QueryParserTokenManager.java (revision 829134)
+++ src/java/org/apache/lucene/queryParser/QueryParserTokenManager.java (working copy)
@@ -32,6 +32,7 @@
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.WildcardQuery;
+import org.apache.lucene.util.Version;
/** Token Manager. */
public class QueryParserTokenManager implements QueryParserConstants
Index: src/java/org/apache/lucene/analysis/standard/StandardAnalyzer.java
===================================================================
--- src/java/org/apache/lucene/analysis/standard/StandardAnalyzer.java (revision 829134)
+++ src/java/org/apache/lucene/analysis/standard/StandardAnalyzer.java (working copy)
@@ -35,7 +35,7 @@
* <p>You must specify the required {@link Version}
* compatibility when creating StandardAnalyzer:
* <ul>
@@ -52,6 +52,7 @@
/** An unmodifiable set containing some common English words that are usually not
useful for searching. */
public static final Set<?> STOP_WORDS_SET = StopAnalyzer.ENGLISH_STOP_WORDS_SET;
+ private final Version matchVersion;
/** Builds an analyzer with the default stop words ({@link
* #STOP_WORDS_SET}).
@@ -71,6 +72,7 @@
setOverridesTokenStreamMethod(StandardAnalyzer.class);
enableStopPositionIncrements = matchVersion.onOrAfter(Version.LUCENE_29);
replaceInvalidAcronym = matchVersion.onOrAfter(Version.LUCENE_24);
+ this.matchVersion = matchVersion;
}
/** Builds an analyzer with the stop words from the given file.
@@ -94,11 +96,12 @@
/** Constructs a {@link StandardTokenizer} filtered by a {@link
StandardFilter}, a {@link LowerCaseFilter} and a {@link StopFilter}. */
public TokenStream tokenStream(String fieldName, Reader reader) {
- StandardTokenizer tokenStream = new StandardTokenizer(reader, replaceInvalidAcronym);
+ StandardTokenizer tokenStream = new StandardTokenizer(matchVersion, reader);
tokenStream.setMaxTokenLength(maxTokenLength);
TokenStream result = new StandardFilter(tokenStream);
result = new LowerCaseFilter(result);
- result = new StopFilter(enableStopPositionIncrements, result, stopSet);
+ result = new StopFilter(StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion),
+ result, stopSet);
return result;
}
@@ -140,10 +143,11 @@
if (streams == null) {
streams = new SavedStreams();
setPreviousTokenStream(streams);
- streams.tokenStream = new StandardTokenizer(reader);
+ streams.tokenStream = new StandardTokenizer(matchVersion, reader);
streams.filteredTokenStream = new StandardFilter(streams.tokenStream);
streams.filteredTokenStream = new LowerCaseFilter(streams.filteredTokenStream);
- streams.filteredTokenStream = new StopFilter(enableStopPositionIncrements, streams.filteredTokenStream, stopSet);
+ streams.filteredTokenStream = new StopFilter(StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion),
+ streams.filteredTokenStream, stopSet);
} else {
streams.tokenStream.reset(reader);
}
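To see the Version-dependent StopFilter wiring in action, a sketch using the 2.9-era attribute API (the demo class and sample text are invented; with LUCENE_29 or later, the stop words "the" and "of" leave position gaps):

    import java.io.StringReader;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
    import org.apache.lucene.analysis.tokenattributes.TermAttribute;
    import org.apache.lucene.util.Version;

    public class StopGapDemo {
      public static void main(String[] args) throws Exception {
        // Illustrative sketch, not part of this patch.
        StandardAnalyzer a = new StandardAnalyzer(Version.LUCENE_CURRENT);
        TokenStream ts = a.tokenStream("f", new StringReader("the wizard of oz"));
        TermAttribute term = ts.addAttribute(TermAttribute.class);
        PositionIncrementAttribute pos = ts.addAttribute(PositionIncrementAttribute.class);
        while (ts.incrementToken()) {
          // prints: wizard +2, oz +2 (each stopped word leaves a hole)
          System.out.println(term.term() + " +" + pos.getPositionIncrement());
        }
      }
    }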
Index: src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java
===================================================================
--- src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java (revision 829134)
+++ src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java (working copy)
@@ -27,6 +27,7 @@
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.util.AttributeSource;
+import org.apache.lucene.util.Version;
/** A grammar-based tokenizer constructed with JFlex
*
@@ -43,6 +44,14 @@
* Many applications have specific tokenizer needs.  If this tokenizer does
* not suit your application, please consider copying this source code
* directory to your project and maintaining your own grammar-based tokenizer.
+ *
+ * <a name="version"/>
+ * <p>You must specify the required {@link Version}
+ * compatibility when creating StandardTokenizer:
*/
public final class StandardTokenizer extends Tokenizer {
@@ -105,55 +114,50 @@
}
/**
- * Creates a new instance of the {@link StandardTokenizer}. Attaches the
- * input to a newly created JFlex scanner.
- */
- public StandardTokenizer(Reader input) {
- this(input, false);
- }
-
- /**
* Creates a new instance of the {@link org.apache.lucene.analysis.standard.StandardTokenizer}. Attaches
* the input to the newly created JFlex scanner.
*
* @param input The input reader
- * @param replaceInvalidAcronym Set to true to replace mischaracterized acronyms with HOST.
*
* See http://issues.apache.org/jira/browse/LUCENE-1068
*/
- public StandardTokenizer(Reader input, boolean replaceInvalidAcronym) {
+ public StandardTokenizer(Version matchVersion, Reader input) {
super();
this.scanner = new StandardTokenizerImpl(input);
- init(input, replaceInvalidAcronym);
+ init(input, matchVersion);
}
/**
* Creates a new StandardTokenizer with a given {@link AttributeSource}.
*/
- public StandardTokenizer(AttributeSource source, Reader input, boolean replaceInvalidAcronym) {
+ public StandardTokenizer(Version matchVersion, AttributeSource source, Reader input) {
super(source);
this.scanner = new StandardTokenizerImpl(input);
- init(input, replaceInvalidAcronym);
+ init(input, matchVersion);
}
/**
* Creates a new StandardTokenizer with a given {@link org.apache.lucene.util.AttributeSource.AttributeFactory}
*/
- public StandardTokenizer(AttributeFactory factory, Reader input, boolean replaceInvalidAcronym) {
+ public StandardTokenizer(Version matchVersion, AttributeFactory factory, Reader input) {
super(factory);
this.scanner = new StandardTokenizerImpl(input);
- init(input, replaceInvalidAcronym);
+ init(input, matchVersion);
}
- private void init(Reader input, boolean replaceInvalidAcronym) {
- this.replaceInvalidAcronym = replaceInvalidAcronym;
+ private void init(Reader input, Version matchVersion) {
+ if (matchVersion.onOrAfter(Version.LUCENE_24)) {
+ replaceInvalidAcronym = true;
+ } else {
+ replaceInvalidAcronym = false;
+ }
this.input = input;
termAtt = addAttribute(TermAttribute.class);
offsetAtt = addAttribute(OffsetAttribute.class);
posIncrAtt = addAttribute(PositionIncrementAttribute.class);
typeAtt = addAttribute(TypeAttribute.class);
}
-
+
// this tokenizer generates three attributes:
// offset, positionIncrement and type
private TermAttribute termAtt;
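A quick sketch of the new tokenizer construction (the demo class is invented; the point is that replaceInvalidAcronym is now derived from matchVersion, true for LUCENE_24 and later, instead of being passed as a boolean):

    import java.io.StringReader;
    import org.apache.lucene.analysis.standard.StandardTokenizer;
    import org.apache.lucene.util.Version;

    public class TokenizerCtorDemo {
      public static void main(String[] args) throws Exception {
        // Illustrative sketch, not part of this patch.
        StandardTokenizer tok = new StandardTokenizer(Version.LUCENE_CURRENT,
                                                      new StringReader("www.nutch.org."));
        while (tok.incrementToken()) {
          // consume tokens; "www.nutch.org" comes back typed as <HOST>
        }
        tok.close();
      }
    }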
Index: src/java/org/apache/lucene/analysis/StopFilter.java
===================================================================
--- src/java/org/apache/lucene/analysis/StopFilter.java (revision 829134)
+++ src/java/org/apache/lucene/analysis/StopFilter.java (working copy)
@@ -25,6 +25,7 @@
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.queryParser.QueryParser; // for javadoc
+import org.apache.lucene.util.Version;
/**
* Removes stop words from a token stream.
@@ -151,6 +152,21 @@
}
/**
+ * Returns version-dependent default for
+ * enablePositionIncrements. Analyzers that embed
+ * StopFilter use this method when creating the
+ * StopFilter. Prior to 2.9, this returns false. On 2.9
+ * or later, it returns true.
+ */
+ public static boolean getEnablePositionIncrementsVersionDefault(Version matchVersion) {
+ if (matchVersion.onOrAfter(Version.LUCENE_29)) {
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ /**
* @see #setEnablePositionIncrements(boolean).
*/
public boolean getEnablePositionIncrements() {
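The new static helper makes the cutover explicit; a minimal sketch of what it returns (the demo class is invented; the method is the one added in this hunk):

    import org.apache.lucene.analysis.StopFilter;
    import org.apache.lucene.util.Version;

    public class StopFilterDefaultDemo {
      public static void main(String[] args) {
        // Illustrative sketch, not part of this patch.
        // false for pre-2.9 versions, true from LUCENE_29 on:
        System.out.println(StopFilter.getEnablePositionIncrementsVersionDefault(Version.LUCENE_24));
        System.out.println(StopFilter.getEnablePositionIncrementsVersionDefault(Version.LUCENE_29));
      }
    }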
Index: src/java/org/apache/lucene/analysis/StopAnalyzer.java
===================================================================
--- src/java/org/apache/lucene/analysis/StopAnalyzer.java (revision 829134)
+++ src/java/org/apache/lucene/analysis/StopAnalyzer.java (working copy)
@@ -24,8 +24,18 @@
import java.util.Set;
import java.util.List;
-/** Filters {@link LetterTokenizer} with {@link LowerCaseFilter} and {@link StopFilter}. */
+import org.apache.lucene.util.Version;
+/** Filters {@link LetterTokenizer} with {@link LowerCaseFilter} and {@link StopFilter}.
+ *
+ * <a name="version"/>
+ * <p>You must specify the required {@link Version}
+ * compatibility when creating StopAnalyzer:
+*/
+
public final class StopAnalyzer extends Analyzer {
private final Set<?> stopWords;
private final boolean enablePositionIncrements;
@@ -49,40 +59,39 @@
/** Builds an analyzer which removes words in
* {@link #ENGLISH_STOP_WORDS}.
- * @param enablePositionIncrements See {@link
- * StopFilter#setEnablePositionIncrements} */
- public StopAnalyzer(boolean enablePositionIncrements) {
+ * @param matchVersion See above
+ */
+ public StopAnalyzer(Version matchVersion) {
stopWords = ENGLISH_STOP_WORDS_SET;
- this.enablePositionIncrements = enablePositionIncrements;
+ enablePositionIncrements = StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion);
}
/** Builds an analyzer with the stop words from the given set.
+ * @param matchVersion See above
* @param stopWords Set of stop words
- * @param enablePositionIncrements See {@link
- * StopFilter#setEnablePositionIncrements} */
- public StopAnalyzer(Set<?> stopWords, boolean enablePositionIncrements) {
+ public StopAnalyzer(Version matchVersion, Set> stopWords) {
this.stopWords = stopWords;
- this.enablePositionIncrements = enablePositionIncrements;
+ enablePositionIncrements = StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion);
}
/** Builds an analyzer with the stop words from the given file.
* @see WordlistLoader#getWordSet(File)
- * @param stopwordsFile File to load stop words from
- * @param enablePositionIncrements See {@link
- * StopFilter#setEnablePositionIncrements} */
- public StopAnalyzer(File stopwordsFile, boolean enablePositionIncrements) throws IOException {
+ * @param matchVersion See above
+ * @param stopwordsFile File to load stop words from */
+ public StopAnalyzer(Version matchVersion, File stopwordsFile) throws IOException {
stopWords = WordlistLoader.getWordSet(stopwordsFile);
- this.enablePositionIncrements = enablePositionIncrements;
+ this.enablePositionIncrements = StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion);
}
/** Builds an analyzer with the stop words from the given reader.
* @see WordlistLoader#getWordSet(Reader)
- * @param stopwords Reader to load stop words from
- * @param enablePositionIncrements See {@link
- * StopFilter#setEnablePositionIncrements} */
- public StopAnalyzer(Reader stopwords, boolean enablePositionIncrements) throws IOException {
+ * @param matchVersion See above
+ * @param stopwords Reader to load stop words from */
+ public StopAnalyzer(Version matchVersion, Reader stopwords) throws IOException {
stopWords = WordlistLoader.getWordSet(stopwords);
- this.enablePositionIncrements = enablePositionIncrements;
+ this.enablePositionIncrements = StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion);
}
/** Filters LowerCaseTokenizer with StopFilter. */
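Taken together, the constructors now look like this from the caller's side (a sketch; the demo class and stop word are invented):

    import java.util.HashSet;
    import java.util.Set;
    import org.apache.lucene.analysis.StopAnalyzer;
    import org.apache.lucene.util.Version;

    public class StopAnalyzerCtorDemo {
      public static void main(String[] args) {
        // Illustrative sketch, not part of this patch.
        // Position-increment handling now follows the Version default
        // instead of an explicit boolean argument.
        StopAnalyzer english = new StopAnalyzer(Version.LUCENE_CURRENT);
        Set<String> words = new HashSet<String>();
        words.add("foo");
        StopAnalyzer custom = new StopAnalyzer(Version.LUCENE_CURRENT, words);
      }
    }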
Index: src/demo/org/apache/lucene/demo/SearchFiles.java
===================================================================
--- src/demo/org/apache/lucene/demo/SearchFiles.java (revision 829134)
+++ src/demo/org/apache/lucene/demo/SearchFiles.java (working copy)
@@ -127,7 +127,7 @@
} else {
in = new BufferedReader(new InputStreamReader(System.in, "UTF-8"));
}
- QueryParser parser = new QueryParser(field, analyzer);
+ QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, field, analyzer);
while (true) {
if (queries == null) // prompt the user
System.out.println("Enter query: ");
Index: build.xml
===================================================================
--- build.xml (revision 829134)
+++ build.xml (working copy)
@@ -580,9 +580,21 @@
Index: contrib/fast-vector-highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java
===================================================================
--- contrib/fast-vector-highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java (revision 829134)
+++ contrib/fast-vector-highlighter/src/java/org/apache/lucene/search/vectorhighlight/FieldTermStack.java (working copy)
* FieldTermStack is a stack that keeps query terms in the specified field
@@ -50,7 +51,7 @@
public static void main( String[] args ) throws Exception {
Analyzer analyzer = new WhitespaceAnalyzer();
- QueryParser parser = new QueryParser( "f", analyzer );
+ QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, "f", analyzer );
Query query = parser.parse( "a x:b" );
FieldQuery fieldQuery = new FieldQuery( query, true, false );
Index: contrib/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SimpleQQParser.java
===================================================================
--- contrib/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SimpleQQParser.java (revision 829134)
+++ contrib/benchmark/src/java/org/apache/lucene/benchmark/quality/utils/SimpleQQParser.java (working copy)
@@ -50,7 +50,7 @@
public Query parse(QualityQuery qq) throws ParseException {
QueryParser qp = queryParser.get();
if (qp==null) {
- qp = new QueryParser(indexField, new StandardAnalyzer(Version.LUCENE_CURRENT));
+ qp = new QueryParser(Version.LUCENE_CURRENT, indexField, new StandardAnalyzer(Version.LUCENE_CURRENT));
queryParser.set(qp);
}
return qp.parse(qq.getValue(qqName));
Index: contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ReutersQueryMaker.java
===================================================================
--- contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ReutersQueryMaker.java (revision 829134)
+++ contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/ReutersQueryMaker.java (working copy)
@@ -27,6 +27,7 @@
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.benchmark.byTask.tasks.NewAnalyzerTask;
+import org.apache.lucene.util.Version;
import java.util.ArrayList;
import java.util.Arrays;
@@ -72,7 +73,7 @@
* @return array of Lucene queries
*/
private static Query[] createQueries(List qs, Analyzer a) {
- QueryParser qp = new QueryParser(DocMaker.BODY_FIELD, a);
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, DocMaker.BODY_FIELD, a);
List queries = new ArrayList();
for (int i = 0; i < qs.size(); i++) {
try {
Index: contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleQueryMaker.java
===================================================================
--- contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleQueryMaker.java (revision 829134)
+++ contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/SimpleQueryMaker.java (working copy)
@@ -25,6 +25,7 @@
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.benchmark.byTask.tasks.NewAnalyzerTask;
+import org.apache.lucene.util.Version;
import java.util.ArrayList;
@@ -46,7 +47,7 @@
Analyzer anlzr= NewAnalyzerTask.createAnalyzer(config.get("analyzer",
"org.apache.lucene.analysis.standard.StandardAnalyzer"));
- QueryParser qp = new QueryParser(DocMaker.BODY_FIELD,anlzr);
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, DocMaker.BODY_FIELD,anlzr);
ArrayList qq = new ArrayList();
Query q1 = new TermQuery(new Term(DocMaker.ID_FIELD,"doc2"));
qq.add(q1);
Index: contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/FileBasedQueryMaker.java
===================================================================
--- contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/FileBasedQueryMaker.java (revision 829134)
+++ contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/FileBasedQueryMaker.java (working copy)
@@ -5,6 +5,7 @@
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.search.Query;
import org.apache.lucene.benchmark.byTask.tasks.NewAnalyzerTask;
+import org.apache.lucene.util.Version;
import java.io.*;
import java.util.ArrayList;
@@ -48,7 +49,7 @@
Analyzer anlzr = NewAnalyzerTask.createAnalyzer(config.get("analyzer",
"org.apache.lucene.analysis.standard.StandardAnalyzer"));
String defaultField = config.get("file.query.maker.default.field", DocMaker.BODY_FIELD);
- QueryParser qp = new QueryParser(defaultField, anlzr);
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, defaultField, anlzr);
List qq = new ArrayList();
String fileName = config.get("file.query.maker.file", null);
Index: contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/EnwikiQueryMaker.java
===================================================================
--- contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/EnwikiQueryMaker.java (revision 829134)
+++ contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/EnwikiQueryMaker.java (working copy)
@@ -33,6 +33,7 @@
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.benchmark.byTask.tasks.NewAnalyzerTask;
+import org.apache.lucene.util.Version;
/**
* A QueryMaker that uses common and uncommon actual Wikipedia queries for
@@ -92,7 +93,7 @@
* @return array of Lucene queries
*/
private static Query[] createQueries(List qs, Analyzer a) {
- QueryParser qp = new QueryParser(DocMaker.BODY_FIELD, a);
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, DocMaker.BODY_FIELD, a);
List queries = new ArrayList();
for (int i = 0; i < qs.size(); i++) {
try {
Index: contrib/misc/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java
===================================================================
--- contrib/misc/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java (revision 829134)
+++ contrib/misc/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java (working copy)
@@ -33,6 +33,7 @@
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.Version;
public class TestComplexPhraseQuery extends TestCase {
@@ -71,7 +72,7 @@
}
private void checkBadQuery(String qString) {
- QueryParser qp = new ComplexPhraseQueryParser(defaultFieldName, analyzer);
+ QueryParser qp = new ComplexPhraseQueryParser(Version.LUCENE_CURRENT, defaultFieldName, analyzer);
Throwable expected = null;
try {
qp.parse(qString);
@@ -84,7 +85,7 @@
private void checkMatches(String qString, String expectedVals)
throws Exception {
- QueryParser qp = new ComplexPhraseQueryParser(defaultFieldName, analyzer);
+ QueryParser qp = new ComplexPhraseQueryParser(Version.LUCENE_CURRENT, defaultFieldName, analyzer);
qp.setFuzzyPrefixLength(1); // usually a good idea
Query q = qp.parse(qString);
Index: contrib/misc/src/test/org/apache/lucene/queryParser/analyzing/TestAnalyzingQueryParser.java
===================================================================
--- contrib/misc/src/test/org/apache/lucene/queryParser/analyzing/TestAnalyzingQueryParser.java (revision 829134)
+++ contrib/misc/src/test/org/apache/lucene/queryParser/analyzing/TestAnalyzingQueryParser.java (working copy)
@@ -28,6 +28,7 @@
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.queryParser.ParseException;
+import org.apache.lucene.util.Version;
/**
* @version $Revision$, $Date$
@@ -97,7 +98,7 @@
}
private String parseWithAnalyzingQueryParser(String s, Analyzer a) throws ParseException {
- AnalyzingQueryParser qp = new AnalyzingQueryParser("field", a);
+ AnalyzingQueryParser qp = new AnalyzingQueryParser(Version.LUCENE_CURRENT, "field", a);
org.apache.lucene.search.Query q = qp.parse(s);
return q.toString("field");
}
@@ -109,7 +110,7 @@
}
public TokenStream tokenStream(String fieldName, Reader reader) {
- TokenStream result = new StandardTokenizer(reader);
+ TokenStream result = new StandardTokenizer(Version.LUCENE_CURRENT, reader);
result = new StandardFilter(result);
result = new ASCIIFoldingFilter(result);
result = new LowerCaseFilter(result);
Index: contrib/misc/src/java/org/apache/lucene/queryParser/complexPhrase/ComplexPhraseQueryParser.java
===================================================================
--- contrib/misc/src/java/org/apache/lucene/queryParser/complexPhrase/ComplexPhraseQueryParser.java (revision 829134)
+++ contrib/misc/src/java/org/apache/lucene/queryParser/complexPhrase/ComplexPhraseQueryParser.java (working copy)
@@ -38,6 +38,7 @@
import org.apache.lucene.search.spans.SpanOrQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
+import org.apache.lucene.util.Version;
/**
* QueryParser which permits complex phrase query syntax eg "(john jon
@@ -67,8 +68,8 @@
private ComplexPhraseQuery currentPhraseQuery = null;
- public ComplexPhraseQueryParser(String f, Analyzer a) {
- super(f, a);
+ public ComplexPhraseQueryParser(Version matchVersion, String f, Analyzer a) {
+ super(matchVersion, f, a);
}
protected Query getFieldQuery(String field, String queryText, int slop) {
Index: contrib/misc/src/java/org/apache/lucene/queryParser/analyzing/AnalyzingQueryParser.java
===================================================================
--- contrib/misc/src/java/org/apache/lucene/queryParser/analyzing/AnalyzingQueryParser.java (revision 829134)
+++ contrib/misc/src/java/org/apache/lucene/queryParser/analyzing/AnalyzingQueryParser.java (working copy)
@@ -28,6 +28,7 @@
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.search.Query;
+import org.apache.lucene.util.Version;
/**
* Overrides Lucene's default QueryParser so that Fuzzy-, Prefix-, Range-, and WildcardQuerys
@@ -49,8 +50,8 @@
* @param field the default field for query terms.
* @param analyzer used to find terms in the query text.
*/
- public AnalyzingQueryParser(String field, Analyzer analyzer) {
- super(field, analyzer);
+ public AnalyzingQueryParser(Version matchVersion, String field, Analyzer analyzer) {
+ super(matchVersion, field, analyzer);
}
/**
Index: contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/UserInputQueryBuilder.java
===================================================================
--- contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/UserInputQueryBuilder.java (revision 829134)
+++ contrib/xml-query-parser/src/java/org/apache/lucene/xmlparser/builders/UserInputQueryBuilder.java (working copy)
@@ -8,6 +8,7 @@
import org.apache.lucene.xmlparser.ParserException;
import org.apache.lucene.xmlparser.QueryBuilder;
import org.w3c.dom.Element;
+import org.apache.lucene.util.Version;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
@@ -88,7 +89,7 @@
*/
protected QueryParser createQueryParser(String fieldName, Analyzer analyzer)
{
- return new QueryParser(fieldName,analyzer);
+ return new QueryParser(Version.LUCENE_CURRENT, fieldName,analyzer);
}
}
Index: contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
===================================================================
--- contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (revision 829134)
+++ contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (working copy)
@@ -113,7 +113,7 @@
public void testQueryScorerHits() throws Exception {
Analyzer analyzer = new SimpleAnalyzer();
- QueryParser qp = new QueryParser(FIELD_NAME, analyzer);
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, FIELD_NAME, analyzer);
query = qp.parse("\"very long\"");
searcher = new IndexSearcher(ramDir, true);
TopDocs hits = searcher.search(query, 10);
@@ -143,7 +143,7 @@
String s1 = "I call our world Flatland, not because we call it so,";
- QueryParser parser = new QueryParser(FIELD_NAME, new StandardAnalyzer(TEST_VERSION));
+ QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, FIELD_NAME, new StandardAnalyzer(TEST_VERSION));
// Verify that a query against the default field results in text being
// highlighted
@@ -221,7 +221,7 @@
String q = "(" + f1c + ph1 + " OR " + f2c + ph1 + ") AND (" + f1c + ph2
+ " OR " + f2c + ph2 + ")";
Analyzer analyzer = new WhitespaceAnalyzer();
- QueryParser qp = new QueryParser(f1, analyzer);
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, f1, analyzer);
Query query = qp.parse(q);
QueryScorer scorer = new QueryScorer(query, f1);
@@ -590,7 +590,7 @@
// Need to explicitly set the QueryParser property to use TermRangeQuery
// rather
// than RangeFilters
- QueryParser parser = new QueryParser(FIELD_NAME, analyzer);
+ QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, FIELD_NAME, analyzer);
parser.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
query = parser.parse(queryString);
doSearching(query);
@@ -930,7 +930,7 @@
String srchkey = "football";
String s = "football-soccer in the euro 2004 footie competition";
- QueryParser parser = new QueryParser("bookid", analyzer);
+ QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, "bookid", analyzer);
Query query = parser.parse(srchkey);
TokenStream tokenStream = analyzer.tokenStream(null, new StringReader(s));
@@ -1111,7 +1111,7 @@
searcher = new IndexSearcher(ramDir, true);
Analyzer analyzer = new StandardAnalyzer(TEST_VERSION);
- QueryParser parser = new QueryParser(FIELD_NAME, analyzer);
+ QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, FIELD_NAME, analyzer);
Query query = parser.parse("JF? or Kenned*");
System.out.println("Searching with primitive query");
// forget to set this and...
@@ -1245,7 +1245,7 @@
searchers[0] = new IndexSearcher(ramDir1, true);
searchers[1] = new IndexSearcher(ramDir2, true);
MultiSearcher multiSearcher = new MultiSearcher(searchers);
- QueryParser parser = new QueryParser(FIELD_NAME, new StandardAnalyzer(TEST_VERSION));
+ QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, FIELD_NAME, new StandardAnalyzer(TEST_VERSION));
parser.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
query = parser.parse("multi*");
System.out.println("Searching for: " + query.toString(FIELD_NAME));
@@ -1278,7 +1278,7 @@
public void run() throws Exception {
String docMainText = "fred is one of the people";
- QueryParser parser = new QueryParser(FIELD_NAME, analyzer);
+ QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, FIELD_NAME, analyzer);
Query query = parser.parse("fred category:people");
// highlighting respects fieldnames used in query
@@ -1419,64 +1419,64 @@
Highlighter highlighter;
String result;
- query = new QueryParser("text", new WhitespaceAnalyzer()).parse("foo");
+ query = new QueryParser(Version.LUCENE_CURRENT, "text", new WhitespaceAnalyzer()).parse("foo");
highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2(), s, 3, "...");
assertEquals("Hi-Speed10 foo", result);
- query = new QueryParser("text", new WhitespaceAnalyzer()).parse("10");
+ query = new QueryParser(Version.LUCENE_CURRENT, "text", new WhitespaceAnalyzer()).parse("10");
highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2(), s, 3, "...");
assertEquals("Hi-Speed10 foo", result);
- query = new QueryParser("text", new WhitespaceAnalyzer()).parse("hi");
+ query = new QueryParser(Version.LUCENE_CURRENT, "text", new WhitespaceAnalyzer()).parse("hi");
highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2(), s, 3, "...");
assertEquals("Hi-Speed10 foo", result);
- query = new QueryParser("text", new WhitespaceAnalyzer()).parse("speed");
+ query = new QueryParser(Version.LUCENE_CURRENT, "text", new WhitespaceAnalyzer()).parse("speed");
highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2(), s, 3, "...");
assertEquals("Hi-Speed10 foo", result);
- query = new QueryParser("text", new WhitespaceAnalyzer()).parse("hispeed");
+ query = new QueryParser(Version.LUCENE_CURRENT, "text", new WhitespaceAnalyzer()).parse("hispeed");
highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2(), s, 3, "...");
assertEquals("Hi-Speed10 foo", result);
- query = new QueryParser("text", new WhitespaceAnalyzer()).parse("hi speed");
+ query = new QueryParser(Version.LUCENE_CURRENT, "text", new WhitespaceAnalyzer()).parse("hi speed");
highlighter = getHighlighter(query, "text", getTS2(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2(), s, 3, "...");
assertEquals("Hi-Speed10 foo", result);
// ///////////////// same tests, just put the bigger overlapping token
// first
- query = new QueryParser("text", new WhitespaceAnalyzer()).parse("foo");
+ query = new QueryParser(Version.LUCENE_CURRENT, "text", new WhitespaceAnalyzer()).parse("foo");
highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
assertEquals("Hi-Speed10 foo", result);
- query = new QueryParser("text", new WhitespaceAnalyzer()).parse("10");
+ query = new QueryParser(Version.LUCENE_CURRENT, "text", new WhitespaceAnalyzer()).parse("10");
highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
assertEquals("Hi-Speed10 foo", result);
- query = new QueryParser("text", new WhitespaceAnalyzer()).parse("hi");
+ query = new QueryParser(Version.LUCENE_CURRENT, "text", new WhitespaceAnalyzer()).parse("hi");
highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
assertEquals("Hi-Speed10 foo", result);
- query = new QueryParser("text", new WhitespaceAnalyzer()).parse("speed");
+ query = new QueryParser(Version.LUCENE_CURRENT, "text", new WhitespaceAnalyzer()).parse("speed");
highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
assertEquals("Hi-Speed10 foo", result);
- query = new QueryParser("text", new WhitespaceAnalyzer()).parse("hispeed");
+ query = new QueryParser(Version.LUCENE_CURRENT, "text", new WhitespaceAnalyzer()).parse("hispeed");
highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
assertEquals("Hi-Speed10 foo", result);
- query = new QueryParser("text", new WhitespaceAnalyzer()).parse("hi speed");
+ query = new QueryParser(Version.LUCENE_CURRENT, "text", new WhitespaceAnalyzer()).parse("hi speed");
highlighter = getHighlighter(query, "text", getTS2a(), HighlighterTest.this);
result = highlighter.getBestFragments(getTS2a(), s, 3, "...");
assertEquals("Hi-Speed10 foo", result);
@@ -1521,7 +1521,7 @@
private void searchIndex() throws IOException, ParseException, InvalidTokenOffsetsException {
String q = "t_text1:random";
- QueryParser parser = new QueryParser( "t_text1", a );
+ QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, "t_text1", a );
Query query = parser.parse( q );
IndexSearcher searcher = new IndexSearcher( dir, true );
// This scorer can return negative idf -> null fragment
@@ -1575,7 +1575,7 @@
}
public void doSearching(String queryString) throws Exception {
- QueryParser parser = new QueryParser(FIELD_NAME, analyzer);
+ QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, FIELD_NAME, analyzer);
parser.setEnablePositionIncrements(true);
parser.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
query = parser.parse(queryString);
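
Every HighlighterTest change above makes the same substitution: a required leading matchVersion argument on the QueryParser constructor. A minimal sketch of the resulting call shape (the class name, field name and query string here are illustrative, not part of the patch):

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.queryParser.QueryParser;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.util.Version;

    public class VersionedQueryParserSketch {
      public static void main(String[] args) throws Exception {
        // The required matchVersion comes first; LUCENE_CURRENT opts in to the
        // 2.9 default of enablePositionIncrements=true described in CHANGES.txt.
        QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, "contents",
            new StandardAnalyzer(Version.LUCENE_CURRENT));
        Query query = parser.parse("\"quick brown fox\"");
        System.out.println(query.toString("contents"));
      }
    }
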
Index: contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
===================================================================
--- contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java (revision 829134)
+++ contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java (working copy)
@@ -53,6 +53,7 @@
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.index.TermDocs;
+import org.apache.lucene.util.Version;
/**
Verifies that Lucene MemoryIndex and RAMDirectory have the same behaviour,
@@ -277,7 +278,7 @@
Analyzer[] analyzers = new Analyzer[] {
new SimpleAnalyzer(),
- new StopAnalyzer(true),
+ new StopAnalyzer(Version.LUCENE_CURRENT),
new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT),
PatternAnalyzer.DEFAULT_ANALYZER,
// new WhitespaceAnalyzer(),
@@ -480,7 +481,7 @@
}
private Query parseQuery(String expression) throws ParseException {
- QueryParser parser = new QueryParser(FIELD_NAME, analyzer);
+ QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, FIELD_NAME, analyzer);
// parser.setPhraseSlop(0);
return parser.parse(expression);
}
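
The StopAnalyzer change above swaps the old boolean enablePositionIncrements argument for a Version. A small sketch of what that implies for token positions; the expected output reflects our reading of the 2.9 semantics (stop words leave position gaps), not something this test asserts:

    import java.io.StringReader;
    import org.apache.lucene.analysis.StopAnalyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
    import org.apache.lucene.analysis.tokenattributes.TermAttribute;
    import org.apache.lucene.util.Version;

    public class StopAnalyzerVersionSketch {
      public static void main(String[] args) throws Exception {
        // With LUCENE_CURRENT (>= 2.9), removed stop words should leave gaps.
        StopAnalyzer analyzer = new StopAnalyzer(Version.LUCENE_CURRENT);
        TokenStream ts = analyzer.tokenStream("f", new StringReader("the quick brown fox"));
        TermAttribute term = (TermAttribute) ts.addAttribute(TermAttribute.class);
        PositionIncrementAttribute posIncr =
            (PositionIncrementAttribute) ts.addAttribute(PositionIncrementAttribute.class);
        while (ts.incrementToken()) {
          // "quick" should report a position increment of 2 because "the" was dropped.
          System.out.println(term.term() + " +" + posIncr.getPositionIncrement());
        }
      }
    }
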
Index: contrib/memory/src/test/org/apache/lucene/index/memory/PatternAnalyzerTest.java
===================================================================
--- contrib/memory/src/test/org/apache/lucene/index/memory/PatternAnalyzerTest.java (revision 829134)
+++ contrib/memory/src/test/org/apache/lucene/index/memory/PatternAnalyzerTest.java (working copy)
@@ -24,6 +24,7 @@
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.StopAnalyzer;
import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.util.Version;
/**
* Verifies the behavior of PatternAnalyzer.
@@ -36,13 +37,13 @@
*/
public void testNonWordPattern() throws IOException {
// Split on non-letter pattern, do not lowercase, no stopwords
- PatternAnalyzer a = new PatternAnalyzer(PatternAnalyzer.NON_WORD_PATTERN,
+ PatternAnalyzer a = new PatternAnalyzer(Version.LUCENE_CURRENT, PatternAnalyzer.NON_WORD_PATTERN,
false, null);
check(a, "The quick brown Fox,the abcd1234 (56.78) dc.", new String[] {
"The", "quick", "brown", "Fox", "the", "abcd", "dc" });
// split on non-letter pattern, lowercase, english stopwords
- PatternAnalyzer b = new PatternAnalyzer(PatternAnalyzer.NON_WORD_PATTERN,
+ PatternAnalyzer b = new PatternAnalyzer(Version.LUCENE_CURRENT, PatternAnalyzer.NON_WORD_PATTERN,
true, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
check(b, "The quick brown Fox,the abcd1234 (56.78) dc.", new String[] {
"quick", "brown", "fox", "abcd", "dc" });
@@ -54,13 +55,13 @@
*/
public void testWhitespacePattern() throws IOException {
// Split on whitespace patterns, do not lowercase, no stopwords
- PatternAnalyzer a = new PatternAnalyzer(PatternAnalyzer.WHITESPACE_PATTERN,
+ PatternAnalyzer a = new PatternAnalyzer(Version.LUCENE_CURRENT, PatternAnalyzer.WHITESPACE_PATTERN,
false, null);
check(a, "The quick brown Fox,the abcd1234 (56.78) dc.", new String[] {
"The", "quick", "brown", "Fox,the", "abcd1234", "(56.78)", "dc." });
// Split on whitespace patterns, lowercase, english stopwords
- PatternAnalyzer b = new PatternAnalyzer(PatternAnalyzer.WHITESPACE_PATTERN,
+ PatternAnalyzer b = new PatternAnalyzer(Version.LUCENE_CURRENT, PatternAnalyzer.WHITESPACE_PATTERN,
true, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
check(b, "The quick brown Fox,the abcd1234 (56.78) dc.", new String[] {
"quick", "brown", "fox,the", "abcd1234", "(56.78)", "dc." });
@@ -72,12 +73,12 @@
*/
public void testCustomPattern() throws IOException {
// Split on comma, do not lowercase, no stopwords
- PatternAnalyzer a = new PatternAnalyzer(Pattern.compile(","), false, null);
+ PatternAnalyzer a = new PatternAnalyzer(Version.LUCENE_CURRENT, Pattern.compile(","), false, null);
check(a, "Here,Are,some,Comma,separated,words,", new String[] { "Here",
"Are", "some", "Comma", "separated", "words" });
// split on comma, lowercase, english stopwords
- PatternAnalyzer b = new PatternAnalyzer(Pattern.compile(","), true,
+ PatternAnalyzer b = new PatternAnalyzer(Version.LUCENE_CURRENT, Pattern.compile(","), true,
StopAnalyzer.ENGLISH_STOP_WORDS_SET);
check(b, "Here,Are,some,Comma,separated,words,", new String[] { "here",
"some", "comma", "separated", "words" });
@@ -102,7 +103,7 @@
document.append(largeWord2);
// Split on whitespace patterns, do not lowercase, no stopwords
- PatternAnalyzer a = new PatternAnalyzer(PatternAnalyzer.WHITESPACE_PATTERN,
+ PatternAnalyzer a = new PatternAnalyzer(Version.LUCENE_CURRENT, PatternAnalyzer.WHITESPACE_PATTERN,
false, null);
check(a, document.toString(), new String[] { new String(largeWord),
new String(largeWord2) });
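
The test changes above all prepend matchVersion to PatternAnalyzer's constructor while the remaining arguments keep their old meaning (token pattern, lowercasing, stop word set). A minimal usage sketch along the lines of testCustomPattern; the field name is arbitrary and the expected tokens are taken from that test:

    import java.io.StringReader;
    import java.util.regex.Pattern;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.tokenattributes.TermAttribute;
    import org.apache.lucene.index.memory.PatternAnalyzer;
    import org.apache.lucene.util.Version;

    public class CommaPatternSketch {
      public static void main(String[] args) throws Exception {
        // Split on commas, keep case, no stop words; matchVersion leads the list.
        PatternAnalyzer analyzer =
            new PatternAnalyzer(Version.LUCENE_CURRENT, Pattern.compile(","), false, null);
        TokenStream ts = analyzer.tokenStream("f",
            new StringReader("Here,Are,some,Comma,separated,words"));
        TermAttribute term = (TermAttribute) ts.addAttribute(TermAttribute.class);
        while (ts.incrementToken()) {
          System.out.println(term.term()); // Here / Are / some / Comma / separated / words
        }
      }
    }
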
Index: contrib/memory/src/java/org/apache/lucene/index/memory/PatternAnalyzer.java
===================================================================
--- contrib/memory/src/java/org/apache/lucene/index/memory/PatternAnalyzer.java (revision 829134)
+++ contrib/memory/src/java/org/apache/lucene/index/memory/PatternAnalyzer.java (working copy)
@@ -33,6 +33,7 @@
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.util.Version;
/**
* Efficient Lucene analyzer/tokenizer that preferably operates on a String rather than a
@@ -124,7 +125,7 @@
* freely across threads without harm); global per class loader.
*/
public static final PatternAnalyzer DEFAULT_ANALYZER = new PatternAnalyzer(
- NON_WORD_PATTERN, true, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
+ Version.LUCENE_CURRENT, NON_WORD_PATTERN, true, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
/**
* A lower-casing word analyzer with extended English stop words
@@ -134,15 +135,18 @@
* http://thomas.loc.gov/home/all.about.inquery.html
*/
public static final PatternAnalyzer EXTENDED_ANALYZER = new PatternAnalyzer(
- NON_WORD_PATTERN, true, EXTENDED_ENGLISH_STOP_WORDS);
+ Version.LUCENE_CURRENT, NON_WORD_PATTERN, true, EXTENDED_ENGLISH_STOP_WORDS);
private final Pattern pattern;
private final boolean toLowerCase;
private final Set stopWords;
+
+ private final Version matchVersion;
/**
* Constructs a new instance with the given parameters.
*
+ * @param matchVersion If >= {@link Version#LUCENE_29}, StopFilter.enablePositionIncrement is set to true
* @param pattern
* a regular expression delimiting tokens
* @param toLowerCase
@@ -158,7 +162,7 @@
* or other stop words
* lists .
*/
- public PatternAnalyzer(Pattern pattern, boolean toLowerCase, Set stopWords) {
+ public PatternAnalyzer(Version matchVersion, Pattern pattern, boolean toLowerCase, Set stopWords) {
if (pattern == null)
throw new IllegalArgumentException("pattern must not be null");
@@ -170,6 +174,7 @@
this.pattern = pattern;
this.toLowerCase = toLowerCase;
this.stopWords = stopWords;
+ this.matchVersion = matchVersion;
}
/**
@@ -197,7 +202,7 @@
}
else {
stream = new PatternTokenizer(text, pattern, toLowerCase);
- if (stopWords != null) stream = new StopFilter(false, stream, stopWords);
+ if (stopWords != null) stream = new StopFilter(StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion), stream, stopWords);
}
return stream;
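
This hunk shows the core pattern of the patch: instead of hard-wiring false, the StopFilter's enablePositionIncrements flag is now derived from matchVersion. A sketch of the mapping the helper performs; the expected values follow the CHANGES.txt entry (old behavior up to 2.4, position increments on from 2.9):

    import org.apache.lucene.analysis.StopFilter;
    import org.apache.lucene.util.Version;

    public class PosIncrDefaultSketch {
      public static void main(String[] args) {
        // Pre-2.9 versions keep the old default (no position increments)...
        System.out.println(StopFilter.getEnablePositionIncrementsVersionDefault(Version.LUCENE_24));
        // ...while 2.9 and later, including LUCENE_CURRENT, enable them.
        System.out.println(StopFilter.getEnablePositionIncrementsVersionDefault(Version.LUCENE_29));
        System.out.println(StopFilter.getEnablePositionIncrementsVersionDefault(Version.LUCENE_CURRENT));
      }
    }
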
Index: contrib/lucli/src/java/lucli/LuceneMethods.java
===================================================================
--- contrib/lucli/src/java/lucli/LuceneMethods.java (revision 829134)
+++ contrib/lucli/src/java/lucli/LuceneMethods.java (working copy)
@@ -195,7 +195,7 @@
for (int ii = 0; ii < arraySize; ii++) {
indexedArray[ii] = (String) indexedFields.get(ii);
}
- MultiFieldQueryParser parser = new MultiFieldQueryParser(indexedArray, analyzer);
+ MultiFieldQueryParser parser = new MultiFieldQueryParser(Version.LUCENE_CURRENT, indexedArray, analyzer);
query = parser.parse(queryString);
System.out.println("Searching for: " + query.toString());
return (query);
@@ -216,7 +216,7 @@
for (int ii = 0; ii < arraySize; ii++) {
fieldsArray[ii] = (String) fields.get(ii);
}
- MultiFieldQueryParser parser = new MultiFieldQueryParser(fieldsArray, analyzer);
+ MultiFieldQueryParser parser = new MultiFieldQueryParser(Version.LUCENE_CURRENT, fieldsArray, analyzer);
query = parser.parse(queryString);
System.out.println("Searching for: " + query.toString());
}
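
The lucli changes mirror the QueryParser ones for MultiFieldQueryParser. A minimal sketch of the new constructor; the field names are illustrative, and the printed form follows the pattern asserted in TestMultiFieldQueryParser:

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.queryParser.MultiFieldQueryParser;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.util.Version;

    public class MultiFieldSketch {
      public static void main(String[] args) throws Exception {
        String[] fields = {"title", "body"}; // illustrative field names
        MultiFieldQueryParser parser = new MultiFieldQueryParser(Version.LUCENE_CURRENT,
            fields, new StandardAnalyzer(Version.LUCENE_CURRENT));
        Query q = parser.parse("lucene");
        System.out.println(q); // title:lucene body:lucene
      }
    }
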
Index: contrib/analyzers/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java
===================================================================
--- contrib/analyzers/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java (revision 829134)
+++ contrib/analyzers/smartcn/src/test/org/apache/lucene/analysis/cn/smart/TestSmartChineseAnalyzer.java (working copy)
@@ -26,16 +26,17 @@
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.util.Version;
public class TestSmartChineseAnalyzer extends BaseTokenStreamTestCase {
public void testChineseStopWordsDefault() throws Exception {
- Analyzer ca = new SmartChineseAnalyzer(); /* will load stopwords */
+ Analyzer ca = new SmartChineseAnalyzer(Version.LUCENE_CURRENT); /* will load stopwords */
String sentence = "我购买了道具和服装。";
String result[] = { "我", "购买", "了", "道具", "和", "服装" };
assertAnalyzesTo(ca, sentence, result);
// set stop-words from the outer world - must yield same behavior
- ca = new SmartChineseAnalyzer(SmartChineseAnalyzer.getDefaultStopSet());
+ ca = new SmartChineseAnalyzer(Version.LUCENE_CURRENT, SmartChineseAnalyzer.getDefaultStopSet());
assertAnalyzesTo(ca, sentence, result);
}
@@ -44,7 +45,7 @@
* This tests to ensure the SentenceTokenizer->WordTokenFilter chain works correctly.
*/
public void testChineseStopWordsDefaultTwoPhrases() throws Exception {
- Analyzer ca = new SmartChineseAnalyzer(); /* will load stopwords */
+ Analyzer ca = new SmartChineseAnalyzer(Version.LUCENE_CURRENT); /* will load stopwords */
String sentence = "我购买了道具和服装。 我购买了道具和服装。";
String result[] = { "我", "购买", "了", "道具", "和", "服装", "我", "购买", "了", "道具", "和", "服装" };
assertAnalyzesTo(ca, sentence, result);
@@ -55,7 +56,7 @@
* This tests to ensure the stopwords are working correctly.
*/
public void testChineseStopWordsDefaultTwoPhrasesIdeoSpace() throws Exception {
- Analyzer ca = new SmartChineseAnalyzer(); /* will load stopwords */
+ Analyzer ca = new SmartChineseAnalyzer(Version.LUCENE_CURRENT); /* will load stopwords */
String sentence = "我购买了道具和服装 我购买了道具和服装。";
String result[] = { "我", "购买", "了", "道具", "和", "服装", "我", "购买", "了", "道具", "和", "服装" };
assertAnalyzesTo(ca, sentence, result);
@@ -69,8 +70,8 @@
*/
public void testChineseStopWordsOff() throws Exception {
Analyzer[] analyzers = new Analyzer[] {
- new SmartChineseAnalyzer(false),/* doesn't load stopwords */
- new SmartChineseAnalyzer(null) /* sets stopwords to empty set */};
+ new SmartChineseAnalyzer(Version.LUCENE_CURRENT, false),/* doesn't load stopwords */
+ new SmartChineseAnalyzer(Version.LUCENE_CURRENT, null) /* sets stopwords to empty set */};
String sentence = "我购买了道具和服装。";
String result[] = { "我", "购买", "了", "道具", "和", "服装", "," };
for (Analyzer analyzer : analyzers) {
@@ -80,7 +81,7 @@
}
public void testChineseAnalyzer() throws Exception {
- Analyzer ca = new SmartChineseAnalyzer(true);
+ Analyzer ca = new SmartChineseAnalyzer(Version.LUCENE_CURRENT, true);
String sentence = "我购买了道具和服装。";
String[] result = { "我", "购买", "了", "道具", "和", "服装" };
assertAnalyzesTo(ca, sentence, result);
@@ -90,7 +91,7 @@
* English words are lowercased and porter-stemmed.
*/
public void testMixedLatinChinese() throws Exception {
- assertAnalyzesTo(new SmartChineseAnalyzer(true), "我购买 Tests 了道具和服装",
+ assertAnalyzesTo(new SmartChineseAnalyzer(Version.LUCENE_CURRENT, true), "我购买 Tests 了道具和服装",
new String[] { "我", "购买", "test", "了", "道具", "和", "服装"});
}
@@ -98,7 +99,7 @@
* Numerics are parsed as their own tokens
*/
public void testNumerics() throws Exception {
- assertAnalyzesTo(new SmartChineseAnalyzer(true), "我购买 Tests 了道具和服装1234",
+ assertAnalyzesTo(new SmartChineseAnalyzer(Version.LUCENE_CURRENT, true), "我购买 Tests 了道具和服装1234",
new String[] { "我", "购买", "test", "了", "道具", "和", "服装", "1234"});
}
@@ -106,7 +107,7 @@
* Full width alphas and numerics are folded to half-width
*/
public void testFullWidth() throws Exception {
- assertAnalyzesTo(new SmartChineseAnalyzer(true), "我购买 Tests 了道具和服装1234",
+ assertAnalyzesTo(new SmartChineseAnalyzer(Version.LUCENE_CURRENT, true), "我购买 Tests 了道具和服装1234",
new String[] { "我", "购买", "test", "了", "道具", "和", "服装", "1234"});
}
@@ -114,7 +115,7 @@
* Presentation form delimiters are removed
*/
public void testDelimiters() throws Exception {
- assertAnalyzesTo(new SmartChineseAnalyzer(true), "我购买︱ Tests 了道具和服装",
+ assertAnalyzesTo(new SmartChineseAnalyzer(Version.LUCENE_CURRENT, true), "我购买︱ Tests 了道具和服装",
new String[] { "我", "购买", "test", "了", "道具", "和", "服装"});
}
@@ -123,7 +124,7 @@
* (regardless of Unicode category)
*/
public void testNonChinese() throws Exception {
- assertAnalyzesTo(new SmartChineseAnalyzer(true), "我购买 روبرتTests 了道具和服装",
+ assertAnalyzesTo(new SmartChineseAnalyzer(Version.LUCENE_CURRENT, true), "我购买 روبرتTests 了道具和服装",
new String[] { "我", "购买", "ر", "و", "ب", "ر", "ت", "test", "了", "道具", "和", "服装"});
}
@@ -133,22 +134,22 @@
* Currently it is being analyzed into single characters...
*/
public void testOOV() throws Exception {
- assertAnalyzesTo(new SmartChineseAnalyzer(true), "优素福·拉扎·吉拉尼",
+ assertAnalyzesTo(new SmartChineseAnalyzer(Version.LUCENE_CURRENT, true), "优素福·拉扎·吉拉尼",
new String[] { "优", "素", "福", "拉", "扎", "吉", "拉", "尼" });
- assertAnalyzesTo(new SmartChineseAnalyzer(true), "优素福拉扎吉拉尼",
+ assertAnalyzesTo(new SmartChineseAnalyzer(Version.LUCENE_CURRENT, true), "优素福拉扎吉拉尼",
new String[] { "优", "素", "福", "拉", "扎", "吉", "拉", "尼" });
}
public void testOffsets() throws Exception {
- assertAnalyzesTo(new SmartChineseAnalyzer(true), "我购买了道具和服装",
+ assertAnalyzesTo(new SmartChineseAnalyzer(Version.LUCENE_CURRENT, true), "我购买了道具和服装",
new String[] { "我", "购买", "了", "道具", "和", "服装" },
new int[] { 0, 1, 3, 4, 6, 7 },
new int[] { 1, 3, 4, 6, 7, 9 });
}
public void testReusableTokenStream() throws Exception {
- Analyzer a = new SmartChineseAnalyzer();
+ Analyzer a = new SmartChineseAnalyzer(Version.LUCENE_CURRENT);
assertAnalyzesToReuse(a, "我购买 Tests 了道具和服装",
new String[] { "我", "购买", "test", "了", "道具", "和", "服装"},
new int[] { 0, 1, 4, 10, 11, 13, 14 },
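
The tests above pin down SmartChineseAnalyzer's new constructors: a leading matchVersion plus the old stopword toggle, whose meaning is unchanged. A usage sketch matching the segmentation that testChineseAnalyzer expects:

    import java.io.StringReader;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer;
    import org.apache.lucene.analysis.tokenattributes.TermAttribute;
    import org.apache.lucene.util.Version;

    public class SmartChineseSketch {
      public static void main(String[] args) throws Exception {
        // true still means "load the bundled stop words"; matchVersion comes first.
        Analyzer analyzer = new SmartChineseAnalyzer(Version.LUCENE_CURRENT, true);
        TokenStream ts = analyzer.tokenStream("f", new StringReader("我购买了道具和服装"));
        TermAttribute term = (TermAttribute) ts.addAttribute(TermAttribute.class);
        while (ts.incrementToken()) {
          System.out.println(term.term()); // 我 / 购买 / 了 / 道具 / 和 / 服装
        }
      }
    }
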
Index: contrib/analyzers/smartcn/src/java/org/apache/lucene/analysis/cn/smart/SmartChineseAnalyzer.java
===================================================================
--- contrib/analyzers/smartcn/src/java/org/apache/lucene/analysis/cn/smart/SmartChineseAnalyzer.java (revision 829134)
+++ contrib/analyzers/smartcn/src/java/org/apache/lucene/analysis/cn/smart/SmartChineseAnalyzer.java (working copy)
@@ -32,6 +32,7 @@
import org.apache.lucene.analysis.WordlistLoader;
import org.apache.lucene.analysis.cn.smart.SentenceTokenizer;
import org.apache.lucene.analysis.cn.smart.WordTokenFilter;
+import org.apache.lucene.util.Version;
/**
*
NOTE: This class uses the same {@link Version} + * dependent settings as {@link StandardAnalyzer}.
*/ public class GermanAnalyzer extends Analyzer { @@ -74,37 +78,43 @@ */ private Set exclusionSet = new HashSet(); + private final Version matchVersion; + /** * Builds an analyzer with the default stop words: * {@link #GERMAN_STOP_WORDS}. */ - public GermanAnalyzer() { + public GermanAnalyzer(Version matchVersion) { stopSet = StopFilter.makeStopSet(GERMAN_STOP_WORDS); setOverridesTokenStreamMethod(GermanAnalyzer.class); + this.matchVersion = matchVersion; } /** * Builds an analyzer with the given stop words. */ - public GermanAnalyzer(String... stopwords) { + public GermanAnalyzer(Version matchVersion, String... stopwords) { stopSet = StopFilter.makeStopSet(stopwords); setOverridesTokenStreamMethod(GermanAnalyzer.class); + this.matchVersion = matchVersion; } /** * Builds an analyzer with the given stop words. */ - public GermanAnalyzer(Map stopwords) { + public GermanAnalyzer(Version matchVersion, Map stopwords) { stopSet = new HashSet(stopwords.keySet()); setOverridesTokenStreamMethod(GermanAnalyzer.class); + this.matchVersion = matchVersion; } /** * Builds an analyzer with the given stop words. */ - public GermanAnalyzer(File stopwords) throws IOException { + public GermanAnalyzer(Version matchVersion, File stopwords) throws IOException { stopSet = WordlistLoader.getWordSet(stopwords); setOverridesTokenStreamMethod(GermanAnalyzer.class); + this.matchVersion = matchVersion; } /** @@ -139,10 +149,11 @@ * {@link GermanStemFilter} */ public TokenStream tokenStream(String fieldName, Reader reader) { - TokenStream result = new StandardTokenizer(reader); + TokenStream result = new StandardTokenizer(matchVersion, reader); result = new StandardFilter(result); result = new LowerCaseFilter(result); - result = new StopFilter(false, result, stopSet); + result = new StopFilter(StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion), + result, stopSet); result = new GermanStemFilter(result, exclusionSet); return result; } @@ -171,10 +182,11 @@ SavedStreams streams = (SavedStreams) getPreviousTokenStream(); if (streams == null) { streams = new SavedStreams(); - streams.source = new StandardTokenizer(reader); + streams.source = new StandardTokenizer(matchVersion, reader); streams.result = new StandardFilter(streams.source); streams.result = new LowerCaseFilter(streams.result); - streams.result = new StopFilter(false, streams.result, stopSet); + streams.result = new StopFilter(StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion), + streams.result, stopSet); streams.result = new GermanStemFilter(streams.result, exclusionSet); setPreviousTokenStream(streams); } else { Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/el/GreekAnalyzer.java =================================================================== --- contrib/analyzers/common/src/java/org/apache/lucene/analysis/el/GreekAnalyzer.java (revision 829134) +++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/el/GreekAnalyzer.java (working copy) @@ -22,6 +22,7 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.standard.StandardTokenizer; +import org.apache.lucene.util.Version; import java.io.IOException; import java.io.Reader; @@ -36,6 +37,9 @@ * that will not be indexed at all). * A default set of stopwords is used unless an alternative list is specified. * + * + *NOTE: This class uses the same {@link Version} + * dependent settings as {@link StandardAnalyzer}.
*/ public final class GreekAnalyzer extends Analyzer { @@ -59,27 +63,33 @@ */ private Set stopSet = new HashSet(); - public GreekAnalyzer() { - this(GREEK_STOP_WORDS); + private final Version matchVersion; + + public GreekAnalyzer(Version matchVersion) { + super(); + stopSet = StopFilter.makeStopSet(GREEK_STOP_WORDS); + this.matchVersion = matchVersion; } - + /** * Builds an analyzer with the given stop words. * @param stopwords Array of stopwords to use. */ - public GreekAnalyzer(String... stopwords) + public GreekAnalyzer(Version matchVersion, String... stopwords) { - super(); - stopSet = StopFilter.makeStopSet(stopwords); + super(); + stopSet = StopFilter.makeStopSet(stopwords); + this.matchVersion = matchVersion; } - + /** * Builds an analyzer with the given stop words. */ - public GreekAnalyzer(Map stopwords) + public GreekAnalyzer(Version matchVersion, Map stopwords) { - super(); - stopSet = new HashSet(stopwords.keySet()); + super(); + stopSet = new HashSet(stopwords.keySet()); + this.matchVersion = matchVersion; } /** @@ -90,9 +100,10 @@ */ public TokenStream tokenStream(String fieldName, Reader reader) { - TokenStream result = new StandardTokenizer(reader); + TokenStream result = new StandardTokenizer(matchVersion, reader); result = new GreekLowerCaseFilter(result); - result = new StopFilter(false, result, stopSet); + result = new StopFilter(StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion), + result, stopSet); return result; } @@ -113,9 +124,10 @@ SavedStreams streams = (SavedStreams) getPreviousTokenStream(); if (streams == null) { streams = new SavedStreams(); - streams.source = new StandardTokenizer(reader); + streams.source = new StandardTokenizer(matchVersion, reader); streams.result = new GreekLowerCaseFilter(streams.source); - streams.result = new StopFilter(false, streams.result, stopSet); + streams.result = new StopFilter(StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion), + streams.result, stopSet); setPreviousTokenStream(streams); } else { streams.source.reset(reader); Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/ar/ArabicAnalyzer.java =================================================================== --- contrib/analyzers/common/src/java/org/apache/lucene/analysis/ar/ArabicAnalyzer.java (revision 829134) +++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/ar/ArabicAnalyzer.java (working copy) @@ -33,6 +33,7 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.WordlistLoader; +import org.apache.lucene.util.Version; /** * {@link Analyzer} for Arabic. @@ -109,32 +110,38 @@ } } + private final Version matchVersion; + /** * Builds an analyzer with the default stop words: {@link #DEFAULT_STOPWORD_FILE}. */ - public ArabicAnalyzer() { + public ArabicAnalyzer(Version matchVersion) { + this.matchVersion = matchVersion; stoptable = DefaultSetHolder.DEFAULT_STOP_SET; } /** * Builds an analyzer with the given stop words. */ - public ArabicAnalyzer( String... stopwords ) { + public ArabicAnalyzer( Version matchVersion, String... stopwords ) { stoptable = StopFilter.makeStopSet( stopwords ); + this.matchVersion = matchVersion; } /** * Builds an analyzer with the given stop words. 
*/ - public ArabicAnalyzer( Hashtable,?> stopwords ) { - stoptable = new HashSet( stopwords.keySet() ); + public ArabicAnalyzer( Version matchVersion, Hashtable,?> stopwords ) { + stoptable = new HashSet(stopwords.keySet()); + this.matchVersion = matchVersion; } /** * Builds an analyzer with the given stop words. Lines can be commented out using {@link #STOPWORDS_COMMENT} */ - public ArabicAnalyzer( File stopwords ) throws IOException { + public ArabicAnalyzer( Version matchVersion, File stopwords ) throws IOException { stoptable = WordlistLoader.getWordSet( stopwords, STOPWORDS_COMMENT); + this.matchVersion = matchVersion; } @@ -149,7 +156,8 @@ TokenStream result = new ArabicLetterTokenizer( reader ); result = new LowerCaseFilter(result); // the order here is important: the stopword list is not normalized! - result = new StopFilter(false, result, stoptable ); + result = new StopFilter( StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion), + result, stoptable ); result = new ArabicNormalizationFilter( result ); result = new ArabicStemFilter( result ); @@ -177,7 +185,8 @@ streams.source = new ArabicLetterTokenizer(reader); streams.result = new LowerCaseFilter(streams.source); // the order here is important: the stopword list is not normalized! - streams.result = new StopFilter(false, streams.result, stoptable); + streams.result = new StopFilter(StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion), + streams.result, stoptable); streams.result = new ArabicNormalizationFilter(streams.result); streams.result = new ArabicStemFilter(streams.result); setPreviousTokenStream(streams); Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java =================================================================== --- contrib/analyzers/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java (revision 829134) +++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java (working copy) @@ -33,6 +33,7 @@ import org.apache.lucene.analysis.WordlistLoader; import org.apache.lucene.analysis.standard.StandardFilter; import org.apache.lucene.analysis.standard.StandardTokenizer; +import org.apache.lucene.util.Version; /** * {@link Analyzer} for Brazilian Portuguese language. @@ -41,6 +42,9 @@ * will not be indexed at all) and an external list of exclusions (words that will * not be stemmed, but indexed). * + * + *NOTE: This class uses the same {@link Version} + * dependent settings as {@link StandardAnalyzer}.
*/ public final class BrazilianAnalyzer extends Analyzer { @@ -78,33 +82,38 @@ * Contains words that should be indexed but not stemmed. */ private Set excltable = Collections.emptySet(); + private final Version matchVersion; /** * Builds an analyzer with the default stop words ({@link #BRAZILIAN_STOP_WORDS}). */ - public BrazilianAnalyzer() { - stoptable = StopFilter.makeStopSet( BRAZILIAN_STOP_WORDS ); + public BrazilianAnalyzer(Version matchVersion) { + stoptable = StopFilter.makeStopSet( BRAZILIAN_STOP_WORDS ); + this.matchVersion = matchVersion; } /** * Builds an analyzer with the given stop words. */ - public BrazilianAnalyzer( String... stopwords ) { - stoptable = StopFilter.makeStopSet( stopwords ); + public BrazilianAnalyzer( Version matchVersion, String... stopwords ) { + stoptable = StopFilter.makeStopSet( stopwords ); + this.matchVersion = matchVersion; } /** * Builds an analyzer with the given stop words. */ - public BrazilianAnalyzer( Map stopwords ) { - stoptable = new HashSet(stopwords.keySet()); + public BrazilianAnalyzer( Version matchVersion, Map stopwords ) { + stoptable = new HashSet(stopwords.keySet()); + this.matchVersion = matchVersion; } /** * Builds an analyzer with the given stop words. */ - public BrazilianAnalyzer( File stopwords ) throws IOException { - stoptable = WordlistLoader.getWordSet( stopwords ); + public BrazilianAnalyzer( Version matchVersion, File stopwords ) throws IOException { + stoptable = WordlistLoader.getWordSet( stopwords ); + this.matchVersion = matchVersion; } /** @@ -137,10 +146,11 @@ * {@link BrazilianStemFilter}. */ public final TokenStream tokenStream(String fieldName, Reader reader) { - TokenStream result = new StandardTokenizer( reader ); + TokenStream result = new StandardTokenizer( matchVersion, reader ); result = new LowerCaseFilter( result ); result = new StandardFilter( result ); - result = new StopFilter( false, result, stoptable ); + result = new StopFilter( StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion), + result, stoptable ); result = new BrazilianStemFilter( result, excltable ); return result; } @@ -163,10 +173,11 @@ SavedStreams streams = (SavedStreams) getPreviousTokenStream(); if (streams == null) { streams = new SavedStreams(); - streams.source = new StandardTokenizer(reader); + streams.source = new StandardTokenizer(matchVersion, reader); streams.result = new LowerCaseFilter(streams.source); streams.result = new StandardFilter(streams.result); - streams.result = new StopFilter(false, streams.result, stoptable); + streams.result = new StopFilter(StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion), + streams.result, stoptable); streams.result = new BrazilianStemFilter(streams.result, excltable); setPreviousTokenStream(streams); } else { Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java =================================================================== --- contrib/analyzers/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java (revision 829134) +++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzer.java (working copy) @@ -23,6 +23,7 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.StopFilter; import org.apache.lucene.util.StringHelper; +import org.apache.lucene.util.Version; import java.io.IOException; import java.io.Reader; @@ -48,15 +49,17 @@ //The default maximum percentage (40%) of index documents which //can contain a term, 
after which the term is considered to be a stop word. public static final float defaultMaxDocFreqPercent = 0.4f; + private final Version matchVersion; /** * Initializes this analyzer with the Analyzer object that actually produces the tokens * * @param delegate The choice of {@link Analyzer} that is used to produce the token stream which needs filtering */ - public QueryAutoStopWordAnalyzer(Analyzer delegate) { + public QueryAutoStopWordAnalyzer(Version matchVersion, Analyzer delegate) { this.delegate = delegate; setOverridesTokenStreamMethod(QueryAutoStopWordAnalyzer.class); + this.matchVersion = matchVersion; } /** @@ -175,7 +178,8 @@ } HashSet stopWords = (HashSet) stopWordsPerField.get(fieldName); if (stopWords != null) { - result = new StopFilter(false, result, stopWords); + result = new StopFilter(StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion), + result, stopWords); } return result; } @@ -217,7 +221,8 @@ /* if there are any stopwords for the field, save the stopfilter */ HashSet stopWords = (HashSet) stopWordsPerField.get(fieldName); if (stopWords != null) - streams.withStopFilter = new StopFilter(false, streams.wrapped, stopWords); + streams.withStopFilter = new StopFilter(StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion), + streams.wrapped, stopWords); else streams.withStopFilter = streams.wrapped; @@ -238,7 +243,8 @@ streams.wrapped = result; HashSet stopWords = (HashSet) stopWordsPerField.get(fieldName); if (stopWords != null) - streams.withStopFilter = new StopFilter(false, streams.wrapped, stopWords); + streams.withStopFilter = new StopFilter(StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion), + streams.wrapped, stopWords); else streams.withStopFilter = streams.wrapped; } Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/fr/FrenchAnalyzer.java =================================================================== --- contrib/analyzers/common/src/java/org/apache/lucene/analysis/fr/FrenchAnalyzer.java (revision 829134) +++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/fr/FrenchAnalyzer.java (working copy) @@ -25,6 +25,7 @@ import org.apache.lucene.analysis.WordlistLoader; import org.apache.lucene.analysis.standard.StandardFilter; import org.apache.lucene.analysis.standard.StandardTokenizer; +import org.apache.lucene.util.Version; import java.io.File; import java.io.IOException; @@ -42,6 +43,17 @@ * A default set of stopwords is used unless an alternative list is specified, but the * exclusion list is empty by default. * + * + * + *You must specify the required {@link Version} + * compatibility when creating FrenchAnalyzer: + *
NOTE: This class uses the same {@link Version} + * dependent settings as {@link StandardAnalyzer}.
*/ public final class FrenchAnalyzer extends Analyzer { @@ -82,26 +94,31 @@ */ private Set excltable = new HashSet(); + private final Version matchVersion; + /** * Builds an analyzer with the default stop words ({@link #FRENCH_STOP_WORDS}). */ - public FrenchAnalyzer() { + public FrenchAnalyzer(Version matchVersion) { stoptable = StopFilter.makeStopSet(FRENCH_STOP_WORDS); + this.matchVersion = matchVersion; } /** * Builds an analyzer with the given stop words. */ - public FrenchAnalyzer(String... stopwords) { + public FrenchAnalyzer(Version matchVersion, String... stopwords) { stoptable = StopFilter.makeStopSet(stopwords); + this.matchVersion = matchVersion; } /** * Builds an analyzer with the given stop words. * @throws IOException */ - public FrenchAnalyzer(File stopwords) throws IOException { + public FrenchAnalyzer(Version matchVersion, File stopwords) throws IOException { stoptable = new HashSet(WordlistLoader.getWordSet(stopwords)); + this.matchVersion = matchVersion; } /** @@ -138,9 +155,10 @@ * {@link FrenchStemFilter} and {@link LowerCaseFilter} */ public final TokenStream tokenStream(String fieldName, Reader reader) { - TokenStream result = new StandardTokenizer(reader); + TokenStream result = new StandardTokenizer(matchVersion, reader); result = new StandardFilter(result); - result = new StopFilter(false, result, stoptable); + result = new StopFilter(StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion), + result, stoptable); result = new FrenchStemFilter(result, excltable); // Convert to lowercase after stemming! result = new LowerCaseFilter(result); @@ -165,9 +183,10 @@ SavedStreams streams = (SavedStreams) getPreviousTokenStream(); if (streams == null) { streams = new SavedStreams(); - streams.source = new StandardTokenizer(reader); + streams.source = new StandardTokenizer(matchVersion, reader); streams.result = new StandardFilter(streams.source); - streams.result = new StopFilter(false, streams.result, stoptable); + streams.result = new StopFilter(StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion), + streams.result, stoptable); streams.result = new FrenchStemFilter(streams.result, excltable); // Convert to lowercase after stemming! streams.result = new LowerCaseFilter(streams.result); Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/cjk/CJKAnalyzer.java =================================================================== --- contrib/analyzers/common/src/java/org/apache/lucene/analysis/cjk/CJKAnalyzer.java (revision 829134) +++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/cjk/CJKAnalyzer.java (working copy) @@ -21,6 +21,7 @@ import org.apache.lucene.analysis.StopFilter; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; +import org.apache.lucene.util.Version; import java.io.IOException; import java.io.Reader; @@ -56,14 +57,16 @@ * stop word list */ private final Set stopTable; + private final Version matchVersion; //~ Constructors ----------------------------------------------------------- /** * Builds an analyzer which removes words in {@link #STOP_WORDS}. */ - public CJKAnalyzer() { + public CJKAnalyzer(Version matchVersion) { stopTable = StopFilter.makeStopSet(STOP_WORDS); + this.matchVersion = matchVersion; } /** @@ -71,8 +74,9 @@ * * @param stopWords stop word array */ - public CJKAnalyzer(String... stopWords) { + public CJKAnalyzer(Version matchVersion, String... 
stopWords) { stopTable = StopFilter.makeStopSet(stopWords); + this.matchVersion = matchVersion; } //~ Methods ---------------------------------------------------------------- @@ -86,7 +90,8 @@ * {@link StopFilter} */ public final TokenStream tokenStream(String fieldName, Reader reader) { - return new StopFilter(false, new CJKTokenizer(reader), stopTable); + return new StopFilter(StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion), + new CJKTokenizer(reader), stopTable); } private class SavedStreams { @@ -109,7 +114,8 @@ if (streams == null) { streams = new SavedStreams(); streams.source = new CJKTokenizer(reader); - streams.result = new StopFilter(false, streams.source, stopTable); + streams.result = new StopFilter(StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion), + streams.source, stopTable); setPreviousTokenStream(streams); } else { streams.source.reset(reader); Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/nl/DutchAnalyzer.java =================================================================== --- contrib/analyzers/common/src/java/org/apache/lucene/analysis/nl/DutchAnalyzer.java (revision 829134) +++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/nl/DutchAnalyzer.java (working copy) @@ -23,6 +23,7 @@ import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.standard.StandardFilter; import org.apache.lucene.analysis.standard.StandardTokenizer; +import org.apache.lucene.util.Version; import java.io.File; import java.io.IOException; @@ -42,6 +43,9 @@ * A default set of stopwords is used unless an alternative list is specified, but the * exclusion list is empty by default. * + * + *NOTE: This class uses the same {@link Version} + * dependent settings as {@link StandardAnalyzer}.
*/ public class DutchAnalyzer extends Analyzer { /** @@ -73,30 +77,33 @@ private Set excltable = new HashSet(); private Map stemdict = new HashMap(); + private final Version matchVersion; - /** * Builds an analyzer with the default stop words ({@link #DUTCH_STOP_WORDS}) * and a few default entries for the stem exclusion table. * */ - public DutchAnalyzer() { + public DutchAnalyzer(Version matchVersion) { setOverridesTokenStreamMethod(DutchAnalyzer.class); stoptable = StopFilter.makeStopSet(DUTCH_STOP_WORDS); stemdict.put("fiets", "fiets"); //otherwise fiet stemdict.put("bromfiets", "bromfiets"); //otherwise bromfiet stemdict.put("ei", "eier"); stemdict.put("kind", "kinder"); + this.matchVersion = matchVersion; } /** * Builds an analyzer with the given stop words. * + * @param matchVersion * @param stopwords */ - public DutchAnalyzer(String... stopwords) { + public DutchAnalyzer(Version matchVersion, String... stopwords) { setOverridesTokenStreamMethod(DutchAnalyzer.class); stoptable = StopFilter.makeStopSet(stopwords); + this.matchVersion = matchVersion; } /** @@ -104,9 +111,10 @@ * * @param stopwords */ - public DutchAnalyzer(HashSet stopwords) { + public DutchAnalyzer(Version matchVersion, HashSet stopwords) { setOverridesTokenStreamMethod(DutchAnalyzer.class); stoptable = stopwords; + this.matchVersion = matchVersion; } /** @@ -114,7 +122,7 @@ * * @param stopwords */ - public DutchAnalyzer(File stopwords) { + public DutchAnalyzer(Version matchVersion, File stopwords) { setOverridesTokenStreamMethod(DutchAnalyzer.class); try { stoptable = org.apache.lucene.analysis.WordlistLoader.getWordSet(stopwords); @@ -122,6 +130,7 @@ // TODO: throw IOException throw new RuntimeException(e); } + this.matchVersion = matchVersion; } /** @@ -179,9 +188,10 @@ * and {@link DutchStemFilter} */ public TokenStream tokenStream(String fieldName, Reader reader) { - TokenStream result = new StandardTokenizer(reader); + TokenStream result = new StandardTokenizer(matchVersion, reader); result = new StandardFilter(result); - result = new StopFilter(false, result, stoptable); + result = new StopFilter(StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion), + result, stoptable); result = new DutchStemFilter(result, excltable, stemdict); return result; } @@ -211,9 +221,10 @@ SavedStreams streams = (SavedStreams) getPreviousTokenStream(); if (streams == null) { streams = new SavedStreams(); - streams.source = new StandardTokenizer(reader); + streams.source = new StandardTokenizer(matchVersion, reader); streams.result = new StandardFilter(streams.source); - streams.result = new StopFilter(false, streams.result, stoptable); + streams.result = new StopFilter(StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion), + streams.result, stoptable); streams.result = new DutchStemFilter(streams.result, excltable, stemdict); setPreviousTokenStream(streams); } else { Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/th/ThaiAnalyzer.java =================================================================== --- contrib/analyzers/common/src/java/org/apache/lucene/analysis/th/ThaiAnalyzer.java (revision 829134) +++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/th/ThaiAnalyzer.java (working copy) @@ -25,22 +25,29 @@ import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.standard.StandardFilter; import org.apache.lucene.analysis.standard.StandardTokenizer; +import org.apache.lucene.util.Version; /** * {@link Analyzer} for Thai language. 
It uses {@link java.text.BreakIterator} to break words. * @version 0.2 + * + *NOTE: This class uses the same {@link Version} + * dependent settings as {@link StandardAnalyzer}.
*/ public class ThaiAnalyzer extends Analyzer { - - public ThaiAnalyzer() { + private final Version matchVersion; + + public ThaiAnalyzer(Version matchVersion) { setOverridesTokenStreamMethod(ThaiAnalyzer.class); + this.matchVersion = matchVersion; } public TokenStream tokenStream(String fieldName, Reader reader) { - TokenStream ts = new StandardTokenizer(reader); + TokenStream ts = new StandardTokenizer(matchVersion, reader); ts = new StandardFilter(ts); ts = new ThaiWordFilter(ts); - ts = new StopFilter(false, ts, StopAnalyzer.ENGLISH_STOP_WORDS_SET); + ts = new StopFilter(StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion), + ts, StopAnalyzer.ENGLISH_STOP_WORDS_SET); return ts; } @@ -60,10 +67,11 @@ SavedStreams streams = (SavedStreams) getPreviousTokenStream(); if (streams == null) { streams = new SavedStreams(); - streams.source = new StandardTokenizer(reader); + streams.source = new StandardTokenizer(matchVersion, reader); streams.result = new StandardFilter(streams.source); streams.result = new ThaiWordFilter(streams.result); - streams.result = new StopFilter(false, streams.result, StopAnalyzer.ENGLISH_STOP_WORDS_SET); + streams.result = new StopFilter(StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion), + streams.result, StopAnalyzer.ENGLISH_STOP_WORDS_SET); setPreviousTokenStream(streams); } else { streams.source.reset(reader); Index: contrib/analyzers/common/src/java/org/apache/lucene/analysis/cz/CzechAnalyzer.java =================================================================== --- contrib/analyzers/common/src/java/org/apache/lucene/analysis/cz/CzechAnalyzer.java (revision 829134) +++ contrib/analyzers/common/src/java/org/apache/lucene/analysis/cz/CzechAnalyzer.java (working copy) @@ -25,6 +25,7 @@ import org.apache.lucene.analysis.WordlistLoader; import org.apache.lucene.analysis.standard.StandardFilter; import org.apache.lucene.analysis.standard.StandardTokenizer; +import org.apache.lucene.util.Version; import java.io.*; import java.util.HashSet; @@ -38,6 +39,9 @@ * will not be indexed at all). * A default set of stopwords is used unless an alternative list is specified. * + * + *NOTE: This class uses the same {@link Version} + * dependent settings as {@link StandardAnalyzer}.
*/ public final class CzechAnalyzer extends Analyzer { @@ -69,30 +73,35 @@ * Contains the stopwords used with the {@link StopFilter}. */ private Set stoptable; + private final Version matchVersion; /** * Builds an analyzer with the default stop words ({@link #CZECH_STOP_WORDS}). */ - public CzechAnalyzer() { - stoptable = StopFilter.makeStopSet( CZECH_STOP_WORDS ); + public CzechAnalyzer(Version matchVersion) { + stoptable = StopFilter.makeStopSet( CZECH_STOP_WORDS ); + this.matchVersion = matchVersion; } /** * Builds an analyzer with the given stop words. */ - public CzechAnalyzer( String... stopwords ) { - stoptable = StopFilter.makeStopSet( stopwords ); + public CzechAnalyzer(Version matchVersion, String... stopwords) { + stoptable = StopFilter.makeStopSet( stopwords ); + this.matchVersion = matchVersion; } - public CzechAnalyzer( HashSet stopwords ) { - stoptable = stopwords; + public CzechAnalyzer(Version matchVersion, HashSet stopwords) { + stoptable = stopwords; + this.matchVersion = matchVersion; } /** * Builds an analyzer with the given stop words. */ - public CzechAnalyzer( File stopwords ) throws IOException { - stoptable = WordlistLoader.getWordSet( stopwords ); + public CzechAnalyzer(Version matchVersion, File stopwords ) throws IOException { + stoptable = WordlistLoader.getWordSet( stopwords ); + this.matchVersion = matchVersion; } /** @@ -131,10 +140,11 @@ * {@link StandardFilter}, {@link LowerCaseFilter}, and {@link StopFilter} */ public final TokenStream tokenStream( String fieldName, Reader reader ) { - TokenStream result = new StandardTokenizer( reader ); + TokenStream result = new StandardTokenizer( matchVersion, reader ); result = new StandardFilter( result ); result = new LowerCaseFilter( result ); - result = new StopFilter(false, result, stoptable ); + result = new StopFilter( StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion), + result, stoptable ); return result; } @@ -155,10 +165,11 @@ SavedStreams streams = (SavedStreams) getPreviousTokenStream(); if (streams == null) { streams = new SavedStreams(); - streams.source = new StandardTokenizer(reader); + streams.source = new StandardTokenizer(matchVersion, reader); streams.result = new StandardFilter(streams.source); streams.result = new LowerCaseFilter(streams.result); - streams.result = new StopFilter(false, streams.result, stoptable); + streams.result = new StopFilter(StopFilter.getEnablePositionIncrementsVersionDefault(matchVersion), + streams.result, stoptable); setPreviousTokenStream(streams); } else { streams.source.reset(reader); Index: tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java =================================================================== --- tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java (revision 829190) +++ tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/queryParser/TestMultiAnalyzer.java (working copy) @@ -31,6 +31,7 @@ import org.apache.lucene.analysis.tokenattributes.TypeAttribute; import org.apache.lucene.search.Query; import org.apache.lucene.analysis.BaseTokenStreamTestCase; +import org.apache.lucene.util.Version; /** * Test QueryParser's ability to deal with Analyzers that return more @@ -44,7 +45,7 @@ public void testMultiAnalyzer() throws ParseException { - QueryParser qp = new QueryParser("", new MultiAnalyzer()); + QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "", new MultiAnalyzer()); // trivial, no multiple tokens: 
assertEquals("foo", qp.parse("foo").toString()); @@ -117,7 +118,7 @@ } public void testPosIncrementAnalyzer() throws ParseException { - QueryParser qp = new QueryParser("", new PosIncrementAnalyzer()); + QueryParser qp = new QueryParser(Version.LUCENE_24, "", new PosIncrementAnalyzer()); assertEquals("quick brown", qp.parse("the quick brown").toString()); assertEquals("\"quick brown\"", qp.parse("\"the quick brown\"").toString()); assertEquals("quick brown fox", qp.parse("the quick brown fox").toString()); @@ -134,7 +135,7 @@ } public TokenStream tokenStream(String fieldName, Reader reader) { - TokenStream result = new StandardTokenizer(reader); + TokenStream result = new StandardTokenizer(Version.LUCENE_CURRENT, reader); result = new TestFilter(result); result = new LowerCaseFilter(result); return result; @@ -200,7 +201,7 @@ } public TokenStream tokenStream(String fieldName, Reader reader) { - TokenStream result = new StandardTokenizer(reader); + TokenStream result = new StandardTokenizer(Version.LUCENE_CURRENT, reader); result = new TestPosIncrementFilter(result); result = new LowerCaseFilter(result); return result; @@ -238,7 +239,7 @@ private final static class DumbQueryParser extends QueryParser { public DumbQueryParser(String f, Analyzer a) { - super(f, a); + super(Version.LUCENE_CURRENT, f, a); } /** expose super's version */ Index: tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java =================================================================== --- tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java (revision 829190) +++ tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java (working copy) @@ -36,6 +36,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.analysis.BaseTokenStreamTestCase; +import org.apache.lucene.util.Version; /** * Tests QueryParser. 
@@ -59,18 +60,18 @@
String[] fields = {"b", "t"};
Occur occur[] = {Occur.SHOULD, Occur.SHOULD};
TestQueryParser.QPTestAnalyzer a = new TestQueryParser.QPTestAnalyzer();
- MultiFieldQueryParser mfqp = new MultiFieldQueryParser(fields, a);
+ MultiFieldQueryParser mfqp = new MultiFieldQueryParser(Version.LUCENE_CURRENT, fields, a);
Query q = mfqp.parse(qtxt);
assertEquals(expectedRes, q.toString());
- q = MultiFieldQueryParser.parse(qtxt, fields, occur, a);
+ q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, qtxt, fields, occur, a);
assertEquals(expectedRes, q.toString());
}
public void testSimple() throws Exception {
String[] fields = {"b", "t"};
- MultiFieldQueryParser mfqp = new MultiFieldQueryParser(fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ MultiFieldQueryParser mfqp = new MultiFieldQueryParser(Version.LUCENE_CURRENT, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
Query q = mfqp.parse("one");
assertEquals("b:one t:one", q.toString());
@@ -133,7 +134,7 @@
boosts.put("b", new Float(5));
boosts.put("t", new Float(10));
String[] fields = {"b", "t"};
- MultiFieldQueryParser mfqp = new MultiFieldQueryParser(fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), boosts);
+ MultiFieldQueryParser mfqp = new MultiFieldQueryParser(Version.LUCENE_CURRENT, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT), boosts);
//Check for simple
@@ -159,24 +160,24 @@
public void testStaticMethod1() throws ParseException {
String[] fields = {"b", "t"};
String[] queries = {"one", "two"};
- Query q = MultiFieldQueryParser.parse(queries, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ Query q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
assertEquals("b:one t:two", q.toString());
String[] queries2 = {"+one", "+two"};
- q = MultiFieldQueryParser.parse(queries2, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries2, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
assertEquals("(+b:one) (+t:two)", q.toString());
String[] queries3 = {"one", "+two"};
- q = MultiFieldQueryParser.parse(queries3, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries3, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
assertEquals("b:one (+t:two)", q.toString());
String[] queries4 = {"one +more", "+two"};
- q = MultiFieldQueryParser.parse(queries4, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries4, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
assertEquals("(b:one +b:more) (+t:two)", q.toString());
String[] queries5 = {"blah"};
try {
- q = MultiFieldQueryParser.parse(queries5, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries5, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
fail();
} catch(IllegalArgumentException e) {
// expected exception, array length differs
@@ -186,11 +187,11 @@
TestQueryParser.QPTestAnalyzer stopA = new TestQueryParser.QPTestAnalyzer();
String[] queries6 = {"((+stop))", "+((stop))"};
- q = MultiFieldQueryParser.parse(queries6, fields, stopA);
+ q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries6, fields, stopA);
assertEquals("", q.toString());
String[] queries7 = {"one ((+stop)) +more", "+((stop)) +two"};
- q = MultiFieldQueryParser.parse(queries7, fields, stopA);
+ q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries7, fields, stopA);
assertEquals("(b:one +b:more) (+t:two)", q.toString());
}
@@ -198,15 +199,15 @@
public void testStaticMethod2() throws ParseException {
String[] fields = {"b", "t"};
BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT};
- Query q = MultiFieldQueryParser.parse("one", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ Query q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "one", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
assertEquals("+b:one -t:one", q.toString());
- q = MultiFieldQueryParser.parse("one two", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "one two", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
try {
BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST};
- q = MultiFieldQueryParser.parse("blah", fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "blah", fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
fail();
} catch(IllegalArgumentException e) {
// expected exception, array length differs
@@ -217,17 +218,17 @@
String[] fields = {"b", "t"};
//int[] flags = {MultiFieldQueryParser.REQUIRED_FIELD, MultiFieldQueryParser.PROHIBITED_FIELD};
BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT};
- MultiFieldQueryParser parser = new MultiFieldQueryParser(fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ MultiFieldQueryParser parser = new MultiFieldQueryParser(Version.LUCENE_CURRENT, fields, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
- Query q = MultiFieldQueryParser.parse("one", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));//, fields, flags, new StandardAnalyzer());
+ Query q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "one", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));//, fields, flags, new StandardAnalyzer());
assertEquals("+b:one -t:one", q.toString());
- q = MultiFieldQueryParser.parse("one two", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "one two", fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
try {
BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST};
- q = MultiFieldQueryParser.parse("blah", fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "blah", fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
fail();
} catch(IllegalArgumentException e) {
// expected exception, array length differs
@@ -239,12 +240,12 @@
String[] fields = {"f1", "f2", "f3"};
BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT, BooleanClause.Occur.SHOULD};
- Query q = MultiFieldQueryParser.parse(queries, fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ Query q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries, fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
assertEquals("+f1:one -f2:two f3:three", q.toString());
try {
BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST};
- q = MultiFieldQueryParser.parse(queries, fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries, fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
fail();
} catch(IllegalArgumentException e) {
// expected exception, array length differs
@@ -255,12 +256,12 @@
String[] queries = {"one", "two"};
String[] fields = {"b", "t"};
BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT};
- Query q = MultiFieldQueryParser.parse(queries, fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ Query q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries, fields, flags, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
assertEquals("+b:one -t:two", q.toString());
try {
BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST};
- q = MultiFieldQueryParser.parse(queries, fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ q = MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, queries, fields, flags2, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
fail();
} catch(IllegalArgumentException e) {
// expected exception, array length differs
@@ -269,7 +270,7 @@
public void testAnalyzerReturningNull() throws ParseException {
String[] fields = new String[] { "f1", "f2", "f3" };
- MultiFieldQueryParser parser = new MultiFieldQueryParser(fields, new AnalyzerReturningNull());
+ MultiFieldQueryParser parser = new MultiFieldQueryParser(Version.LUCENE_CURRENT, fields, new AnalyzerReturningNull());
Query q = parser.parse("bla AND blo");
assertEquals("+(f2:bla f3:bla) +(f2:blo f3:blo)", q.toString());
// the following queries are not affected as their terms are not analyzed anyway:
@@ -291,7 +292,7 @@
iw.close();
MultiFieldQueryParser mfqp =
- new MultiFieldQueryParser(new String[] {"body"}, analyzer);
+ new MultiFieldQueryParser(Version.LUCENE_CURRENT, new String[] {"body"}, analyzer);
mfqp.setDefaultOperator(QueryParser.Operator.AND);
Query q = mfqp.parse("the footest");
IndexSearcher is = new IndexSearcher(ramDir, true);
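For reference, a minimal migration sketch of the constructor change the TestMultiFieldQueryParser hunks above apply mechanically; the field names and analyzer choice here are illustrative, not taken from the patch:

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.queryParser.MultiFieldQueryParser;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.util.Version;

    class MigrationSketch {
      // before LUCENE-2002: new MultiFieldQueryParser(fields, analyzer)
      // after: the required Version matchVersion argument comes first
      static Query parse(String userQuery) throws Exception {
        String[] fields = {"title", "body"}; // illustrative field names
        MultiFieldQueryParser mfqp = new MultiFieldQueryParser(Version.LUCENE_CURRENT,
            fields, new StandardAnalyzer(Version.LUCENE_CURRENT));
        return mfqp.parse(userQuery);
      }
    }
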
Index: tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/queryParser/TestQueryParser.java
===================================================================
--- tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/queryParser/TestQueryParser.java (revision 829190)
+++ tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/queryParser/TestQueryParser.java (working copy)
@@ -47,6 +47,7 @@
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.FuzzyQuery;
@@ -60,7 +61,10 @@
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.util.LocalizedTestCase;
+import org.apache.lucene.util.Version;
/**
* Tests QueryParser.
@@ -127,7 +131,7 @@
public static class QPTestParser extends QueryParser {
public QPTestParser(String f, Analyzer a) {
- super(f, a);
+ super(Version.LUCENE_CURRENT, f, a);
}
protected Query getFuzzyQuery(String field, String termStr, float minSimilarity) throws ParseException {
@@ -149,7 +153,7 @@
public QueryParser getParser(Analyzer a) throws Exception {
if (a == null)
a = new SimpleAnalyzer();
- QueryParser qp = new QueryParser("field", a);
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", a);
qp.setDefaultOperator(QueryParser.OR_OPERATOR);
return qp;
}
@@ -219,7 +223,7 @@
throws Exception {
if (a == null)
a = new SimpleAnalyzer();
- QueryParser qp = new QueryParser("field", a);
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", a);
qp.setDefaultOperator(QueryParser.AND_OPERATOR);
return qp.parse(query);
}
@@ -291,7 +295,7 @@
assertQueryEquals("+title:(dog OR cat) -author:\"bob dole\"", null,
"+(title:dog title:cat) -author:\"bob dole\"");
- QueryParser qp = new QueryParser("field", new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
// make sure OR is the default:
assertEquals(QueryParser.OR_OPERATOR, qp.getDefaultOperator());
qp.setDefaultOperator(QueryParser.AND_OPERATOR);
@@ -446,7 +450,7 @@
assertQueryEquals("[ a TO z]", null, "[a TO z]");
assertEquals(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT, ((TermRangeQuery)getQuery("[ a TO z]", null)).getRewriteMethod());
- QueryParser qp = new QueryParser("field", new SimpleAnalyzer());
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new SimpleAnalyzer());
qp.setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
assertEquals(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE,((TermRangeQuery)qp.parse("[ a TO z]")).getRewriteMethod());
@@ -472,7 +476,7 @@
iw.close();
IndexSearcher is = new IndexSearcher(ramDir, true);
- QueryParser qp = new QueryParser("content", new WhitespaceAnalyzer());
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "content", new WhitespaceAnalyzer());
// Neither Java 1.4.2 nor 1.5.0 has Farsi Locale collation available in
// RuleBasedCollator. However, the Arabic Locale seems to order the Farsi
@@ -569,7 +573,7 @@
final String defaultField = "default";
final String monthField = "month";
final String hourField = "hour";
- QueryParser qp = new QueryParser("field", new SimpleAnalyzer());
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new SimpleAnalyzer());
// Don't set any date resolution and verify if DateField is used
assertDateRangeQueryEquals(qp, defaultField, startDate, endDate,
@@ -792,7 +796,7 @@
Set stopWords = new HashSet(1);
stopWords.add("on");
StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT, stopWords);
- QueryParser qp = new QueryParser("field", oneStopAnalyzer);
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", oneStopAnalyzer);
Query q = qp.parse("on^1.0");
assertNotNull(q);
q = qp.parse("\"hello\"^2.0");
@@ -804,7 +808,7 @@
q = qp.parse("\"on\"^1.0");
assertNotNull(q);
- QueryParser qp2 = new QueryParser("field", new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ QueryParser qp2 = new QueryParser(Version.LUCENE_CURRENT, "field", new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
q = qp2.parse("the^3");
// "the" is a stop word so the result is an empty query:
assertNotNull(q);
@@ -852,7 +856,7 @@
public void testBooleanQuery() throws Exception {
BooleanQuery.setMaxClauseCount(2);
try {
- QueryParser qp = new QueryParser("field", new WhitespaceAnalyzer());
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new WhitespaceAnalyzer());
qp.parse("one two three");
fail("ParseException expected due to too many boolean clauses");
} catch (ParseException expected) {
@@ -864,7 +868,7 @@
* This test differs from TestPrecedenceQueryParser
*/
public void testPrecedence() throws Exception {
- QueryParser qp = new QueryParser("field", new WhitespaceAnalyzer());
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new WhitespaceAnalyzer());
Query query1 = qp.parse("A AND B OR C AND D");
Query query2 = qp.parse("+A +B +C +D");
assertEquals(query1, query2);
@@ -888,7 +892,7 @@
public void testStarParsing() throws Exception {
final int[] type = new int[1];
- QueryParser qp = new QueryParser("field", new WhitespaceAnalyzer()) {
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new WhitespaceAnalyzer()) {
protected Query getWildcardQuery(String field, String termStr) throws ParseException {
// override error checking of superclass
type[0]=1;
@@ -944,7 +948,7 @@
}
public void testStopwords() throws Exception {
- QueryParser qp = new QueryParser("a", new StopAnalyzer(StopFilter.makeStopSet(new String[]{"the", "foo"}), true));
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "a", new StopAnalyzer(Version.LUCENE_CURRENT, StopFilter.makeStopSet(new String[]{"the", "foo"})));
Query result = qp.parse("a:the OR a:foo");
assertNotNull("result is null and it shouldn't be", result);
assertTrue("result is not a BooleanQuery", result instanceof BooleanQuery);
@@ -960,7 +964,7 @@
}
public void testPositionIncrement() throws Exception {
- QueryParser qp = new QueryParser("a", new StopAnalyzer(StopFilter.makeStopSet(new String[]{"the", "in", "are", "this"}), true));
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "a", new StopAnalyzer(Version.LUCENE_CURRENT, StopFilter.makeStopSet(new String[]{"the", "in", "are", "this"})));
qp.setEnablePositionIncrements(true);
String qtxt = "\"the words in poisitions pos02578 are stopped in this phrasequery\"";
// 0 2 5 7 8
@@ -977,7 +981,7 @@
}
public void testMatchAllDocs() throws Exception {
- QueryParser qp = new QueryParser("field", new WhitespaceAnalyzer());
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", new WhitespaceAnalyzer());
assertEquals(new MatchAllDocsQuery(), qp.parse("*:*"));
assertEquals(new MatchAllDocsQuery(), qp.parse("(*:*)"));
BooleanQuery bq = (BooleanQuery)qp.parse("+*:* -*:*");
@@ -986,7 +990,7 @@
}
private void assertHits(int expected, String query, IndexSearcher is) throws ParseException, IOException {
- QueryParser qp = new QueryParser("date", new WhitespaceAnalyzer());
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "date", new WhitespaceAnalyzer());
qp.setLocale(Locale.ENGLISH);
Query q = qp.parse(query);
ScoreDoc[] hits = is.search(q, null, 1000).scoreDocs;
@@ -1008,4 +1012,49 @@
BooleanQuery.setMaxClauseCount(originalMaxClauses);
}
+ // LUCENE-2002: make sure defaults for StandardAnalyzer's
+ // enableStopPositionIncr & QueryParser's enablePosIncr
+ // "match"
+ public void testPositionIncrements() throws Exception {
+ Directory dir = new MockRAMDirectory();
+ Analyzer a = new StandardAnalyzer(Version.LUCENE_CURRENT);
+ IndexWriter w = new IndexWriter(dir, a, IndexWriter.MaxFieldLength.UNLIMITED);
+ Document doc = new Document();
+ doc.add(new Field("f", "the wizard of ozzy", Field.Store.NO, Field.Index.ANALYZED));
+ w.addDocument(doc);
+ IndexReader r = w.getReader();
+ w.close();
+ IndexSearcher s = new IndexSearcher(r);
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "f", a);
+ Query q = qp.parse("\"wizard of ozzy\"");
+ assertEquals(1, s.search(q, 1).totalHits);
+ r.close();
+ dir.close();
+ }
+
+ // LUCENE-2002: when we run javacc to regen QueryParser,
+ // we also run a replaceregexp step to fix 2 of the public
+ // ctors (change them to protected):
+ //
+ //   protected QueryParser(CharStream stream)
+ //
+ //   protected QueryParser(QueryParserTokenManager tm)
+ //
+ // This test is here as a safety, in case that ant step
+ // doesn't work for some reason.
+ public void testProtectedCtors() throws Exception {
+ try {
+ QueryParser.class.getConstructor(new Class[] {CharStream.class});
+ fail("please switch public QueryParser(CharStream) to be protected");
+ } catch (NoSuchMethodException nsme) {
+ // expected
+ }
+ try {
+ QueryParser.class.getConstructor(new Class[] {QueryParserTokenManager.class});
+ fail("please switch public QueryParser(QueryParserTokenManager) to be protected");
+ } catch (NoSuchMethodException nsme) {
+ // expected
+ }
+ }
+
}
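The new testPositionIncrements test above pins down what the matchVersion argument selects. A minimal sketch, assuming the 2.9-era API this patch introduces: Version.LUCENE_CURRENT makes enablePositionIncrements default to true (matching StandardAnalyzer's 2.9 default), Version.LUCENE_24 keeps the pre-2.9 default, and either can still be overridden explicitly:

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.queryParser.QueryParser;
    import org.apache.lucene.util.Version;

    class VersionDefaultsSketch {
      static void sketch() {
        Analyzer a = new StandardAnalyzer(Version.LUCENE_CURRENT);
        // 2.9 semantics: enablePositionIncrements defaults to true
        QueryParser qpNew = new QueryParser(Version.LUCENE_CURRENT, "f", a);
        // 2.4 semantics: keeps the pre-2.9 default of false
        QueryParser qpOld = new QueryParser(Version.LUCENE_24, "f", a);
        qpOld.setEnablePositionIncrements(true); // explicit override still works
      }
    }
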
Index: tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java
===================================================================
--- tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java (revision 829190)
+++ tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/analysis/TestStandardAnalyzer.java (working copy)
@@ -5,6 +5,7 @@
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
+import org.apache.lucene.util.Version;
import java.io.StringReader;
Index: tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/analysis/TestAnalyzers.java
===================================================================
--- tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/analysis/TestAnalyzers.java (revision 829190)
+++ tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/analysis/TestAnalyzers.java (working copy)
@@ -26,6 +26,7 @@
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.index.Payload;
+import org.apache.lucene.util.Version;
public class TestAnalyzers extends BaseTokenStreamTestCase {
@@ -74,7 +75,7 @@
}
public void testStop() throws Exception {
- Analyzer a = new StopAnalyzer(true);
+ Analyzer a = new StopAnalyzer(Version.LUCENE_CURRENT);
assertAnalyzesTo(a, "foo bar FOO BAR",
new String[] { "foo", "bar", "foo", "bar" });
assertAnalyzesTo(a, "foo a bar such FOO THESE BAR",
Index: tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java
===================================================================
--- tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java (revision 829190)
+++ tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/analysis/TestTeeSinkTokenFilter.java (working copy)
@@ -22,6 +22,7 @@
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.English;
+import org.apache.lucene.util.Version;
import java.io.IOException;
import java.io.StringReader;
@@ -167,10 +168,10 @@
buffer.append(English.intToEnglish(i).toUpperCase()).append(' ');
}
//make sure we produce the same tokens
- TeeSinkTokenFilter teeStream = new TeeSinkTokenFilter(new StandardFilter(new StandardTokenizer(new StringReader(buffer.toString()))));
+ TeeSinkTokenFilter teeStream = new TeeSinkTokenFilter(new StandardFilter(new StandardTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer.toString()))));
TokenStream sink = teeStream.newSinkTokenStream(new ModuloSinkFilter(100));
teeStream.consumeAllTokens();
- TokenStream stream = new ModuloTokenFilter(new StandardFilter(new StandardTokenizer(new StringReader(buffer.toString()))), 100);
+ TokenStream stream = new ModuloTokenFilter(new StandardFilter(new StandardTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer.toString()))), 100);
TermAttribute tfTok = (TermAttribute) stream.addAttribute(TermAttribute.class);
TermAttribute sinkTok = (TermAttribute) sink.addAttribute(TermAttribute.class);
for (int i=0; stream.incrementToken(); i++) {
@@ -183,12 +184,12 @@
int tfPos = 0;
long start = System.currentTimeMillis();
for (int i = 0; i < 20; i++) {
- stream = new StandardFilter(new StandardTokenizer(new StringReader(buffer.toString())));
+ stream = new StandardFilter(new StandardTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer.toString())));
PositionIncrementAttribute posIncrAtt = (PositionIncrementAttribute) stream.getAttribute(PositionIncrementAttribute.class);
while (stream.incrementToken()) {
tfPos += posIncrAtt.getPositionIncrement();
}
- stream = new ModuloTokenFilter(new StandardFilter(new StandardTokenizer(new StringReader(buffer.toString()))), modCounts[j]);
+ stream = new ModuloTokenFilter(new StandardFilter(new StandardTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer.toString()))), modCounts[j]);
posIncrAtt = (PositionIncrementAttribute) stream.getAttribute(PositionIncrementAttribute.class);
while (stream.incrementToken()) {
tfPos += posIncrAtt.getPositionIncrement();
@@ -200,7 +201,7 @@
//simulate one field with one sink
start = System.currentTimeMillis();
for (int i = 0; i < 20; i++) {
- teeStream = new TeeSinkTokenFilter(new StandardFilter(new StandardTokenizer(new StringReader(buffer.toString()))));
+ teeStream = new TeeSinkTokenFilter(new StandardFilter(new StandardTokenizer(Version.LUCENE_CURRENT, new StringReader(buffer.toString()))));
sink = teeStream.newSinkTokenStream(new ModuloSinkFilter(modCounts[j]));
PositionIncrementAttribute posIncrAtt = (PositionIncrementAttribute) teeStream.getAttribute(PositionIncrementAttribute.class);
while (teeStream.incrementToken()) {
Index: tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java
===================================================================
--- tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java (revision 829190)
+++ tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java (working copy)
@@ -31,6 +31,7 @@
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.Version;
public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
@@ -58,7 +59,7 @@
PerFieldAnalyzerWrapper analyzer = new PerFieldAnalyzerWrapper(new SimpleAnalyzer());
analyzer.addAnalyzer("partnum", new KeywordAnalyzer());
- QueryParser queryParser = new QueryParser("description", analyzer);
+ QueryParser queryParser = new QueryParser(Version.LUCENE_CURRENT, "description", analyzer);
Query query = queryParser.parse("partnum:Q36 AND SPACE");
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
Index: tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/analysis/TestStopAnalyzer.java
===================================================================
--- tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/analysis/TestStopAnalyzer.java (revision 829190)
+++ tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/analysis/TestStopAnalyzer.java (working copy)
@@ -19,6 +19,7 @@
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
+import org.apache.lucene.util.Version;
import java.io.StringReader;
import java.io.IOException;
@@ -28,7 +29,7 @@
public class TestStopAnalyzer extends BaseTokenStreamTestCase {
- private StopAnalyzer stop = new StopAnalyzer(false);
+ private StopAnalyzer stop = new StopAnalyzer(Version.LUCENE_CURRENT);
private Set inValidTokens = new HashSet();
public TestStopAnalyzer(String s) {
@@ -61,7 +62,7 @@
stopWordsSet.add("good");
stopWordsSet.add("test");
stopWordsSet.add("analyzer");
- StopAnalyzer newStop = new StopAnalyzer(stopWordsSet, false);
+ StopAnalyzer newStop = new StopAnalyzer(Version.LUCENE_24, stopWordsSet);
StringReader reader = new StringReader("This is a good test of the english stop analyzer");
TokenStream stream = newStop.tokenStream("test", reader);
assertNotNull(stream);
@@ -71,7 +72,7 @@
while (stream.incrementToken()) {
String text = termAtt.term();
assertFalse(stopWordsSet.contains(text));
- assertEquals(1,posIncrAtt.getPositionIncrement()); // by default stop tokenizer does not apply increments.
+ assertEquals(1,posIncrAtt.getPositionIncrement()); // in 2.4 stop tokenizer does not apply increments.
}
}
@@ -80,7 +81,7 @@
stopWordsSet.add("good");
stopWordsSet.add("test");
stopWordsSet.add("analyzer");
- StopAnalyzer newStop = new StopAnalyzer(stopWordsSet, true);
+ StopAnalyzer newStop = new StopAnalyzer(Version.LUCENE_CURRENT, stopWordsSet);
StringReader reader = new StringReader("This is a good test of the english stop analyzer with positions");
int expectedIncr[] = { 1, 1, 1, 3, 1, 1, 1, 2, 1};
TokenStream stream = newStop.tokenStream("test", reader);
Index: tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/TestDemo.java
===================================================================
--- tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/TestDemo.java (revision 829190)
+++ tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/TestDemo.java (working copy)
@@ -63,7 +63,7 @@
// Now search the index:
IndexSearcher isearcher = new IndexSearcher(directory, true); // read-only=true
// Parse a simple query that searches for "text":
- QueryParser parser = new QueryParser("fieldname", analyzer);
+ QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, "fieldname", analyzer);
Query query = parser.parse("text");
ScoreDoc[] hits = isearcher.search(query, null, 1000).scoreDocs;
assertEquals(1, hits.length);
Index: tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestNot.java
===================================================================
--- tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestNot.java (revision 829190)
+++ tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestNot.java (working copy)
@@ -25,6 +25,7 @@
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
+import org.apache.lucene.util.Version;
/** Similarity unit test.
*
@@ -48,7 +49,7 @@
writer.close();
Searcher searcher = new IndexSearcher(store, true);
- QueryParser parser = new QueryParser("field", new SimpleAnalyzer());
+ QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, "field", new SimpleAnalyzer());
Query query = parser.parse("a NOT b");
//System.out.println(query);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
Index: tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java
===================================================================
--- tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java (revision 829190)
+++ tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestTimeLimitingCollector.java (working copy)
@@ -31,6 +31,7 @@
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.Version;
/**
* Tests the {@link TimeLimitingCollector}. This test checks (1) search
@@ -85,7 +86,7 @@
for (int i = 0; i < docText.length; i++) {
qtxt += ' ' + docText[i]; // large query so that search will be longer
}
- QueryParser queryParser = new QueryParser(FIELD_NAME, new WhitespaceAnalyzer());
+ QueryParser queryParser = new QueryParser(Version.LUCENE_CURRENT, FIELD_NAME, new WhitespaceAnalyzer());
query = queryParser.parse(qtxt);
// warm the searcher
Index: tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestPhraseQuery.java
===================================================================
--- tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestPhraseQuery.java (revision 829190)
+++ tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestPhraseQuery.java (working copy)
@@ -25,6 +25,7 @@
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.Version;
import java.io.IOException;
import java.io.Reader;
@@ -201,7 +202,7 @@
public void testPhraseQueryWithStopAnalyzer() throws Exception {
RAMDirectory directory = new RAMDirectory();
- StopAnalyzer stopAnalyzer = new StopAnalyzer(false);
+ StopAnalyzer stopAnalyzer = new StopAnalyzer(Version.LUCENE_24);
IndexWriter writer = new IndexWriter(directory, stopAnalyzer, true,
IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
@@ -220,7 +221,7 @@
QueryUtils.check(query,searcher);
- // currently StopAnalyzer does not leave "holes", so this matches.
+ // StopAnalyzer as of 2.4 does not leave "holes", so this matches.
query = new PhraseQuery();
query.add(new Term("field", "words"));
query.add(new Term("field", "here"));
@@ -357,8 +358,8 @@
}
public void testToString() throws Exception {
- StopAnalyzer analyzer = new StopAnalyzer(true);
- QueryParser qp = new QueryParser("field", analyzer);
+ StopAnalyzer analyzer = new StopAnalyzer(Version.LUCENE_CURRENT);
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field", analyzer);
qp.setEnablePositionIncrements(true);
PhraseQuery q = (PhraseQuery)qp.parse("\"this hi this is a test is\"");
assertEquals("field:\"? hi ? ? ? test\"", q.toString());
Index: tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestPositionIncrement.java
===================================================================
--- tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestPositionIncrement.java (revision 829190)
+++ tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestPositionIncrement.java (working copy)
@@ -51,6 +51,7 @@
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.search.spans.Spans;
+import org.apache.lucene.util.Version;
/**
* Term position unit test.
@@ -188,7 +189,7 @@
assertEquals(0, hits.length);
// should not find "1 2" because there is a gap of 1 in the index
- QueryParser qp = new QueryParser("field",
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "field",
new StopWhitespaceAnalyzer(false));
q = (PhraseQuery) qp.parse("\"1 2\"");
hits = searcher.search(q, null, 1000).scoreDocs;
@@ -212,7 +213,7 @@
assertEquals(0, hits.length);
// when both qp qnd stopFilter propagate increments, we should find the doc.
- qp = new QueryParser("field",
+ qp = new QueryParser(Version.LUCENE_CURRENT, "field",
new StopWhitespaceAnalyzer(true));
qp.setEnablePositionIncrements(true);
q = (PhraseQuery) qp.parse("\"1 stop 2\"");
Index: tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestDateSort.java
===================================================================
--- tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestDateSort.java (revision 829190)
+++ tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestDateSort.java (working copy)
@@ -33,6 +33,7 @@
import org.apache.lucene.search.SortField;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.Version;
/**
* Test date sorting, i.e. auto-sorting of fields with type "long".
@@ -74,7 +75,7 @@
Sort sort = new Sort(new SortField(DATE_TIME_FIELD, SortField.STRING, true));
- QueryParser queryParser = new QueryParser(TEXT_FIELD, new WhitespaceAnalyzer());
+ QueryParser queryParser = new QueryParser(Version.LUCENE_CURRENT, TEXT_FIELD, new WhitespaceAnalyzer());
Query query = queryParser.parse("Document");
// Execute the search and process the search results.
Index: tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
===================================================================
--- tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java (revision 829190)
+++ tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java (working copy)
@@ -30,13 +30,14 @@
import org.apache.lucene.search.Scorer;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.Version;
public class TestNearSpansOrdered extends LuceneTestCase {
protected IndexSearcher searcher;
public static final String FIELD = "field";
public static final QueryParser qp =
- new QueryParser(FIELD, new WhitespaceAnalyzer());
+ new QueryParser(Version.LUCENE_CURRENT, FIELD, new WhitespaceAnalyzer());
public void tearDown() throws Exception {
super.tearDown();
Index: tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestMultiSearcher.java
===================================================================
--- tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestMultiSearcher.java (revision 829190)
+++ tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestMultiSearcher.java (working copy)
@@ -30,6 +30,7 @@
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.MockRAMDirectory;
+import org.apache.lucene.util.Version;
import java.io.IOException;
import java.util.Collections;
@@ -103,7 +104,7 @@
writerB.close();
// creating the query
- QueryParser parser = new QueryParser("fulltext", new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, "fulltext", new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
Query query = parser.parse("handle:1");
// building the searchables
Index: tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestSimpleExplanations.java
===================================================================
--- tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestSimpleExplanations.java (revision 829190)
+++ tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestSimpleExplanations.java (working copy)
@@ -28,6 +28,7 @@
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MockRAMDirectory;
+import org.apache.lucene.util.Version;
/**
@@ -327,7 +328,7 @@
writerB.addDocument(lDoc3);
writerB.close();
- QueryParser parser = new QueryParser("fulltext", new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, "fulltext", new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
Query query = parser.parse("handle:1");
Searcher[] searchers = new Searcher[2];
Index: tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestWildcard.java
===================================================================
--- tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestWildcard.java (revision 829190)
+++ tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestWildcard.java (working copy)
@@ -28,6 +28,7 @@
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.Version;
import java.io.IOException;
@@ -167,7 +168,7 @@
public void testParsingAndSearching() throws Exception {
String field = "content";
boolean dbg = false;
- QueryParser qp = new QueryParser(field, new WhitespaceAnalyzer());
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, field, new WhitespaceAnalyzer());
qp.setAllowLeadingWildcard(true);
String docs[] = {
"\\ abcdefg1",
Index: tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestBoolean2.java
===================================================================
--- tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestBoolean2.java (revision 829190)
+++ tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestBoolean2.java (working copy)
@@ -29,6 +29,7 @@
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.Version;
/** Test BooleanQuery2 against BooleanQuery by overriding the standard query parser.
* This also tests the scoring order of BooleanQuery.
@@ -59,7 +60,7 @@
};
public Query makeQuery(String queryText) throws ParseException {
- Query q = (new QueryParser(field, new WhitespaceAnalyzer())).parse(queryText);
+ Query q = (new QueryParser(Version.LUCENE_CURRENT, field, new WhitespaceAnalyzer())).parse(queryText);
return q;
}
Index: tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestExplanations.java
===================================================================
--- tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestExplanations.java (revision 829190)
+++ tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestExplanations.java (working copy)
@@ -32,6 +32,7 @@
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.Version;
/**
* Tests primitive queries (ie: that rewrite to themselves) to
@@ -51,7 +52,7 @@
public static final String KEY = "KEY";
public static final String FIELD = "field";
public static final QueryParser qp =
- new QueryParser(FIELD, new WhitespaceAnalyzer());
+ new QueryParser(Version.LUCENE_CURRENT, FIELD, new WhitespaceAnalyzer());
public void tearDown() throws Exception {
super.tearDown();
Index: tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java
===================================================================
--- tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java (revision 829190)
+++ tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/function/TestCustomScoreQuery.java (working copy)
@@ -28,6 +28,7 @@
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryUtils;
import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.util.Version;
/**
* Test CustomScoreQuery search.
@@ -139,7 +140,7 @@
float boost = (float) dboost;
IndexSearcher s = new IndexSearcher(dir, true);
FieldScoreQuery qValSrc = new FieldScoreQuery(field,tp); // a query that would score by the field
- QueryParser qp = new QueryParser(TEXT_FIELD,anlzr);
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, TEXT_FIELD,anlzr);
String qtxt = "first aid text"; // from the doc texts in FunctionQuerySetup.
// regular (boolean) query.
Index: tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestFuzzyQuery.java
===================================================================
--- tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestFuzzyQuery.java (revision 829190)
+++ tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestFuzzyQuery.java (working copy)
@@ -26,6 +26,7 @@
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.Version;
/**
* Tests {@link FuzzyQuery}.
Index: tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java
===================================================================
--- tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java (revision 829190)
+++ tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java (working copy)
@@ -100,7 +100,7 @@
assertEquals(2, hits.length);
// test parsable toString()
- QueryParser qp = new QueryParser("key", analyzer);
+ QueryParser qp = new QueryParser(Version.LUCENE_CURRENT, "key", analyzer);
hits = is.search(qp.parse(new MatchAllDocsQuery().toString()), null, 1000).scoreDocs;
assertEquals(2, hits.length);
Index: tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestMultiSearcherRanking.java
===================================================================
--- tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestMultiSearcherRanking.java (revision 829190)
+++ tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/search/TestMultiSearcherRanking.java (working copy)
@@ -26,6 +26,7 @@
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.Version;
import java.io.IOException;
@@ -88,7 +89,7 @@
private void checkQuery(String queryStr) throws IOException, ParseException {
// check result hit ranking
if(verbose) System.out.println("Query: " + queryStr);
- QueryParser queryParser = new QueryParser(FIELD_NAME, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
+ QueryParser queryParser = new QueryParser(Version.LUCENE_CURRENT, FIELD_NAME, new StandardAnalyzer(org.apache.lucene.util.Version.LUCENE_CURRENT));
Query query = queryParser.parse(queryStr);
ScoreDoc[] multiSearcherHits = multiSearcher.search(query, null, 1000).scoreDocs;
ScoreDoc[] singleSearcherHits = singleSearcher.search(query, null, 1000).scoreDocs;
Index: tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/TestSearch.java
===================================================================
--- tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/TestSearch.java (revision 829190)
+++ tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/TestSearch.java (working copy)
@@ -22,6 +22,7 @@
import java.io.StringWriter;
import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.Version;
import junit.framework.TestSuite;
import junit.textui.TestRunner;
@@ -110,7 +111,7 @@
};
ScoreDoc[] hits = null;
- QueryParser parser = new QueryParser("contents", analyzer);
+ QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, "contents", analyzer);
parser.setPhraseSlop(4);
for (int j = 0; j < queries.length; j++) {
Query query = parser.parse(queries[j]);
Index: tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/index/TestIndexWriter.java
===================================================================
--- tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/index/TestIndexWriter.java (revision 829190)
+++ tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/index/TestIndexWriter.java (working copy)
@@ -66,6 +66,7 @@
import org.apache.lucene.store.SingleInstanceLockFactory;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.util.Version;
/**
*
@@ -1707,7 +1708,7 @@
IndexWriter writer = new IndexWriter(dir, new Analyzer() {
public TokenStream tokenStream(String fieldName, Reader reader) {
- return new TokenFilter(new StandardTokenizer(reader)) {
+ return new TokenFilter(new StandardTokenizer(Version.LUCENE_CURRENT, reader)) {
private int count = 0;
public boolean incrementToken() throws IOException {
@@ -4174,7 +4175,7 @@
// LUCENE-1448
public void testEndOffsetPositionStopFilter() throws Exception {
MockRAMDirectory dir = new MockRAMDirectory();
- IndexWriter w = new IndexWriter(dir, new StopAnalyzer(true), IndexWriter.MaxFieldLength.LIMITED);
+ IndexWriter w = new IndexWriter(dir, new StopAnalyzer(Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
Field f = new Field("field", "abcd the", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
doc.add(f);
Index: tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/TestSearchForDuplicates.java
===================================================================
--- tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/TestSearchForDuplicates.java (revision 829190)
+++ tags/lucene_2_9_back_compat_tests_20091023/src/test/org/apache/lucene/TestSearchForDuplicates.java (working copy)
@@ -27,8 +27,9 @@
import org.apache.lucene.index.*;
import org.apache.lucene.search.*;
import org.apache.lucene.queryParser.*;
+import org.apache.lucene.util.Version;
import org.apache.lucene.util.LuceneTestCase;
import junit.framework.TestSuite;
import junit.textui.TestRunner;
@@ -102,7 +103,7 @@
// try a search without OR
Searcher searcher = new IndexSearcher(directory, true);
- QueryParser parser = new QueryParser(PRIORITY_FIELD, analyzer);
+ QueryParser parser = new QueryParser(Version.LUCENE_CURRENT, PRIORITY_FIELD, analyzer);
Query query = parser.parse(HIGH_PRIORITY);
out.println("Query: " + query.toString(PRIORITY_FIELD));
@@ -117,7 +118,7 @@
searcher = new IndexSearcher(directory, true);
hits = null;
- parser = new QueryParser(PRIORITY_FIELD, analyzer);
+ parser = new QueryParser(Version.LUCENE_CURRENT, PRIORITY_FIELD, analyzer);
query = parser.parse(HIGH_PRIORITY + " OR " + MED_PRIORITY);
out.println("Query: " + query.toString(PRIORITY_FIELD));
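The remaining hunks retrofit the back-compat sources themselves. For reference, a hedged sketch of the new static parse overload as exercised by testStaticMethod2 earlier; the post-patch signature is assumed:

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.queryParser.MultiFieldQueryParser;
    import org.apache.lucene.search.BooleanClause;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.util.Version;

    class StaticParseSketch {
      static Query sketch() throws Exception {
        String[] fields = {"b", "t"};
        BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT};
        // per testStaticMethod2, this renders as "+(b:one b:two) -(t:one t:two)"
        return MultiFieldQueryParser.parse(Version.LUCENE_CURRENT, "one two", fields, flags,
            new StandardAnalyzer(Version.LUCENE_CURRENT));
      }
    }
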
Index: tags/lucene_2_9_back_compat_tests_20091023/src/java/org/apache/lucene/queryParser/MultiFieldQueryParser.java
===================================================================
--- tags/lucene_2_9_back_compat_tests_20091023/src/java/org/apache/lucene/queryParser/MultiFieldQueryParser.java (revision 829190)
+++ tags/lucene_2_9_back_compat_tests_20091023/src/java/org/apache/lucene/queryParser/MultiFieldQueryParser.java (working copy)
@@ -27,6 +27,7 @@
import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
+import org.apache.lucene.util.Version;
/**
* A QueryParser which constructs queries to search multiple fields.
@@ -69,6 +70,11 @@
this(fields,analyzer);
this.boosts = boosts;
}
+
+ public MultiFieldQueryParser(Version matchVersion, String[] fields, Analyzer analyzer, Map boosts) {
+ // stub
+ super(matchVersion, null, analyzer);
+ }
/**
* Creates a MultiFieldQueryParser.
@@ -94,6 +100,11 @@
super(null, analyzer);
this.fields = fields;
}
+
+ public MultiFieldQueryParser(Version matchVersion, String[] fields, Analyzer analyzer) {
+ // stub
+ super(matchVersion, null, analyzer);
+ }
protected Query getFieldQuery(String field, String queryText, int slop) throws ParseException {
if (field == null) {
@@ -203,7 +214,7 @@
* @throws IllegalArgumentException if the length of the queries array differs
* from the length of the fields array
*/
- public static Query parse(String[] queries, String[] fields,
+ public static Query parse(Version matchVersion, String[] queries, String[] fields,
Analyzer analyzer) throws ParseException {
if (queries.length != fields.length)
@@ -251,7 +262,7 @@
* @throws IllegalArgumentException if the length of the fields array differs
* from the length of the flags array
*/
- public static Query parse(String query, String[] fields,
+ public static Query parse(Version matchVersion, String query, String[] fields,
BooleanClause.Occur[] flags, Analyzer analyzer) throws ParseException {
if (fields.length != flags.length)
throw new IllegalArgumentException("fields.length != flags.length");
@@ -298,7 +309,7 @@
* @throws IllegalArgumentException if the length of the queries, fields,
* and flags array differ
*/
- public static Query parse(String[] queries, String[] fields, BooleanClause.Occur[] flags,
+ public static Query parse(Version matchVersion, String[] queries, String[] fields, BooleanClause.Occur[] flags,
Analyzer analyzer) throws ParseException {
if (!(queries.length == fields.length && queries.length == flags.length))
Index: tags/lucene_2_9_back_compat_tests_20091023/src/java/org/apache/lucene/queryParser/QueryParser.java
===================================================================
--- tags/lucene_2_9_back_compat_tests_20091023/src/java/org/apache/lucene/queryParser/QueryParser.java (revision 829190)
+++ tags/lucene_2_9_back_compat_tests_20091023/src/java/org/apache/lucene/queryParser/QueryParser.java (working copy)
@@ -35,6 +35,7 @@
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.util.Parameter;
+import org.apache.lucene.util.Version;
/**
* This class is generated by JavaCC. The most important method is
@@ -165,6 +166,10 @@
field = f;
}
+ public QueryParser(Version matchVersion, String f, Analyzer a) {
+ // stub
+ }
+
/** Parses a query string, returning a {@link org.apache.lucene.search.Query}.
* @param query the query string to be parsed.
* @throws ParseException if the parsing fails
Index: tags/lucene_2_9_back_compat_tests_20091023/src/java/org/apache/lucene/queryParser/QueryParser.jj
===================================================================
--- tags/lucene_2_9_back_compat_tests_20091023/src/java/org/apache/lucene/queryParser/QueryParser.jj (revision 829190)
+++ tags/lucene_2_9_back_compat_tests_20091023/src/java/org/apache/lucene/queryParser/QueryParser.jj (working copy)
@@ -59,6 +59,7 @@
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.util.Parameter;
+import org.apache.lucene.util.Version;
/**
* This class is generated by JavaCC. The most important method is
@@ -189,6 +190,10 @@
field = f;
}
+ public QueryParser(Version matchVersion, String f, Analyzer a) {
+ // stub
+ }
+
/** Parses a query string, returning a {@link org.apache.lucene.search.Query}.
* @param query the query string to be parsed.
* @throws ParseException if the parsing fails
Index: tags/lucene_2_9_back_compat_tests_20091023/src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java
===================================================================
--- tags/lucene_2_9_back_compat_tests_20091023/src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java (revision 829190)
+++ tags/lucene_2_9_back_compat_tests_20091023/src/java/org/apache/lucene/analysis/standard/StandardTokenizer.java (working copy)
@@ -27,6 +27,7 @@
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.util.AttributeSource;
+import org.apache.lucene.util.Version;
/** A grammar-based tokenizer constructed with JFlex
*
@@ -47,7 +48,7 @@
public class StandardTokenizer extends Tokenizer {
/** A private instance of the JFlex-constructed scanner */
- private final StandardTokenizerImpl scanner;
+ private StandardTokenizerImpl scanner;
public static final int ALPHANUM = 0;
public static final int APOSTROPHE = 1;
@@ -112,6 +113,10 @@
this(input, false);
}
+ public StandardTokenizer(Version matchVersion, Reader input) {
+ // stub
+ }
+
/**
* Creates a new instance of the {@link org.apache.lucene.analysis.standard.StandardTokenizer}. Attaches
* the input to the newly created JFlex scanner.
@@ -136,6 +141,10 @@
init(input, replaceInvalidAcronym);
}
+ public StandardTokenizer(Version matchVersion, AttributeSource source, Reader input) {
+ // stub
+ }
+
/**
* Creates a new StandardTokenizer with a given {@link org.apache.lucene.util.AttributeSource.AttributeFactory}
*/
@@ -145,6 +154,10 @@
init(input, replaceInvalidAcronym);
}
+ public StandardTokenizer(Version matchVersion, AttributeFactory factory, Reader input) {
+ // stub
+ }
+
private void init(Reader input, boolean replaceInvalidAcronym) {
this.replaceInvalidAcronym = replaceInvalidAcronym;
this.input = input;
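A small usage sketch of the Version-aware StandardTokenizer constructor added above. Note the back-compat copy in this tag is only a stub; the behavior sketched is the trunk intent as assumed from the patch:

    import java.io.StringReader;
    import org.apache.lucene.analysis.standard.StandardTokenizer;
    import org.apache.lucene.util.Version;

    class TokenizerSketch {
      static StandardTokenizer make(String text) {
        // matchVersion selects version-dependent tokenization behavior;
        // pre-2.9 call sites passed only the Reader
        return new StandardTokenizer(Version.LUCENE_CURRENT, new StringReader(text));
      }
    }
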
Index: tags/lucene_2_9_back_compat_tests_20091023/src/java/org/apache/lucene/analysis/StopAnalyzer.java
===================================================================
--- tags/lucene_2_9_back_compat_tests_20091023/src/java/org/apache/lucene/analysis/StopAnalyzer.java (revision 829190)
+++ tags/lucene_2_9_back_compat_tests_20091023/src/java/org/apache/lucene/analysis/StopAnalyzer.java (working copy)
@@ -23,13 +23,15 @@
import java.util.Arrays;
import java.util.Set;
+import org.apache.lucene.util.Version;
+
/** Filters {@link LetterTokenizer} with {@link LowerCaseFilter} and {@link StopFilter}. */
public final class StopAnalyzer extends Analyzer {
- private final Set/*