Index: src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java
===================================================================
--- src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java (revision 386892)
+++ src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java (working copy)
@@ -16,10 +16,7 @@
* limitations under the License.
*/
-import java.io.Reader;
-
import junit.framework.TestCase;
-
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
@@ -34,6 +31,8 @@
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
+import java.io.Reader;
+
/**
* Tests QueryParser.
* @author Daniel Naber
@@ -96,23 +95,6 @@
}
- // TODO: remove this for Lucene 2.0
- public void testOldMethods() throws ParseException {
- // testing the old static calls that are now deprecated:
- assertQueryEquals("b:one t:one", "one");
- assertQueryEquals("(b:one b:two) (t:one t:two)", "one two");
- assertQueryEquals("(b:one -b:two) (t:one -t:two)", "one -two");
- assertQueryEquals("(b:one -(b:two b:three)) (t:one -(t:two t:three))", "one -(two three)");
- assertQueryEquals("(+b:one +b:two) (+t:one +t:two)", "+one +two");
- }
-
- // TODO: remove this for Lucene 2.0
- private void assertQueryEquals(String expected, String query) throws ParseException {
- String[] fields = {"b", "t"};
- Query q = MultiFieldQueryParser.parse(query, fields, new StandardAnalyzer());
- String s = q.toString();
- assertEquals(expected, s);
- }
public void testStaticMethod1() throws ParseException {
String[] fields = {"b", "t"};
@@ -161,15 +143,18 @@
public void testStaticMethod2Old() throws ParseException {
String[] fields = {"b", "t"};
- int[] flags = {MultiFieldQueryParser.REQUIRED_FIELD, MultiFieldQueryParser.PROHIBITED_FIELD};
- Query q = MultiFieldQueryParser.parse("one", fields, flags, new StandardAnalyzer());
+    // Lucene 2.0: the int-based REQUIRED_FIELD/PROHIBITED_FIELD flags were
+    // replaced by the type-safe BooleanClause.Occur constants.
+    BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT};
+
+    Query q = MultiFieldQueryParser.parse("one", fields, flags, new StandardAnalyzer());
assertEquals("+b:one -t:one", q.toString());
q = MultiFieldQueryParser.parse("one two", fields, flags, new StandardAnalyzer());
assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
try {
- int[] flags2 = {MultiFieldQueryParser.REQUIRED_FIELD};
+ BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST};
q = MultiFieldQueryParser.parse("blah", fields, flags2, new StandardAnalyzer());
fail();
} catch(IllegalArgumentException e) {
@@ -197,12 +182,12 @@
public void testStaticMethod3Old() throws ParseException {
String[] queries = {"one", "two"};
String[] fields = {"b", "t"};
- int[] flags = {MultiFieldQueryParser.REQUIRED_FIELD, MultiFieldQueryParser.PROHIBITED_FIELD};
+ BooleanClause.Occur[] flags = {BooleanClause.Occur.MUST, BooleanClause.Occur.MUST_NOT};
Query q = MultiFieldQueryParser.parse(queries, fields, flags, new StandardAnalyzer());
assertEquals("+b:one -t:two", q.toString());
try {
- int[] flags2 = {MultiFieldQueryParser.REQUIRED_FIELD};
+ BooleanClause.Occur[] flags2 = {BooleanClause.Occur.MUST};
q = MultiFieldQueryParser.parse(queries, fields, flags2, new StandardAnalyzer());
fail();
} catch(IllegalArgumentException e) {
Index: src/test/org/apache/lucene/ThreadSafetyTest.java
===================================================================
--- src/test/org/apache/lucene/ThreadSafetyTest.java (revision 386892)
+++ src/test/org/apache/lucene/ThreadSafetyTest.java (working copy)
@@ -113,9 +113,9 @@
private void searchFor(int n, Searcher searcher)
throws Exception {
System.out.println("Searching for " + n);
+ QueryParser parser = new QueryParser("contents", ANALYZER);
Hits hits =
- searcher.search(QueryParser.parse(English.intToEnglish(n), "contents",
- ANALYZER));
+ searcher.search(parser.parse(English.intToEnglish(n)));
System.out.println("Search for " + n + ": total=" + hits.length());
for (int j = 0; j < Math.min(3, hits.length()); j++) {
System.out.println("Hit for " + n + ": " + hits.doc(j).get("id"));
Index: src/test/org/apache/lucene/TestDemo.java
===================================================================
--- src/test/org/apache/lucene/TestDemo.java (revision 386892)
+++ src/test/org/apache/lucene/TestDemo.java (working copy)
@@ -16,8 +16,7 @@
* limitations under the License.
*/
-import java.io.IOException;
-
+import junit.framework.TestCase;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
@@ -31,7 +30,7 @@
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
-import junit.framework.TestCase;
+import java.io.IOException;
/**
* A very simple demo used in the API documentation (src/java/overview.html).
@@ -62,7 +61,8 @@
// Now search the index:
IndexSearcher isearcher = new IndexSearcher(directory);
// Parse a simple query that searches for "text":
- Query query = QueryParser.parse("text", "fieldname", analyzer);
+ QueryParser parser = new QueryParser("fieldname", analyzer);
+ Query query = parser.parse("text");
Hits hits = isearcher.search(query);
assertEquals(1, hits.length());
// Iterate through the results:
Index: src/test/org/apache/lucene/search/TestNot.java
===================================================================
--- src/test/org/apache/lucene/search/TestNot.java (revision 386892)
+++ src/test/org/apache/lucene/search/TestNot.java (working copy)
@@ -47,7 +47,8 @@
writer.close();
Searcher searcher = new IndexSearcher(store);
- Query query = QueryParser.parse("a NOT b", "field", new SimpleAnalyzer());
+ QueryParser parser = new QueryParser("field", new SimpleAnalyzer());
+ Query query = parser.parse("a NOT b");
//System.out.println(query);
Hits hits = searcher.search(query);
assertEquals(0, hits.length());
Index: src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java
===================================================================
--- src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java (revision 386892)
+++ src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java (working copy)
@@ -17,23 +17,16 @@
*/
-import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.store.Directory;
-
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-
+import junit.framework.TestCase;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
-
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.queryParser.QueryParser;
-import org.apache.lucene.queryParser.ParseException;
-
-import junit.framework.TestCase;
-
import java.text.DecimalFormat;
import java.util.Random;
@@ -67,10 +60,10 @@
for (int i = 0; i < data.length; i++) {
Document doc = new Document();
- doc.add(Field.Keyword("id",String.valueOf(i)));
- doc.add(Field.Keyword("all","all"));
+ doc.add(new Field("id", String.valueOf(i), Field.Store.YES, Field.Index.UN_TOKENIZED));//Field.Keyword("id",String.valueOf(i)));
+ doc.add(new Field("all", "all", Field.Store.YES, Field.Index.UN_TOKENIZED));//Field.Keyword("all","all"));
if (null != data[i]) {
- doc.add(Field.Text("data",data[i]));
+ doc.add(new Field("data", data[i], Field.Store.YES, Field.Index.TOKENIZED));//Field.Text("data",data[i]));
}
writer.addDocument(doc);
}
@@ -96,7 +89,7 @@
BooleanQuery q = new BooleanQuery();
for (int i = 1; i <=4; i++) {
- q.add(new TermQuery(new Term("data",""+i)), false, false);
+ q.add(new TermQuery(new Term("data",""+i)), BooleanClause.Occur.SHOULD);//false, false);
}
q.setMinimumNumberShouldMatch(2); // match at least two of 4
verifyNrHits(q, 2);
@@ -106,10 +99,10 @@
/* one required, some optional */
BooleanQuery q = new BooleanQuery();
- q.add(new TermQuery(new Term("all", "all" )), true, false);
- q.add(new TermQuery(new Term("data", "5" )), false, false);
- q.add(new TermQuery(new Term("data", "4" )), false, false);
- q.add(new TermQuery(new Term("data", "3" )), false, false);
+ q.add(new TermQuery(new Term("all", "all" )), BooleanClause.Occur.MUST);//true, false);
+ q.add(new TermQuery(new Term("data", "5" )), BooleanClause.Occur.SHOULD);//false, false);
+ q.add(new TermQuery(new Term("data", "4" )), BooleanClause.Occur.SHOULD);//false, false);
+ q.add(new TermQuery(new Term("data", "3" )), BooleanClause.Occur.SHOULD);//false, false);
q.setMinimumNumberShouldMatch(2); // 2 of 3 optional
@@ -120,11 +113,11 @@
/* two required, some optional */
BooleanQuery q = new BooleanQuery();
- q.add(new TermQuery(new Term("all", "all" )), true, false);
- q.add(new TermQuery(new Term("data", "6" )), true, false);
- q.add(new TermQuery(new Term("data", "5" )), false, false);
- q.add(new TermQuery(new Term("data", "4" )), false, false);
- q.add(new TermQuery(new Term("data", "3" )), false, false);
+ q.add(new TermQuery(new Term("all", "all" )), BooleanClause.Occur.MUST);//true, false);
+ q.add(new TermQuery(new Term("data", "6" )), BooleanClause.Occur.MUST);//true, false);
+ q.add(new TermQuery(new Term("data", "5" )), BooleanClause.Occur.SHOULD);//false, false);
+ q.add(new TermQuery(new Term("data", "4" )), BooleanClause.Occur.SHOULD);//false, false);
+ q.add(new TermQuery(new Term("data", "3" )), BooleanClause.Occur.SHOULD);//false, false);
q.setMinimumNumberShouldMatch(2); // 2 of 3 optional
@@ -135,10 +128,10 @@
/* one prohibited, some optional */
BooleanQuery q = new BooleanQuery();
- q.add(new TermQuery(new Term("data", "1" )), false, false);
- q.add(new TermQuery(new Term("data", "2" )), false, false);
- q.add(new TermQuery(new Term("data", "3" )), false, true );
- q.add(new TermQuery(new Term("data", "4" )), false, false);
+ q.add(new TermQuery(new Term("data", "1" )), BooleanClause.Occur.SHOULD);//false, false);
+ q.add(new TermQuery(new Term("data", "2" )), BooleanClause.Occur.SHOULD);//false, false);
+ q.add(new TermQuery(new Term("data", "3" )), BooleanClause.Occur.MUST_NOT);//false, true );
+ q.add(new TermQuery(new Term("data", "4" )), BooleanClause.Occur.SHOULD);//false, false);
q.setMinimumNumberShouldMatch(2); // 2 of 3 optional
@@ -149,11 +142,11 @@
/* two prohibited, some optional */
BooleanQuery q = new BooleanQuery();
- q.add(new TermQuery(new Term("data", "1" )), false, false);
- q.add(new TermQuery(new Term("data", "2" )), false, false);
- q.add(new TermQuery(new Term("data", "3" )), false, true );
- q.add(new TermQuery(new Term("data", "4" )), false, false);
- q.add(new TermQuery(new Term("data", "C" )), false, true );
+ q.add(new TermQuery(new Term("data", "1" )), BooleanClause.Occur.SHOULD);//false, false);
+ q.add(new TermQuery(new Term("data", "2" )), BooleanClause.Occur.SHOULD);//false, false);
+ q.add(new TermQuery(new Term("data", "3" )), BooleanClause.Occur.MUST_NOT);//false, true );
+ q.add(new TermQuery(new Term("data", "4" )), BooleanClause.Occur.SHOULD);//false, false);
+ q.add(new TermQuery(new Term("data", "C" )), BooleanClause.Occur.MUST_NOT);//false, true );
q.setMinimumNumberShouldMatch(2); // 2 of 3 optional
@@ -164,12 +157,12 @@
/* one required, one prohibited, some optional */
BooleanQuery q = new BooleanQuery();
- q.add(new TermQuery(new Term("data", "6" )), true, false);
- q.add(new TermQuery(new Term("data", "5" )), false, false);
- q.add(new TermQuery(new Term("data", "4" )), false, false);
- q.add(new TermQuery(new Term("data", "3" )), false, true );
- q.add(new TermQuery(new Term("data", "2" )), false, false);
- q.add(new TermQuery(new Term("data", "1" )), false, false);
+ q.add(new TermQuery(new Term("data", "6" )), BooleanClause.Occur.MUST);// true, false);
+ q.add(new TermQuery(new Term("data", "5" )), BooleanClause.Occur.SHOULD);//false, false);
+ q.add(new TermQuery(new Term("data", "4" )), BooleanClause.Occur.SHOULD);//false, false);
+ q.add(new TermQuery(new Term("data", "3" )), BooleanClause.Occur.MUST_NOT);//false, true );
+ q.add(new TermQuery(new Term("data", "2" )), BooleanClause.Occur.SHOULD);//false, false);
+ q.add(new TermQuery(new Term("data", "1" )), BooleanClause.Occur.SHOULD);//false, false);
q.setMinimumNumberShouldMatch(3); // 3 of 4 optional
@@ -180,13 +173,13 @@
/* two required, one prohibited, some optional */
BooleanQuery q = new BooleanQuery();
- q.add(new TermQuery(new Term("all", "all")), true, false);
- q.add(new TermQuery(new Term("data", "6" )), true, false);
- q.add(new TermQuery(new Term("data", "5" )), false, false);
- q.add(new TermQuery(new Term("data", "4" )), false, false);
- q.add(new TermQuery(new Term("data", "3" )), false, true );
- q.add(new TermQuery(new Term("data", "2" )), false, false);
- q.add(new TermQuery(new Term("data", "1" )), false, false);
+ q.add(new TermQuery(new Term("all", "all")), BooleanClause.Occur.MUST);//true, false);
+ q.add(new TermQuery(new Term("data", "6" )), BooleanClause.Occur.MUST);//true, false);
+ q.add(new TermQuery(new Term("data", "5" )), BooleanClause.Occur.SHOULD);//false, false);
+ q.add(new TermQuery(new Term("data", "4" )), BooleanClause.Occur.SHOULD);//false, false);
+ q.add(new TermQuery(new Term("data", "3" )), BooleanClause.Occur.MUST_NOT);//false, true );
+ q.add(new TermQuery(new Term("data", "2" )), BooleanClause.Occur.SHOULD);//false, false);
+ q.add(new TermQuery(new Term("data", "1" )), BooleanClause.Occur.SHOULD);//false, false);
q.setMinimumNumberShouldMatch(3); // 3 of 4 optional
@@ -197,13 +190,13 @@
/* one required, two prohibited, some optional */
BooleanQuery q = new BooleanQuery();
- q.add(new TermQuery(new Term("data", "6" )), true, false);
- q.add(new TermQuery(new Term("data", "5" )), false, false);
- q.add(new TermQuery(new Term("data", "4" )), false, false);
- q.add(new TermQuery(new Term("data", "3" )), false, true );
- q.add(new TermQuery(new Term("data", "2" )), false, false);
- q.add(new TermQuery(new Term("data", "1" )), false, false);
- q.add(new TermQuery(new Term("data", "C" )), false, true );
+ q.add(new TermQuery(new Term("data", "6" )), BooleanClause.Occur.MUST);//true, false);
+ q.add(new TermQuery(new Term("data", "5" )), BooleanClause.Occur.SHOULD);//false, false);
+ q.add(new TermQuery(new Term("data", "4" )), BooleanClause.Occur.SHOULD);//false, false);
+ q.add(new TermQuery(new Term("data", "3" )), BooleanClause.Occur.MUST_NOT);//false, true );
+ q.add(new TermQuery(new Term("data", "2" )), BooleanClause.Occur.SHOULD);//false, false);
+ q.add(new TermQuery(new Term("data", "1" )), BooleanClause.Occur.SHOULD);//false, false);
+ q.add(new TermQuery(new Term("data", "C" )), BooleanClause.Occur.MUST_NOT);//false, true );
q.setMinimumNumberShouldMatch(3); // 3 of 4 optional
@@ -214,14 +207,14 @@
/* two required, two prohibited, some optional */
BooleanQuery q = new BooleanQuery();
- q.add(new TermQuery(new Term("all", "all")), true, false);
- q.add(new TermQuery(new Term("data", "6" )), true, false);
- q.add(new TermQuery(new Term("data", "5" )), false, false);
- q.add(new TermQuery(new Term("data", "4" )), false, false);
- q.add(new TermQuery(new Term("data", "3" )), false, true );
- q.add(new TermQuery(new Term("data", "2" )), false, false);
- q.add(new TermQuery(new Term("data", "1" )), false, false);
- q.add(new TermQuery(new Term("data", "C" )), false, true );
+ q.add(new TermQuery(new Term("all", "all")), BooleanClause.Occur.MUST);//true, false);
+ q.add(new TermQuery(new Term("data", "6" )), BooleanClause.Occur.MUST);//true, false);
+ q.add(new TermQuery(new Term("data", "5" )), BooleanClause.Occur.SHOULD);//false, false);
+ q.add(new TermQuery(new Term("data", "4" )), BooleanClause.Occur.SHOULD);//false, false);
+ q.add(new TermQuery(new Term("data", "3" )), BooleanClause.Occur.MUST_NOT);//false, true );
+ q.add(new TermQuery(new Term("data", "2" )), BooleanClause.Occur.SHOULD);//false, false);
+ q.add(new TermQuery(new Term("data", "1" )), BooleanClause.Occur.SHOULD);//false, false);
+ q.add(new TermQuery(new Term("data", "C" )), BooleanClause.Occur.MUST_NOT);//false, true );
q.setMinimumNumberShouldMatch(3); // 3 of 4 optional
@@ -232,14 +225,14 @@
/* two required, two prohibited, some optional */
BooleanQuery q = new BooleanQuery();
- q.add(new TermQuery(new Term("all", "all")), true, false);
- q.add(new TermQuery(new Term("data", "6" )), true, false);
- q.add(new TermQuery(new Term("data", "5" )), false, false);
- q.add(new TermQuery(new Term("data", "4" )), false, false);
- q.add(new TermQuery(new Term("data", "3" )), false, true );
- q.add(new TermQuery(new Term("data", "2" )), false, false);
- q.add(new TermQuery(new Term("data", "1" )), false, false);
- q.add(new TermQuery(new Term("data", "C" )), false, true );
+ q.add(new TermQuery(new Term("all", "all")), BooleanClause.Occur.MUST);//true, false);
+ q.add(new TermQuery(new Term("data", "6" )), BooleanClause.Occur.MUST);//true, false);
+ q.add(new TermQuery(new Term("data", "5" )), BooleanClause.Occur.SHOULD);//false, false);
+ q.add(new TermQuery(new Term("data", "4" )), BooleanClause.Occur.SHOULD);//false, false);
+ q.add(new TermQuery(new Term("data", "3" )), BooleanClause.Occur.MUST_NOT);//false, true );
+ q.add(new TermQuery(new Term("data", "2" )), BooleanClause.Occur.SHOULD);//false, false);
+ q.add(new TermQuery(new Term("data", "1" )), BooleanClause.Occur.SHOULD);//false, false);
+ q.add(new TermQuery(new Term("data", "C" )), BooleanClause.Occur.MUST_NOT);//false, true );
q.setMinimumNumberShouldMatch(90); // 90 of 4 optional ?!?!?!
@@ -250,10 +243,10 @@
/* two required, two optional */
BooleanQuery q = new BooleanQuery();
- q.add(new TermQuery(new Term("all", "all" )), false, false);
- q.add(new TermQuery(new Term("data", "6" )), true, false);
- q.add(new TermQuery(new Term("data", "3" )), true, false);
- q.add(new TermQuery(new Term("data", "2" )), false, false);
+ q.add(new TermQuery(new Term("all", "all" )), BooleanClause.Occur.SHOULD);//false, false);
+ q.add(new TermQuery(new Term("data", "6" )), BooleanClause.Occur.MUST);//true, false);
+ q.add(new TermQuery(new Term("data", "3" )), BooleanClause.Occur.MUST);//true, false);
+ q.add(new TermQuery(new Term("data", "2" )), BooleanClause.Occur.SHOULD);//false, false);
q.setMinimumNumberShouldMatch(2); // 2 of 2 optional
@@ -264,9 +257,9 @@
/* two required, one optional */
BooleanQuery q = new BooleanQuery();
- q.add(new TermQuery(new Term("all", "all" )), true, false);
- q.add(new TermQuery(new Term("data", "3" )), false, false);
- q.add(new TermQuery(new Term("data", "2" )), true, false);
+ q.add(new TermQuery(new Term("all", "all" )), BooleanClause.Occur.MUST);//true, false);
+ q.add(new TermQuery(new Term("data", "3" )), BooleanClause.Occur.SHOULD);//false, false);
+ q.add(new TermQuery(new Term("data", "2" )), BooleanClause.Occur.MUST);//true, false);
q.setMinimumNumberShouldMatch(1); // 1 of 1 optional
@@ -277,8 +270,8 @@
/* two required, no optional */
BooleanQuery q = new BooleanQuery();
- q.add(new TermQuery(new Term("all", "all" )), true, false);
- q.add(new TermQuery(new Term("data", "2" )), true, false);
+ q.add(new TermQuery(new Term("all", "all" )), BooleanClause.Occur.MUST);//true, false);
+ q.add(new TermQuery(new Term("data", "2" )), BooleanClause.Occur.MUST);//true, false);
q.setMinimumNumberShouldMatch(1); // 1 of 0 optional
@@ -306,7 +299,7 @@
};
- int tot=0;
+
// increase number of iterations for more complete testing
for (int i=0; i<1000; i++) {
int lev = rnd.nextInt(maxLev);
@@ -322,7 +315,6 @@
// will not normalize scores.
TopDocs top1 = s.search(q1,null,100);
TopDocs top2 = s.search(q2,null,100);
- tot+=top2.totalHits;
// The constrained query
// should be a superset to the unconstrained query.
Index: src/test/org/apache/lucene/search/TestDateFilter.java
===================================================================
--- src/test/org/apache/lucene/search/TestDateFilter.java (revision 386892)
+++ src/test/org/apache/lucene/search/TestDateFilter.java (working copy)
@@ -16,21 +16,17 @@
* limitations under the License.
*/
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Hits;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.store.RAMDirectory;
+import junit.framework.TestCase;
import org.apache.lucene.analysis.SimpleAnalyzer;
+import org.apache.lucene.document.DateTools;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-import org.apache.lucene.document.DateField;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.RAMDirectory;
import java.io.IOException;
-import junit.framework.TestCase;
-
/**
* DateFilter JUnit tests.
*
@@ -59,7 +55,7 @@
Document doc = new Document();
// add time that is in the past
- doc.add(new Field("datefield", DateField.timeToString(now - 1000), Field.Store.YES, Field.Index.UN_TOKENIZED));
+ doc.add(new Field("datefield", DateTools.timeToString(now - 1000, DateTools.Resolution.MILLISECOND), Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add(new Field("body", "Today is a very sunny day in New York City", Field.Store.YES, Field.Index.TOKENIZED));
writer.addDocument(doc);
writer.optimize();
@@ -68,12 +64,15 @@
IndexSearcher searcher = new IndexSearcher(indexStore);
// filter that should preserve matches
- DateFilter df1 = DateFilter.Before("datefield", now);
-
+ //DateFilter df1 = DateFilter.Before("datefield", now);
+ RangeFilter df1 = new RangeFilter("datefield", DateTools.timeToString(now - 2000, DateTools.Resolution.MILLISECOND),
+ DateTools.timeToString(now, DateTools.Resolution.MILLISECOND), false, true);
// filter that should discard matches
- DateFilter df2 = DateFilter.Before("datefield", now - 999999);
+ //DateFilter df2 = DateFilter.Before("datefield", now - 999999);
+ RangeFilter df2 = new RangeFilter("datefield", DateTools.timeToString(0, DateTools.Resolution.MILLISECOND),
+ DateTools.timeToString(now - 2000, DateTools.Resolution.MILLISECOND), true, false);
- // search something that doesn't exist with DateFilter
+    // search for something that doesn't exist, with the range filter applied
Query query1 = new TermQuery(new Term("body", "NoMatchForThis"));
// search for something that does exists
@@ -117,7 +116,7 @@
Document doc = new Document();
// add time that is in the future
- doc.add(new Field("datefield", DateField.timeToString(now + 888888), Field.Store.YES, Field.Index.UN_TOKENIZED));
+ doc.add(new Field("datefield", DateTools.timeToString(now + 888888, DateTools.Resolution.MILLISECOND), Field.Store.YES, Field.Index.UN_TOKENIZED));
doc.add(new Field("body", "Today is a very sunny day in New York City", Field.Store.YES, Field.Index.TOKENIZED));
writer.addDocument(doc);
writer.optimize();
@@ -126,12 +125,15 @@
IndexSearcher searcher = new IndexSearcher(indexStore);
// filter that should preserve matches
- DateFilter df1 = DateFilter.After("datefield", now);
-
+ //DateFilter df1 = DateFilter.After("datefield", now);
+ RangeFilter df1 = new RangeFilter("datefield", DateTools.timeToString(now, DateTools.Resolution.MILLISECOND),
+ DateTools.timeToString(now + 999999, DateTools.Resolution.MILLISECOND), true, false);
// filter that should discard matches
- DateFilter df2 = DateFilter.After("datefield", now + 999999);
+ //DateFilter df2 = DateFilter.After("datefield", now + 999999);
+ RangeFilter df2 = new RangeFilter("datefield", DateTools.timeToString(now + 999999, DateTools.Resolution.MILLISECOND),
+ DateTools.timeToString(now + 999999999, DateTools.Resolution.MILLISECOND), false, true);
- // search something that doesn't exist with DateFilter
+    // search for something that doesn't exist, with the range filter applied
Query query1 = new TermQuery(new Term("body", "NoMatchForThis"));
// search for something that does exists
Index: src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java
===================================================================
--- src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java (revision 386892)
+++ src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java (working copy)
@@ -60,7 +60,7 @@
assertEquals(1, hits.length());
// delete a document:
- is.getIndexReader().delete(0);
+ is.getIndexReader().deleteDocument(0);
hits = is.search(new MatchAllDocsQuery());
assertEquals(2, hits.length());
Index: src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
===================================================================
--- src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java (revision 386892)
+++ src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java (working copy)
@@ -17,26 +17,16 @@
* limitations under the License.
*/
+import junit.framework.TestCase;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
-
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
-
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
-
-import org.apache.lucene.search.Hits;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Similarity;
-import org.apache.lucene.search.DefaultSimilarity;
-
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
-import junit.framework.TestCase;
-
import java.text.DecimalFormat;
/**
@@ -92,38 +82,38 @@
// d1 is an "ok" match for: albino elephant
{
Document d1 = new Document();
- d1.add(Field.Keyword("id", "d1"));
- d1.add(Field.Text("hed", "elephant"));
- d1.add(Field.Text("dek", "elephant"));
+ d1.add(new Field("id", "d1", Field.Store.YES, Field.Index.UN_TOKENIZED));//Field.Keyword("id", "d1"));
+ d1.add(new Field("hed", "elephant", Field.Store.YES, Field.Index.TOKENIZED));//Field.Text("hed", "elephant"));
+ d1.add(new Field("dek", "elephant", Field.Store.YES, Field.Index.TOKENIZED));//Field.Text("dek", "elephant"));
writer.addDocument(d1);
}
// d2 is a "good" match for: albino elephant
{
Document d2 = new Document();
- d2.add(Field.Keyword("id", "d2"));
- d2.add(Field.Text("hed", "elephant"));
- d2.add(Field.Text("dek", "albino"));
- d2.add(Field.Text("dek", "elephant"));
+ d2.add(new Field("id", "d2", Field.Store.YES, Field.Index.UN_TOKENIZED));//Field.Keyword("id", "d2"));
+ d2.add(new Field("hed", "elephant", Field.Store.YES, Field.Index.TOKENIZED));//Field.Text("hed", "elephant"));
+ d2.add(new Field("dek", "albino", Field.Store.YES, Field.Index.TOKENIZED));//Field.Text("dek", "albino"));
+ d2.add(new Field("dek", "elephant", Field.Store.YES, Field.Index.TOKENIZED));//Field.Text("dek", "elephant"));
writer.addDocument(d2);
}
// d3 is a "better" match for: albino elephant
{
Document d3 = new Document();
- d3.add(Field.Keyword("id", "d3"));
- d3.add(Field.Text("hed", "albino"));
- d3.add(Field.Text("hed", "elephant"));
+ d3.add(new Field("id", "d3", Field.Store.YES, Field.Index.UN_TOKENIZED));//Field.Keyword("id", "d3"));
+ d3.add(new Field("hed", "albino", Field.Store.YES, Field.Index.TOKENIZED));//Field.Text("hed", "albino"));
+ d3.add(new Field("hed", "elephant", Field.Store.YES, Field.Index.TOKENIZED));//Field.Text("hed", "elephant"));
writer.addDocument(d3);
}
// d4 is the "best" match for: albino elephant
{
Document d4 = new Document();
- d4.add(Field.Keyword("id", "d4"));
- d4.add(Field.Text("hed", "albino"));
- d4.add(Field.Text("hed", "elephant"));
- d4.add(Field.Text("dek", "albino"));
+ d4.add(new Field("id", "d4", Field.Store.YES, Field.Index.UN_TOKENIZED));//Field.Keyword("id", "d4"));
+ d4.add(new Field("hed", "albino", Field.Store.YES, Field.Index.TOKENIZED));//Field.Text("hed", "albino"));
+ d4.add(new Field("hed", "elephant", Field.Store.YES, Field.Index.TOKENIZED));//Field.Text("hed", "elephant"));
+ d4.add(new Field("dek", "albino", Field.Store.YES, Field.Index.TOKENIZED));//Field.Text("dek", "albino"));
writer.addDocument(d4);
}
@@ -241,13 +231,13 @@
DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.0f);
q1.add(tq("hed","albino"));
q1.add(tq("dek","albino"));
- q.add(q1,true,false);
+ q.add(q1,BooleanClause.Occur.MUST);//true,false);
}
{
DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.0f);
q2.add(tq("hed","elephant"));
q2.add(tq("dek","elephant"));
- q.add(q2,true,false);
+ q.add(q2, BooleanClause.Occur.MUST);//true,false);
}
@@ -275,13 +265,13 @@
DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.0f);
q1.add(tq("hed","albino"));
q1.add(tq("dek","albino"));
- q.add(q1,false,false);
+ q.add(q1, BooleanClause.Occur.SHOULD);//false,false);
}
{
DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.0f);
q2.add(tq("hed","elephant"));
q2.add(tq("dek","elephant"));
- q.add(q2,false,false);
+ q.add(q2, BooleanClause.Occur.SHOULD);//false,false);
}
@@ -314,13 +304,13 @@
DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.01f);
q1.add(tq("hed","albino"));
q1.add(tq("dek","albino"));
- q.add(q1,false,false);
+ q.add(q1, BooleanClause.Occur.SHOULD);//false,false);
}
{
DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.01f);
q2.add(tq("hed","elephant"));
q2.add(tq("dek","elephant"));
- q.add(q2,false,false);
+ q.add(q2, BooleanClause.Occur.SHOULD);//false,false);
}
@@ -372,13 +362,13 @@
DisjunctionMaxQuery q1 = new DisjunctionMaxQuery(0.01f);
q1.add(tq("hed","albino", 1.5f));
q1.add(tq("dek","albino"));
- q.add(q1,false,false);
+ q.add(q1, BooleanClause.Occur.SHOULD);//false,false);
}
{
DisjunctionMaxQuery q2 = new DisjunctionMaxQuery(0.01f);
q2.add(tq("hed","elephant", 1.5f));
q2.add(tq("dek","elephant"));
- q.add(q2,false,false);
+ q.add(q2, BooleanClause.Occur.SHOULD);//false,false);
}
Index: src/test/org/apache/lucene/search/TestMultiSearcher.java
===================================================================
--- src/test/org/apache/lucene/search/TestMultiSearcher.java (revision 386892)
+++ src/test/org/apache/lucene/search/TestMultiSearcher.java (working copy)
@@ -99,7 +99,8 @@
writerB.close();
// creating the query
- Query query = QueryParser.parse("handle:1", "fulltext", new StandardAnalyzer());
+ QueryParser parser = new QueryParser("fulltext", new StandardAnalyzer());
+ Query query = parser.parse("handle:1");
// building the searchables
Searcher[] searchers = new Searcher[2];
@@ -156,7 +157,7 @@
// deleting the document just added, this will cause a different exception to take place
Term term = new Term("id", "doc1");
IndexReader readerB = IndexReader.open(indexStoreB);
- readerB.delete(term);
+ readerB.deleteDocuments(term);
readerB.close();
// optimizing the index with the writer
Index: src/test/org/apache/lucene/search/TestMultiSearcherRanking.java
===================================================================
--- src/test/org/apache/lucene/search/TestMultiSearcherRanking.java (revision 386892)
+++ src/test/org/apache/lucene/search/TestMultiSearcherRanking.java (working copy)
@@ -16,10 +16,7 @@
* limitations under the License.
*/
-import java.io.IOException;
-
import junit.framework.TestCase;
-
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
@@ -29,6 +26,8 @@
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
+import java.io.IOException;
+
/**
* Tests {@link MultiSearcher} ranking, i.e. makes sure this bug is fixed:
* http://issues.apache.org/bugzilla/show_bug.cgi?id=31841
@@ -88,8 +87,8 @@
private void checkQuery(String queryStr) throws IOException, ParseException {
// check result hit ranking
if(verbose) System.out.println("Query: " + queryStr);
- Query query = QueryParser.parse(queryStr, FIELD_NAME,
- new StandardAnalyzer());
+ QueryParser queryParser = new QueryParser(FIELD_NAME, new StandardAnalyzer());
+ Query query = queryParser.parse(queryStr);
Hits multiSearcherHits = multiSearcher.search(query);
Hits singleSearcherHits = singleSearcher.search(query);
assertEquals(multiSearcherHits.length(), singleSearcherHits.length());
Index: src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java
===================================================================
--- src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java (revision 386892)
+++ src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java (working copy)
@@ -16,18 +16,16 @@
* limitations under the License.
*/
-import org.apache.lucene.search.IndexSearcher;
+import junit.framework.TestCase;
+import org.apache.lucene.analysis.SimpleAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermEnum;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.analysis.SimpleAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import junit.framework.TestCase;
-
import java.io.IOException;
import java.util.LinkedList;
@@ -73,8 +71,10 @@
IndexSearcher searcher = new IndexSearcher(indexStore);
- PhrasePrefixQuery query1 = new PhrasePrefixQuery();
- PhrasePrefixQuery query2 = new PhrasePrefixQuery();
+ //PhrasePrefixQuery query1 = new PhrasePrefixQuery();
+ MultiPhraseQuery query1 = new MultiPhraseQuery();
+ //PhrasePrefixQuery query2 = new PhrasePrefixQuery();
+ MultiPhraseQuery query2 = new MultiPhraseQuery();
query1.add(new Term("body", "blueberry"));
query2.add(new Term("body", "strawberry"));
Index: src/test/org/apache/lucene/search/TestConstantScoreRangeQuery.java
===================================================================
--- src/test/org/apache/lucene/search/TestConstantScoreRangeQuery.java (revision 386892)
+++ src/test/org/apache/lucene/search/TestConstantScoreRangeQuery.java (working copy)
@@ -16,20 +16,17 @@
* limitations under the License.
*/
-import java.io.IOException;
-
-import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.store.Directory;
-
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-
import org.apache.lucene.analysis.WhitespaceAnalyzer;
-
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import java.io.IOException;
+
public class TestConstantScoreRangeQuery extends BaseTestRangeFilter {
/** threshold for comparing floats */
@@ -69,10 +66,10 @@
for (int i = 0; i < data.length; i++) {
Document doc = new Document();
- doc.add(Field.Keyword("id",String.valueOf(i)));
- doc.add(Field.Keyword("all","all"));
+ doc.add(new Field("id", String.valueOf(i), Field.Store.YES, Field.Index.UN_TOKENIZED));//Field.Keyword("id",String.valueOf(i)));
+ doc.add(new Field("all", "all", Field.Store.YES, Field.Index.UN_TOKENIZED));//Field.Keyword("all","all"));
if (null != data[i]) {
- doc.add(Field.Text("data",data[i]));
+ doc.add(new Field("data", data[i], Field.Store.YES, Field.Index.TOKENIZED));//Field.Text("data",data[i]));
}
writer.addDocument(doc);
}
@@ -182,8 +179,8 @@
// ConstantScoreRangeQuery and make sure hte order is the same
BooleanQuery q = new BooleanQuery();
- q.add(rq, T, F);
- q.add(csrq("data","1","6", T, T), T, F);
+ q.add(rq, BooleanClause.Occur.MUST);//T, F);
+ q.add(csrq("data","1","6", T, T), BooleanClause.Occur.MUST);//T, F);
Hits actual = search.search(q);
Index: src/test/org/apache/lucene/index/TestIndexWriter.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriter.java (revision 386892)
+++ src/test/org/apache/lucene/index/TestIndexWriter.java (working copy)
@@ -39,7 +39,7 @@
// delete 40 documents
reader = IndexReader.open(dir);
for (i = 0; i < 40; i++) {
- reader.delete(i);
+ reader.deleteDocument(i);
}
reader.close();
Index: src/test/org/apache/lucene/index/TestMultiReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestMultiReader.java (revision 386892)
+++ src/test/org/apache/lucene/index/TestMultiReader.java (working copy)
@@ -75,7 +75,7 @@
MultiReader reader = new MultiReader(dir, sis, false, readers);
assertTrue(reader != null);
assertEquals( 2, reader.numDocs() );
- reader.delete(0);
+ reader.deleteDocument(0);
assertEquals( 1, reader.numDocs() );
reader.undeleteAll();
assertEquals( 2, reader.numDocs() );
Index: src/test/org/apache/lucene/index/TestSegmentReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestSegmentReader.java (revision 386892)
+++ src/test/org/apache/lucene/index/TestSegmentReader.java (working copy)
@@ -77,7 +77,7 @@
SegmentReader deleteReader = SegmentReader.get(new SegmentInfo("seg-to-delete", 1, dir));
assertTrue(deleteReader != null);
assertTrue(deleteReader.numDocs() == 1);
- deleteReader.delete(0);
+ deleteReader.deleteDocument(0);
assertTrue(deleteReader.isDeleted(0) == true);
assertTrue(deleteReader.hasDeletions() == true);
assertTrue(deleteReader.numDocs() == 0);
Index: src/test/org/apache/lucene/index/TestIndexModifier.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexModifier.java (revision 386892)
+++ src/test/org/apache/lucene/index/TestIndexModifier.java (working copy)
@@ -16,14 +16,7 @@
* limitations under the License.
*/
-import java.io.File;
-import java.io.IOException;
-import java.util.EmptyStackException;
-import java.util.Random;
-import java.util.Stack;
-
import junit.framework.TestCase;
-
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
@@ -33,6 +26,12 @@
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.RAMDirectory;
+import java.io.File;
+import java.io.IOException;
+import java.util.EmptyStackException;
+import java.util.Random;
+import java.util.Stack;
+
/**
* Tests for the "IndexModifier" class, including accesses from two threads at the
* same time.
@@ -56,7 +55,7 @@
i.optimize();
assertEquals(2, i.docCount());
i.flush();
- i.delete(0);
+ i.deleteDocument(0);
assertEquals(1, i.docCount());
i.flush();
assertEquals(1, i.docCount());
@@ -64,7 +63,7 @@
i.addDocument(getDoc());
i.flush();
assertEquals(3, i.docCount());
- i.delete(allDocTerm);
+ i.deleteDocuments(allDocTerm);
assertEquals(0, i.docCount());
i.optimize();
assertEquals(0, i.docCount());
@@ -88,7 +87,7 @@
assertFalse(i.getUseCompoundFile());
// test setting properties when internally the reader is opened:
- i.delete(allDocTerm);
+ i.deleteDocuments(allDocTerm);
i.setMaxBufferedDocs(100);
i.setMergeFactor(25);
i.setMaxFieldLength(250000);
@@ -241,7 +240,7 @@
continue;
}
Term delTerm = new Term("id", new Integer(delId).toString());
- int delCount = index.delete(delTerm);
+ int delCount = index.deleteDocuments(delTerm);
if (delCount != 1) {
throw new RuntimeException("Internal error: " + threadNumber + " deleted " + delCount +
" documents, term=" + delTerm);
Index: src/test/org/apache/lucene/index/TestIndexReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexReader.java (revision 386892)
+++ src/test/org/apache/lucene/index/TestIndexReader.java (working copy)
@@ -217,7 +217,7 @@
// DELETE DOCUMENTS CONTAINING TERM: aaa
int deleted = 0;
reader = IndexReader.open(dir);
- deleted = reader.delete(searchTerm);
+ deleted = reader.deleteDocuments(searchTerm);
assertEquals("deleted count", 100, deleted);
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
@@ -290,7 +290,7 @@
// NOTE: the reader was created when only "aaa" documents were in
int deleted = 0;
try {
- deleted = reader.delete(searchTerm);
+ deleted = reader.deleteDocuments(searchTerm);
fail("Delete allowed on an index reader with stale segment information");
} catch (IOException e) {
/* success */
@@ -305,7 +305,7 @@
assertTermDocsCount("first reader", reader, searchTerm, 100);
assertTermDocsCount("first reader", reader, searchTerm2, 100);
- deleted = reader.delete(searchTerm);
+ deleted = reader.deleteDocuments(searchTerm);
assertEquals("deleted count", 100, deleted);
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm2));
@@ -384,13 +384,13 @@
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false);
IndexReader reader = IndexReader.open(dir);
try {
- reader.delete(0);
+ reader.deleteDocument(0);
fail("expected lock");
} catch(IOException e) {
// expected exception
}
IndexReader.unlock(dir); // this should not be done in the real world!
- reader.delete(0);
+ reader.deleteDocument(0);
reader.close();
writer.close();
}
@@ -402,8 +402,8 @@
addDocumentWithFields(writer);
writer.close();
IndexReader reader = IndexReader.open(dir);
- reader.delete(0);
- reader.delete(1);
+ reader.deleteDocument(0);
+ reader.deleteDocument(1);
reader.undeleteAll();
reader.close();
reader = IndexReader.open(dir);
@@ -463,7 +463,7 @@
// delete documents containing term: aaa
// when the reader is closed, the segment info is updated and
// the first reader is now stale
- reader2.delete(searchTerm1);
+ reader2.deleteDocuments(searchTerm1);
assertEquals("after delete 1", 100, reader2.docFreq(searchTerm1));
assertEquals("after delete 1", 100, reader2.docFreq(searchTerm2));
assertEquals("after delete 1", 100, reader2.docFreq(searchTerm3));
@@ -484,7 +484,7 @@
// ATTEMPT TO DELETE FROM STALE READER
// delete documents containing term: bbb
try {
- reader1.delete(searchTerm2);
+ reader1.deleteDocuments(searchTerm2);
fail("Delete allowed from a stale index reader");
} catch (IOException e) {
/* success */
@@ -500,7 +500,7 @@
assertTermDocsCount("reopened", reader1, searchTerm2, 100);
assertTermDocsCount("reopened", reader1, searchTerm3, 100);
- reader1.delete(searchTerm2);
+ reader1.deleteDocuments(searchTerm2);
assertEquals("deleted 2", 100, reader1.docFreq(searchTerm1));
assertEquals("deleted 2", 100, reader1.docFreq(searchTerm2));
assertEquals("deleted 2", 100, reader1.docFreq(searchTerm3));
Index: src/test/org/apache/lucene/document/TestBinaryDocument.java
===================================================================
--- src/test/org/apache/lucene/document/TestBinaryDocument.java (revision 386892)
+++ src/test/org/apache/lucene/document/TestBinaryDocument.java (working copy)
@@ -91,7 +91,7 @@
assertTrue(stringFldCompressedTest.equals(binaryValCompressed));
/** delete the document from index */
- reader.delete(0);
+ reader.deleteDocument(0);
assertEquals(0, reader.numDocs());
reader.close();
Index: src/java/org/apache/lucene/queryParser/MultiFieldQueryParser.java
===================================================================
--- src/java/org/apache/lucene/queryParser/MultiFieldQueryParser.java (revision 386892)
+++ src/java/org/apache/lucene/queryParser/MultiFieldQueryParser.java (working copy)
@@ -16,8 +16,6 @@
* limitations under the License.
*/
-import java.util.Vector;
-
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
@@ -25,6 +23,8 @@
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
+import java.util.Vector;
+
/**
* A QueryParser which constructs queries to search multiple fields.
*
@@ -87,22 +87,8 @@
protected Query getFieldQuery(String field, String queryText) throws ParseException {
return getFieldQuery(field, queryText, 0);
}
-
- /**
- * @deprecated use {@link #getFieldQuery(String, String)}
- */
- protected Query getFieldQuery(String field, Analyzer analyzer, String queryText)
- throws ParseException {
- return getFieldQuery(field, queryText);
- }
-
- /**
- * @deprecated use {@link #getFuzzyQuery(String, String, float)}
- */
- protected Query getFuzzyQuery(String field, String termStr) throws ParseException {
- return getFuzzyQuery(field, termStr, fuzzyMinSim);
- }
+
protected Query getFuzzyQuery(String field, String termStr, float minSimilarity) throws ParseException
{
if (field == null) {
@@ -140,14 +126,7 @@
}
return super.getWildcardQuery(field, termStr);
}
-
- /** @throws ParseException
- * @deprecated use {@link #getRangeQuery(String, String, String, boolean)}
- */
- protected Query getRangeQuery(String field, Analyzer analyzer,
- String part1, String part2, boolean inclusive) throws ParseException {
- return getRangeQuery(field, part1, part2, inclusive);
- }
+
protected Query getRangeQuery(String field, String part1, String part2, boolean inclusive) throws ParseException {
if (field == null) {
@@ -162,70 +141,10 @@
}
- /** @deprecated */
- public static final int NORMAL_FIELD = 0;
- /** @deprecated */
- public static final int REQUIRED_FIELD = 1;
- /** @deprecated */
- public static final int PROHIBITED_FIELD = 2;
- /**
- * @deprecated use {@link #MultiFieldQueryParser(String[], Analyzer)} instead
- */
- public MultiFieldQueryParser(QueryParserTokenManager tm)
- {
- super(tm);
- }
/**
- * @deprecated use {@link #MultiFieldQueryParser(String[], Analyzer)} instead
- */
- public MultiFieldQueryParser(CharStream stream)
- {
- super(stream);
- }
-
- /**
- * @deprecated use {@link #MultiFieldQueryParser(String[], Analyzer)} instead
- */
- public MultiFieldQueryParser(String f, Analyzer a)
- {
- super(f, a);
- }
-
- /**
* Parses a query which searches on the fields specified.
- * If x fields are specified, this effectively constructs:
- *
- *
- * (field1:query) (field2:query) (field3:query)...(fieldx:query)
- *
- *
- * @param query Query string to parse
- * @param fields Fields to search on
- * @param analyzer Analyzer to use
- * @throws ParseException if query parsing fails
- * @throws TokenMgrError if query parsing fails
- * @deprecated use {@link #parse(String)} instead but note that it
- * returns a different query for queries where all terms are required:
- * its query excepts all terms, no matter in what field they occur whereas
- * the query built by this (deprecated) method expected all terms in all fields
- * at the same time.
- */
- public static Query parse(String query, String[] fields, Analyzer analyzer)
- throws ParseException
- {
- BooleanQuery bQuery = new BooleanQuery();
- for (int i = 0; i < fields.length; i++)
- {
- Query q = parse(query, fields[i], analyzer);
- bQuery.add(q, BooleanClause.Occur.SHOULD);
- }
- return bQuery;
- }
-
- /**
- * Parses a query which searches on the fields specified.
*
* If x fields are specified, this effectively constructs: *
@@ -256,64 +175,6 @@
return bQuery;
}
- /**
- * Parses a query, searching on the fields specified.
- * Use this if you need to specify certain fields as required,
- * and others as prohibited.
- *
- * Usage:
- *
- * String[] fields = {"filename", "contents", "description"};
- * int[] flags = {MultiFieldQueryParser.NORMAL_FIELD,
- * MultiFieldQueryParser.REQUIRED_FIELD,
- * MultiFieldQueryParser.PROHIBITED_FIELD,};
- * parse(query, fields, flags, analyzer);
- *
- *
- *
- * The code above would construct a query:
- *
- *
- * (filename:query) +(contents:query) -(description:query)
- *
- *
- *
- * @param query Query string to parse
- * @param fields Fields to search on
- * @param flags Flags describing the fields
- * @param analyzer Analyzer to use
- * @throws ParseException if query parsing fails
- * @throws TokenMgrError if query parsing fails
- * @throws IllegalArgumentException if the length of the fields array differs
- * from the length of the flags array
- * @deprecated use {@link #parse(String, String[], BooleanClause.Occur[], Analyzer)} instead
- */
- public static Query parse(String query, String[] fields, int[] flags,
- Analyzer analyzer) throws ParseException
- {
- if (fields.length != flags.length)
- throw new IllegalArgumentException("fields.length != flags.length");
- BooleanQuery bQuery = new BooleanQuery();
- for (int i = 0; i < fields.length; i++)
- {
- QueryParser qp = new QueryParser(fields[i], analyzer);
- Query q = qp.parse(query);
- int flag = flags[i];
- switch (flag)
- {
- case REQUIRED_FIELD:
- bQuery.add(q, BooleanClause.Occur.MUST);
- break;
- case PROHIBITED_FIELD:
- bQuery.add(q, BooleanClause.Occur.MUST_NOT);
- break;
- default:
- bQuery.add(q, BooleanClause.Occur.SHOULD);
- break;
- }
- }
- return bQuery;
- }
/**
* Parses a query, searching on the fields specified.
@@ -359,66 +220,8 @@
return bQuery;
}
+
/**
- * Parses a query, searching on the fields specified. Use this if you need to
- * specify certain fields as required, and others as prohibited.
- *
- *
- * Usage:
- *
- * String[] fields = { "filename", "contents", "description" };
- * int[] flags = { MultiFieldQueryParser.NORMAL_FIELD,
- * MultiFieldQueryParser.REQUIRED_FIELD,
- * MultiFieldQueryParser.PROHIBITED_FIELD, };
- * parse(query, fields, flags, analyzer);
- *
- *
- *
- *
- * The code above would construct a query:
- *
- *
- * (filename:query1) +(contents:query2) -(description:query3)
- *
- *
- *
- * @param queries Queries string to parse
- * @param fields Fields to search on
- * @param flags Flags describing the fields
- * @param analyzer Analyzer to use
- * @throws ParseException if query parsing fails
- * @throws TokenMgrError if query parsing fails
- * @throws IllegalArgumentException if the length of the queries, fields, and flags array differ
- * @deprecated use {@link #parse(String[], String[], BooleanClause.Occur[], Analyzer)} instead
- */
- public static Query parse(String[] queries, String[] fields, int[] flags,
- Analyzer analyzer) throws ParseException
- {
- if (!(queries.length == fields.length && queries.length == flags.length))
- throw new IllegalArgumentException("queries, fields, and flags array have have different length");
- BooleanQuery bQuery = new BooleanQuery();
- for (int i = 0; i < fields.length; i++)
- {
- QueryParser qp = new QueryParser(fields[i], analyzer);
- Query q = qp.parse(queries[i]);
- int flag = flags[i];
- switch (flag)
- {
- case REQUIRED_FIELD:
- bQuery.add(q, BooleanClause.Occur.MUST);
- break;
- case PROHIBITED_FIELD:
- bQuery.add(q, BooleanClause.Occur.MUST_NOT);
- break;
- default:
- bQuery.add(q, BooleanClause.Occur.SHOULD);
- break;
- }
- }
- return bQuery;
- }
-
- /**
* Parses a query, searching on the fields specified.
* Use this if you need to specify certain fields as required,
* and others as prohibited.
Index: src/java/org/apache/lucene/queryParser/QueryParser.java
===================================================================
--- src/java/org/apache/lucene/queryParser/QueryParser.java (revision 386892)
+++ src/java/org/apache/lucene/queryParser/QueryParser.java (working copy)
@@ -1,16 +1,33 @@
/* Generated By:JavaCC: Do not edit this line. QueryParser.java */
package org.apache.lucene.queryParser;
-import java.util.Vector;
-import java.io.*;
-import java.text.*;
-import java.util.*;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.document.DateField;
+import org.apache.lucene.document.DateTools;
import org.apache.lucene.index.Term;
-import org.apache.lucene.analysis.*;
-import org.apache.lucene.document.*;
-import org.apache.lucene.search.*;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.FuzzyQuery;
+import org.apache.lucene.search.MultiPhraseQuery;
+import org.apache.lucene.search.PhraseQuery;
+import org.apache.lucene.search.PrefixQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.RangeQuery;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.util.Parameter;
+import java.io.IOException;
+import java.io.StringReader;
+import java.text.DateFormat;
+import java.util.ArrayList;
+import java.util.Calendar;
+import java.util.Date;
+import java.util.List;
+import java.util.Locale;
+import java.util.Vector;
+
/**
* This class is generated by JavaCC. The most important method is
* {@link #parse(String)}.
@@ -71,11 +88,6 @@
private static final int MOD_NOT = 10;
private static final int MOD_REQ = 11;
- /** @deprecated use {@link #OR_OPERATOR} instead */
- public static final int DEFAULT_OPERATOR_OR = 0;
- /** @deprecated use {@link #AND_OPERATOR} instead */
- public static final int DEFAULT_OPERATOR_AND = 1;
-
// make it possible to call setDefaultOperator() without accessing
// the nested class:
/** Alternative form of QueryParser.Operator.AND */
@@ -106,20 +118,6 @@
static public final Operator AND = new Operator("AND");
}
- /** Parses a query string, returning a {@link org.apache.lucene.search.Query}.
- * @param query the query string to be parsed.
- * @param field the default field for query terms.
- * @param analyzer used to find terms in the query text.
- * @throws ParseException if the parsing fails
- *
- * @deprecated Use an instance of QueryParser and the {@link #parse(String)} method instead.
- */
- static public Query parse(String query, String field, Analyzer analyzer)
- throws ParseException {
- QueryParser parser = new QueryParser(field, analyzer);
- return parser.parse(query);
- }
-
/** Constructs a query parser.
* @param f the default field for query terms.
* @param a used to find terms in the query text.
@@ -209,24 +207,6 @@
/**
* Sets the boolean operator of the QueryParser.
- * In default mode (DEFAULT_OPERATOR_OR) terms without any modifiers
- * are considered optional: for example capital of Hungary is equal to
- * capital OR of OR Hungary.
- * In DEFAULT_OPERATOR_AND terms are considered to be in conjuction: the
- * above mentioned query is parsed as capital AND of AND Hungary
- * @deprecated use {@link #setDefaultOperator(QueryParser.Operator)} instead
- */
- public void setOperator(int op) {
- if (op == DEFAULT_OPERATOR_AND)
- this.operator = AND_OPERATOR;
- else if (op == DEFAULT_OPERATOR_OR)
- this.operator = OR_OPERATOR;
- else
- throw new IllegalArgumentException("Unknown operator " + op);
- }
-
- /**
- * Sets the boolean operator of the QueryParser.
* In default mode (OR_OPERATOR) terms without any modifiers
* are considered optional: for example capital of Hungary is equal to
* capital OR of OR Hungary.
@@ -237,19 +217,6 @@
this.operator = op;
}
- /**
- * Gets implicit operator setting, which will be either DEFAULT_OPERATOR_AND
- * or DEFAULT_OPERATOR_OR.
- * @deprecated use {@link #getDefaultOperator()} instead
- */
- public int getOperator() {
- if(operator == AND_OPERATOR)
- return DEFAULT_OPERATOR_AND;
- else if(operator == OR_OPERATOR)
- return DEFAULT_OPERATOR_OR;
- else
- throw new IllegalStateException("Unknown operator " + operator);
- }
/**
* Gets implicit operator setting, which will be either AND_OPERATOR
@@ -259,14 +226,6 @@
return operator;
}
- /**
- * Whether terms of wildcard, prefix, fuzzy and range queries are to be automatically
- * lower-cased or not. Default is true.
- * @deprecated use {@link #setLowercaseExpandedTerms(boolean)} instead
- */
- public void setLowercaseWildcardTerms(boolean lowercaseExpandedTerms) {
- this.lowercaseExpandedTerms = lowercaseExpandedTerms;
- }
/**
* Whether terms of wildcard, prefix, fuzzy and range queries are to be automatically
@@ -277,13 +236,6 @@
}
/**
- * @deprecated use {@link #getLowercaseExpandedTerms()} instead
- */
- public boolean getLowercaseWildcardTerms() {
- return lowercaseExpandedTerms;
- }
-
- /**
* @see #setLowercaseExpandedTerms(boolean)
*/
public boolean getLowercaseExpandedTerms() {
@@ -354,18 +306,6 @@
throw new RuntimeException("Clause cannot be both required and prohibited");
}
- /**
- * Note that parameter analyzer is ignored. Calls inside the parser always
- * use class member analyzer.
- *
- * @exception ParseException throw in overridden method to disallow
- * @deprecated use {@link #getFieldQuery(String, String)}
- */
- protected Query getFieldQuery(String field,
- Analyzer analyzer,
- String queryText) throws ParseException {
- return getFieldQuery(field, queryText);
- }
/**
* @exception ParseException throw in overridden method to disallow
@@ -451,20 +391,6 @@
}
/**
- * Note that parameter analyzer is ignored. Calls inside the parser always
- * use class member analyzer.
- *
- * @exception ParseException throw in overridden method to disallow
- * @deprecated use {@link #getFieldQuery(String, String, int)}
- */
- protected Query getFieldQuery(String field,
- Analyzer analyzer,
- String queryText,
- int slop) throws ParseException {
- return getFieldQuery(field, queryText, slop);
- }
-
- /**
* Base implementation delegates to {@link #getFieldQuery(String,String)}.
* This method may be overridden, for example, to return
* a SpanNearQuery instead of a PhraseQuery.
@@ -485,20 +411,6 @@
return query;
}
- /**
- * Note that parameter analyzer is ignored. Calls inside the parser always
- * use class member analyzer.
- *
- * @exception ParseException throw in overridden method to disallow
- * @deprecated use {@link #getRangeQuery(String, String, String, boolean)}
- */
- protected Query getRangeQuery(String field,
- Analyzer analyzer,
- String part1,
- String part2,
- boolean inclusive) throws ParseException {
- return getRangeQuery(field, part1, part2, inclusive);
- }
/**
* @exception ParseException throw in overridden method to disallow
@@ -642,12 +554,6 @@
return new PrefixQuery(t);
}
- /**
- * @deprecated use {@link #getFuzzyQuery(String, String, float)}
- */
- protected Query getFuzzyQuery(String field, String termStr) throws ParseException {
- return getFuzzyQuery(field, termStr, fuzzyMinSim);
- }
/**
* Factory method for generating a query (similar to
@@ -952,11 +858,11 @@
{if (true) throw new ParseException("Minimum similarity for a FuzzyQuery has to be between 0.0f and 1.0f !");}
}
if(fms == fuzzyMinSim)
- q = getFuzzyQuery(field, termImage);
+ q = getFuzzyQuery(field, termImage, fuzzyMinSim);
else
q = getFuzzyQuery(field, termImage, fms);
} else {
- q = getFieldQuery(field, analyzer, termImage);
+ q = getFieldQuery(field, termImage);
}
break;
case RANGEIN_START:
@@ -1013,7 +919,7 @@
} else {
goop2.image = discardEscapeChar(goop2.image);
}
- q = getRangeQuery(field, analyzer, goop1.image, goop2.image, true);
+ q = getRangeQuery(field, goop1.image, goop2.image, true);
break;
case RANGEEX_START:
jj_consume_token(RANGEEX_START);
@@ -1070,7 +976,7 @@
goop2.image = discardEscapeChar(goop2.image);
}
- q = getRangeQuery(field, analyzer, goop1.image, goop2.image, false);
+ q = getRangeQuery(field, goop1.image, goop2.image, false);
break;
case QUOTED:
term = jj_consume_token(QUOTED);
@@ -1099,7 +1005,7 @@
}
catch (Exception ignored) { }
}
- q = getFieldQuery(field, analyzer, term.image.substring(1, term.image.length()-1), s);
+ q = getFieldQuery(field, term.image.substring(1, term.image.length()-1), s);
break;
default:
jj_la1[21] = jj_gen;
Index: src/java/org/apache/lucene/queryParser/CharStream.java
===================================================================
--- src/java/org/apache/lucene/queryParser/CharStream.java (revision 386892)
+++ src/java/org/apache/lucene/queryParser/CharStream.java (working copy)
@@ -26,20 +26,6 @@
char readChar() throws java.io.IOException;
/**
- * Returns the column position of the character last read.
- * @deprecated
- * @see #getEndColumn
- */
- int getColumn();
-
- /**
- * Returns the line number of the character last read.
- * @deprecated
- * @see #getEndLine
- */
- int getLine();
-
- /**
* Returns the column number of the last character for current token (being
* matched after the last call to BeginTOken).
*/
Index: src/java/org/apache/lucene/analysis/de/WordlistLoader.java
===================================================================
--- src/java/org/apache/lucene/analysis/de/WordlistLoader.java (revision 386892)
+++ src/java/org/apache/lucene/analysis/de/WordlistLoader.java (working copy)
@@ -1,111 +0,0 @@
-package org.apache.lucene.analysis.de;
-
-/**
- * Copyright 2004 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.File;
-import java.io.FileReader;
-import java.io.IOException;
-import java.io.LineNumberReader;
-import java.util.HashSet;
-import java.util.Hashtable;
-import java.util.Iterator;
-
-/**
- * Loader for text files that represent a list of stopwords.
- *
- * @deprecated Use {@link org.apache.lucene.analysis.WordlistLoader} instead
- *
- * @author Gerhard Schwarz
- * @version $Id$
- */
-public class WordlistLoader {
-
- /**
- * Loads a text file and adds every line as an entry to a HashSet (omitting
- * leading and trailing whitespace). Every line of the file should contain only
- * one word. The words need to be in lowercase if you make use of an
- * Analyzer which uses LowerCaseFilter (like GermanAnalyzer).
- *
- * @param wordfile File containing the wordlist
- * @return A HashSet with the file's words
- */
- public static HashSet getWordSet(File wordfile) throws IOException {
- HashSet result = new HashSet();
- FileReader freader = null;
- LineNumberReader lnr = null;
- try {
- freader = new FileReader(wordfile);
- lnr = new LineNumberReader(freader);
- String word = null;
- while ((word = lnr.readLine()) != null) {
- result.add(word.trim());
- }
- }
- finally {
- if (lnr != null)
- lnr.close();
- if (freader != null)
- freader.close();
- }
- return result;
- }
-
- /**
- * @param path Path to the wordlist
- * @param wordfile Name of the wordlist
- *
- * @deprecated Use {@link #getWordSet(File)} getWordSet(File)} instead
- */
- public static Hashtable getWordtable(String path, String wordfile) throws IOException {
- return getWordtable(new File(path, wordfile));
- }
-
- /**
- * @param wordfile Complete path to the wordlist
- *
- * @deprecated Use {@link #getWordSet(File)} getWordSet(File)} instead
- */
- public static Hashtable getWordtable(String wordfile) throws IOException {
- return getWordtable(new File(wordfile));
- }
-
- /**
- * @param wordfile File object that points to the wordlist
- *
- * @deprecated Use {@link #getWordSet(File)} getWordSet(File)} instead
- */
- public static Hashtable getWordtable(File wordfile) throws IOException {
- HashSet wordSet = (HashSet)getWordSet(wordfile);
- Hashtable result = makeWordTable(wordSet);
- return result;
- }
-
- /**
- * Builds a wordlist table, using words as both keys and values
- * for backward compatibility.
- *
- * @param wordSet stopword set
- */
- private static Hashtable makeWordTable(HashSet wordSet) {
- Hashtable table = new Hashtable();
- for (Iterator iter = wordSet.iterator(); iter.hasNext();) {
- String word = (String)iter.next();
- table.put(word, word);
- }
- return table;
- }
-}
Index: src/java/org/apache/lucene/analysis/standard/CharStream.java
===================================================================
--- src/java/org/apache/lucene/analysis/standard/CharStream.java (revision 386892)
+++ src/java/org/apache/lucene/analysis/standard/CharStream.java (working copy)
@@ -26,20 +26,6 @@
char readChar() throws java.io.IOException;
/**
- * Returns the column position of the character last read.
- * @deprecated
- * @see #getEndColumn
- */
- int getColumn();
-
- /**
- * Returns the line number of the character last read.
- * @deprecated
- * @see #getEndLine
- */
- int getLine();
-
- /**
* Returns the column number of the last character for current token (being
* matched after the last call to BeginTOken).
*/
Index: src/java/org/apache/lucene/analysis/Analyzer.java
===================================================================
--- src/java/org/apache/lucene/analysis/Analyzer.java (revision 386892)
+++ src/java/org/apache/lucene/analysis/Analyzer.java (working copy)
@@ -34,22 +34,9 @@
compatibility with older version. Override to allow Analyzer to choose
strategy based on document and/or field. Must be able to handle null
field name for backward compatibility. */
- public TokenStream tokenStream(String fieldName, Reader reader)
- {
- // implemented for backward compatibility
- return tokenStream(reader);
- }
-
- /** Creates a TokenStream which tokenizes all the text in the provided
- * Reader. Provided for backward compatibility only.
- * @deprecated use tokenStream(String, Reader) instead.
- * @see #tokenStream(String, Reader)
- */
- public TokenStream tokenStream(Reader reader)
- {
- return tokenStream(null, reader);
- }
+ public abstract TokenStream tokenStream(String fieldName, Reader reader);
+
/**
* Invoked before indexing a Field instance if
* terms have already been added to that field. This allows custom
Index: src/java/org/apache/lucene/analysis/TokenFilter.java
===================================================================
--- src/java/org/apache/lucene/analysis/TokenFilter.java (revision 386892)
+++ src/java/org/apache/lucene/analysis/TokenFilter.java (working copy)
@@ -27,10 +27,6 @@
/** The source of tokens for this filter. */
protected TokenStream input;
- /** Call TokenFilter(TokenStream) instead.
- * @deprecated */
- protected TokenFilter() {}
-
/** Construct a token stream filtering the given input. */
protected TokenFilter(TokenStream input) {
this.input = input;
Index: src/java/org/apache/lucene/analysis/StopFilter.java
===================================================================
--- src/java/org/apache/lucene/analysis/StopFilter.java (revision 386892)
+++ src/java/org/apache/lucene/analysis/StopFilter.java (working copy)
@@ -18,7 +18,6 @@
import java.io.IOException;
import java.util.HashSet;
-import java.util.Hashtable;
import java.util.Set;
/**
@@ -48,25 +47,6 @@
this.stopWords = makeStopSet(stopWords, ignoreCase);
}
- /**
- * Constructs a filter which removes words from the input
- * TokenStream that are named in the Hashtable.
- *
- * @deprecated Use {@link #StopFilter(TokenStream, Set)} instead
- */
- public StopFilter(TokenStream in, Hashtable stopTable) {
- this(in, stopTable, false);
- }
- /**
- * Constructs a filter which removes words from the input
- * TokenStream that are named in the Hashtable.
- * If ignoreCase is true, all keys in the stopTable should already
- * be lowercased.
- * @deprecated Use {@link #StopFilter(TokenStream, Set)} instead
- */
- public StopFilter(TokenStream in, Hashtable stopTable, boolean ignoreCase) {
- this(in, stopTable.keySet(), ignoreCase);
- }
/**
* Construct a token stream filtering the given input.
@@ -92,34 +72,6 @@
public StopFilter(TokenStream in, Set stopWords) {
this(in, stopWords, false);
}
- /**
- * Builds a Hashtable from an array of stop words,
- * appropriate for passing into the StopFilter constructor.
- * This permits this table construction to be cached once when
- * an Analyzer is constructed.
- *
- * @deprecated Use {@link #makeStopSet(String[])} instead.
- */
- public static final Hashtable makeStopTable(String[] stopWords) {
- return makeStopTable(stopWords, false);
- }
-
- /**
- * Builds a Hashtable from an array of stop words,
- * appropriate for passing into the StopFilter constructor.
- * This permits this table construction to be cached once when
- * an Analyzer is constructed.
- * @deprecated Use {@link #makeStopSet(java.lang.String[], boolean)} instead.
- */
- public static final Hashtable makeStopTable(String [] stopWords, boolean ignoreCase) {
- Hashtable stopTable = new Hashtable(stopWords.length);
- for (int i = 0; i < stopWords.length; i++)
- {
- String stopWord = ignoreCase ? stopWords[i].toLowerCase() : stopWords[i];
- stopTable.put(stopWord, stopWord);
- }
- return stopTable;
- }
/**
* Builds a Set from an array of stop words,
Index: src/java/org/apache/lucene/analysis/WordlistLoader.java
===================================================================
--- src/java/org/apache/lucene/analysis/WordlistLoader.java (revision 386892)
+++ src/java/org/apache/lucene/analysis/WordlistLoader.java (working copy)
@@ -86,37 +86,8 @@
return result;
}
- /**
- * @param path Path to the wordlist
- * @param wordfile Name of the wordlist
- *
- * @deprecated Use {@link #getWordSet(File)} instead
- */
- public static Hashtable getWordtable(String path, String wordfile) throws IOException {
- return getWordtable(new File(path, wordfile));
- }
/**
- * @param wordfile Complete path to the wordlist
- *
- * @deprecated Use {@link #getWordSet(File)} instead
- */
- public static Hashtable getWordtable(String wordfile) throws IOException {
- return getWordtable(new File(wordfile));
- }
-
- /**
- * @param wordfile File object that points to the wordlist
- *
- * @deprecated Use {@link #getWordSet(File)} instead
- */
- public static Hashtable getWordtable(File wordfile) throws IOException {
- HashSet wordSet = (HashSet)getWordSet(wordfile);
- Hashtable result = makeWordTable(wordSet);
- return result;
- }
-
- /**
* Builds a wordlist table, using words as both keys and values
* for backward compatibility.
*
Index: src/java/org/apache/lucene/search/RemoteSearchable.java
===================================================================
--- src/java/org/apache/lucene/search/RemoteSearchable.java (revision 386892)
+++ src/java/org/apache/lucene/search/RemoteSearchable.java (working copy)
@@ -16,16 +16,15 @@
* limitations under the License.
*/
-import java.io.IOException;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.Term;
+import java.io.IOException;
import java.rmi.Naming;
-import java.rmi.RemoteException;
import java.rmi.RMISecurityManager;
+import java.rmi.RemoteException;
import java.rmi.server.UnicastRemoteObject;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.index.Term;
-
/**
* A remote searchable implementation.
*
@@ -42,14 +41,8 @@
super();
this.local = local;
}
-
- // this implementation should be removed when the deprecated
- // Searchable#search(Query,Filter,HitCollector) is removed
- public void search(Query query, Filter filter, HitCollector results)
- throws IOException {
- local.search(query, filter, results);
- }
+
public void search(Weight weight, Filter filter, HitCollector results)
throws IOException {
local.search(weight, filter, results);
@@ -72,22 +65,10 @@
return local.maxDoc();
}
- // this implementation should be removed when the deprecated
- // Searchable#search(Query,Filter,int) is removed
- public TopDocs search(Query query, Filter filter, int n) throws IOException {
- return local.search(query, filter, n);
- }
-
public TopDocs search(Weight weight, Filter filter, int n) throws IOException {
return local.search(weight, filter, n);
}
- // this implementation should be removed when the deprecated
- // Searchable#search(Query,Filter,int,Sort) is removed
- public TopFieldDocs search (Query query, Filter filter, int n, Sort sort)
- throws IOException {
- return local.search (query, filter, n, sort);
- }
public TopFieldDocs search (Weight weight, Filter filter, int n, Sort sort)
throws IOException {
@@ -102,12 +83,6 @@
return local.rewrite(original);
}
- // this implementation should be removed when the deprecated
- // Searchable#explain(Query,int) is removed
- public Explanation explain(Query query, int doc) throws IOException {
- return local.explain(query, doc);
- }
-
public Explanation explain(Weight weight, int doc) throws IOException {
return local.explain(weight, doc);
}
Index: src/java/org/apache/lucene/search/DateFilter.java
===================================================================
--- src/java/org/apache/lucene/search/DateFilter.java (revision 386892)
+++ src/java/org/apache/lucene/search/DateFilter.java (working copy)
@@ -1,148 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Copyright 2004 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.util.BitSet;
-import java.util.Date;
-import java.io.IOException;
-
-import org.apache.lucene.document.DateField;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.TermDocs;
-import org.apache.lucene.index.TermEnum;
-import org.apache.lucene.index.IndexReader;
-
-/**
- * A Filter that restricts search results to a range of time.
- *
- * For this to work, documents must have been indexed with a
- * {@link DateField}.
- *
- * @deprecated Instead, use {@link RangeFilter} combined with
- * {@link org.apache.lucene.document.DateTools}.
- */
-public class DateFilter extends Filter {
- String field;
-
- String start = DateField.MIN_DATE_STRING();
- String end = DateField.MAX_DATE_STRING();
-
- private DateFilter(String f) {
- field = f;
- }
-
- /**
- * Constructs a filter for field f matching dates
- * between from and to inclusively.
- */
- public DateFilter(String f, Date from, Date to) {
- field = f;
- start = DateField.dateToString(from);
- end = DateField.dateToString(to);
- }
-
- /**
- * Constructs a filter for field f matching times
- * between from and to inclusively.
- */
- public DateFilter(String f, long from, long to) {
- field = f;
- start = DateField.timeToString(from);
- end = DateField.timeToString(to);
- }
-
- /**
- * Constructs a filter for field f matching
- * dates on or before before date.
- */
- public static DateFilter Before(String field, Date date) {
- DateFilter result = new DateFilter(field);
- result.end = DateField.dateToString(date);
- return result;
- }
-
- /**
- * Constructs a filter for field f matching times
- * on or before time.
- */
- public static DateFilter Before(String field, long time) {
- DateFilter result = new DateFilter(field);
- result.end = DateField.timeToString(time);
- return result;
- }
-
- /**
- * Constructs a filter for field f matching
- * dates on or after date.
- */
- public static DateFilter After(String field, Date date) {
- DateFilter result = new DateFilter(field);
- result.start = DateField.dateToString(date);
- return result;
- }
-
- /**
- * Constructs a filter for field f matching
- * times on or after time.
- */
- public static DateFilter After(String field, long time) {
- DateFilter result = new DateFilter(field);
- result.start = DateField.timeToString(time);
- return result;
- }
-
- /**
- * Returns a BitSet with true for documents which should be
- * permitted in search results, and false for those that should
- * not.
- */
- public BitSet bits(IndexReader reader) throws IOException {
- BitSet bits = new BitSet(reader.maxDoc());
- TermEnum enumerator = reader.terms(new Term(field, start));
- TermDocs termDocs = reader.termDocs();
- if (enumerator.term() == null) {
- return bits;
- }
-
- try {
- Term stop = new Term(field, end);
- while (enumerator.term().compareTo(stop) <= 0) {
- termDocs.seek(enumerator.term());
- while (termDocs.next()) {
- bits.set(termDocs.doc());
- }
- if (!enumerator.next()) {
- break;
- }
- }
- } finally {
- enumerator.close();
- termDocs.close();
- }
- return bits;
- }
-
- public String toString() {
- StringBuffer buffer = new StringBuffer();
- buffer.append(field);
- buffer.append(":");
- buffer.append(DateField.stringToDate(start).toString());
- buffer.append("-");
- buffer.append(DateField.stringToDate(end).toString());
- return buffer.toString();
- }
-}
Index: src/java/org/apache/lucene/search/RangeFilter.java
===================================================================
--- src/java/org/apache/lucene/search/RangeFilter.java (revision 386892)
+++ src/java/org/apache/lucene/search/RangeFilter.java (working copy)
@@ -16,22 +16,21 @@
* limitations under the License.
*/
-import java.util.BitSet;
-import java.io.IOException;
-
-import org.apache.lucene.search.Filter;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.index.TermEnum;
-import org.apache.lucene.index.IndexReader;
+import java.io.IOException;
+import java.util.BitSet;
+
/**
* A Filter that restricts search results to a range of values in a given
* field.
*
*
* This code borrows heavily from {@link RangeQuery}, but is implemented as a Filter
- * (much like {@link DateFilter}).
+ *
*
*/
public class RangeFilter extends Filter {
Index: src/java/org/apache/lucene/search/Searchable.java
===================================================================
--- src/java/org/apache/lucene/search/Searchable.java (revision 386892)
+++ src/java/org/apache/lucene/search/Searchable.java (working copy)
@@ -16,12 +16,12 @@
* limitations under the License.
*/
-import java.io.IOException;
-
import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
-import org.apache.lucene.index.IndexReader; // for javadoc
+import java.io.IOException; // for javadoc
+
/** The interface for search implementations.
*
* Searchable is the abstract network protocol for searching.
@@ -52,11 +52,6 @@
void search(Weight weight, Filter filter, HitCollector results)
throws IOException;
- /** Expert: Low-level search implementation.
- * @deprecated use {@link Searcher#search(Weight, Filter, HitCollector)} instead.
- */
- void search(Query query, Filter filter, HitCollector results)
- throws IOException;
/** Frees resources associated with this Searcher.
* Be careful not to call this method while you are still using objects
@@ -93,11 +88,6 @@
*/
TopDocs search(Weight weight, Filter filter, int n) throws IOException;
- /** Expert: Low-level search implementation.
- * @deprecated use {@link Searcher#search(Weight, Filter, int)} instead.
- */
- TopDocs search(Query query, Filter filter, int n) throws IOException;
-
/** Expert: Returns the stored fields of document i.
* Called by {@link HitCollector} implementations.
* @see IndexReader#document(int)
@@ -122,11 +112,6 @@
*/
Explanation explain(Weight weight, int doc) throws IOException;
- /**
- * @deprecated use {@link Searcher#explain(Weight, int)} instead.
- */
- Explanation explain(Query query, int doc) throws IOException;
-
/** Expert: Low-level search implementation with arbitrary sorting. Finds
* the top n hits for query, applying
* filter if non-null, and sorting the hits by the criteria in
@@ -139,9 +124,4 @@
TopFieldDocs search(Weight weight, Filter filter, int n, Sort sort)
throws IOException;
- /** Expert: Low-level search implementation.
- * @deprecated use {@link Searcher#search(Weight, Filter, int, Sort)} instead.
- */
- TopFieldDocs search(Query query, Filter filter, int n, Sort sort)
- throws IOException;
}
Index: src/java/org/apache/lucene/search/BooleanQuery.java
===================================================================
--- src/java/org/apache/lucene/search/BooleanQuery.java (revision 386892)
+++ src/java/org/apache/lucene/search/BooleanQuery.java (working copy)
@@ -16,14 +16,14 @@
* limitations under the License.
*/
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.util.ToStringUtils;
+
import java.io.IOException;
import java.util.Iterator;
import java.util.Set;
import java.util.Vector;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.util.ToStringUtils;
-
/** A Query that matches documents matching boolean combinations of other
* queries, e.g. {@link TermQuery}s, {@link PhraseQuery}s or other
* BooleanQuerys.
@@ -31,9 +31,9 @@
public class BooleanQuery extends Query {
/**
- * @deprecated use {@link #setMaxClauseCount(int)} instead
+
*/
- public static int maxClauseCount = 1024;
+ private static int maxClauseCount = 1024;
/** Thrown when an attempt is made to add more than {@link
* #getMaxClauseCount()} clauses. This typically happens if
@@ -142,30 +142,6 @@
return minNrShouldMatch;
}
- /** Adds a clause to a boolean query. Clauses may be:
- *
required which means that documents which do not
- * match this sub-query will not match the boolean query;
- * prohibited which means that documents which do
- * match this sub-query will not match the boolean query; or
- * required and
- * prohibited.
- *
- * @deprecated use {@link #add(Query, BooleanClause.Occur)} instead:
- * n in the array
* used to construct this searcher. */
Index: src/java/org/apache/lucene/search/BooleanClause.java
===================================================================
--- src/java/org/apache/lucene/search/BooleanClause.java (revision 386892)
+++ src/java/org/apache/lucene/search/BooleanClause.java (working copy)
@@ -49,57 +49,18 @@
}
/** The query whose matching documents are combined by the boolean query.
- * @deprecated use {@link #setQuery(Query)} instead */
- public Query query; // TODO: decrease visibility for Lucene 2.0
+ */
+ private Query query; // TODO: decrease visibility for Lucene 2.0
- /** If true, documents documents which do not
- match this sub-query will not match the boolean query.
- @deprecated use {@link #setOccur(BooleanClause.Occur)} instead */
- public boolean required = false; // TODO: decrease visibility for Lucene 2.0
-
- /** If true, documents documents which do
- match this sub-query will not match the boolean query.
- @deprecated use {@link #setOccur(BooleanClause.Occur)} instead */
- public boolean prohibited = false; // TODO: decrease visibility for Lucene 2.0
-
private Occur occur = Occur.SHOULD;
- /** Constructs a BooleanClause with query q, required
- * r and prohibited p.
- * @deprecated use BooleanClause(Query, Occur) instead
- * o is equal to this. */
public boolean equals(Object o) {
if (!(o instanceof BooleanClause))
return false;
BooleanClause other = (BooleanClause)o;
return this.query.equals(other.query)
- && (this.required == other.required)
- && (this.prohibited == other.prohibited);
+ && this.occur.equals(other.occur);
}
/** Returns a hash code value for this object.*/
public int hashCode() {
- return query.hashCode() ^ (this.required?1:0) ^ (this.prohibited?2:0);
+ return query.hashCode() ^ (Occur.MUST.equals(occur)?1:0) ^ (Occur.MUST_NOT.equals(occur)?2:0);
}
Index: src/java/org/apache/lucene/search/PhrasePrefixQuery.java
===================================================================
--- src/java/org/apache/lucene/search/PhrasePrefixQuery.java (revision 386892)
+++ src/java/org/apache/lucene/search/PhrasePrefixQuery.java (working copy)
@@ -1,273 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Copyright 2004 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.Vector;
-
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.MultipleTermPositions;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.TermPositions;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.util.ToStringUtils;
-
-/**
- * PhrasePrefixQuery is a generalized version of PhraseQuery, with an added
- * method {@link #add(Term[])}.
- * To use this class, to search for the phrase "Microsoft app*" first use
- * add(Term) on the term "Microsoft", then find all terms that has "app" as
- * prefix using IndexReader.terms(Term), and use PhrasePrefixQuery.add(Term[]
- * terms) to add them to the query.
- *
- * @deprecated use {@link org.apache.lucene.search.MultiPhraseQuery} instead
- * @author Anders Nielsen
- * @version 1.0
- */
-public class PhrasePrefixQuery extends Query {
- private String field;
- private ArrayList termArrays = new ArrayList();
- private Vector positions = new Vector();
-
- private int slop = 0;
-
- /** Sets the phrase slop for this query.
- * @see PhraseQuery#setSlop(int)
- */
- public void setSlop(int s) { slop = s; }
-
- /** Sets the phrase slop for this query.
- * @see PhraseQuery#getSlop()
- */
- public int getSlop() { return slop; }
-
- /** Add a single term at the next position in the phrase.
- * @see PhraseQuery#add(Term)
- */
- public void add(Term term) { add(new Term[]{term}); }
-
- /** Add multiple terms at the next position in the phrase. Any of the terms
- * may match.
- *
- * @see PhraseQuery#add(Term)
- */
- public void add(Term[] terms) {
- int position = 0;
- if (positions.size() > 0)
- position = ((Integer) positions.lastElement()).intValue() + 1;
-
- add(terms, position);
- }
-
- /**
- * Allows to specify the relative position of terms within the phrase.
- *
- * @see PhraseQuery#add(Term, int)
- * @param terms
- * @param position
- */
- public void add(Term[] terms, int position) {
- if (termArrays.size() == 0)
- field = terms[0].field();
-
- for (int i = 0; i < terms.length; i++) {
- if (terms[i].field() != field) {
- throw new IllegalArgumentException(
- "All phrase terms must be in the same field (" + field + "): "
- + terms[i]);
- }
- }
-
- termArrays.add(terms);
- positions.addElement(new Integer(position));
- }
-
- /**
- * Returns the relative positions of terms in this phrase.
- */
- public int[] getPositions() {
- int[] result = new int[positions.size()];
- for (int i = 0; i < positions.size(); i++)
- result[i] = ((Integer) positions.elementAt(i)).intValue();
- return result;
- }
-
- private class PhrasePrefixWeight implements Weight {
- private Similarity similarity;
- private float value;
- private float idf;
- private float queryNorm;
- private float queryWeight;
-
- public PhrasePrefixWeight(Searcher searcher)
- throws IOException {
- this.similarity = getSimilarity(searcher);
-
- // compute idf
- Iterator i = termArrays.iterator();
- while (i.hasNext()) {
- Term[] terms = (Term[])i.next();
- for (int j=0; jdocNum. Once a document is
- * deleted it will not appear in TermDocs or TermPostitions enumerations.
- * Attempts to read its field with the {@link #document}
- * method will result in an error. The presence of this document may still be
- * reflected in the {@link #docFreq} statistic, though
- * this will be corrected eventually as the index is further modified.
- *
- * @deprecated Use {@link #deleteDocument(int docNum)} instead.
- */
- public final synchronized void delete(int docNum) throws IOException {
- deleteDocument(docNum);
- }
/** Deletes the document numbered docNum. Once a document is
* deleted it will not appear in TermDocs or TermPostitions enumerations.
@@ -536,20 +524,6 @@
*/
protected abstract void doDelete(int docNum) throws IOException;
- /** Deletes all documents containing term.
- * This is useful if one uses a document field to hold a unique ID string for
- * the document. Then to delete such a document, one merely constructs a
- * term with the appropriate field and the unique ID string as its text and
- * passes it to this method.
- * See {@link #delete(int)} for information about when this deletion will
- * become effective.
- * @return the number of documents deleted
- *
- * @deprecated Use {@link #deleteDocuments(Term term)} instead.
- */
- public final int delete(Term term) throws IOException {
- return deleteDocuments(term);
- }
/** Deletes all documents containing term.
* This is useful if one uses a document field to hold a unique ID string for
@@ -640,63 +614,9 @@
writeLock = null;
}
}
-
- /**
- * Returns a list of all unique field names that exist in the index pointed
- * to by this IndexReader.
- * @return Collection of Strings indicating the names of the fields
- * @throws IOException if there is a problem with accessing the index
- *
- * @deprecated Replaced by {@link #getFieldNames(IndexReader.FieldOption)}
- */
- public abstract Collection getFieldNames() throws IOException;
- /**
- * Returns a list of all unique field names that exist in the index pointed
- * to by this IndexReader. The boolean argument specifies whether the fields
- * returned are indexed or not.
- * @param indexed true if only indexed fields should be returned;
- * false if only unindexed fields should be returned.
- * @return Collection of Strings indicating the names of the fields
- * @throws IOException if there is a problem with accessing the index
- *
- * @deprecated Replaced by {@link #getFieldNames(IndexReader.FieldOption)}
- */
- public abstract Collection getFieldNames(boolean indexed) throws IOException;
-
- /**
- *
- * @param storedTermVector if true, returns only Indexed fields that have term vector info,
- * else only indexed fields without term vector info
- * @return Collection of Strings indicating the names of the fields
- *
- * @deprecated Replaced by {@link #getFieldNames(IndexReader.FieldOption)}
- */
- public Collection getIndexedFieldNames(boolean storedTermVector){
- if(storedTermVector){
- Set fieldSet = new HashSet();
- fieldSet.addAll(getIndexedFieldNames(Field.TermVector.YES));
- fieldSet.addAll(getIndexedFieldNames(Field.TermVector.WITH_POSITIONS));
- fieldSet.addAll(getIndexedFieldNames(Field.TermVector.WITH_OFFSETS));
- fieldSet.addAll(getIndexedFieldNames(Field.TermVector.WITH_POSITIONS_OFFSETS));
- return fieldSet;
- }
- else
- return getIndexedFieldNames(Field.TermVector.NO);
- }
/**
- * Get a list of unique field names that exist in this index, are indexed, and have
- * the specified term vector information.
- *
- * @param tvSpec specifies which term vector information should be available for the fields
- * @return Collection of Strings indicating the names of the fields
- *
- * @deprecated Replaced by {@link #getFieldNames(IndexReader.FieldOption)}
- */
- public abstract Collection getIndexedFieldNames(Field.TermVector tvSpec);
-
- /**
* Get a list of unique field names that exist in this index and have the specified
* field option information.
* @param fldOption specifies which field option should be available for the returned fields
Index: src/java/org/apache/lucene/index/FilterIndexReader.java
===================================================================
--- src/java/org/apache/lucene/index/FilterIndexReader.java (revision 386892)
+++ src/java/org/apache/lucene/index/FilterIndexReader.java (working copy)
@@ -17,7 +17,6 @@
*/
import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
import java.io.IOException;
import java.util.Collection;
@@ -130,21 +129,10 @@
return in.termPositions();
}
- protected void doDelete(int n) throws IOException { in.delete(n); }
+ protected void doDelete(int n) throws IOException { in.deleteDocument(n); }
protected void doCommit() throws IOException { in.commit(); }
protected void doClose() throws IOException { in.close(); }
- public Collection getFieldNames() throws IOException {
- return in.getFieldNames();
- }
-
- public Collection getFieldNames(boolean indexed) throws IOException {
- return in.getFieldNames(indexed);
- }
-
- public Collection getIndexedFieldNames (Field.TermVector tvSpec){
- return in.getIndexedFieldNames(tvSpec);
- }
public Collection getFieldNames(IndexReader.FieldOption fieldNames) {
return in.getFieldNames(fieldNames);
Index: src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexWriter.java (revision 386892)
+++ src/java/org/apache/lucene/index/IndexWriter.java (working copy)
@@ -16,22 +16,22 @@
* limitations under the License.
*/
-import java.io.IOException;
-import java.io.File;
-import java.io.PrintStream;
-import java.util.Vector;
-
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.search.Similarity;
import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.FSDirectory;
-import org.apache.lucene.store.Lock;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.search.Similarity;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.store.Lock;
+import org.apache.lucene.store.RAMDirectory;
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.Vector;
+
/**
An IndexWriter creates and maintains an index.
@@ -86,11 +86,6 @@
public final static int DEFAULT_MAX_BUFFERED_DOCS = 10;
/**
- * @deprecated use {@link #DEFAULT_MAX_BUFFERED_DOCS} instead
- */
- public final static int DEFAULT_MIN_MERGE_DOCS = DEFAULT_MAX_BUFFERED_DOCS;
-
- /**
* Default value is {@link Integer#MAX_VALUE}. Change using {@link #setMaxMergeDocs(int)}.
*/
public final static int DEFAULT_MAX_MERGE_DOCS = Integer.MAX_VALUE;
@@ -454,10 +449,9 @@
* the expected size. If you set it to Integer.MAX_VALUE, then the only limit
* is your memory, but you should anticipate an OutOfMemoryError.
* By default, no more than 10,000 terms will be indexed for a field.
- *
- * @deprecated use {@link #setMaxFieldLength} instead
+ *
*/
- public int maxFieldLength = DEFAULT_MAX_FIELD_LENGTH;
+ private int maxFieldLength = DEFAULT_MAX_FIELD_LENGTH;
/**
* Adds a document to this index. If the document contains more than
@@ -502,10 +496,10 @@
* for batch index creation, and smaller values (< 10) for indices that are
* interactively maintained.
*
- * This must never be less than 2. The default value is 10. - * @deprecated use {@link #setMergeFactor} instead + *
+   * <p>This must never be less than 2.  The default value is {@link DEFAULT_MERGE_FACTOR}.
+
    */
-  public int mergeFactor = DEFAULT_MERGE_FACTOR;
+  private int mergeFactor = DEFAULT_MERGE_FACTOR;

   /** Determines the minimal number of documents required before the buffered
    * in-memory documents are merging and a new Segment is created.
@@ -513,10 +507,10 @@
    * large value gives faster indexing.  At the same time, mergeFactor limits
    * the number of files open in a FSDirectory.
    *
-   * <p>The default value is 10.
-   * @deprecated use {@link #setMaxBufferedDocs} instead
+   * <p>The default value is {@link DEFAULT_MAX_BUFFERED_DOCS}.
+
    */
-  public int minMergeDocs = DEFAULT_MIN_MERGE_DOCS;
+  private int minMergeDocs = DEFAULT_MAX_BUFFERED_DOCS;

   /** Determines the largest number of documents ever merged by addDocument().
@@ -524,15 +518,15 @@
    * as this limits the length of pauses while indexing to a few seconds.
    * Larger values are best for batched indexing and speedier searches.
    *
-   * <p>The default value is {@link Integer#MAX_VALUE}.
-   * @deprecated use {@link #setMaxMergeDocs} instead
+   * <p>The default value is {@link DEFAULT_MAX_MERGE_DOCS}.
+
*/
- public int maxMergeDocs = DEFAULT_MAX_MERGE_DOCS;
+ private int maxMergeDocs = DEFAULT_MAX_MERGE_DOCS;
/** If non-null, information about merges will be printed to this.
- * @deprecated use {@link #setInfoStream} instead
+
*/
- public PrintStream infoStream = null;
+ private PrintStream infoStream = null;
/** Merges all segments together into a single segment, optimizing an index
for search. */
Index: src/java/org/apache/lucene/index/ParallelReader.java
===================================================================
--- src/java/org/apache/lucene/index/ParallelReader.java (revision 386892)
+++ src/java/org/apache/lucene/index/ParallelReader.java (working copy)
@@ -221,30 +221,7 @@
((IndexReader)readers.get(i)).close();
}
- public Collection getFieldNames() throws IOException {
- return fieldToReader.keySet();
- }
- public Collection getFieldNames(boolean indexed) throws IOException {
- Set fieldSet = new HashSet();
- for (int i = 0; i < readers.size(); i++) {
- IndexReader reader = ((IndexReader)readers.get(i));
- Collection names = reader.getFieldNames(indexed);
- fieldSet.addAll(names);
- }
- return fieldSet;
- }
-
- public Collection getIndexedFieldNames (Field.TermVector tvSpec){
- Set fieldSet = new HashSet();
- for (int i = 0; i < readers.size(); i++) {
- IndexReader reader = ((IndexReader)readers.get(i));
- Collection names = reader.getIndexedFieldNames(tvSpec);
- fieldSet.addAll(names);
- }
- return fieldSet;
- }
-
public Collection getFieldNames (IndexReader.FieldOption fieldNames) {
Set fieldSet = new HashSet();
for (int i = 0; i < readers.size(); i++) {
Index: src/java/org/apache/lucene/index/SegmentReader.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentReader.java (revision 386892)
+++ src/java/org/apache/lucene/index/SegmentReader.java (working copy)
@@ -316,86 +316,6 @@
}
/**
- * @see IndexReader#getFieldNames()
- * @deprecated Replaced by {@link #getFieldNames (IndexReader.FieldOption fldOption)}
- */
- public Collection getFieldNames() {
- // maintain a unique set of field names
- Set fieldSet = new HashSet();
- for (int i = 0; i < fieldInfos.size(); i++) {
- FieldInfo fi = fieldInfos.fieldInfo(i);
- fieldSet.add(fi.name);
- }
- return fieldSet;
- }
-
- /**
- * @see IndexReader#getFieldNames(boolean)
- * @deprecated Replaced by {@link #getFieldNames (IndexReader.FieldOption fldOption)}
- */
- public Collection getFieldNames(boolean indexed) {
- // maintain a unique set of field names
- Set fieldSet = new HashSet();
- for (int i = 0; i < fieldInfos.size(); i++) {
- FieldInfo fi = fieldInfos.fieldInfo(i);
- if (fi.isIndexed == indexed)
- fieldSet.add(fi.name);
- }
- return fieldSet;
- }
-
- /**
- * @see IndexReader#getIndexedFieldNames(Field.TermVector tvSpec)
- * @deprecated Replaced by {@link #getFieldNames (IndexReader.FieldOption fldOption)}
- */
- public Collection getIndexedFieldNames (Field.TermVector tvSpec){
- boolean storedTermVector;
- boolean storePositionWithTermVector;
- boolean storeOffsetWithTermVector;
-
- if(tvSpec == Field.TermVector.NO){
- storedTermVector = false;
- storePositionWithTermVector = false;
- storeOffsetWithTermVector = false;
- }
- else if(tvSpec == Field.TermVector.YES){
- storedTermVector = true;
- storePositionWithTermVector = false;
- storeOffsetWithTermVector = false;
- }
- else if(tvSpec == Field.TermVector.WITH_POSITIONS){
- storedTermVector = true;
- storePositionWithTermVector = true;
- storeOffsetWithTermVector = false;
- }
- else if(tvSpec == Field.TermVector.WITH_OFFSETS){
- storedTermVector = true;
- storePositionWithTermVector = false;
- storeOffsetWithTermVector = true;
- }
- else if(tvSpec == Field.TermVector.WITH_POSITIONS_OFFSETS){
- storedTermVector = true;
- storePositionWithTermVector = true;
- storeOffsetWithTermVector = true;
- }
- else{
- throw new IllegalArgumentException("unknown termVector parameter " + tvSpec);
- }
-
- // maintain a unique set of field names
- Set fieldSet = new HashSet();
- for (int i = 0; i < fieldInfos.size(); i++) {
- FieldInfo fi = fieldInfos.fieldInfo(i);
- if (fi.isIndexed && fi.storeTermVector == storedTermVector &&
- fi.storePositionWithTermVector == storePositionWithTermVector &&
- fi.storeOffsetWithTermVector == storeOffsetWithTermVector){
- fieldSet.add(fi.name);
- }
- }
- return fieldSet;
- }
-
- /**
* @see IndexReader#getFieldNames(IndexReader.FieldOption fldOption)
*/
public Collection getFieldNames(IndexReader.FieldOption fieldOption) {
Index: src/java/org/apache/lucene/index/IndexModifier.java
===================================================================
--- src/java/org/apache/lucene/index/IndexModifier.java (revision 386892)
+++ src/java/org/apache/lucene/index/IndexModifier.java (working copy)
@@ -16,15 +16,15 @@
* limitations under the License.
*/
-import java.io.File;
-import java.io.IOException;
-import java.io.PrintStream;
-
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintStream;
+
/**
* A class to modify an index, i.e. to delete and add documents. This
* class hides {@link IndexReader} and {@link IndexWriter} so that you
@@ -261,21 +261,6 @@
}
/**
- * Deletes all documents containing term.
- * This is useful if one uses a document field to hold a unique ID string for
- * the document. Then to delete such a document, one merely constructs a
- * term with the appropriate field and the unique ID string as its text and
- * passes it to this method. Returns the number of documents deleted.
- * @return the number of documents deleted
- * @see IndexReader#deleteDocuments(Term)
- * @throws IllegalStateException if the index is closed
- * @deprecated Use {@link #deleteDocuments(Term)} instead.
- */
- public int delete(Term term) throws IOException {
- return deleteDocuments(term);
- }
-
- /**
* Deletes the document numbered docNum.
* @see IndexReader#deleteDocument(int)
* @throws IllegalStateException if the index is closed
@@ -288,15 +273,6 @@
}
}
- /**
- * Deletes the document numbered docNum.
- * @see IndexReader#deleteDocument(int)
- * @throws IllegalStateException if the index is closed
- * @deprecated Use {@link #deleteDocument(int)} instead.
- */
- public void delete(int docNum) throws IOException {
- deleteDocument(docNum);
- }
/**
* Returns the number of documents currently in this index.
Index: src/java/org/apache/lucene/store/Directory.java
===================================================================
--- src/java/org/apache/lucene/store/Directory.java (revision 386892)
+++ src/java/org/apache/lucene/store/Directory.java (working copy)
@@ -62,31 +62,15 @@
public abstract long fileLength(String name)
throws IOException;
- /** @deprecated use {@link #createOutput(String)} */
- public OutputStream createFile(String name) throws IOException {
- return (OutputStream)createOutput(name);
- }
/** Creates a new, empty file in the directory with the given name.
Returns a stream writing this file. */
- public IndexOutput createOutput(String name) throws IOException {
- // default implementation for back compatibility
- // this method should be abstract
- return (IndexOutput)createFile(name);
- }
+ public abstract IndexOutput createOutput(String name) throws IOException;
- /** @deprecated use {@link #openInput(String)} */
- public InputStream openFile(String name) throws IOException {
- return (InputStream)openInput(name);
- }
/** Returns a stream reading an existing file. */
- public IndexInput openInput(String name)
- throws IOException {
- // default implementation for back compatibility
- // this method should be abstract
- return (IndexInput)openFile(name);
- }
+ public abstract IndexInput openInput(String name)
+ throws IOException;
/** Construct a {@link Lock}.
* @param name the name of the lock file
Index: src/java/org/apache/lucene/store/InputStream.java
===================================================================
--- src/java/org/apache/lucene/store/InputStream.java (revision 386892)
+++ src/java/org/apache/lucene/store/InputStream.java (working copy)
@@ -1,28 +0,0 @@
-package org.apache.lucene.store;
-
-/**
- * Copyright 2004 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** @deprecated Use {@link IndexInput} or {@link BufferedIndexInput} instead.*/
-public abstract class InputStream extends BufferedIndexInput {
-
- protected long length; // set by subclasses
-
- public long length() {
- return length;
- }
-
-}
Index: src/java/org/apache/lucene/store/OutputStream.java
===================================================================
--- src/java/org/apache/lucene/store/OutputStream.java (revision 386892)
+++ src/java/org/apache/lucene/store/OutputStream.java (working copy)
@@ -1,22 +0,0 @@
-package org.apache.lucene.store;
-
-/**
- * Copyright 2004 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/** @deprecated Use {@link IndexOutput} or {@link BufferedIndexOutput}
- * instead.*/
-public abstract class OutputStream extends BufferedIndexOutput {
-}
Index: src/java/org/apache/lucene/store/Lock.java
===================================================================
--- src/java/org/apache/lucene/store/Lock.java (revision 386892)
+++ src/java/org/apache/lucene/store/Lock.java (working copy)
@@ -16,8 +16,6 @@
* limitations under the License.
*/
-import org.apache.lucene.index.IndexWriter;
-
import java.io.IOException;
/** An interprocess mutex lock.
@@ -80,14 +78,6 @@
private Lock lock;
private long lockWaitTimeout;
- /** Constructs an executor that will grab the named lock.
- * Defaults lockWaitTimeout to Lock.COMMIT_LOCK_TIMEOUT.
- * @deprecated Kept only to avoid breaking existing code.
- */
- public With(Lock lock)
- {
- this(lock, IndexWriter.COMMIT_LOCK_TIMEOUT);
- }
/** Constructs an executor that will grab the named lock. */
public With(Lock lock, long lockWaitTimeout) {
Index: src/java/org/apache/lucene/document/DateField.java
===================================================================
--- src/java/org/apache/lucene/document/DateField.java (revision 386892)
+++ src/java/org/apache/lucene/document/DateField.java (working copy)
@@ -16,30 +16,29 @@
* limitations under the License.
*/
-import java.util.Date;
+import org.apache.lucene.search.PrefixQuery;
+import org.apache.lucene.search.RangeQuery;
-import org.apache.lucene.search.PrefixQuery; // for javadoc
-import org.apache.lucene.search.RangeQuery; // for javadoc
+import java.util.Date; // for javadoc
/**
* Provides support for converting dates to strings and vice-versa.
* The strings are structured so that lexicographic sorting orders by date,
* which makes them suitable for use as field values and search terms.
- *
+ *
  * <p>Note that this class saves dates with millisecond granularity,
  * which is bad for {@link RangeQuery} and {@link PrefixQuery}, as those
- * queries are expanded to a BooleanQuery with a  potentially large number
+ * queries are expanded to a BooleanQuery with a potentially large number
  * of terms when searching. Thus you might want to use
  * {@link DateTools} instead.
- *
+ *
* Note: dates before 1970 cannot be used, and therefore cannot be
* indexed when using this class. See {@link DateTools} for an
* alternative without such a limitation.
- *
- * @deprecated If you build a new index, use {@link DateTools} instead. For
- * existing indices you can continue using this class, as it will not be
- * removed in the near future despite being deprecated.
+ *
+ * @deprecated If you build a new index, use {@link DateTools} instead. This class is included for use with existing
+ * indices and will be removed in a future release.
*/
public class DateField {
Index: src/java/org/apache/lucene/document/Field.java
===================================================================
--- src/java/org/apache/lucene/document/Field.java (revision 386892)
+++ src/java/org/apache/lucene/document/Field.java (working copy)
@@ -16,15 +16,14 @@
* limitations under the License.
*/
-import java.io.Reader;
-import java.io.Serializable;
-import java.util.Date;
-
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.util.Parameter;
+import java.io.Reader;
+import java.io.Serializable;
+
/**
A field is a section of a Document. Each field has two parts, a name and a
value. Values may be free text, provided as a String or as a Reader, or they
@@ -53,45 +52,45 @@
/** Specifies whether and how a field should be stored. */
public static final class Store extends Parameter implements Serializable {
-
+
private Store(String name) {
super(name);
}
-
+
/** Store the original field value in the index in a compressed form. This is
* useful for long documents and for binary valued fields.
*/
public static final Store COMPRESS = new Store("COMPRESS");
-
+
/** Store the original field value in the index. This is useful for short texts
* like a document's title which should be displayed with the results. The
* value is stored in its original form, i.e. no analyzer is used before it is
- * stored.
+ * stored.
*/
public static final Store YES = new Store("YES");
-
+
/** Do not store the field value in the index. */
public static final Store NO = new Store("NO");
}
-
+
/** Specifies whether and how a field should be indexed. */
public static final class Index extends Parameter implements Serializable {
-
+
private Index(String name) {
super(name);
}
-
+
/** Do not index the field value. This field can thus not be searched,
- * but one can still access its contents provided it is
+ * but one can still access its contents provided it is
* {@link Field.Store stored}. */
public static final Index NO = new Index("NO");
-
+
/** Index the field's value so it can be searched. An Analyzer will be used
* to tokenize and possibly further normalize the text before its
* terms will be stored in the index. This is useful for common text.
*/
public static final Index TOKENIZED = new Index("TOKENIZED");
-
+
/** Index the field's value without using an Analyzer, so it can be searched.
* As no analyzer is used the value will be stored as a single term. This is
* useful for unique Ids like product numbers.
@@ -181,84 +180,6 @@
public float getBoost() {
return boost;
}
-
- /** Constructs a String-valued Field that is not tokenized, but is indexed
- and stored. Useful for non-text fields, e.g. date or url.
- @deprecated use {@link #Field(String, String, Field.Store, Field.Index)
- Field(name, value, Field.Store.YES, Field.Index.UN_TOKENIZED)} instead */
- public static final Field Keyword(String name, String value) {
- return new Field(name, value, true, true, false);
- }
-
- /** Constructs a String-valued Field that is not tokenized nor indexed,
- but is stored in the index, for return with hits.
- @deprecated use {@link #Field(String, String, Field.Store, Field.Index)
- Field(name, value, Field.Store.YES, Field.Index.NO)} instead */
- public static final Field UnIndexed(String name, String value) {
- return new Field(name, value, true, false, false);
- }
-
- /** Constructs a String-valued Field that is tokenized and indexed,
- and is stored in the index, for return with hits. Useful for short text
- fields, like "title" or "subject". Term vector will not be stored for this field.
- @deprecated use {@link #Field(String, String, Field.Store, Field.Index)
- Field(name, value, Field.Store.YES, Field.Index.TOKENIZED)} instead */
- public static final Field Text(String name, String value) {
- return Text(name, value, false);
- }
-
- /** Constructs a Date-valued Field that is not tokenized and is indexed,
- and stored in the index, for return with hits.
- @deprecated use {@link #Field(String, String, Field.Store, Field.Index)
- Field(name, value, Field.Store.YES, Field.Index.UN_TOKENIZED)} instead */
- public static final Field Keyword(String name, Date value) {
- return new Field(name, DateField.dateToString(value), true, true, false);
- }
-
- /** Constructs a String-valued Field that is tokenized and indexed,
- and is stored in the index, for return with hits. Useful for short text
- fields, like "title" or "subject".
- @deprecated use {@link #Field(String, String, Field.Store, Field.Index, Field.TermVector)
- Field(name, value, Field.Store.YES, Field.Index.TOKENIZED, storeTermVector)} instead */
- public static final Field Text(String name, String value, boolean storeTermVector) {
- return new Field(name, value, true, true, true, storeTermVector);
- }
-
- /** Constructs a String-valued Field that is tokenized and indexed,
- but that is not stored in the index. Term vector will not be stored for this field.
- @deprecated use {@link #Field(String, String, Field.Store, Field.Index)
- Field(name, value, Field.Store.NO, Field.Index.TOKENIZED)} instead */
- public static final Field UnStored(String name, String value) {
- return UnStored(name, value, false);
- }
-
- /** Constructs a String-valued Field that is tokenized and indexed,
- but that is not stored in the index.
- @deprecated use {@link #Field(String, String, Field.Store, Field.Index, Field.TermVector)
- Field(name, value, Field.Store.NO, Field.Index.TOKENIZED, storeTermVector)} instead */
- public static final Field UnStored(String name, String value, boolean storeTermVector) {
- return new Field(name, value, false, true, true, storeTermVector);
- }
-
- /** Constructs a Reader-valued Field that is tokenized and indexed, but is
- not stored in the index verbatim. Useful for longer text fields, like
- "body". Term vector will not be stored for this field.
- @deprecated use {@link #Field(String, Reader) Field(name, value)} instead */
- public static final Field Text(String name, Reader value) {
- return Text(name, value, false);
- }
-
- /** Constructs a Reader-valued Field that is tokenized and indexed, but is
- not stored in the index verbatim. Useful for longer text fields, like
- "body".
- @deprecated use {@link #Field(String, Reader, Field.TermVector)
- Field(name, value, storeTermVector)} instead */
- public static final Field Text(String name, Reader value, boolean storeTermVector) {
- Field f = new Field(name, value);
- f.storeTermVector = storeTermVector;
- return f;
- }
-
/** Returns the name of the field as an interned string.
* For example "date", "title", "body", ...
*/
@@ -405,15 +326,6 @@
setStoreTermVector(termVector);
}
- /** Create a field by specifying all parameters except for Uses a collection of 1000 documents, each the english rendition of their
- * document number. For example, the document numbered 333 has text "three
- * hundred thirty three".
- *
- * Tests are each a single query, and its hits are checked to ensure that
- * all and only the correct documents are returned, thus providing end-to-end
- * testing of the indexing and search code.
- *
- * @author Doug Cutting
- */
-public class TestBasics extends TestCase {
- private IndexSearcher searcher;
-
- public void setUp() throws Exception {
- RAMDirectory directory = new RAMDirectory();
- IndexWriter writer
- = new IndexWriter(directory, new SimpleAnalyzer(), true);
- //writer.infoStream = System.out;
- for (int i = 0; i < 1000; i++) {
- Document doc = new Document();
- doc.add(Field.Text("field", English.intToEnglish(i)));
- writer.addDocument(doc);
- }
-
- writer.close();
-
- searcher = new IndexSearcher(directory);
- }
-
- public void testTerm() throws Exception {
- Query query = new TermQuery(new Term("field", "seventy"));
- checkHits(query, new int[]
- {70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 170, 171, 172, 173, 174, 175,
- 176, 177, 178, 179, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279,
- 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 470, 471, 472, 473,
- 474, 475, 476, 477, 478, 479, 570, 571, 572, 573, 574, 575, 576, 577,
- 578, 579, 670, 671, 672, 673, 674, 675, 676, 677, 678, 679, 770, 771,
- 772, 773, 774, 775, 776, 777, 778, 779, 870, 871, 872, 873, 874, 875,
- 876, 877, 878, 879, 970, 971, 972, 973, 974, 975, 976, 977, 978, 979});
- }
-
- public void testTerm2() throws Exception {
- Query query = new TermQuery(new Term("field", "seventish"));
- checkHits(query, new int[] {});
- }
-
- public void testPhrase() throws Exception {
- PhraseQuery query = new PhraseQuery();
- query.add(new Term("field", "seventy"));
- query.add(new Term("field", "seven"));
- checkHits(query, new int[]
- {77, 177, 277, 377, 477, 577, 677, 777, 877, 977});
- }
-
- public void testPhrase2() throws Exception {
- PhraseQuery query = new PhraseQuery();
- query.add(new Term("field", "seventish"));
- query.add(new Term("field", "sevenon"));
- checkHits(query, new int[] {});
- }
-
- public void testBoolean() throws Exception {
- BooleanQuery query = new BooleanQuery();
- query.add(new TermQuery(new Term("field", "seventy")), true, false);
- query.add(new TermQuery(new Term("field", "seven")), true, false);
- checkHits(query, new int[]
- {77, 777, 177, 277, 377, 477, 577, 677, 770, 771, 772, 773, 774, 775,
- 776, 778, 779, 877, 977});
- }
-
- public void testBoolean2() throws Exception {
- BooleanQuery query = new BooleanQuery();
- query.add(new TermQuery(new Term("field", "sevento")), true, false);
- query.add(new TermQuery(new Term("field", "sevenly")), true, false);
- checkHits(query, new int[] {});
- }
-
- public void testSpanNearExact() throws Exception {
- SpanTermQuery term1 = new SpanTermQuery(new Term("field", "seventy"));
- SpanTermQuery term2 = new SpanTermQuery(new Term("field", "seven"));
- SpanNearQuery query = new SpanNearQuery(new SpanQuery[] {term1, term2},
- 0, true);
- checkHits(query, new int[]
- {77, 177, 277, 377, 477, 577, 677, 777, 877, 977});
-
- assertTrue(searcher.explain(query, 77).getValue() > 0.0f);
- assertTrue(searcher.explain(query, 977).getValue() > 0.0f);
- }
-
- public void testSpanNearUnordered() throws Exception {
- SpanTermQuery term1 = new SpanTermQuery(new Term("field", "nine"));
- SpanTermQuery term2 = new SpanTermQuery(new Term("field", "six"));
- SpanNearQuery query = new SpanNearQuery(new SpanQuery[] {term1, term2},
- 4, false);
-
- checkHits(query, new int[]
- {609, 629, 639, 649, 659, 669, 679, 689, 699,
- 906, 926, 936, 946, 956, 966, 976, 986, 996});
- }
-
- public void testSpanNearOrdered() throws Exception {
- SpanTermQuery term1 = new SpanTermQuery(new Term("field", "nine"));
- SpanTermQuery term2 = new SpanTermQuery(new Term("field", "six"));
- SpanNearQuery query = new SpanNearQuery(new SpanQuery[] {term1, term2},
- 4, true);
- checkHits(query, new int[]
- {906, 926, 936, 946, 956, 966, 976, 986, 996});
- }
-
- public void testSpanNot() throws Exception {
- SpanTermQuery term1 = new SpanTermQuery(new Term("field", "eight"));
- SpanTermQuery term2 = new SpanTermQuery(new Term("field", "one"));
- SpanNearQuery near = new SpanNearQuery(new SpanQuery[] {term1, term2},
- 4, true);
- SpanTermQuery term3 = new SpanTermQuery(new Term("field", "forty"));
- SpanNotQuery query = new SpanNotQuery(near, term3);
-
- checkHits(query, new int[]
- {801, 821, 831, 851, 861, 871, 881, 891});
-
- assertTrue(searcher.explain(query, 801).getValue() > 0.0f);
- assertTrue(searcher.explain(query, 891).getValue() > 0.0f);
- }
-
- public void testSpanFirst() throws Exception {
- SpanTermQuery term1 = new SpanTermQuery(new Term("field", "five"));
- SpanFirstQuery query = new SpanFirstQuery(term1, 1);
-
- checkHits(query, new int[]
- {5, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513,
- 514, 515, 516, 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527,
- 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 538, 539, 540, 541,
- 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555,
- 556, 557, 558, 559, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569,
- 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, 583,
- 584, 585, 586, 587, 588, 589, 590, 591, 592, 593, 594, 595, 596, 597,
- 598, 599});
-
- assertTrue(searcher.explain(query, 5).getValue() > 0.0f);
- assertTrue(searcher.explain(query, 599).getValue() > 0.0f);
-
- }
-
- public void testSpanOr() throws Exception {
- SpanTermQuery term1 = new SpanTermQuery(new Term("field", "thirty"));
- SpanTermQuery term2 = new SpanTermQuery(new Term("field", "three"));
- SpanNearQuery near1 = new SpanNearQuery(new SpanQuery[] {term1, term2},
- 0, true);
- SpanTermQuery term3 = new SpanTermQuery(new Term("field", "forty"));
- SpanTermQuery term4 = new SpanTermQuery(new Term("field", "seven"));
- SpanNearQuery near2 = new SpanNearQuery(new SpanQuery[] {term3, term4},
- 0, true);
-
- SpanOrQuery query = new SpanOrQuery(new SpanQuery[] {near1, near2});
-
- checkHits(query, new int[]
- {33, 47, 133, 147, 233, 247, 333, 347, 433, 447, 533, 547, 633, 647, 733,
- 747, 833, 847, 933, 947});
-
- assertTrue(searcher.explain(query, 33).getValue() > 0.0f);
- assertTrue(searcher.explain(query, 947).getValue() > 0.0f);
- }
-
- public void testSpanExactNested() throws Exception {
- SpanTermQuery term1 = new SpanTermQuery(new Term("field", "three"));
- SpanTermQuery term2 = new SpanTermQuery(new Term("field", "hundred"));
- SpanNearQuery near1 = new SpanNearQuery(new SpanQuery[] {term1, term2},
- 0, true);
- SpanTermQuery term3 = new SpanTermQuery(new Term("field", "thirty"));
- SpanTermQuery term4 = new SpanTermQuery(new Term("field", "three"));
- SpanNearQuery near2 = new SpanNearQuery(new SpanQuery[] {term3, term4},
- 0, true);
-
- SpanNearQuery query = new SpanNearQuery(new SpanQuery[] {near1, near2},
- 0, true);
-
- checkHits(query, new int[] {333});
-
- assertTrue(searcher.explain(query, 333).getValue() > 0.0f);
- }
-
- public void testSpanNearOr() throws Exception {
-
- SpanTermQuery t1 = new SpanTermQuery(new Term("field","six"));
- SpanTermQuery t3 = new SpanTermQuery(new Term("field","seven"));
-
- SpanTermQuery t5 = new SpanTermQuery(new Term("field","seven"));
- SpanTermQuery t6 = new SpanTermQuery(new Term("field","six"));
-
- SpanOrQuery to1 = new SpanOrQuery(new SpanQuery[] {t1, t3});
- SpanOrQuery to2 = new SpanOrQuery(new SpanQuery[] {t5, t6});
-
- SpanNearQuery query = new SpanNearQuery(new SpanQuery[] {to1, to2},
- 10, true);
-
- checkHits(query, new int[]
- {606, 607, 626, 627, 636, 637, 646, 647,
- 656, 657, 666, 667, 676, 677, 686, 687, 696, 697,
- 706, 707, 726, 727, 736, 737, 746, 747,
- 756, 757, 766, 767, 776, 777, 786, 787, 796, 797});
- }
-
- public void testSpanComplex1() throws Exception {
-
- SpanTermQuery t1 = new SpanTermQuery(new Term("field","six"));
- SpanTermQuery t2 = new SpanTermQuery(new Term("field","hundred"));
- SpanNearQuery tt1 = new SpanNearQuery(new SpanQuery[] {t1, t2}, 0,true);
-
- SpanTermQuery t3 = new SpanTermQuery(new Term("field","seven"));
- SpanTermQuery t4 = new SpanTermQuery(new Term("field","hundred"));
- SpanNearQuery tt2 = new SpanNearQuery(new SpanQuery[] {t3, t4}, 0,true);
-
- SpanTermQuery t5 = new SpanTermQuery(new Term("field","seven"));
- SpanTermQuery t6 = new SpanTermQuery(new Term("field","six"));
-
- SpanOrQuery to1 = new SpanOrQuery(new SpanQuery[] {tt1, tt2});
- SpanOrQuery to2 = new SpanOrQuery(new SpanQuery[] {t5, t6});
-
- SpanNearQuery query = new SpanNearQuery(new SpanQuery[] {to1, to2},
- 100, true);
-
- checkHits(query, new int[]
- {606, 607, 626, 627, 636, 637, 646, 647,
- 656, 657, 666, 667, 676, 677, 686, 687, 696, 697,
- 706, 707, 726, 727, 736, 737, 746, 747,
- 756, 757, 766, 767, 776, 777, 786, 787, 796, 797});
- }
-
-
- private void checkHits(Query query, int[] results) throws IOException {
- CheckHits.checkHits(query, "field", searcher, results, this);
- }
-}
Index: src/test-deprecated/org/apache/lucene/search/spans/TestSpans.java
===================================================================
--- src/test-deprecated/org/apache/lucene/search/spans/TestSpans.java (revision 386892)
+++ src/test-deprecated/org/apache/lucene/search/spans/TestSpans.java (working copy)
@@ -1,97 +0,0 @@
-package org.apache.lucene.search.spans;
-
-/**
- * Copyright 2004 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Hits;
-import org.apache.lucene.search.CheckHits;
-import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.analysis.WhitespaceAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import junit.framework.TestCase;
-
-import java.io.IOException;
-import java.util.Set;
-import java.util.TreeSet;
-
-public class TestSpans extends TestCase {
- private IndexSearcher searcher;
-
- public static final String field = "field";
-
- public void setUp() throws Exception {
- RAMDirectory directory = new RAMDirectory();
- IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(), true);
- StringBuffer buffer = new StringBuffer();
- for (int i = 0; i < docFields.length; i++) {
- Document doc = new Document();
- doc.add(Field.Text(field, docFields[i]));
- writer.addDocument(doc);
- }
- writer.close();
- searcher = new IndexSearcher(directory);
- }
-
- private String[] docFields = {
- "w1 w2 w3 w4 w5",
- "w1 w3 w2 w3",
- "w1 xx w2 yy w3",
- "w1 w3 xx w2 yy w3",
- ""
- };
-
- public SpanTermQuery makeSpanTermQuery(String text) {
- return new SpanTermQuery(new Term(field, text));
- }
-
- private void checkHits(Query query, int[] results) throws IOException {
- CheckHits.checkHits(query, field, searcher, results, this);
- }
-
- public void orderedSlopTest3(int slop, int[] expectedDocs) throws IOException {
- SpanTermQuery w1 = makeSpanTermQuery("w1");
- SpanTermQuery w2 = makeSpanTermQuery("w2");
- SpanTermQuery w3 = makeSpanTermQuery("w3");
- boolean ordered = true;
- SpanNearQuery snq = new SpanNearQuery( new SpanQuery[]{w1,w2,w3}, slop, ordered);
- checkHits(snq, expectedDocs);
- }
-
- public void testSpanNearOrdered01() throws Exception {
- orderedSlopTest3(0, new int[] {0});
- }
-
- public void testSpanNearOrdered02() throws Exception {
- orderedSlopTest3(1, new int[] {0,1});
- }
-
- public void testSpanNearOrdered03() throws Exception {
- orderedSlopTest3(2, new int[] {0,1,2});
- }
-
- public void testSpanNearOrdered04() throws Exception {
- orderedSlopTest3(3, new int[] {0,1,2,3});
- }
-
- public void testSpanNearOrdered05() throws Exception {
- orderedSlopTest3(4, new int[] {0,1,2,3});
- }
-}
Index: src/test-deprecated/org/apache/lucene/search/CheckHits.java
===================================================================
--- src/test-deprecated/org/apache/lucene/search/CheckHits.java (revision 386892)
+++ src/test-deprecated/org/apache/lucene/search/CheckHits.java (working copy)
@@ -1,65 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Copyright 2004 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* 20 May 2004: Factored out of spans tests. Please leave this comment
- until this class is evt. also used by tests in search package.
- */
-
-import org.apache.lucene.search.Searcher;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Hits;
-import junit.framework.TestCase;
-
-import java.io.IOException;
-import java.util.Set;
-import java.util.TreeSet;
-
-public class CheckHits {
- public static void checkHits(
- Query query,
- String defaultFieldName,
- Searcher searcher,
- int[] results,
- TestCase testCase)
- throws IOException {
- Hits hits = searcher.search(query);
-
- Set correct = new TreeSet();
- for (int i = 0; i < results.length; i++) {
- correct.add(new Integer(results[i]));
- }
-
- Set actual = new TreeSet();
- for (int i = 0; i < hits.length(); i++) {
- actual.add(new Integer(hits.id(i)));
- }
-
- testCase.assertEquals(query.toString(defaultFieldName), correct, actual);
- }
-
- public static void printDocNrs(Hits hits) throws IOException {
- System.out.print("new int[] {");
- for (int i = 0; i < hits.length(); i++) {
- System.out.print(hits.id(i));
- if (i != hits.length()-1)
- System.out.print(", ");
- }
- System.out.println("}");
- }
-}
-
Index: src/test-deprecated/org/apache/lucene/search/TestRemoteSearchable.java
===================================================================
--- src/test-deprecated/org/apache/lucene/search/TestRemoteSearchable.java (revision 386892)
+++ src/test-deprecated/org/apache/lucene/search/TestRemoteSearchable.java (working copy)
@@ -1,109 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Copyright 2004 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import junit.framework.TestCase;
-
-import java.rmi.Naming;
-import java.rmi.registry.LocateRegistry;
-
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.analysis.SimpleAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-
-/**
- * @version $Id$
- */
-public class TestRemoteSearchable extends TestCase {
- public TestRemoteSearchable(String name) {
- super(name);
- }
-
- private static Searchable getRemote() throws Exception {
- try {
- return lookupRemote();
- } catch (Throwable e) {
- startServer();
- return lookupRemote();
- }
- }
-
- private static Searchable lookupRemote() throws Exception {
- return (Searchable)Naming.lookup("//localhost/Searchable");
- }
-
- private static void startServer() throws Exception {
- // construct an index
- RAMDirectory indexStore = new RAMDirectory();
- IndexWriter writer = new IndexWriter(indexStore,new SimpleAnalyzer(),true);
- Document doc = new Document();
- doc.add(Field.Text("test", "test text"));
- writer.addDocument(doc);
- writer.optimize();
- writer.close();
-
- // publish it
- LocateRegistry.createRegistry(1099);
- Searchable local = new IndexSearcher(indexStore);
- RemoteSearchable impl = new RemoteSearchable(local);
- Naming.rebind("//localhost/Searchable", impl);
- }
-
- private static void search(Query query) throws Exception {
- // try to search the published index
- Searchable[] searchables = { getRemote() };
- Searcher searcher = new MultiSearcher(searchables);
- Hits result = searcher.search(query);
-
- assertEquals(1, result.length());
- assertEquals("test text", result.doc(0).get("test"));
- }
-
- public void testTermQuery() throws Exception {
- search(new TermQuery(new Term("test", "test")));
- }
-
- public void testBooleanQuery() throws Exception {
- BooleanQuery query = new BooleanQuery();
- query.add(new TermQuery(new Term("test", "test")), true, false);
- search(query);
- }
-
- public void testPhraseQuery() throws Exception {
- PhraseQuery query = new PhraseQuery();
- query.add(new Term("test", "test"));
- query.add(new Term("test", "text"));
- search(query);
- }
-
- // Tests bug fix at http://nagoya.apache.org/bugzilla/show_bug.cgi?id=20290
- public void testQueryFilter() throws Exception {
- // try to search the published index
- Searchable[] searchables = { getRemote() };
- Searcher searcher = new MultiSearcher(searchables);
- Hits hits = searcher.search(
- new TermQuery(new Term("test", "text")),
- new QueryFilter(new TermQuery(new Term("test", "test"))));
- Hits nohits = searcher.search(
- new TermQuery(new Term("test", "text")),
- new QueryFilter(new TermQuery(new Term("test", "non-existent-term"))));
- assertEquals(0, nohits.length());
- }
-}
Index: src/test-deprecated/org/apache/lucene/search/TestMultiSearcher.java
===================================================================
--- src/test-deprecated/org/apache/lucene/search/TestMultiSearcher.java (revision 386892)
+++ src/test-deprecated/org/apache/lucene/search/TestMultiSearcher.java (working copy)
@@ -1,206 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Copyright 2004 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.analysis.standard.StandardAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.queryParser.QueryParser;
-import org.apache.lucene.search.Searcher;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.RAMDirectory;
-
-import junit.framework.TestCase;
-
-import java.io.IOException;
-
-/**
- * Tests {@link MultiSearcher} class.
- *
- * @version $Id$
- */
-public class TestMultiSearcher extends TestCase
-{
- public TestMultiSearcher(String name)
- {
- super(name);
- }
-
- /**
- * ReturnS a new instance of the concrete MultiSearcher class
- * used in this test.
- */
- protected MultiSearcher getMultiSearcherInstance(Searcher[] searchers) throws IOException {
- return new MultiSearcher(searchers);
- }
-
- public void testEmptyIndex()
- throws Exception
- {
- // creating two directories for indices
- Directory indexStoreA = new RAMDirectory();
- Directory indexStoreB = new RAMDirectory();
-
- // creating a document to store
- Document lDoc = new Document();
- lDoc.add(Field.Text("fulltext", "Once upon a time....."));
- lDoc.add(Field.Keyword("id", "doc1"));
- lDoc.add(Field.Keyword("handle", "1"));
-
- // creating a document to store
- Document lDoc2 = new Document();
- lDoc2.add(Field.Text("fulltext", "in a galaxy far far away....."));
- lDoc2.add(Field.Keyword("id", "doc2"));
- lDoc2.add(Field.Keyword("handle", "1"));
-
- // creating a document to store
- Document lDoc3 = new Document();
- lDoc3.add(Field.Text("fulltext", "a bizarre bug manifested itself...."));
- lDoc3.add(Field.Keyword("id", "doc3"));
- lDoc3.add(Field.Keyword("handle", "1"));
-
- // creating an index writer for the first index
- IndexWriter writerA = new IndexWriter(indexStoreA, new StandardAnalyzer(), true);
- // creating an index writer for the second index, but writing nothing
- IndexWriter writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(), true);
-
- //--------------------------------------------------------------------
- // scenario 1
- //--------------------------------------------------------------------
-
- // writing the documents to the first index
- writerA.addDocument(lDoc);
- writerA.addDocument(lDoc2);
- writerA.addDocument(lDoc3);
- writerA.optimize();
- writerA.close();
-
- // closing the second index
- writerB.close();
-
- // creating the query
- Query query = QueryParser.parse("handle:1", "fulltext", new StandardAnalyzer());
-
- // building the searchables
- Searcher[] searchers = new Searcher[2];
- // VITAL STEP:adding the searcher for the empty index first, before the searcher for the populated index
- searchers[0] = new IndexSearcher(indexStoreB);
- searchers[1] = new IndexSearcher(indexStoreA);
- // creating the multiSearcher
- Searcher mSearcher = getMultiSearcherInstance(searchers);
- // performing the search
- Hits hits = mSearcher.search(query);
-
- assertEquals(3, hits.length());
-
- try {
- // iterating over the hit documents
- for (int i = 0; i < hits.length(); i++) {
- Document d = hits.doc(i);
- }
- }
- catch (ArrayIndexOutOfBoundsException e)
- {
- fail("ArrayIndexOutOfBoundsException thrown: " + e.getMessage());
- e.printStackTrace();
- } finally{
- mSearcher.close();
- }
-
-
- //--------------------------------------------------------------------
- // scenario 2
- //--------------------------------------------------------------------
-
- // adding one document to the empty index
- writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(), false);
- writerB.addDocument(lDoc);
- writerB.optimize();
- writerB.close();
-
- // building the searchables
- Searcher[] searchers2 = new Searcher[2];
- // VITAL STEP:adding the searcher for the empty index first, before the searcher for the populated index
- searchers2[0] = new IndexSearcher(indexStoreB);
- searchers2[1] = new IndexSearcher(indexStoreA);
- // creating the mulitSearcher
- Searcher mSearcher2 = getMultiSearcherInstance(searchers2);
- // performing the same search
- Hits hits2 = mSearcher2.search(query);
-
- assertEquals(4, hits2.length());
-
- try {
- // iterating over the hit documents
- for (int i = 0; i < hits2.length(); i++) {
- // no exception should happen at this point
- Document d = hits2.doc(i);
- }
- }
- catch (Exception e)
- {
- fail("Exception thrown: " + e.getMessage());
- e.printStackTrace();
- } finally{
- mSearcher2.close();
- }
-
- //--------------------------------------------------------------------
- // scenario 3
- //--------------------------------------------------------------------
-
- // deleting the document just added, this will cause a different exception to take place
- Term term = new Term("id", "doc1");
- IndexReader readerB = IndexReader.open(indexStoreB);
- readerB.delete(term);
- readerB.close();
-
- // optimizing the index with the writer
- writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(), false);
- writerB.optimize();
- writerB.close();
-
- // building the searchables
- Searcher[] searchers3 = new Searcher[2];
-
- searchers3[0] = new IndexSearcher(indexStoreB);
- searchers3[1] = new IndexSearcher(indexStoreA);
- // creating the mulitSearcher
- Searcher mSearcher3 = getMultiSearcherInstance(searchers3);
- // performing the same search
- Hits hits3 = mSearcher3.search(query);
-
- assertEquals(3, hits3.length());
-
- try {
- // iterating over the hit documents
- for (int i = 0; i < hits3.length(); i++) {
- Document d = hits3.doc(i);
- }
- }
- catch (IOException e)
- {
- fail("IOException thrown: " + e.getMessage());
- e.printStackTrace();
- } finally{
- mSearcher3.close();
- }
- }
-}
Index: src/test-deprecated/org/apache/lucene/search/SampleComparable.java
===================================================================
--- src/test-deprecated/org/apache/lucene/search/SampleComparable.java (revision 386892)
+++ src/test-deprecated/org/apache/lucene/search/SampleComparable.java (working copy)
@@ -1,143 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Copyright 2004 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.TermDocs;
-import org.apache.lucene.index.TermEnum;
-
-import java.io.IOException;
-import java.io.Serializable;
-
-/**
- * An example Comparable for use with the custom sort tests.
- * It implements a comparable for "id" sort of values which
- * consist of an alphanumeric part and a numeric part, such as:
- * ABC-123, A-1, A-7, A-100, B-99999
- * Such values cannot be sorted as strings, since A-100 needs
- * to come after A-7.
- * It could be argued that the "ids" should be rewritten as
- * A-0001, A-0100, etc. so they will sort as strings. That is
- * a valid alternate way to solve it - but
- * this is only supposed to be a simple test case.
- * Created: Apr 21, 2004 5:34:47 PM
- *
- * @author Tim Jones
- * @version $Id$
- * @since 1.4
- */
-public class SampleComparable
-implements Comparable, Serializable {
-
- String string_part;
- Integer int_part;
-
- public SampleComparable (String s) {
- int i = s.indexOf ("-");
- string_part = s.substring (0, i);
- int_part = new Integer (s.substring (i + 1));
- }
-
- public int compareTo (Object o) {
- SampleComparable otherid = (SampleComparable) o;
- int i = string_part.compareTo (otherid.string_part);
- if (i == 0) return int_part.compareTo (otherid.int_part);
- return i;
- }
-
- public static SortComparatorSource getComparatorSource () {
- return new SortComparatorSource () {
- public ScoreDocComparator newComparator (final IndexReader reader, String fieldname)
- throws IOException {
- final String field = fieldname.intern ();
- final TermEnum enumerator = reader.terms (new Term (fieldname, ""));
- try {
- return new ScoreDocComparator () {
- protected Comparable[] cachedValues = fillCache (reader, enumerator, field);
-
- public int compare (ScoreDoc i, ScoreDoc j) {
- return cachedValues[i.doc].compareTo (cachedValues[j.doc]);
- }
-
- public Comparable sortValue (ScoreDoc i) {
- return cachedValues[i.doc];
- }
-
- public int sortType () {
- return SortField.CUSTOM;
- }
- };
- } finally {
- enumerator.close ();
- }
- }
-
- /**
- * Returns an array of objects which represent that natural order
- * of the term values in the given field.
- *
- * @param reader Terms are in this index.
- * @param enumerator Use this to get the term values and TermDocs.
- * @param fieldname Comparables should be for this field.
- * @return Array of objects representing natural order of terms in field.
- * @throws IOException If an error occurs reading the index.
- */
- protected Comparable[] fillCache (IndexReader reader, TermEnum enumerator, String fieldname)
- throws IOException {
- final String field = fieldname.intern ();
- Comparable[] retArray = new Comparable[reader.maxDoc ()];
- if (retArray.length > 0) {
- TermDocs termDocs = reader.termDocs ();
- try {
- if (enumerator.term () == null) {
- throw new RuntimeException ("no terms in field " + field);
- }
- do {
- Term term = enumerator.term ();
- if (term.field () != field) break;
- Comparable termval = getComparable (term.text ());
- termDocs.seek (enumerator);
- while (termDocs.next ()) {
- retArray[termDocs.doc ()] = termval;
- }
- } while (enumerator.next ());
- } finally {
- termDocs.close ();
- }
- }
- return retArray;
- }
-
- Comparable getComparable (String termtext) {
- return new SampleComparable (termtext);
- }
- };
- }
-
- public static SortComparator getComparator() {
- return new SortComparator() {
- protected Comparable getComparable (String termtext) {
- return new SampleComparable (termtext);
- }
- };
- }
-}
\ No newline at end of file
Index: src/test-deprecated/org/apache/lucene/search/TestPhraseQuery.java
===================================================================
--- src/test-deprecated/org/apache/lucene/search/TestPhraseQuery.java (revision 386892)
+++ src/test-deprecated/org/apache/lucene/search/TestPhraseQuery.java (working copy)
@@ -1,257 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Copyright 2004 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import junit.framework.TestCase;
-import org.apache.lucene.analysis.WhitespaceAnalyzer;
-import org.apache.lucene.analysis.StopAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.store.RAMDirectory;
-
-/**
- * Tests {@link PhraseQuery}.
- *
- * @see TestPositionIncrement
- * @author Erik Hatcher
- */
-public class TestPhraseQuery extends TestCase {
- private IndexSearcher searcher;
- private PhraseQuery query;
- private RAMDirectory directory;
-
- public void setUp() throws Exception {
- directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true);
-
- Document doc = new Document();
- doc.add(Field.Text("field", "one two three four five"));
- writer.addDocument(doc);
-
- writer.optimize();
- writer.close();
-
- searcher = new IndexSearcher(directory);
- query = new PhraseQuery();
- }
-
- public void tearDown() throws Exception {
- searcher.close();
- directory.close();
- }
-
- public void testNotCloseEnough() throws Exception {
- query.setSlop(2);
- query.add(new Term("field", "one"));
- query.add(new Term("field", "five"));
- Hits hits = searcher.search(query);
- assertEquals(0, hits.length());
- }
-
- public void testBarelyCloseEnough() throws Exception {
- query.setSlop(3);
- query.add(new Term("field", "one"));
- query.add(new Term("field", "five"));
- Hits hits = searcher.search(query);
- assertEquals(1, hits.length());
- }
-
- /**
- * Ensures slop of 0 works for exact matches, but not reversed
- */
- public void testExact() throws Exception {
- // slop is zero by default
- query.add(new Term("field", "four"));
- query.add(new Term("field", "five"));
- Hits hits = searcher.search(query);
- assertEquals("exact match", 1, hits.length());
-
- query = new PhraseQuery();
- query.add(new Term("field", "two"));
- query.add(new Term("field", "one"));
- hits = searcher.search(query);
- assertEquals("reverse not exact", 0, hits.length());
- }
-
- public void testSlop1() throws Exception {
- // Ensures slop of 1 works with terms in order.
- query.setSlop(1);
- query.add(new Term("field", "one"));
- query.add(new Term("field", "two"));
- Hits hits = searcher.search(query);
- assertEquals("in order", 1, hits.length());
-
- // Ensures slop of 1 does not work for phrases out of order;
- // must be at least 2.
- query = new PhraseQuery();
- query.setSlop(1);
- query.add(new Term("field", "two"));
- query.add(new Term("field", "one"));
- hits = searcher.search(query);
- assertEquals("reversed, slop not 2 or more", 0, hits.length());
- }
-
- /**
- * As long as slop is at least 2, terms can be reversed
- */
- public void testOrderDoesntMatter() throws Exception {
- query.setSlop(2); // must be at least two for reverse order match
- query.add(new Term("field", "two"));
- query.add(new Term("field", "one"));
- Hits hits = searcher.search(query);
- assertEquals("just sloppy enough", 1, hits.length());
-
- query = new PhraseQuery();
- query.setSlop(2);
- query.add(new Term("field", "three"));
- query.add(new Term("field", "one"));
- hits = searcher.search(query);
- assertEquals("not sloppy enough", 0, hits.length());
- }
-
- /**
- * slop is the total number of positional moves allowed
- * to line up a phrase
- */
- public void testMulipleTerms() throws Exception {
- query.setSlop(2);
- query.add(new Term("field", "one"));
- query.add(new Term("field", "three"));
- query.add(new Term("field", "five"));
- Hits hits = searcher.search(query);
- assertEquals("two total moves", 1, hits.length());
-
- query = new PhraseQuery();
- query.setSlop(5); // it takes six moves to match this phrase
- query.add(new Term("field", "five"));
- query.add(new Term("field", "three"));
- query.add(new Term("field", "one"));
- hits = searcher.search(query);
- assertEquals("slop of 5 not close enough", 0, hits.length());
-
- query.setSlop(6);
- hits = searcher.search(query);
- assertEquals("slop of 6 just right", 1, hits.length());
- }
-
- public void testPhraseQueryWithStopAnalyzer() throws Exception {
- RAMDirectory directory = new RAMDirectory();
- StopAnalyzer stopAnalyzer = new StopAnalyzer();
- IndexWriter writer = new IndexWriter(directory, stopAnalyzer, true);
- Document doc = new Document();
- doc.add(Field.Text("field", "the stop words are here"));
- writer.addDocument(doc);
- writer.close();
-
- IndexSearcher searcher = new IndexSearcher(directory);
-
- // valid exact phrase query
- PhraseQuery query = new PhraseQuery();
- query.add(new Term("field","stop"));
- query.add(new Term("field","words"));
- Hits hits = searcher.search(query);
- assertEquals(1, hits.length());
-
- // currently StopAnalyzer does not leave "holes", so this matches.
- query = new PhraseQuery();
- query.add(new Term("field", "words"));
- query.add(new Term("field", "here"));
- hits = searcher.search(query);
- assertEquals(1, hits.length());
-
- searcher.close();
- }
-
- public void testPhraseQueryInConjunctionScorer() throws Exception {
- RAMDirectory directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true);
-
- Document doc = new Document();
- doc.add(new Field("source", "marketing info", true, true, true));
- writer.addDocument(doc);
-
- doc = new Document();
- doc.add(new Field("contents", "foobar", true, true, true));
- doc.add(new Field("source", "marketing info", true, true, true));
- writer.addDocument(doc);
-
- writer.optimize();
- writer.close();
-
- IndexSearcher searcher = new IndexSearcher(directory);
-
- PhraseQuery phraseQuery = new PhraseQuery();
- phraseQuery.add(new Term("source", "marketing"));
- phraseQuery.add(new Term("source", "info"));
- Hits hits = searcher.search(phraseQuery);
- assertEquals(2, hits.length());
-
- TermQuery termQuery = new TermQuery(new Term("contents","foobar"));
- BooleanQuery booleanQuery = new BooleanQuery();
- booleanQuery.add(termQuery, true, false);
- booleanQuery.add(phraseQuery, true, false);
- hits = searcher.search(booleanQuery);
- assertEquals(1, hits.length());
-
- searcher.close();
-
- writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true);
- doc = new Document();
- doc.add(new Field("contents", "map entry woo", true, true, true));
- writer.addDocument(doc);
-
- doc = new Document();
- doc.add(new Field("contents", "woo map entry", true, true, true));
- writer.addDocument(doc);
-
- doc = new Document();
- doc.add(new Field("contents", "map foobarword entry woo", true, true, true));
- writer.addDocument(doc);
-
- writer.optimize();
- writer.close();
-
- searcher = new IndexSearcher(directory);
-
- termQuery = new TermQuery(new Term("contents","woo"));
- phraseQuery = new PhraseQuery();
- phraseQuery.add(new Term("contents","map"));
- phraseQuery.add(new Term("contents","entry"));
-
- hits = searcher.search(termQuery);
- assertEquals(3, hits.length());
- hits = searcher.search(phraseQuery);
- assertEquals(2, hits.length());
-
- booleanQuery = new BooleanQuery();
- booleanQuery.add(termQuery, true, false);
- booleanQuery.add(phraseQuery, true, false);
- hits = searcher.search(booleanQuery);
- assertEquals(2, hits.length());
-
- booleanQuery = new BooleanQuery();
- booleanQuery.add(phraseQuery, true, false);
- booleanQuery.add(termQuery, true, false);
- hits = searcher.search(booleanQuery);
- assertEquals(2, hits.length());
-
- searcher.close();
- directory.close();
- }
-}
Index: src/test-deprecated/org/apache/lucene/search/TestPositionIncrement.java
===================================================================
--- src/test-deprecated/org/apache/lucene/search/TestPositionIncrement.java (revision 386892)
+++ src/test-deprecated/org/apache/lucene/search/TestPositionIncrement.java (working copy)
@@ -1,135 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Copyright 2004 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.search.PhraseQuery;
-import org.apache.lucene.search.Hits;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.Token;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.WhitespaceAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-
-import java.io.Reader;
-import java.io.IOException;
-import java.io.StringReader;
-
-import junit.framework.TestCase;
-
-/**
- * Term position unit test.
- *
- * @author Doug Cutting
- * @version $Revision$
- */
-public class TestPositionIncrement extends TestCase {
-
- public void testSetPosition() throws Exception {
- Analyzer analyzer = new Analyzer() {
- public TokenStream tokenStream(String fieldName, Reader reader) {
- return new TokenStream() {
- private final String[] TOKENS = {"1", "2", "3", "4", "5"};
- private final int[] INCREMENTS = {1, 2, 1, 0, 1};
- private int i = 0;
-
- public Token next() throws IOException {
- if (i == TOKENS.length)
- return null;
- Token t = new Token(TOKENS[i], i, i);
- t.setPositionIncrement(INCREMENTS[i]);
- i++;
- return t;
- }
- };
- }
- };
- RAMDirectory store = new RAMDirectory();
- IndexWriter writer = new IndexWriter(store, analyzer, true);
- Document d = new Document();
- d.add(Field.Text("field", "bogus"));
- writer.addDocument(d);
- writer.optimize();
- writer.close();
-
- IndexSearcher searcher = new IndexSearcher(store);
- PhraseQuery q;
- Hits hits;
-
- q = new PhraseQuery();
- q.add(new Term("field", "1"));
- q.add(new Term("field", "2"));
- hits = searcher.search(q);
- assertEquals(0, hits.length());
-
- q = new PhraseQuery();
- q.add(new Term("field", "2"));
- q.add(new Term("field", "3"));
- hits = searcher.search(q);
- assertEquals(1, hits.length());
-
- q = new PhraseQuery();
- q.add(new Term("field", "3"));
- q.add(new Term("field", "4"));
- hits = searcher.search(q);
- assertEquals(0, hits.length());
-
- q = new PhraseQuery();
- q.add(new Term("field", "2"));
- q.add(new Term("field", "4"));
- hits = searcher.search(q);
- assertEquals(1, hits.length());
-
- q = new PhraseQuery();
- q.add(new Term("field", "3"));
- q.add(new Term("field", "5"));
- hits = searcher.search(q);
- assertEquals(1, hits.length());
-
- q = new PhraseQuery();
- q.add(new Term("field", "4"));
- q.add(new Term("field", "5"));
- hits = searcher.search(q);
- assertEquals(1, hits.length());
-
- q = new PhraseQuery();
- q.add(new Term("field", "2"));
- q.add(new Term("field", "5"));
- hits = searcher.search(q);
- assertEquals(0, hits.length());
- }
-
- /**
- * Basic analyzer behavior should be to keep sequential terms in one
- * increment from one another.
- */
- public void testIncrementingPositions() throws Exception {
- Analyzer analyzer = new WhitespaceAnalyzer();
- TokenStream ts = analyzer.tokenStream("field",
- new StringReader("one two three four five"));
-
- while (true) {
- Token token = ts.next();
- if (token == null) break;
- assertEquals(token.termText(), 1, token.getPositionIncrement());
- }
- }
-}
Index: src/test-deprecated/org/apache/lucene/search/TestDocBoost.java
===================================================================
--- src/test-deprecated/org/apache/lucene/search/TestDocBoost.java (revision 386892)
+++ src/test-deprecated/org/apache/lucene/search/TestDocBoost.java (working copy)
@@ -1,83 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Copyright 2004 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import junit.framework.TestCase;
-
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.analysis.SimpleAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-
-/** Document boost unit test.
- *
- * @author Doug Cutting
- * @version $Revision$
- */
-public class TestDocBoost extends TestCase {
- public TestDocBoost(String name) {
- super(name);
- }
-
- public void testDocBoost() throws Exception {
- RAMDirectory store = new RAMDirectory();
- IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(), true);
-
- Field f1 = Field.Text("field", "word");
- Field f2 = Field.Text("field", "word");
- f2.setBoost(2.0f);
-
- Document d1 = new Document();
- Document d2 = new Document();
- Document d3 = new Document();
- Document d4 = new Document();
- d3.setBoost(3.0f);
- d4.setBoost(2.0f);
-
- d1.add(f1); // boost = 1
- d2.add(f2); // boost = 2
- d3.add(f1); // boost = 3
- d4.add(f2); // boost = 4
-
- writer.addDocument(d1);
- writer.addDocument(d2);
- writer.addDocument(d3);
- writer.addDocument(d4);
- writer.optimize();
- writer.close();
-
- final float[] scores = new float[4];
-
- new IndexSearcher(store).search
- (new TermQuery(new Term("field", "word")),
- new HitCollector() {
- public final void collect(int doc, float score) {
- scores[doc] = score;
- }
- });
-
- float lastScore = 0.0f;
-
- for (int i = 0; i < 4; i++) {
- assertTrue(scores[i] > lastScore);
- lastScore = scores[i];
- }
- }
-}
Index: src/test-deprecated/org/apache/lucene/search/TestFuzzyQuery.java
===================================================================
--- src/test-deprecated/org/apache/lucene/search/TestFuzzyQuery.java (revision 386892)
+++ src/test-deprecated/org/apache/lucene/search/TestFuzzyQuery.java (working copy)
@@ -1,137 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Copyright 2004 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-
-import junit.framework.TestCase;
-import org.apache.lucene.analysis.WhitespaceAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.store.RAMDirectory;
-
-/**
- * Tests {@link FuzzyQuery}.
- *
- * @author Daniel Naber
- */
-public class TestFuzzyQuery extends TestCase {
-
- public void testDefaultFuzziness() throws Exception {
- RAMDirectory directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true);
- addDoc("aaaaa", writer);
- addDoc("aaaab", writer);
- addDoc("aaabb", writer);
- addDoc("aabbb", writer);
- addDoc("abbbb", writer);
- addDoc("bbbbb", writer);
- addDoc("ddddd", writer);
- writer.optimize();
- writer.close();
- IndexSearcher searcher = new IndexSearcher(directory);
-
- FuzzyQuery query = new FuzzyQuery(new Term("field", "aaaaa"));
- Hits hits = searcher.search(query);
- assertEquals(3, hits.length());
-
- // not similar enough:
- query = new FuzzyQuery(new Term("field", "xxxxx"));
- hits = searcher.search(query);
- assertEquals(0, hits.length());
- query = new FuzzyQuery(new Term("field", "aaccc")); // edit distance to "aaaaa" = 3
- hits = searcher.search(query);
- assertEquals(0, hits.length());
-
- // query identical to a word in the index:
- query = new FuzzyQuery(new Term("field", "aaaaa"));
- hits = searcher.search(query);
- assertEquals(3, hits.length());
- assertEquals(hits.doc(0).get("field"), ("aaaaa"));
- // default allows for up to two edits:
- assertEquals(hits.doc(1).get("field"), ("aaaab"));
- assertEquals(hits.doc(2).get("field"), ("aaabb"));
-
- // query similar to a word in the index:
- query = new FuzzyQuery(new Term("field", "aaaac"));
- hits = searcher.search(query);
- assertEquals(3, hits.length());
- assertEquals(hits.doc(0).get("field"), ("aaaaa"));
- assertEquals(hits.doc(1).get("field"), ("aaaab"));
- assertEquals(hits.doc(2).get("field"), ("aaabb"));
-
- query = new FuzzyQuery(new Term("field", "ddddX"));
- hits = searcher.search(query);
- assertEquals(1, hits.length());
- assertEquals(hits.doc(0).get("field"), ("ddddd"));
-
- // different field = no match:
- query = new FuzzyQuery(new Term("anotherfield", "ddddX"));
- hits = searcher.search(query);
- assertEquals(0, hits.length());
-
- searcher.close();
- directory.close();
- }
-
- public void testDefaultFuzzinessLong() throws Exception {
- RAMDirectory directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true);
- addDoc("aaaaaaa", writer);
- addDoc("segment", writer);
- writer.optimize();
- writer.close();
- IndexSearcher searcher = new IndexSearcher(directory);
-
- FuzzyQuery query;
- // not similar enough:
- query = new FuzzyQuery(new Term("field", "xxxxx"));
- Hits hits = searcher.search(query);
- assertEquals(0, hits.length());
- // edit distance to "aaaaaaa" = 3, this matches because the string is longer than
- // in testDefaultFuzziness so a bigger difference is allowed:
- query = new FuzzyQuery(new Term("field", "aaaaccc"));
- hits = searcher.search(query);
- assertEquals(1, hits.length());
- assertEquals(hits.doc(0).get("field"), ("aaaaaaa"));
-
- // no match, more than half of the characters is wrong:
- query = new FuzzyQuery(new Term("field", "aaacccc"));
- hits = searcher.search(query);
- assertEquals(0, hits.length());
-
- // "student" and "stellent" are indeed similar to "segment" by default:
- query = new FuzzyQuery(new Term("field", "student"));
- hits = searcher.search(query);
- assertEquals(1, hits.length());
- query = new FuzzyQuery(new Term("field", "stellent"));
- hits = searcher.search(query);
- assertEquals(1, hits.length());
-
- searcher.close();
- directory.close();
- }
-
- private void addDoc(String text, IndexWriter writer) throws IOException {
- Document doc = new Document();
- doc.add(Field.Text("field", text));
- writer.addDocument(doc);
- }
-
-}
Index: src/test-deprecated/org/apache/lucene/search/TestTermVectors.java
===================================================================
--- src/test-deprecated/org/apache/lucene/search/TestTermVectors.java (revision 386892)
+++ src/test-deprecated/org/apache/lucene/search/TestTermVectors.java (working copy)
@@ -1,223 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Copyright 2004 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import junit.framework.TestCase;
-import org.apache.lucene.analysis.SimpleAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.*;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.English;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-
-public class TestTermVectors extends TestCase {
- private IndexSearcher searcher;
- private RAMDirectory directory = new RAMDirectory();
- public TestTermVectors(String s) {
- super(s);
- }
-
- public void setUp() throws Exception {
- IndexWriter writer
- = new IndexWriter(directory, new SimpleAnalyzer(), true);
- //writer.setUseCompoundFile(true);
- //writer.infoStream = System.out;
- StringBuffer buffer = new StringBuffer();
- for (int i = 0; i < 1000; i++) {
- Document doc = new Document();
- doc.add(Field.Text("field", English.intToEnglish(i), true));
- writer.addDocument(doc);
- }
- writer.close();
- searcher = new IndexSearcher(directory);
- }
-
- protected void tearDown() {
-
- }
-
- public void test() {
- assertTrue(searcher != null);
- }
-
- public void testTermVectors() {
- Query query = new TermQuery(new Term("field", "seventy"));
- try {
- Hits hits = searcher.search(query);
- assertEquals(100, hits.length());
-
- for (int i = 0; i < hits.length(); i++)
- {
- TermFreqVector [] vector = searcher.reader.getTermFreqVectors(hits.id(i));
- assertTrue(vector != null);
- assertTrue(vector.length == 1);
- //assertTrue();
- }
- TermFreqVector [] vector = searcher.reader.getTermFreqVectors(hits.id(50));
- //System.out.println("Explain: " + searcher.explain(query, hits.id(50)));
- //System.out.println("Vector: " + vector[0].toString());
- } catch (IOException e) {
- assertTrue(false);
- }
- }
-
- public void testTermPositionVectors() {
- Query query = new TermQuery(new Term("field", "fifty"));
- try {
- Hits hits = searcher.search(query);
- assertEquals(100, hits.length());
-
- for (int i = 0; i < hits.length(); i++)
- {
- TermFreqVector [] vector = searcher.reader.getTermFreqVectors(hits.id(i));
- assertTrue(vector != null);
- assertTrue(vector.length == 1);
- //assertTrue();
- }
- } catch (IOException e) {
- assertTrue(false);
- }
- }
-
- public void testKnownSetOfDocuments() {
- String [] termArray = {"eating", "chocolate", "in", "a", "computer", "lab", "grows", "old", "colored",
- "with", "an"};
- String test1 = "eating chocolate in a computer lab"; //6 terms
- String test2 = "computer in a computer lab"; //5 terms
- String test3 = "a chocolate lab grows old"; //5 terms
- String test4 = "eating chocolate with a chocolate lab in an old chocolate colored computer lab"; //13 terms
- Map test4Map = new HashMap();
- test4Map.put("chocolate", new Integer(3));
- test4Map.put("lab", new Integer(2));
- test4Map.put("eating", new Integer(1));
- test4Map.put("computer", new Integer(1));
- test4Map.put("with", new Integer(1));
- test4Map.put("a", new Integer(1));
- test4Map.put("colored", new Integer(1));
- test4Map.put("in", new Integer(1));
- test4Map.put("an", new Integer(1));
- test4Map.put("computer", new Integer(1));
- test4Map.put("old", new Integer(1));
-
- Document testDoc1 = new Document();
- setupDoc(testDoc1, test1);
- Document testDoc2 = new Document();
- setupDoc(testDoc2, test2);
- Document testDoc3 = new Document();
- setupDoc(testDoc3, test3);
- Document testDoc4 = new Document();
- setupDoc(testDoc4, test4);
-
- Directory dir = new RAMDirectory();
-
- try {
- IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(), true);
- assertTrue(writer != null);
- writer.addDocument(testDoc1);
- writer.addDocument(testDoc2);
- writer.addDocument(testDoc3);
- writer.addDocument(testDoc4);
- writer.close();
- IndexSearcher knownSearcher = new IndexSearcher(dir);
- TermEnum termEnum = knownSearcher.reader.terms();
- TermDocs termDocs = knownSearcher.reader.termDocs();
- //System.out.println("Terms: " + termEnum.size() + " Orig Len: " + termArray.length);
-
- Similarity sim = knownSearcher.getSimilarity();
- while (termEnum.next() == true)
- {
- Term term = termEnum.term();
- //System.out.println("Term: " + term);
- termDocs.seek(term);
- while (termDocs.next())
- {
- int docId = termDocs.doc();
- int freq = termDocs.freq();
- //System.out.println("Doc Id: " + docId + " freq " + freq);
- TermFreqVector vector = knownSearcher.reader.getTermFreqVector(docId, "field");
- float tf = sim.tf(freq);
- float idf = sim.idf(term, knownSearcher);
- //float qNorm = sim.queryNorm()
- //This is fine since we don't have stop words
- float lNorm = sim.lengthNorm("field", vector.getTerms().length);
- //float coord = sim.coord()
- //System.out.println("TF: " + tf + " IDF: " + idf + " LenNorm: " + lNorm);
- assertTrue(vector != null);
- String[] vTerms = vector.getTerms();
- int [] freqs = vector.getTermFrequencies();
- for (int i = 0; i < vTerms.length; i++)
- {
- if (term.text().equals(vTerms[i]) == true)
- {
- assertTrue(freqs[i] == freq);
- }
- }
-
- }
- //System.out.println("--------");
- }
- Query query = new TermQuery(new Term("field", "chocolate"));
- Hits hits = knownSearcher.search(query);
- //doc 3 should be the first hit b/c it is the shortest match
- assertTrue(hits.length() == 3);
- float score = hits.score(0);
- /*System.out.println("Hit 0: " + hits.id(0) + " Score: " + hits.score(0) + " String: " + hits.doc(0).toString());
- System.out.println("Explain: " + knownSearcher.explain(query, hits.id(0)));
- System.out.println("Hit 1: " + hits.id(1) + " Score: " + hits.score(1) + " String: " + hits.doc(1).toString());
- System.out.println("Explain: " + knownSearcher.explain(query, hits.id(1)));
- System.out.println("Hit 2: " + hits.id(2) + " Score: " + hits.score(2) + " String: " + hits.doc(2).toString());
- System.out.println("Explain: " + knownSearcher.explain(query, hits.id(2)));*/
- assertTrue(testDoc3.toString().equals(hits.doc(0).toString()));
- assertTrue(testDoc4.toString().equals(hits.doc(1).toString()));
- assertTrue(testDoc1.toString().equals(hits.doc(2).toString()));
- TermFreqVector vector = knownSearcher.reader.getTermFreqVector(hits.id(1), "field");
- assertTrue(vector != null);
- //System.out.println("Vector: " + vector);
- String[] terms = vector.getTerms();
- int [] freqs = vector.getTermFrequencies();
- assertTrue(terms != null && terms.length == 10);
- for (int i = 0; i < terms.length; i++) {
- String term = terms[i];
- //System.out.println("Term: " + term);
- int freq = freqs[i];
- assertTrue(test4.indexOf(term) != -1);
- Integer freqInt = (Integer)test4Map.get(term);
- assertTrue(freqInt != null);
- assertTrue(freqInt.intValue() == freq);
- }
- knownSearcher.close();
- } catch (IOException e) {
- e.printStackTrace();
- assertTrue(false);
- }
-
-
- }
-
- private void setupDoc(Document doc, String text)
- {
- doc.add(Field.Text("field", text, true));
- //System.out.println("Document: " + doc);
- }
-
-
-}
Index: src/test-deprecated/org/apache/lucene/search/TestQueryTermVector.java
===================================================================
--- src/test-deprecated/org/apache/lucene/search/TestQueryTermVector.java (revision 386892)
+++ src/test-deprecated/org/apache/lucene/search/TestQueryTermVector.java (working copy)
@@ -1,66 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Copyright 2004 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import junit.framework.TestCase;
-import org.apache.lucene.analysis.WhitespaceAnalyzer;
-
-public class TestQueryTermVector extends TestCase {
-
-
- public TestQueryTermVector(String s) {
- super(s);
- }
-
- protected void setUp() {
- }
-
- protected void tearDown() {
-
- }
-
- public void testConstructor() {
- String [] queryTerm = {"foo", "bar", "foo", "again", "foo", "bar", "go", "go", "go"};
- //Items are sorted lexicographically
- String [] gold = {"again", "bar", "foo", "go"};
- int [] goldFreqs = {1, 2, 3, 3};
- QueryTermVector result = new QueryTermVector(queryTerm);
- assertTrue(result != null);
- String [] terms = result.getTerms();
- assertTrue(terms.length == 4);
- int [] freq = result.getTermFrequencies();
- assertTrue(freq.length == 4);
- checkGold(terms, gold, freq, goldFreqs);
- result = new QueryTermVector(null);
- assertTrue(result.getTerms().length == 0);
-
- result = new QueryTermVector("foo bar foo again foo bar go go go", new WhitespaceAnalyzer());
- assertTrue(result != null);
- terms = result.getTerms();
- assertTrue(terms.length == 4);
- freq = result.getTermFrequencies();
- assertTrue(freq.length == 4);
- checkGold(terms, gold, freq, goldFreqs);
- }
-
- private void checkGold(String[] terms, String[] gold, int[] freq, int[] goldFreqs) {
- for (int i = 0; i < terms.length; i++) {
- assertTrue(terms[i].equals(gold[i]));
- assertTrue(freq[i] == goldFreqs[i]);
- }
- }
-}
Index: src/test-deprecated/org/apache/lucene/search/TestRangeQuery.java
===================================================================
--- src/test-deprecated/org/apache/lucene/search/TestRangeQuery.java (revision 386892)
+++ src/test-deprecated/org/apache/lucene/search/TestRangeQuery.java (working copy)
@@ -1,96 +0,0 @@
-package org.apache.lucene.search;
-
-import org.apache.lucene.analysis.WhitespaceAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.store.RAMDirectory;
-
-import junit.framework.TestCase;
-import java.io.IOException;
-
-/**
- * @author goller
- */
-public class TestRangeQuery extends TestCase {
-
- private int docCount = 0;
- private RAMDirectory dir;
-
- public void setUp() {
- dir = new RAMDirectory();
- }
-
- public void testExclusive() throws Exception {
- Query query = new RangeQuery(new Term("content", "A"),
- new Term("content", "C"),
- false);
- initializeIndex(new String[] {"A", "B", "C", "D"});
- IndexSearcher searcher = new IndexSearcher(dir);
- Hits hits = searcher.search(query);
- assertEquals("A,B,C,D, only B in range", 1, hits.length());
- searcher.close();
-
- initializeIndex(new String[] {"A", "B", "D"});
- searcher = new IndexSearcher(dir);
- hits = searcher.search(query);
- assertEquals("A,B,D, only B in range", 1, hits.length());
- searcher.close();
-
- addDoc("C");
- searcher = new IndexSearcher(dir);
- hits = searcher.search(query);
- assertEquals("C added, still only B in range", 1, hits.length());
- searcher.close();
- }
-
- public void testInclusive() throws Exception {
- Query query = new RangeQuery(new Term("content", "A"),
- new Term("content", "C"),
- true);
-
- initializeIndex(new String[]{"A", "B", "C", "D"});
- IndexSearcher searcher = new IndexSearcher(dir);
- Hits hits = searcher.search(query);
- assertEquals("A,B,C,D - A,B,C in range", 3, hits.length());
- searcher.close();
-
- initializeIndex(new String[]{"A", "B", "D"});
- searcher = new IndexSearcher(dir);
- hits = searcher.search(query);
- assertEquals("A,B,D - A and B in range", 2, hits.length());
- searcher.close();
-
- addDoc("C");
- searcher = new IndexSearcher(dir);
- hits = searcher.search(query);
- assertEquals("C added - A, B, C in range", 3, hits.length());
- searcher.close();
- }
-
- private void initializeIndex(String[] values) throws IOException {
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
- for (int i = 0; i < values.length; i++) {
- insertDoc(writer, values[i]);
- }
- writer.close();
- }
-
- private void addDoc(String content) throws IOException {
- IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false);
- insertDoc(writer, content);
- writer.close();
- }
-
- private void insertDoc(IndexWriter writer, String content) throws IOException {
- Document doc = new Document();
-
- doc.add(Field.Keyword("id", "id" + docCount));
- doc.add(Field.UnStored("content", content));
-
- writer.addDocument(doc);
- docCount++;
- }
-}
-
Index: src/test-deprecated/org/apache/lucene/search/TestDateFilter.java
===================================================================
--- src/test-deprecated/org/apache/lucene/search/TestDateFilter.java (revision 386892)
+++ src/test-deprecated/org/apache/lucene/search/TestDateFilter.java (working copy)
@@ -1,163 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Copyright 2004 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Hits;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.analysis.SimpleAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.DateField;
-
-import java.io.IOException;
-
-import junit.framework.TestCase;
-
- /**
- * DateFilter JUnit tests.
- *
- * @author Otis Gospodnetic
- * @version $Revision$
- */
-public class TestDateFilter
- extends TestCase
-{
- public TestDateFilter(String name)
- {
- super(name);
- }
-
- /**
- *
- */
- public static void testBefore()
- throws IOException
- {
- // create an index
- RAMDirectory indexStore = new RAMDirectory();
- IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(), true);
-
- long now = System.currentTimeMillis();
-
- Document doc = new Document();
- // add time that is in the past
- doc.add(Field.Keyword("datefield", DateField.timeToString(now - 1000)));
- doc.add(Field.Text("body", "Today is a very sunny day in New York City"));
- writer.addDocument(doc);
- writer.optimize();
- writer.close();
-
- IndexSearcher searcher = new IndexSearcher(indexStore);
-
- // filter that should preserve matches
- DateFilter df1 = DateFilter.Before("datefield", now);
-
- // filter that should discard matches
- DateFilter df2 = DateFilter.Before("datefield", now - 999999);
-
- // search something that doesn't exist with DateFilter
- Query query1 = new TermQuery(new Term("body", "NoMatchForThis"));
-
- // search for something that does exists
- Query query2 = new TermQuery(new Term("body", "sunny"));
-
- Hits result;
-
- // ensure that queries return expected results without DateFilter first
- result = searcher.search(query1);
- assertEquals(0, result.length());
-
- result = searcher.search(query2);
- assertEquals(1, result.length());
-
-
- // run queries with DateFilter
- result = searcher.search(query1, df1);
- assertEquals(0, result.length());
-
- result = searcher.search(query1, df2);
- assertEquals(0, result.length());
-
- result = searcher.search(query2, df1);
- assertEquals(1, result.length());
-
- result = searcher.search(query2, df2);
- assertEquals(0, result.length());
- }
-
- /**
- *
- */
- public static void testAfter()
- throws IOException
- {
- // create an index
- RAMDirectory indexStore = new RAMDirectory();
- IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(), true);
-
- long now = System.currentTimeMillis();
-
- Document doc = new Document();
- // add time that is in the future
- doc.add(Field.Keyword("datefield", DateField.timeToString(now + 888888)));
- doc.add(Field.Text("body", "Today is a very sunny day in New York City"));
- writer.addDocument(doc);
- writer.optimize();
- writer.close();
-
- IndexSearcher searcher = new IndexSearcher(indexStore);
-
- // filter that should preserve matches
- DateFilter df1 = DateFilter.After("datefield", now);
-
- // filter that should discard matches
- DateFilter df2 = DateFilter.After("datefield", now + 999999);
-
- // search something that doesn't exist with DateFilter
- Query query1 = new TermQuery(new Term("body", "NoMatchForThis"));
-
- // search for something that does exists
- Query query2 = new TermQuery(new Term("body", "sunny"));
-
- Hits result;
-
- // ensure that queries return expected results without DateFilter first
- result = searcher.search(query1);
- assertEquals(0, result.length());
-
- result = searcher.search(query2);
- assertEquals(1, result.length());
-
-
- // run queries with DateFilter
- result = searcher.search(query1, df1);
- assertEquals(0, result.length());
-
- result = searcher.search(query1, df2);
- assertEquals(0, result.length());
-
- result = searcher.search(query2, df1);
- assertEquals(1, result.length());
-
- result = searcher.search(query2, df2);
- assertEquals(0, result.length());
- }
-}
Index: src/test-deprecated/org/apache/lucene/search/TestBooleanPrefixQuery.java
===================================================================
--- src/test-deprecated/org/apache/lucene/search/TestBooleanPrefixQuery.java (revision 386892)
+++ src/test-deprecated/org/apache/lucene/search/TestBooleanPrefixQuery.java (working copy)
@@ -1,104 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Copyright 2004 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import junit.framework.TestCase;
-import junit.framework.Test;
-import junit.framework.TestSuite;
-import junit.textui.TestRunner;
-import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.analysis.WhitespaceAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.search.PrefixQuery;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.BooleanQuery;
-
-import java.io.IOException;
-
-/**
- * @author schnee
- * @version $Id$
- **/
-
-public class TestBooleanPrefixQuery extends TestCase {
-
- public static void main(String[] args) {
- TestRunner.run(suite());
- }
-
- public static Test suite() {
- return new TestSuite(TestBooleanPrefixQuery.class);
- }
-
- public TestBooleanPrefixQuery(String name) {
- super(name);
- }
-
- public void testMethod() {
- RAMDirectory directory = new RAMDirectory();
-
- String[] categories = new String[]{"food",
- "foodanddrink",
- "foodanddrinkandgoodtimes",
- "food and drink"};
-
- Query rw1 = null;
- Query rw2 = null;
- try {
- IndexWriter writer = new IndexWriter(directory, new
- WhitespaceAnalyzer(), true);
- for (int i = 0; i < categories.length; i++) {
- Document doc = new Document();
- doc.add(Field.Keyword("category", categories[i]));
- writer.addDocument(doc);
- }
- writer.close();
-
- IndexReader reader = IndexReader.open(directory);
- PrefixQuery query = new PrefixQuery(new Term("category", "foo"));
-
- rw1 = query.rewrite(reader);
-
- BooleanQuery bq = new BooleanQuery();
- bq.add(query, true, false);
-
- rw2 = bq.rewrite(reader);
- } catch (IOException e) {
- fail(e.getMessage());
- }
-
- BooleanQuery bq1 = null;
- if (rw1 instanceof BooleanQuery) {
- bq1 = (BooleanQuery) rw1;
- }
-
- BooleanQuery bq2 = null;
- if (rw2 instanceof BooleanQuery) {
- bq2 = (BooleanQuery) rw2;
- } else {
- fail("Rewrite");
- }
-
- assertEquals("Number of Clauses Mismatch", bq1.getClauses().length,
- bq2.getClauses().length);
- }
-}
-
Index: src/test-deprecated/org/apache/lucene/search/TestPrefixQuery.java
===================================================================
--- src/test-deprecated/org/apache/lucene/search/TestPrefixQuery.java (revision 386892)
+++ src/test-deprecated/org/apache/lucene/search/TestPrefixQuery.java (working copy)
@@ -1,56 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Copyright 2004 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import junit.framework.TestCase;
-import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.analysis.WhitespaceAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-
-/**
- * Tests {@link PrefixQuery} class.
- *
- * @author Erik Hatcher
- */
-public class TestPrefixQuery extends TestCase {
- public void testPrefixQuery() throws Exception {
- RAMDirectory directory = new RAMDirectory();
-
- String[] categories = new String[] {"/Computers",
- "/Computers/Mac",
- "/Computers/Windows"};
- IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true);
- for (int i = 0; i < categories.length; i++) {
- Document doc = new Document();
- doc.add(Field.Keyword("category", categories[i]));
- writer.addDocument(doc);
- }
- writer.close();
-
- PrefixQuery query = new PrefixQuery(new Term("category", "/Computers"));
- IndexSearcher searcher = new IndexSearcher(directory);
- Hits hits = searcher.search(query);
- assertEquals("All documents in /Computers category and below", 3, hits.length());
-
- query = new PrefixQuery(new Term("category", "/Computers/Mac"));
- hits = searcher.search(query);
- assertEquals("One in /Computers/Mac", 1, hits.length());
- }
-}
Index: src/test-deprecated/org/apache/lucene/search/TestParallelMultiSearcher.java
===================================================================
--- src/test-deprecated/org/apache/lucene/search/TestParallelMultiSearcher.java (revision 386892)
+++ src/test-deprecated/org/apache/lucene/search/TestParallelMultiSearcher.java (working copy)
@@ -1,35 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Copyright 2004 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-
-/**
- * Unit tests for the ParallelMultiSearcher
- */
-public class TestParallelMultiSearcher extends TestMultiSearcher {
-
- public TestParallelMultiSearcher(String name) {
- super(name);
- }
-
- protected MultiSearcher getMultiSearcherInstance(Searcher[] searchers)
- throws IOException {
- return new ParallelMultiSearcher(searchers);
- }
-
-}
Index: src/test-deprecated/org/apache/lucene/search/TestFilteredQuery.java
===================================================================
--- src/test-deprecated/org/apache/lucene/search/TestFilteredQuery.java (revision 386892)
+++ src/test-deprecated/org/apache/lucene/search/TestFilteredQuery.java (working copy)
@@ -1,132 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Copyright 2004 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import junit.framework.TestCase;
-import org.apache.lucene.analysis.WhitespaceAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.store.RAMDirectory;
-import java.util.BitSet;
-import java.io.IOException;
-
-
-/**
- * FilteredQuery JUnit tests.
- *
- * Created: Apr 21, 2004 1:21:46 PM
- *
- * @author Tim Jones
- * @version $Id$
- * @since 1.4
- */
-public class TestFilteredQuery
-extends TestCase {
-
- private IndexSearcher searcher;
- private RAMDirectory directory;
- private Query query;
- private Filter filter;
-
- public void setUp()
- throws Exception {
- directory = new RAMDirectory();
- IndexWriter writer = new IndexWriter (directory, new WhitespaceAnalyzer(), true);
-
- Document doc = new Document();
- doc.add (Field.Text ("field", "one two three four five"));
- doc.add (Field.Text ("sorter", "b"));
- writer.addDocument (doc);
-
- doc = new Document();
- doc.add (Field.Text ("field", "one two three four"));
- doc.add (Field.Text ("sorter", "d"));
- writer.addDocument (doc);
-
- doc = new Document();
- doc.add (Field.Text ("field", "one two three y"));
- doc.add (Field.Text ("sorter", "a"));
- writer.addDocument (doc);
-
- doc = new Document();
- doc.add (Field.Text ("field", "one two x"));
- doc.add (Field.Text ("sorter", "c"));
- writer.addDocument (doc);
-
- writer.optimize ();
- writer.close ();
-
- searcher = new IndexSearcher (directory);
- query = new TermQuery (new Term ("field", "three"));
- filter = new Filter() {
- public BitSet bits (IndexReader reader) throws IOException {
- BitSet bitset = new BitSet(5);
- bitset.set (1);
- bitset.set (3);
- return bitset;
- }
- };
- }
-
- public void tearDown()
- throws Exception {
- searcher.close();
- directory.close();
- }
-
- public void testFilteredQuery()
- throws Exception {
- Query filteredquery = new FilteredQuery (query, filter);
- Hits hits = searcher.search (filteredquery);
- assertEquals (1, hits.length());
- assertEquals (1, hits.id(0));
-
- hits = searcher.search (filteredquery, new Sort("sorter"));
- assertEquals (1, hits.length());
- assertEquals (1, hits.id(0));
-
- filteredquery = new FilteredQuery (new TermQuery (new Term ("field", "one")), filter);
- hits = searcher.search (filteredquery);
- assertEquals (2, hits.length());
-
- filteredquery = new FilteredQuery (new TermQuery (new Term ("field", "x")), filter);
- hits = searcher.search (filteredquery);
- assertEquals (1, hits.length());
- assertEquals (3, hits.id(0));
-
- filteredquery = new FilteredQuery (new TermQuery (new Term ("field", "y")), filter);
- hits = searcher.search (filteredquery);
- assertEquals (0, hits.length());
- }
-
- /**
- * This tests FilteredQuery's rewrite correctness
- */
- public void testRangeQuery() throws Exception {
- RangeQuery rq = new RangeQuery(
- new Term("sorter", "b"), new Term("sorter", "d"), true);
-
- Query filteredquery = new FilteredQuery(rq, filter);
- Hits hits = searcher.search(filteredquery);
- assertEquals(2, hits.length());
- }
-
-}
-
Index: src/test-deprecated/org/apache/lucene/search/TestPhrasePrefixQuery.java
===================================================================
--- src/test-deprecated/org/apache/lucene/search/TestPhrasePrefixQuery.java (revision 386892)
+++ src/test-deprecated/org/apache/lucene/search/TestPhrasePrefixQuery.java (working copy)
@@ -1,104 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Copyright 2004 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.TermEnum;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.analysis.SimpleAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-
-import junit.framework.TestCase;
-
-import java.io.IOException;
-import java.util.LinkedList;
-
-/**
- * This class tests PhrasePrefixQuery class.
- *
- * @author Otis Gospodnetic
- * @version $Id$
- */
-public class TestPhrasePrefixQuery
- extends TestCase
-{
- public TestPhrasePrefixQuery(String name)
- {
- super(name);
- }
-
- /**
- *
- */
- public void testPhrasePrefix()
- throws IOException
- {
- RAMDirectory indexStore = new RAMDirectory();
- IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(), true);
- Document doc1 = new Document();
- Document doc2 = new Document();
- Document doc3 = new Document();
- Document doc4 = new Document();
- Document doc5 = new Document();
- doc1.add(Field.Text("body", "blueberry pie"));
- doc2.add(Field.Text("body", "blueberry strudel"));
- doc3.add(Field.Text("body", "blueberry pizza"));
- doc4.add(Field.Text("body", "blueberry chewing gum"));
- doc5.add(Field.Text("body", "piccadilly circus"));
- writer.addDocument(doc1);
- writer.addDocument(doc2);
- writer.addDocument(doc3);
- writer.addDocument(doc4);
- writer.addDocument(doc5);
- writer.optimize();
- writer.close();
-
- IndexSearcher searcher = new IndexSearcher(indexStore);
-
- PhrasePrefixQuery query1 = new PhrasePrefixQuery();
- PhrasePrefixQuery query2 = new PhrasePrefixQuery();
- query1.add(new Term("body", "blueberry"));
- query2.add(new Term("body", "strawberry"));
-
- LinkedList termsWithPrefix = new LinkedList();
- IndexReader ir = IndexReader.open(indexStore);
-
- // this TermEnum gives "piccadilly", "pie" and "pizza".
- String prefix = "pi";
- TermEnum te = ir.terms(new Term("body", prefix + "*"));
- do {
- if (te.term().text().startsWith(prefix))
- {
- termsWithPrefix.add(te.term());
- }
- } while (te.next());
-
- query1.add((Term[])termsWithPrefix.toArray(new Term[0]));
- query2.add((Term[])termsWithPrefix.toArray(new Term[0]));
-
- Hits result;
- result = searcher.search(query1);
- assertEquals(2, result.length());
-
- result = searcher.search(query2);
- assertEquals(0, result.length());
- }
-}
Index: src/test-deprecated/org/apache/lucene/search/TestSimilarity.java
===================================================================
--- src/test-deprecated/org/apache/lucene/search/TestSimilarity.java (revision 386892)
+++ src/test-deprecated/org/apache/lucene/search/TestSimilarity.java (working copy)
@@ -1,121 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Copyright 2004 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import junit.framework.TestCase;
-
-import java.util.Collection;
-
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.analysis.SimpleAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-
-/** Similarity unit test.
- *
- * @author Doug Cutting
- * @version $Revision$
- */
-public class TestSimilarity extends TestCase {
- public TestSimilarity(String name) {
- super(name);
- }
-
- public static class SimpleSimilarity extends Similarity {
- public float lengthNorm(String field, int numTerms) { return 1.0f; }
- public float queryNorm(float sumOfSquaredWeights) { return 1.0f; }
- public float tf(float freq) { return freq; }
- public float sloppyFreq(int distance) { return 2.0f; }
- public float idf(Collection terms, Searcher searcher) { return 1.0f; }
- public float idf(int docFreq, int numDocs) { return 1.0f; }
- public float coord(int overlap, int maxOverlap) { return 1.0f; }
- }
-
- public void testSimilarity() throws Exception {
- RAMDirectory store = new RAMDirectory();
- IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(), true);
- writer.setSimilarity(new SimpleSimilarity());
-
- Document d1 = new Document();
- d1.add(Field.Text("field", "a c"));
-
- Document d2 = new Document();
- d2.add(Field.Text("field", "a b c"));
-
- writer.addDocument(d1);
- writer.addDocument(d2);
- writer.optimize();
- writer.close();
-
- final float[] scores = new float[4];
-
- Searcher searcher = new IndexSearcher(store);
- searcher.setSimilarity(new SimpleSimilarity());
-
- Term a = new Term("field", "a");
- Term b = new Term("field", "b");
- Term c = new Term("field", "c");
-
- searcher.search
- (new TermQuery(b),
- new HitCollector() {
- public final void collect(int doc, float score) {
- assertTrue(score == 1.0f);
- }
- });
-
- BooleanQuery bq = new BooleanQuery();
- bq.add(new TermQuery(a), false, false);
- bq.add(new TermQuery(b), false, false);
- //System.out.println(bq.toString("field"));
- searcher.search
- (bq,
- new HitCollector() {
- public final void collect(int doc, float score) {
- //System.out.println("Doc=" + doc + " score=" + score);
- assertTrue(score == (float)doc+1);
- }
- });
-
- PhraseQuery pq = new PhraseQuery();
- pq.add(a);
- pq.add(c);
- //System.out.println(pq.toString("field"));
- searcher.search
- (pq,
- new HitCollector() {
- public final void collect(int doc, float score) {
- //System.out.println("Doc=" + doc + " score=" + score);
- assertTrue(score == 1.0f);
- }
- });
-
- pq.setSlop(2);
- //System.out.println(pq.toString("field"));
- searcher.search
- (pq,
- new HitCollector() {
- public final void collect(int doc, float score) {
- //System.out.println("Doc=" + doc + " score=" + score);
- assertTrue(score == 2.0f);
- }
- });
- }
-}
Index: src/test-deprecated/org/apache/lucene/search/TestSetNorm.java
===================================================================
--- src/test-deprecated/org/apache/lucene/search/TestSetNorm.java (revision 386892)
+++ src/test-deprecated/org/apache/lucene/search/TestSetNorm.java (working copy)
@@ -1,80 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Copyright 2004 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import junit.framework.TestCase;
-
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.analysis.SimpleAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-
-/** Document boost unit test.
- *
- * @author Doug Cutting
- * @version $Revision$
- */
-public class TestSetNorm extends TestCase {
- public TestSetNorm(String name) {
- super(name);
- }
-
- public void testSetNorm() throws Exception {
- RAMDirectory store = new RAMDirectory();
- IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(), true);
-
- // add the same document four times
- Field f1 = Field.Text("field", "word");
- Document d1 = new Document();
- d1.add(f1);
- writer.addDocument(d1);
- writer.addDocument(d1);
- writer.addDocument(d1);
- writer.addDocument(d1);
- writer.close();
-
- // reset the boost of each instance of this document
- IndexReader reader = IndexReader.open(store);
- reader.setNorm(0, "field", 1.0f);
- reader.setNorm(1, "field", 2.0f);
- reader.setNorm(2, "field", 4.0f);
- reader.setNorm(3, "field", 16.0f);
- reader.close();
-
- // check that searches are ordered by this boost
- final float[] scores = new float[4];
-
- new IndexSearcher(store).search
- (new TermQuery(new Term("field", "word")),
- new HitCollector() {
- public final void collect(int doc, float score) {
- scores[doc] = score;
- }
- });
-
- float lastScore = 0.0f;
-
- for (int i = 0; i < 4; i++) {
- assertTrue(scores[i] > lastScore);
- lastScore = scores[i];
- }
- }
-}
Index: src/test-deprecated/org/apache/lucene/search/TestSort.java
===================================================================
--- src/test-deprecated/org/apache/lucene/search/TestSort.java (revision 386892)
+++ src/test-deprecated/org/apache/lucene/search/TestSort.java (working copy)
@@ -1,588 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Copyright 2004 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.index.*;
-import org.apache.lucene.analysis.SimpleAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-
-import java.rmi.Naming;
-import java.rmi.registry.LocateRegistry;
-import java.rmi.registry.Registry;
-import java.io.IOException;
-import java.io.Serializable;
-import java.util.regex.Pattern;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.Locale;
-
-import junit.framework.TestCase;
-import junit.framework.Test;
-import junit.framework.TestSuite;
-import junit.textui.TestRunner;
-
-/**
- * Unit tests for sorting code.
- *
- * Created: Feb 17, 2004 4:55:10 PM
- *
- * @author Tim Jones (Nacimiento Software)
- * @since lucene 1.4
- * @version $Id$
- */
-
-public class TestSort
-extends TestCase
-implements Serializable {
-
- private Searcher full;
- private Searcher searchX;
- private Searcher searchY;
- private Query queryX;
- private Query queryY;
- private Query queryA;
- private Query queryF;
- private Sort sort;
-
-
- public TestSort (String name) {
- super (name);
- }
-
- public static void main (String[] argv) {
- if (argv == null || argv.length < 1)
- TestRunner.run (suite());
- else if ("server".equals (argv[0])) {
- TestSort test = new TestSort (null);
- try {
- test.startServer();
- Thread.sleep (500000);
- } catch (Exception e) {
- System.out.println (e);
- e.printStackTrace();
- }
- }
- }
-
- public static Test suite() {
- return new TestSuite (TestSort.class);
- }
-
-
- // document data:
- // the tracer field is used to determine which document was hit
- // the contents field is used to search and sort by relevance
- // the int field to sort by int
- // the float field to sort by float
- // the string field to sort by string
- private String[][] data = new String[][] {
- // tracer contents int float string custom
- { "A", "x a", "5", "4f", "c", "A-3" },
- { "B", "y a", "5", "3.4028235E38", "i", "B-10" },
- { "C", "x a b c", "2147483647", "1.0", "j", "A-2" },
- { "D", "y a b c", "-1", "0.0f", "a", "C-0" },
- { "E", "x a b c d", "5", "2f", "h", "B-8" },
- { "F", "y a b c d", "2", "3.14159f", "g", "B-1" },
- { "G", "x a b c d", "3", "-1.0", "f", "C-100" },
- { "H", "y a b c d", "0", "1.4E-45", "e", "C-88" },
- { "I", "x a b c d e f", "-2147483648", "1.0e+0", "d", "A-10" },
- { "J", "y a b c d e f", "4", ".5", "b", "C-7" },
- { "Z", "f", null, null, null, null }
- };
-
- // create an index of all the documents, or just the x, or just the y documents
- private Searcher getIndex (boolean even, boolean odd)
- throws IOException {
- RAMDirectory indexStore = new RAMDirectory ();
- IndexWriter writer = new IndexWriter (indexStore, new SimpleAnalyzer(), true);
- for (int i=0; istoreTermVector,
- * which is set to false.
- *
- * @deprecated use {@link #Field(String, String, Field.Store, Field.Index)} instead
- */
- public Field(String name, String string,
- boolean store, boolean index, boolean token) {
- this(name, string, store, index, token, false);
- }
/**
@@ -454,34 +366,6 @@
setStoreTermVector(TermVector.NO);
}
- /**
- *
- * @param name The name of the field
- * @param string The string to process
- * @param store true if the field should store the string
- * @param index true if the field should be indexed
- * @param token true if the field should be tokenized
- * @param storeTermVector true if we should store the Term Vector info
- *
- * @deprecated use {@link #Field(String, String, Field.Store, Field.Index, Field.TermVector)} instead
- */
- public Field(String name, String string,
- boolean store, boolean index, boolean token, boolean storeTermVector) {
- if (name == null)
- throw new NullPointerException("name cannot be null");
- if (string == null)
- throw new NullPointerException("value cannot be null");
- if (!index && storeTermVector)
- throw new IllegalArgumentException("cannot store a term vector for fields that are not indexed");
-
- this.name = name.intern(); // field names are interned
- this.fieldsData = string;
- this.isStored = store;
- this.isIndexed = index;
- this.isTokenized = token;
- this.storeTermVector = storeTermVector;
- }
-
private void setStoreTermVector(TermVector termVector) {
if (termVector == TermVector.NO) {
this.storeTermVector = false;
Index: src/demo/org/apache/lucene/demo/html/HTMLParser.java
===================================================================
--- src/demo/org/apache/lucene/demo/html/HTMLParser.java (revision 386892)
+++ src/demo/org/apache/lucene/demo/html/HTMLParser.java (working copy)
@@ -40,12 +40,6 @@
}
}
- /**
- * @deprecated Use HTMLParser(FileInputStream) instead
- */
- public HTMLParser(File file) throws FileNotFoundException {
- this(new FileInputStream(file));
- }
public String getTitle() throws IOException, InterruptedException {
if (pipeIn == null)
Index: src/demo/org/apache/lucene/demo/html/SimpleCharStream.java
===================================================================
--- src/demo/org/apache/lucene/demo/html/SimpleCharStream.java (revision 386892)
+++ src/demo/org/apache/lucene/demo/html/SimpleCharStream.java (working copy)
@@ -193,24 +193,6 @@
return (c);
}
- /**
- * @deprecated
- * @see #getEndColumn
- */
-
- public int getColumn() {
- return bufcolumn[bufpos];
- }
-
- /**
- * @deprecated
- * @see #getEndLine
- */
-
- public int getLine() {
- return bufline[bufpos];
- }
-
public int getEndColumn() {
return bufcolumn[bufpos];
}
Index: src/demo/org/apache/lucene/demo/DeleteFiles.java
===================================================================
--- src/demo/org/apache/lucene/demo/DeleteFiles.java (revision 386892)
+++ src/demo/org/apache/lucene/demo/DeleteFiles.java (working copy)
@@ -40,7 +40,7 @@
IndexReader reader = IndexReader.open(directory);
Term term = new Term("path", args[0]);
- int deleted = reader.delete(term);
+ int deleted = reader.deleteDocuments(term);
System.out.println("deleted " + deleted +
" documents containing " + term);
Index: src/demo/org/apache/lucene/demo/SearchFiles.java
===================================================================
--- src/demo/org/apache/lucene/demo/SearchFiles.java (revision 386892)
+++ src/demo/org/apache/lucene/demo/SearchFiles.java (working copy)
@@ -16,23 +16,23 @@
* limitations under the License.
*/
-import java.io.BufferedReader;
-import java.io.InputStreamReader;
-import java.io.FileReader;
-import java.io.IOException;
-import java.util.Date;
-
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
+import org.apache.lucene.index.FilterIndexReader;
import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.FilterIndexReader;
-import org.apache.lucene.search.Searcher;
+import org.apache.lucene.queryParser.QueryParser;
+import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Hits;
-import org.apache.lucene.queryParser.QueryParser;
+import org.apache.lucene.search.Searcher;
+import java.io.BufferedReader;
+import java.io.FileReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.util.Date;
+
/** Simple command-line based search demo. */
public class SearchFiles {
@@ -107,7 +107,7 @@
} else {
in = new BufferedReader(new InputStreamReader(System.in, "UTF-8"));
}
-
+ QueryParser parser = new QueryParser(field, analyzer);
while (true) {
if (queries == null) // prompt the user
System.out.print("Query: ");
@@ -117,7 +117,7 @@
if (line == null || line.length() == -1)
break;
- Query query = QueryParser.parse(line, field, analyzer);
+ Query query = parser.parse(line);
System.out.println("Searching for: " + query.toString(field));
Hits hits = searcher.search(query);
Index: src/demo/org/apache/lucene/demo/IndexHTML.java
===================================================================
--- src/demo/org/apache/lucene/demo/IndexHTML.java (revision 386892)
+++ src/demo/org/apache/lucene/demo/IndexHTML.java (working copy)
@@ -105,7 +105,7 @@
while (uidIter.term() != null && uidIter.term().field() == "uid") {
System.out.println("deleting " +
HTMLDocument.uid2url(uidIter.term().text()));
- reader.delete(uidIter.term());
+ reader.deleteDocuments(uidIter.term());
uidIter.next();
}
deleting = false;
@@ -137,7 +137,7 @@
if (deleting) { // delete stale docs
System.out.println("deleting " +
HTMLDocument.uid2url(uidIter.term().text()));
- reader.delete(uidIter.term());
+ reader.deleteDocuments(uidIter.term());
}
uidIter.next();
}
Index: src/test-deprecated/org/apache/lucene/queryParser/TestQueryParser.java
===================================================================
--- src/test-deprecated/org/apache/lucene/queryParser/TestQueryParser.java (revision 386892)
+++ src/test-deprecated/org/apache/lucene/queryParser/TestQueryParser.java (working copy)
@@ -1,495 +0,0 @@
-package org.apache.lucene.queryParser;
-
-/**
- * Copyright 2002-2004 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import junit.framework.TestCase;
-import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.LowerCaseTokenizer;
-import org.apache.lucene.analysis.SimpleAnalyzer;
-import org.apache.lucene.analysis.Token;
-import org.apache.lucene.analysis.TokenFilter;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.WhitespaceAnalyzer;
-import org.apache.lucene.analysis.standard.StandardAnalyzer;
-import org.apache.lucene.document.DateField;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.FuzzyQuery;
-import org.apache.lucene.search.PhraseQuery;
-import org.apache.lucene.search.PrefixQuery;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.RangeQuery;
-import org.apache.lucene.search.TermQuery;
-import org.apache.lucene.search.WildcardQuery;
-import java.io.IOException;
-import java.io.Reader;
-import java.text.DateFormat;
-import java.util.Calendar;
-
-/**
- * Tests QueryParser.
- */
-public class TestQueryParser extends TestCase {
-
- public static Analyzer qpAnalyzer = new QPTestAnalyzer();
-
- public static class QPTestFilter extends TokenFilter {
- /**
- * Filter which discards the token 'stop' and which expands the
- * token 'phrase' into 'phrase1 phrase2'
- */
- public QPTestFilter(TokenStream in) {
- super(in);
- }
-
- boolean inPhrase = false;
- int savedStart = 0, savedEnd = 0;
-
- public Token next() throws IOException {
- if (inPhrase) {
- inPhrase = false;
- return new Token("phrase2", savedStart, savedEnd);
- } else
- for (Token token = input.next(); token != null; token = input.next()) {
- if (token.termText().equals("phrase")) {
- inPhrase = true;
- savedStart = token.startOffset();
- savedEnd = token.endOffset();
- return new Token("phrase1", savedStart, savedEnd);
- } else if (!token.termText().equals("stop"))
- return token;
- }
- return null;
- }
- }
-
- public static class QPTestAnalyzer extends Analyzer {
-
- /** Filters LowerCaseTokenizer with StopFilter. */
- public final TokenStream tokenStream(String fieldName, Reader reader) {
- return new QPTestFilter(new LowerCaseTokenizer(reader));
- }
- }
-
- public static class QPTestParser extends QueryParser {
- public QPTestParser(String f, Analyzer a) {
- super(f, a);
- }
-
- protected Query getFuzzyQuery(String field, String termStr, float minSimilarity) throws ParseException {
- throw new ParseException("Fuzzy queries not allowed");
- }
-
- protected Query getWildcardQuery(String field, String termStr) throws ParseException {
- throw new ParseException("Wildcard queries not allowed");
- }
- }
-
- private int originalMaxClauses;
-
- public void setUp() {
- originalMaxClauses = BooleanQuery.getMaxClauseCount();
- }
-
- public QueryParser getParser(Analyzer a) throws Exception {
- if (a == null)
- a = new SimpleAnalyzer();
- QueryParser qp = new QueryParser("field", a);
- qp.setOperator(QueryParser.DEFAULT_OPERATOR_OR);
- return qp;
- }
-
- public Query getQuery(String query, Analyzer a) throws Exception {
- return getParser(a).parse(query);
- }
-
- public void assertQueryEquals(String query, Analyzer a, String result)
- throws Exception {
- Query q = getQuery(query, a);
- String s = q.toString("field");
- if (!s.equals(result)) {
- fail("Query /" + query + "/ yielded /" + s
- + "/, expecting /" + result + "/");
- }
- }
-
- public void assertWildcardQueryEquals(String query, boolean lowercase, String result)
- throws Exception {
- QueryParser qp = getParser(null);
- qp.setLowercaseWildcardTerms(lowercase);
- Query q = qp.parse(query);
- String s = q.toString("field");
- if (!s.equals(result)) {
- fail("WildcardQuery /" + query + "/ yielded /" + s
- + "/, expecting /" + result + "/");
- }
- }
-
- public Query getQueryDOA(String query, Analyzer a)
- throws Exception {
- if (a == null)
- a = new SimpleAnalyzer();
- QueryParser qp = new QueryParser("field", a);
- qp.setOperator(QueryParser.DEFAULT_OPERATOR_AND);
- return qp.parse(query);
- }
-
- public void assertQueryEqualsDOA(String query, Analyzer a, String result)
- throws Exception {
- Query q = getQueryDOA(query, a);
- String s = q.toString("field");
- if (!s.equals(result)) {
- fail("Query /" + query + "/ yielded /" + s
- + "/, expecting /" + result + "/");
- }
- }
-
- public void testSimple() throws Exception {
- assertQueryEquals("term term term", null, "term term term");
- assertQueryEquals("türm term term", null, "türm term term");
- assertQueryEquals("ümlaut", null, "ümlaut");
-
- assertQueryEquals("a AND b", null, "+a +b");
- assertQueryEquals("(a AND b)", null, "+a +b");
- assertQueryEquals("c OR (a AND b)", null, "c (+a +b)");
- assertQueryEquals("a AND NOT b", null, "+a -b");
- assertQueryEquals("a AND -b", null, "+a -b");
- assertQueryEquals("a AND !b", null, "+a -b");
- assertQueryEquals("a && b", null, "+a +b");
- assertQueryEquals("a && ! b", null, "+a -b");
-
- assertQueryEquals("a OR b", null, "a b");
- assertQueryEquals("a || b", null, "a b");
- assertQueryEquals("a OR !b", null, "a -b");
- assertQueryEquals("a OR ! b", null, "a -b");
- assertQueryEquals("a OR -b", null, "a -b");
-
- assertQueryEquals("+term -term term", null, "+term -term term");
- assertQueryEquals("foo:term AND field:anotherTerm", null,
- "+foo:term +anotherterm");
- assertQueryEquals("term AND \"phrase phrase\"", null,
- "+term +\"phrase phrase\"");
- assertQueryEquals("\"hello there\"", null, "\"hello there\"");
- assertTrue(getQuery("a AND b", null) instanceof BooleanQuery);
- assertTrue(getQuery("hello", null) instanceof TermQuery);
- assertTrue(getQuery("\"hello there\"", null) instanceof PhraseQuery);
-
- assertQueryEquals("germ term^2.0", null, "germ term^2.0");
- assertQueryEquals("(term)^2.0", null, "term^2.0");
- assertQueryEquals("(germ term)^2.0", null, "(germ term)^2.0");
- assertQueryEquals("term^2.0", null, "term^2.0");
- assertQueryEquals("term^2", null, "term^2.0");
- assertQueryEquals("\"germ term\"^2.0", null, "\"germ term\"^2.0");
- assertQueryEquals("\"term germ\"^2", null, "\"term germ\"^2.0");
-
- assertQueryEquals("(foo OR bar) AND (baz OR boo)", null,
- "+(foo bar) +(baz boo)");
- assertQueryEquals("((a OR b) AND NOT c) OR d", null,
- "(+(a b) -c) d");
- assertQueryEquals("+(apple \"steve jobs\") -(foo bar baz)", null,
- "+(apple \"steve jobs\") -(foo bar baz)");
- assertQueryEquals("+title:(dog OR cat) -author:\"bob dole\"", null,
- "+(title:dog title:cat) -author:\"bob dole\"");
- }
-
- public void testPunct() throws Exception {
- Analyzer a = new WhitespaceAnalyzer();
- assertQueryEquals("a&b", a, "a&b");
- assertQueryEquals("a&&b", a, "a&&b");
- assertQueryEquals(".NET", a, ".NET");
- }
-
- public void testSlop() throws Exception {
- assertQueryEquals("\"term germ\"~2", null, "\"term germ\"~2");
- assertQueryEquals("\"term germ\"~2 flork", null, "\"term germ\"~2 flork");
- assertQueryEquals("\"term\"~2", null, "term");
- assertQueryEquals("\" \"~2 germ", null, "germ");
- assertQueryEquals("\"term germ\"~2^2", null, "\"term germ\"~2^2.0");
- }
-
- public void testNumber() throws Exception {
-// The numbers go away because SimpleAnalzyer ignores them
- assertQueryEquals("3", null, "");
- assertQueryEquals("term 1.0 1 2", null, "term");
- assertQueryEquals("term term1 term2", null, "term term term");
-
- Analyzer a = new StandardAnalyzer();
- assertQueryEquals("3", a, "3");
- assertQueryEquals("term 1.0 1 2", a, "term 1.0 1 2");
- assertQueryEquals("term term1 term2", a, "term term1 term2");
- }
-
- public void testWildcard() throws Exception {
- assertQueryEquals("term*", null, "term*");
- assertQueryEquals("term*^2", null, "term*^2.0");
- assertQueryEquals("term~", null, "term~0.5");
- assertQueryEquals("term~0.7", null, "term~0.7");
- assertQueryEquals("term~^2", null, "term^2.0~0.5");
- assertQueryEquals("term^2~", null, "term^2.0~0.5");
- assertQueryEquals("term*germ", null, "term*germ");
- assertQueryEquals("term*germ^3", null, "term*germ^3.0");
-
- assertTrue(getQuery("term*", null) instanceof PrefixQuery);
- assertTrue(getQuery("term*^2", null) instanceof PrefixQuery);
- assertTrue(getQuery("term~", null) instanceof FuzzyQuery);
- assertTrue(getQuery("term~0.7", null) instanceof FuzzyQuery);
- FuzzyQuery fq = (FuzzyQuery)getQuery("term~0.7", null);
- assertEquals(0.7f, fq.getMinSimilarity(), 0.1f);
- assertEquals(0, fq.getPrefixLength());
- fq = (FuzzyQuery)getQuery("term~", null);
- assertEquals(0.5f, fq.getMinSimilarity(), 0.1f);
- assertEquals(0, fq.getPrefixLength());
- try {
- getQuery("term~1.1", null); // value > 1, throws exception
- fail();
- } catch(ParseException pe) {
- // expected exception
- }
- assertTrue(getQuery("term*germ", null) instanceof WildcardQuery);
-
-/* Tests to see that wild card terms are (or are not) properly
- * lower-cased with propery parser configuration
- */
-// First prefix queries:
- assertWildcardQueryEquals("term*", true, "term*");
- assertWildcardQueryEquals("Term*", true, "term*");
- assertWildcardQueryEquals("TERM*", true, "term*");
- assertWildcardQueryEquals("term*", false, "term*");
- assertWildcardQueryEquals("Term*", false, "Term*");
- assertWildcardQueryEquals("TERM*", false, "TERM*");
-// Then 'full' wildcard queries:
- assertWildcardQueryEquals("te?m", true, "te?m");
- assertWildcardQueryEquals("Te?m", true, "te?m");
- assertWildcardQueryEquals("TE?M", true, "te?m");
- assertWildcardQueryEquals("Te?m*gerM", true, "te?m*germ");
- assertWildcardQueryEquals("te?m", false, "te?m");
- assertWildcardQueryEquals("Te?m", false, "Te?m");
- assertWildcardQueryEquals("TE?M", false, "TE?M");
- assertWildcardQueryEquals("Te?m*gerM", false, "Te?m*gerM");
- }
-
- public void testQPA() throws Exception {
- assertQueryEquals("term term term", qpAnalyzer, "term term term");
- assertQueryEquals("term +stop term", qpAnalyzer, "term term");
- assertQueryEquals("term -stop term", qpAnalyzer, "term term");
- assertQueryEquals("drop AND stop AND roll", qpAnalyzer, "+drop +roll");
- assertQueryEquals("term phrase term", qpAnalyzer,
- "term \"phrase1 phrase2\" term");
- assertQueryEquals("term AND NOT phrase term", qpAnalyzer,
- "+term -\"phrase1 phrase2\" term");
- assertQueryEquals("stop", qpAnalyzer, "");
- assertTrue(getQuery("term term term", qpAnalyzer) instanceof BooleanQuery);
- assertTrue(getQuery("term +stop", qpAnalyzer) instanceof TermQuery);
- }
-
- public void testRange() throws Exception {
- assertQueryEquals("[ a TO z]", null, "[a TO z]");
- assertTrue(getQuery("[ a TO z]", null) instanceof RangeQuery);
- assertQueryEquals("[ a TO z ]", null, "[a TO z]");
- assertQueryEquals("{ a TO z}", null, "{a TO z}");
- assertQueryEquals("{ a TO z }", null, "{a TO z}");
- assertQueryEquals("{ a TO z }^2.0", null, "{a TO z}^2.0");
- assertQueryEquals("[ a TO z] OR bar", null, "[a TO z] bar");
- assertQueryEquals("[ a TO z] AND bar", null, "+[a TO z] +bar");
- assertQueryEquals("( bar blar { a TO z}) ", null, "bar blar {a TO z}");
- assertQueryEquals("gack ( bar blar { a TO z}) ", null, "gack (bar blar {a TO z})");
- }
-
- public String getDate(String s) throws Exception {
- DateFormat df = DateFormat.getDateInstance(DateFormat.SHORT);
- return DateField.dateToString(df.parse(s));
- }
-
- public String getLocalizedDate(int year, int month, int day) {
- DateFormat df = DateFormat.getDateInstance(DateFormat.SHORT);
- Calendar calendar = Calendar.getInstance();
- calendar.set(year, month, day);
- return df.format(calendar.getTime());
- }
-
- public void testDateRange() throws Exception {
- String startDate = getLocalizedDate(2002, 1, 1);
- String endDate = getLocalizedDate(2002, 1, 4);
- assertQueryEquals("[ " + startDate + " TO " + endDate + "]", null,
- "[" + getDate(startDate) + " TO " + getDate(endDate) + "]");
- assertQueryEquals("{ " + startDate + " " + endDate + " }", null,
- "{" + getDate(startDate) + " TO " + getDate(endDate) + "}");
- }
-
- public void testEscaped() throws Exception {
- Analyzer a = new WhitespaceAnalyzer();
-
- /*assertQueryEquals("\\[brackets", a, "\\[brackets");
- assertQueryEquals("\\[brackets", null, "brackets");
- assertQueryEquals("\\\\", a, "\\\\");
- assertQueryEquals("\\+blah", a, "\\+blah");
- assertQueryEquals("\\(blah", a, "\\(blah");
-
- assertQueryEquals("\\-blah", a, "\\-blah");
- assertQueryEquals("\\!blah", a, "\\!blah");
- assertQueryEquals("\\{blah", a, "\\{blah");
- assertQueryEquals("\\}blah", a, "\\}blah");
- assertQueryEquals("\\:blah", a, "\\:blah");
- assertQueryEquals("\\^blah", a, "\\^blah");
- assertQueryEquals("\\[blah", a, "\\[blah");
- assertQueryEquals("\\]blah", a, "\\]blah");
- assertQueryEquals("\\\"blah", a, "\\\"blah");
- assertQueryEquals("\\(blah", a, "\\(blah");
- assertQueryEquals("\\)blah", a, "\\)blah");
- assertQueryEquals("\\~blah", a, "\\~blah");
- assertQueryEquals("\\*blah", a, "\\*blah");
- assertQueryEquals("\\?blah", a, "\\?blah");
- //assertQueryEquals("foo \\&\\& bar", a, "foo \\&\\& bar");
- //assertQueryEquals("foo \\|| bar", a, "foo \\|| bar");
- //assertQueryEquals("foo \\AND bar", a, "foo \\AND bar");*/
-
- assertQueryEquals("a\\-b:c", a, "a-b:c");
- assertQueryEquals("a\\+b:c", a, "a+b:c");
- assertQueryEquals("a\\:b:c", a, "a:b:c");
- assertQueryEquals("a\\\\b:c", a, "a\\b:c");
-
- assertQueryEquals("a:b\\-c", a, "a:b-c");
- assertQueryEquals("a:b\\+c", a, "a:b+c");
- assertQueryEquals("a:b\\:c", a, "a:b:c");
- assertQueryEquals("a:b\\\\c", a, "a:b\\c");
-
- assertQueryEquals("a:b\\-c*", a, "a:b-c*");
- assertQueryEquals("a:b\\+c*", a, "a:b+c*");
- assertQueryEquals("a:b\\:c*", a, "a:b:c*");
-
- assertQueryEquals("a:b\\\\c*", a, "a:b\\c*");
-
- assertQueryEquals("a:b\\-?c", a, "a:b-?c");
- assertQueryEquals("a:b\\+?c", a, "a:b+?c");
- assertQueryEquals("a:b\\:?c", a, "a:b:?c");
-
- assertQueryEquals("a:b\\\\?c", a, "a:b\\?c");
-
- assertQueryEquals("a:b\\-c~", a, "a:b-c~0.5");
- assertQueryEquals("a:b\\+c~", a, "a:b+c~0.5");
- assertQueryEquals("a:b\\:c~", a, "a:b:c~0.5");
- assertQueryEquals("a:b\\\\c~", a, "a:b\\c~0.5");
-
- assertQueryEquals("[ a\\- TO a\\+ ]", null, "[a- TO a+]");
- assertQueryEquals("[ a\\: TO a\\~ ]", null, "[a: TO a~]");
- assertQueryEquals("[ a\\\\ TO a\\* ]", null, "[a\\ TO a*]");
- }
-
- public void testTabNewlineCarriageReturn()
- throws Exception {
- assertQueryEqualsDOA("+weltbank +worlbank", null,
- "+weltbank +worlbank");
-
- assertQueryEqualsDOA("+weltbank\n+worlbank", null,
- "+weltbank +worlbank");
- assertQueryEqualsDOA("weltbank \n+worlbank", null,
- "+weltbank +worlbank");
- assertQueryEqualsDOA("weltbank \n +worlbank", null,
- "+weltbank +worlbank");
-
- assertQueryEqualsDOA("+weltbank\r+worlbank", null,
- "+weltbank +worlbank");
- assertQueryEqualsDOA("weltbank \r+worlbank", null,
- "+weltbank +worlbank");
- assertQueryEqualsDOA("weltbank \r +worlbank", null,
- "+weltbank +worlbank");
-
- assertQueryEqualsDOA("+weltbank\r\n+worlbank", null,
- "+weltbank +worlbank");
- assertQueryEqualsDOA("weltbank \r\n+worlbank", null,
- "+weltbank +worlbank");
- assertQueryEqualsDOA("weltbank \r\n +worlbank", null,
- "+weltbank +worlbank");
- assertQueryEqualsDOA("weltbank \r \n +worlbank", null,
- "+weltbank +worlbank");
-
- assertQueryEqualsDOA("+weltbank\t+worlbank", null,
- "+weltbank +worlbank");
- assertQueryEqualsDOA("weltbank \t+worlbank", null,
- "+weltbank +worlbank");
- assertQueryEqualsDOA("weltbank \t +worlbank", null,
- "+weltbank +worlbank");
- }
-
- public void testSimpleDAO()
- throws Exception {
- assertQueryEqualsDOA("term term term", null, "+term +term +term");
- assertQueryEqualsDOA("term +term term", null, "+term +term +term");
- assertQueryEqualsDOA("term term +term", null, "+term +term +term");
- assertQueryEqualsDOA("term +term +term", null, "+term +term +term");
- assertQueryEqualsDOA("-term term term", null, "-term +term +term");
- }
-
- public void testBoost()
- throws Exception {
- StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(new String[]{"on"});
- QueryParser qp = new QueryParser("field", oneStopAnalyzer);
- Query q = qp.parse("on^1.0");
- assertNotNull(q);
- q = qp.parse("\"hello\"^2.0");
- assertNotNull(q);
- assertEquals(q.getBoost(), (float) 2.0, (float) 0.5);
- q = qp.parse("hello^2.0");
- assertNotNull(q);
- assertEquals(q.getBoost(), (float) 2.0, (float) 0.5);
- q = qp.parse("\"on\"^1.0");
- assertNotNull(q);
-
- q = QueryParser.parse("the^3", "field", new StandardAnalyzer());
- assertNotNull(q);
- }
-
- public void testException() throws Exception {
- try {
- assertQueryEquals("\"some phrase", null, "abc");
- fail("ParseException expected, not thrown");
- } catch (ParseException expected) {
- }
- }
-
- public void testCustomQueryParserWildcard() {
- try {
- new QPTestParser("contents", new WhitespaceAnalyzer()).parse("a?t");
- } catch (ParseException expected) {
- return;
- }
- fail("Wildcard queries should not be allowed");
- }
-
- public void testCustomQueryParserFuzzy() throws Exception {
- try {
- new QPTestParser("contents", new WhitespaceAnalyzer()).parse("xunit~");
- } catch (ParseException expected) {
- return;
- }
- fail("Fuzzy queries should not be allowed");
- }
-
- public void testBooleanQuery() throws Exception {
- BooleanQuery.setMaxClauseCount(2);
- try {
- QueryParser.parse("one two three", "field", new WhitespaceAnalyzer());
- fail("ParseException expected due to too many boolean clauses");
- } catch (ParseException expected) {
- // too many boolean clauses, so ParseException is expected
- }
- }
-
- public void tearDown() {
- BooleanQuery.setMaxClauseCount(originalMaxClauses);
- }
-
-}
Index: src/test-deprecated/org/apache/lucene/analysis/TestPerFieldAnalzyerWrapper.java
===================================================================
--- src/test-deprecated/org/apache/lucene/analysis/TestPerFieldAnalzyerWrapper.java (revision 386892)
+++ src/test-deprecated/org/apache/lucene/analysis/TestPerFieldAnalzyerWrapper.java (working copy)
@@ -1,43 +0,0 @@
-package org.apache.lucene.analysis;
-
-import junit.framework.TestCase;
-import java.io.StringReader;
-
-/**
- * Copyright 2004 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class TestPerFieldAnalzyerWrapper extends TestCase {
- public void testPerField() throws Exception {
- String text = "Qwerty";
- PerFieldAnalyzerWrapper analyzer =
- new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer());
- analyzer.addAnalyzer("special", new SimpleAnalyzer());
-
- TokenStream tokenStream = analyzer.tokenStream("field",
- new StringReader(text));
- Token token = tokenStream.next();
- assertEquals("WhitespaceAnalyzer does not lowercase",
- "Qwerty",
- token.termText());
-
- tokenStream = analyzer.tokenStream("special",
- new StringReader(text));
- token = tokenStream.next();
- assertEquals("SimpleAnalyzer lowercases",
- "qwerty",
- token.termText());
- }
-}
Index: src/test-deprecated/org/apache/lucene/analysis/TestAnalyzers.java
===================================================================
--- src/test-deprecated/org/apache/lucene/analysis/TestAnalyzers.java (revision 386892)
+++ src/test-deprecated/org/apache/lucene/analysis/TestAnalyzers.java (working copy)
@@ -1,92 +0,0 @@
-package org.apache.lucene.analysis;
-
-/**
- * Copyright 2004 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.*;
-import junit.framework.*;
-
-import org.apache.lucene.*;
-import org.apache.lucene.analysis.*;
-
-public class TestAnalyzers extends TestCase {
-
- public TestAnalyzers(String name) {
- super(name);
- }
-
- public void assertAnalyzesTo(Analyzer a,
- String input,
- String[] output) throws Exception {
- TokenStream ts = a.tokenStream("dummy", new StringReader(input));
- for (int i=0; iTestBitVector tests the BitVector, obviously.
- *
- * @author "Peter Mularien"