Index: src/test/org/apache/lucene/store/TestLockFactory.java
===================================================================
--- src/test/org/apache/lucene/store/TestLockFactory.java (revision 689935)
+++ src/test/org/apache/lucene/store/TestLockFactory.java (working copy)
@@ -19,8 +19,10 @@
import java.io.File;
import java.io.IOException;
-import java.util.Enumeration;
-import java.util.Hashtable;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
@@ -60,8 +62,8 @@
assertTrue("# calls to makeLock is 0 (after instantiating IndexWriter)",
lf.makeLockCount >= 1);
- for(Enumeration e = lf.locksCreated.keys(); e.hasMoreElements();) {
- String lockName = (String) e.nextElement();
+ for(Iterator e = lf.locksCreated.keySet().iterator(); e.hasNext();) {
+ String lockName = (String) e.next();
MockLockFactory.MockLock lock = (MockLockFactory.MockLock) lf.locksCreated.get(lockName);
assertTrue("# calls to Lock.obtain is 0 (after instantiating IndexWriter)",
lock.lockAttempts > 0);
@@ -522,7 +524,7 @@
public class MockLockFactory extends LockFactory {
public boolean lockPrefixSet;
- public Hashtable locksCreated = new Hashtable();
+ public Map locksCreated = Collections.synchronizedMap(new HashMap());
public int makeLockCount = 0;
public void setLockPrefix(String lockPrefix) {
Index: src/test/org/apache/lucene/index/TestDoc.java
===================================================================
--- src/test/org/apache/lucene/index/TestDoc.java (revision 689935)
+++ src/test/org/apache/lucene/index/TestDoc.java (working copy)
@@ -186,7 +186,7 @@
merger.closeReaders();
if (useCompoundFile) {
- Vector filesToDelete = merger.createCompoundFile(merged + ".cfs");
+ List filesToDelete = merger.createCompoundFile(merged + ".cfs");
for (Iterator iter = filesToDelete.iterator(); iter.hasNext();)
directory.deleteFile((String) iter.next());
}
Index: src/java/org/apache/lucene/queryParser/MultiFieldQueryParser.java
===================================================================
--- src/java/org/apache/lucene/queryParser/MultiFieldQueryParser.java (revision 689935)
+++ src/java/org/apache/lucene/queryParser/MultiFieldQueryParser.java (working copy)
@@ -17,6 +17,11 @@
* limitations under the License.
*/
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Vector;
+
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
@@ -24,9 +29,6 @@
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
-import java.util.Vector;
-import java.util.Map;
-
/**
* A QueryParser which constructs queries to search multiple fields.
*
@@ -97,7 +99,7 @@
protected Query getFieldQuery(String field, String queryText, int slop) throws ParseException {
if (field == null) {
- Vector clauses = new Vector();
+ List clauses = new ArrayList();
for (int i = 0; i < fields.length; i++) {
Query q = super.getFieldQuery(fields[i], queryText);
if (q != null) {
@@ -139,7 +141,7 @@
protected Query getFuzzyQuery(String field, String termStr, float minSimilarity) throws ParseException
{
if (field == null) {
- Vector clauses = new Vector();
+ List clauses = new ArrayList();
for (int i = 0; i < fields.length; i++) {
clauses.add(new BooleanClause(getFuzzyQuery(fields[i], termStr, minSimilarity),
BooleanClause.Occur.SHOULD));
@@ -152,7 +154,7 @@
protected Query getPrefixQuery(String field, String termStr) throws ParseException
{
if (field == null) {
- Vector clauses = new Vector();
+ List clauses = new ArrayList();
for (int i = 0; i < fields.length; i++) {
clauses.add(new BooleanClause(getPrefixQuery(fields[i], termStr),
BooleanClause.Occur.SHOULD));
@@ -164,7 +166,7 @@
protected Query getWildcardQuery(String field, String termStr) throws ParseException {
if (field == null) {
- Vector clauses = new Vector();
+ List clauses = new ArrayList();
for (int i = 0; i < fields.length; i++) {
clauses.add(new BooleanClause(getWildcardQuery(fields[i], termStr),
BooleanClause.Occur.SHOULD));
@@ -177,7 +179,7 @@
protected Query getRangeQuery(String field, String part1, String part2, boolean inclusive) throws ParseException {
if (field == null) {
- Vector clauses = new Vector();
+ List clauses = new ArrayList();
for (int i = 0; i < fields.length; i++) {
clauses.add(new BooleanClause(getRangeQuery(fields[i], part1, part2, inclusive),
BooleanClause.Occur.SHOULD));
Index: src/java/org/apache/lucene/queryParser/QueryParser.java
===================================================================
--- src/java/org/apache/lucene/queryParser/QueryParser.java (revision 689935)
+++ src/java/org/apache/lucene/queryParser/QueryParser.java (working copy)
@@ -164,7 +164,7 @@
public Query parse(String query) throws ParseException {
ReInit(new FastCharStream(new StringReader(query)));
try {
- // TopLevelQuery is a Query followed by the end-of-input (EOF)
+ // TopLevelQuery is a Query followed by the end-of-input (EOF)
Query res = TopLevelQuery(field);
return res!=null ? res : new BooleanQuery();
}
@@ -342,7 +342,6 @@
return useOldRangeQuery;
}
-
/**
* Set locale used by date range parsing.
*/
@@ -412,13 +411,20 @@
return resolution;
}
+ /**
+ * @deprecated use {@link #addClause(List, int, int, Query)} instead.
+ */
protected void addClause(Vector clauses, int conj, int mods, Query q) {
+ addClause((List) clauses, conj, mods, q);
+ }
+
+ protected void addClause(List clauses, int conj, int mods, Query q) {
boolean required, prohibited;
// If this term is introduced by AND, make the preceding term required,
// unless it's already prohibited
if (clauses.size() > 0 && conj == CONJ_AND) {
- BooleanClause c = (BooleanClause) clauses.elementAt(clauses.size()-1);
+ BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
if (!c.isProhibited())
c.setOccur(BooleanClause.Occur.MUST);
}
@@ -428,7 +434,7 @@
// unless it's prohibited (that means we leave -a OR b but +a OR b-->a OR b)
// notice if the input is a OR b, first term is parsed as required; without
// this modification a OR b would parsed as +a OR b
- BooleanClause c = (BooleanClause) clauses.elementAt(clauses.size()-1);
+ BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
if (!c.isProhibited())
c.setOccur(BooleanClause.Occur.SHOULD);
}
@@ -453,11 +459,11 @@
required = (!prohibited && conj != CONJ_OR);
}
if (required && !prohibited)
- clauses.addElement(new BooleanClause(q, BooleanClause.Occur.MUST));
+ clauses.add(new BooleanClause(q, BooleanClause.Occur.MUST));
else if (!required && !prohibited)
- clauses.addElement(new BooleanClause(q, BooleanClause.Occur.SHOULD));
+ clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
else if (!required && prohibited)
- clauses.addElement(new BooleanClause(q, BooleanClause.Occur.MUST_NOT));
+ clauses.add(new BooleanClause(q, BooleanClause.Occur.MUST_NOT));
else
throw new RuntimeException("Clause cannot be both required and prohibited");
}
@@ -471,7 +477,7 @@
// PhraseQuery, or nothing based on the term count
TokenStream source = analyzer.tokenStream(field, new StringReader(queryText));
- Vector v = new Vector();
+ List list = new ArrayList();
final org.apache.lucene.analysis.Token reusableToken = new org.apache.lucene.analysis.Token();
org.apache.lucene.analysis.Token nextToken;
int positionCount = 0;
@@ -486,7 +492,7 @@
}
if (nextToken == null)
break;
- v.addElement(nextToken.clone());
+ list.add(nextToken.clone());
if (nextToken.getPositionIncrement() != 0)
positionCount += nextToken.getPositionIncrement();
else
@@ -499,18 +505,18 @@
// ignore
}
- if (v.size() == 0)
+ if (list.size() == 0)
return null;
- else if (v.size() == 1) {
- nextToken = (org.apache.lucene.analysis.Token) v.elementAt(0);
+ else if (list.size() == 1) {
+ nextToken = (org.apache.lucene.analysis.Token) list.get(0);
return new TermQuery(new Term(field, nextToken.term()));
} else {
if (severalTokensAtSamePosition) {
if (positionCount == 1) {
// no phrase query:
BooleanQuery q = new BooleanQuery(true);
- for (int i = 0; i < v.size(); i++) {
- nextToken = (org.apache.lucene.analysis.Token) v.elementAt(i);
+ for (int i = 0; i < list.size(); i++) {
+ nextToken = (org.apache.lucene.analysis.Token) list.get(i);
TermQuery currentQuery = new TermQuery(
new Term(field, nextToken.term()));
q.add(currentQuery, BooleanClause.Occur.SHOULD);
@@ -523,8 +529,8 @@
mpq.setSlop(phraseSlop);
List multiTerms = new ArrayList();
int position = -1;
- for (int i = 0; i < v.size(); i++) {
- nextToken = (org.apache.lucene.analysis.Token) v.elementAt(i);
+ for (int i = 0; i < list.size(); i++) {
+ nextToken = (org.apache.lucene.analysis.Token) list.get(i);
if (nextToken.getPositionIncrement() > 0 && multiTerms.size() > 0) {
if (enablePositionIncrements) {
mpq.add((Term[])multiTerms.toArray(new Term[0]),position);
@@ -548,8 +554,8 @@
PhraseQuery pq = new PhraseQuery();
pq.setSlop(phraseSlop);
int position = -1;
- for (int i = 0; i < v.size(); i++) {
- nextToken = (org.apache.lucene.analysis.Token) v.elementAt(i);
+ for (int i = 0; i < list.size(); i++) {
+ nextToken = (org.apache.lucene.analysis.Token) list.get(i);
if (enablePositionIncrements) {
position += nextToken.getPositionIncrement();
pq.add(new Term(field, nextToken.term()),position);
@@ -630,7 +636,7 @@
if(useOldRangeQuery)
{
- return new RangeQuery(new Term(field, part1),
+ return new RangeQuery(new Term(field, part1),
new Term(field, part2),
inclusive);
}
@@ -647,13 +653,31 @@
* Can be overridden by extending classes, to modify query being
* returned.
*
- * @param clauses Vector that contains {@link BooleanClause} instances
+ * @param clauses List that contains {@link BooleanClause} instances
* to join.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
+ * @deprecated use {@link #getBooleanQuery(List)} instead
*/
protected Query getBooleanQuery(Vector clauses) throws ParseException {
+ return getBooleanQuery((List) clauses, false);
+ }
+
+ /**
+ * Factory method for generating query, given a set of clauses.
+ * By default creates a boolean query composed of clauses passed in.
+ *
+ * Can be overridden by extending classes, to modify query being
+ * returned.
+ *
+ * @param clauses List that contains {@link BooleanClause} instances
+ * to join.
+ *
+ * @return Resulting {@link Query} object.
+ * @exception ParseException thrown in overridden method to disallow
+ */
+ protected Query getBooleanQuery(List clauses) throws ParseException {
return getBooleanQuery(clauses, false);
}
@@ -664,22 +688,43 @@
* Can be overridden by extending classes, to modify query being
* returned.
*
- * @param clauses Vector that contains {@link BooleanClause} instances
+ * @param clauses List that contains {@link BooleanClause} instances
* to join.
* @param disableCoord true if coord scoring should be disabled.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
+ * @deprecated use {@link #getBooleanQuery(List, boolean)} instead
*/
protected Query getBooleanQuery(Vector clauses, boolean disableCoord)
throws ParseException
{
+ return getBooleanQuery((List) clauses, disableCoord);
+ }
+
+ /**
+ * Factory method for generating query, given a set of clauses.
+ * By default creates a boolean query composed of clauses passed in.
+ *
+ * Can be overridden by extending classes, to modify query being
+ * returned.
+ *
+ * @param clauses List that contains {@link BooleanClause} instances
+ * to join.
+ * @param disableCoord true if coord scoring should be disabled.
+ *
+ * @return Resulting {@link Query} object.
+ * @exception ParseException thrown in overridden method to disallow
+ */
+ protected Query getBooleanQuery(List clauses, boolean disableCoord)
+ throws ParseException
+ {
if (clauses.size()==0) {
return null; // all clause words were filtered away by the analyzer.
}
BooleanQuery query = new BooleanQuery(disableCoord);
for (int i = 0; i < clauses.size(); i++) {
- query.add((BooleanClause)clauses.elementAt(i));
+ query.add((BooleanClause)clauses.get(i));
}
return query;
}
@@ -779,7 +824,7 @@
* removed, or kept only once if there was a double escape.
*
* Supports escaped unicode characters, e. g. translates
- * A to A.
+ * \\u0041 to A.
*
*/
private String discardEscapeChar(String input) throws ParseException {
@@ -963,7 +1008,7 @@
}
final public Query Query(String field) throws ParseException {
- Vector clauses = new Vector();
+ List clauses = new ArrayList();
Query q, firstQuery=null;
int conj, mods;
mods = Modifiers();
Index: src/java/org/apache/lucene/queryParser/QueryParser.jj
===================================================================
--- src/java/org/apache/lucene/queryParser/QueryParser.jj (revision 689935)
+++ src/java/org/apache/lucene/queryParser/QueryParser.jj (working copy)
@@ -143,7 +143,7 @@
private Operator operator = OR_OPERATOR;
boolean lowercaseExpandedTerms = true;
- boolean useOldRangeQuery= false;
+ boolean useOldRangeQuery= false;
boolean allowLeadingWildcard = false;
boolean enablePositionIncrements = false;
@@ -188,7 +188,7 @@
public Query parse(String query) throws ParseException {
ReInit(new FastCharStream(new StringReader(query)));
try {
- // TopLevelQuery is a Query followed by the end-of-input (EOF)
+ // TopLevelQuery is a Query followed by the end-of-input (EOF)
Query res = TopLevelQuery(field);
return res!=null ? res : new BooleanQuery();
}
@@ -203,28 +203,28 @@
throw new ParseException("Cannot parse '" +query+ "': too many boolean clauses");
}
}
-
+
/**
* @return Returns the analyzer.
*/
public Analyzer getAnalyzer() {
return analyzer;
}
-
+
/**
* @return Returns the field.
*/
public String getField() {
return field;
}
-
+
/**
* Get the minimal similarity for fuzzy queries.
*/
public float getFuzzyMinSim() {
return fuzzyMinSim;
}
-
+
/**
* Set the minimum similarity for fuzzy queries.
* Default is 0.5f.
@@ -232,7 +232,7 @@
public void setFuzzyMinSim(float fuzzyMinSim) {
this.fuzzyMinSim = fuzzyMinSim;
}
-
+
/**
* Get the prefix length for fuzzy queries.
* @return Returns the fuzzyPrefixLength.
@@ -240,7 +240,7 @@
public int getFuzzyPrefixLength() {
return fuzzyPrefixLength;
}
-
+
/**
* Set the prefix length for fuzzy queries. Default is 0.
* @param fuzzyPrefixLength The fuzzyPrefixLength to set.
@@ -344,7 +344,7 @@
public boolean getLowercaseExpandedTerms() {
return lowercaseExpandedTerms;
}
-
+
/**
* By default QueryParser uses new ConstantScoreRangeQuery in preference to RangeQuery
* for range queries. This implementation is generally preferable because it
@@ -365,7 +365,6 @@
public boolean getUseOldRangeQuery() {
return useOldRangeQuery;
}
-
/**
* Set locale used by date range parsing.
@@ -391,7 +390,7 @@
public void setDateResolution(DateTools.Resolution dateResolution) {
this.dateResolution = dateResolution;
}
-
+
/**
* Sets the date resolution used by RangeQueries for a specific field.
*
@@ -402,12 +401,12 @@
if (fieldName == null) {
throw new IllegalArgumentException("Field cannot be null.");
}
-
+
if (fieldToDateResolution == null) {
// lazily initialize HashMap
fieldToDateResolution = new HashMap();
}
-
+
fieldToDateResolution.put(fieldName, dateResolution);
}
@@ -421,28 +420,35 @@
if (fieldName == null) {
throw new IllegalArgumentException("Field cannot be null.");
}
-
+
if (fieldToDateResolution == null) {
// no field specific date resolutions set; return default date resolution instead
return this.dateResolution;
}
-
+
DateTools.Resolution resolution = (DateTools.Resolution) fieldToDateResolution.get(fieldName);
if (resolution == null) {
// no date resolutions set for the given field; return default date resolution instead
resolution = this.dateResolution;
}
-
+
return resolution;
}
+ /**
+ * @deprecated use {@link #addClause(List, int, int, Query)} instead.
+ */
protected void addClause(Vector clauses, int conj, int mods, Query q) {
+ addClause((List) clauses, conj, mods, q);
+ }
+
+ protected void addClause(List clauses, int conj, int mods, Query q) {
boolean required, prohibited;
// If this term is introduced by AND, make the preceding term required,
// unless it's already prohibited
if (clauses.size() > 0 && conj == CONJ_AND) {
- BooleanClause c = (BooleanClause) clauses.elementAt(clauses.size()-1);
+ BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
if (!c.isProhibited())
c.setOccur(BooleanClause.Occur.MUST);
}
@@ -452,7 +458,7 @@
// unless it's prohibited (that means we leave -a OR b but +a OR b-->a OR b)
// notice if the input is a OR b, first term is parsed as required; without
// this modification a OR b would parsed as +a OR b
- BooleanClause c = (BooleanClause) clauses.elementAt(clauses.size()-1);
+ BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
if (!c.isProhibited())
c.setOccur(BooleanClause.Occur.SHOULD);
}
@@ -477,11 +483,11 @@
required = (!prohibited && conj != CONJ_OR);
}
if (required && !prohibited)
- clauses.addElement(new BooleanClause(q, BooleanClause.Occur.MUST));
+ clauses.add(new BooleanClause(q, BooleanClause.Occur.MUST));
else if (!required && !prohibited)
- clauses.addElement(new BooleanClause(q, BooleanClause.Occur.SHOULD));
+ clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
else if (!required && prohibited)
- clauses.addElement(new BooleanClause(q, BooleanClause.Occur.MUST_NOT));
+ clauses.add(new BooleanClause(q, BooleanClause.Occur.MUST_NOT));
else
throw new RuntimeException("Clause cannot be both required and prohibited");
}
@@ -495,7 +501,7 @@
// PhraseQuery, or nothing based on the term count
TokenStream source = analyzer.tokenStream(field, new StringReader(queryText));
- Vector v = new Vector();
+ List list = new ArrayList();
final org.apache.lucene.analysis.Token reusableToken = new org.apache.lucene.analysis.Token();
org.apache.lucene.analysis.Token nextToken;
int positionCount = 0;
@@ -510,7 +516,7 @@
}
if (nextToken == null)
break;
- v.addElement(nextToken.clone());
+ list.add(nextToken.clone());
if (nextToken.getPositionIncrement() != 0)
positionCount += nextToken.getPositionIncrement();
else
@@ -523,18 +529,18 @@
// ignore
}
- if (v.size() == 0)
+ if (list.size() == 0)
return null;
- else if (v.size() == 1) {
- nextToken = (org.apache.lucene.analysis.Token) v.elementAt(0);
+ else if (list.size() == 1) {
+ nextToken = (org.apache.lucene.analysis.Token) list.get(0);
return new TermQuery(new Term(field, nextToken.term()));
} else {
if (severalTokensAtSamePosition) {
if (positionCount == 1) {
// no phrase query:
BooleanQuery q = new BooleanQuery(true);
- for (int i = 0; i < v.size(); i++) {
- nextToken = (org.apache.lucene.analysis.Token) v.elementAt(i);
+ for (int i = 0; i < list.size(); i++) {
+ nextToken = (org.apache.lucene.analysis.Token) list.get(i);
TermQuery currentQuery = new TermQuery(
new Term(field, nextToken.term()));
q.add(currentQuery, BooleanClause.Occur.SHOULD);
@@ -544,11 +550,11 @@
else {
// phrase query:
MultiPhraseQuery mpq = new MultiPhraseQuery();
- mpq.setSlop(phraseSlop);
+ mpq.setSlop(phraseSlop);
List multiTerms = new ArrayList();
int position = -1;
- for (int i = 0; i < v.size(); i++) {
- nextToken = (org.apache.lucene.analysis.Token) v.elementAt(i);
+ for (int i = 0; i < list.size(); i++) {
+ nextToken = (org.apache.lucene.analysis.Token) list.get(i);
if (nextToken.getPositionIncrement() > 0 && multiTerms.size() > 0) {
if (enablePositionIncrements) {
mpq.add((Term[])multiTerms.toArray(new Term[0]),position);
@@ -572,8 +578,8 @@
PhraseQuery pq = new PhraseQuery();
pq.setSlop(phraseSlop);
int position = -1;
- for (int i = 0; i < v.size(); i++) {
- t = (org.apache.lucene.analysis.Token) v.elementAt(i);
+ for (int i = 0; i < list.size(); i++) {
+ nextToken = (org.apache.lucene.analysis.Token) list.get(i);
if (enablePositionIncrements) {
position += nextToken.getPositionIncrement();
pq.add(new Term(field, nextToken.term()),position);
@@ -594,8 +600,8 @@
*
* @exception ParseException throw in overridden method to disallow
*/
- protected Query getFieldQuery(String field, String queryText, int slop)
- throws ParseException {
+ protected Query getFieldQuery(String field, String queryText, int slop)
+ throws ParseException {
Query query = getFieldQuery(field, queryText);
if (query instanceof PhraseQuery) {
@@ -654,7 +660,7 @@
if(useOldRangeQuery)
{
- return new RangeQuery(new Term(field, part1),
+ return new RangeQuery(new Term(field, part1),
new Term(field, part2),
inclusive);
}
@@ -671,13 +677,31 @@
* Can be overridden by extending classes, to modify query being
* returned.
*
- * @param clauses Vector that contains {@link BooleanClause} instances
+ * @param clauses List that contains {@link BooleanClause} instances
* to join.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
+ * @deprecated use {@link #getBooleanQuery(List)} instead
*/
protected Query getBooleanQuery(Vector clauses) throws ParseException {
+ return getBooleanQuery((List) clauses, false);
+ }
+
+ /**
+ * Factory method for generating query, given a set of clauses.
+ * By default creates a boolean query composed of clauses passed in.
+ *
+ * Can be overridden by extending classes, to modify query being
+ * returned.
+ *
+ * @param clauses List that contains {@link BooleanClause} instances
+ * to join.
+ *
+ * @return Resulting {@link Query} object.
+ * @exception ParseException thrown in overridden method to disallow
+ */
+ protected Query getBooleanQuery(List clauses) throws ParseException {
return getBooleanQuery(clauses, false);
}
@@ -688,22 +712,43 @@
* Can be overridden by extending classes, to modify query being
* returned.
*
- * @param clauses Vector that contains {@link BooleanClause} instances
+ * @param clauses List that contains {@link BooleanClause} instances
* to join.
* @param disableCoord true if coord scoring should be disabled.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
+ * @deprecated use {@link #getBooleanQuery(List, boolean)} instead
*/
protected Query getBooleanQuery(Vector clauses, boolean disableCoord)
throws ParseException
{
+ return getBooleanQuery((List) clauses, disableCoord);
+ }
+
+ /**
+ * Factory method for generating query, given a set of clauses.
+ * By default creates a boolean query composed of clauses passed in.
+ *
+ * Can be overridden by extending classes, to modify query being
+ * returned.
+ *
+ * @param clauses List that contains {@link BooleanClause} instances
+ * to join.
+ * @param disableCoord true if coord scoring should be disabled.
+ *
+ * @return Resulting {@link Query} object.
+ * @exception ParseException thrown in overridden method to disallow
+ */
+ protected Query getBooleanQuery(List clauses, boolean disableCoord)
+ throws ParseException
+ {
if (clauses.size()==0) {
return null; // all clause words were filtered away by the analyzer.
}
BooleanQuery query = new BooleanQuery(disableCoord);
for (int i = 0; i < clauses.size(); i++) {
- query.add((BooleanClause)clauses.elementAt(i));
+ query.add((BooleanClause)clauses.get(i));
}
return query;
}
@@ -777,7 +822,6 @@
return new PrefixQuery(t);
}
-
/**
* Factory method for generating a query (similar to
* {@link #getWildcardQuery}). Called when parser parses
@@ -803,29 +847,29 @@
* removed, or kept only once if there was a double escape.
*
* Supports escaped unicode characters, e. g. translates
- * \u0041 to A.
+ * \\u0041 to A.
*
*/
private String discardEscapeChar(String input) throws ParseException {
// Create char array to hold unescaped char sequence
char[] output = new char[input.length()];
-
+
// The length of the output can be less than the input
// due to discarded escape chars. This variable holds
// the actual length of the output
int length = 0;
-
+
// We remember whether the last processed character was
// an escape character
boolean lastCharWasEscapeChar = false;
-
+
// The multiplier the current unicode digit must be multiplied with.
// E. g. the first digit must be multiplied with 16^3, the second with 16^2...
int codePointMultiplier = 0;
-
+
// Used to calculate the codepoint of the escaped unicode character
int codePoint = 0;
-
+
for (int i = 0; i < input.length(); i++) {
char curChar = input.charAt(i);
if (codePointMultiplier > 0) {
@@ -839,9 +883,9 @@
if (curChar == 'u') {
// found an escaped unicode character
codePointMultiplier = 16 * 16 * 16;
- } else {
+ } else {
// this character was escaped
- output[length] = curChar;
+ output[length] = curChar;
length++;
}
lastCharWasEscapeChar = false;
@@ -854,18 +898,18 @@
}
}
}
-
+
if (codePointMultiplier > 0) {
throw new ParseException("Truncated unicode escape sequence.");
}
-
+
if (lastCharWasEscapeChar) {
throw new ParseException("Term can not end with escape character.");
}
-
+
return new String(output, 0, length);
}
-
+
/** Returns the numeric value of the hexadecimal character */
private static final int hexToInt(char c) throws ParseException {
if ('0' <= c && c <= '9') {
@@ -878,7 +922,7 @@
throw new ParseException("None-hex character in unicode escape sequence: " + c);
}
}
-
+
/**
* Returns a String where those characters that QueryParser
* expects to be escaped are escaped by a preceding \.
@@ -1015,7 +1059,7 @@
Query Query(String field) :
{
- Vector clauses = new Vector();
+ List clauses = new ArrayList();
Query q, firstQuery=null;
int conj, mods;
}
Index: src/java/org/apache/lucene/queryParser/QueryParserTokenManager.java
===================================================================
--- src/java/org/apache/lucene/queryParser/QueryParserTokenManager.java (revision 689935)
+++ src/java/org/apache/lucene/queryParser/QueryParserTokenManager.java (working copy)
@@ -1,5 +1,6 @@
/* Generated By:JavaCC: Do not edit this line. QueryParserTokenManager.java */
package org.apache.lucene.queryParser;
+
import java.io.IOException;
import java.io.StringReader;
import java.text.DateFormat;
@@ -10,7 +11,7 @@
import java.util.List;
import java.util.Locale;
import java.util.Map;
-import java.util.Vector;
+
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.DateField;
Index: src/java/org/apache/lucene/search/PhraseQuery.java
===================================================================
--- src/java/org/apache/lucene/search/PhraseQuery.java (revision 689935)
+++ src/java/org/apache/lucene/search/PhraseQuery.java (working copy)
@@ -19,7 +19,6 @@
import java.io.IOException;
import java.util.Set;
-import java.util.Vector;
import java.util.ArrayList;
import org.apache.lucene.index.Term;
Index: src/java/org/apache/lucene/index/MultiReader.java
===================================================================
--- src/java/org/apache/lucene/index/MultiReader.java (revision 689935)
+++ src/java/org/apache/lucene/index/MultiReader.java (working copy)
@@ -17,13 +17,14 @@
* limitations under the License.
*/
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.FieldSelector;
-
import java.io.IOException;
import java.util.Collection;
-import java.util.Hashtable;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.index.MultiSegmentReader.MultiTermDocs;
import org.apache.lucene.index.MultiSegmentReader.MultiTermEnum;
import org.apache.lucene.index.MultiSegmentReader.MultiTermPositions;
@@ -36,7 +37,7 @@
protected IndexReader[] subReaders;
private int[] starts; // 1st docno for each segment
private boolean[] decrefOnClose; // remember which subreaders to decRef on close
- private Hashtable normsCache = new Hashtable();
+ private Map normsCache = new HashMap();
private int maxDoc = 0;
private int numDocs = -1;
private boolean hasDeletions = false;
@@ -288,7 +289,9 @@
protected void doSetNorm(int n, String field, byte value)
throws CorruptIndexException, IOException {
- normsCache.remove(field); // clear cache
+ synchronized (normsCache) {
+ normsCache.remove(field); // clear cache
+ }
int i = readerIndex(n); // find segment num
subReaders[i].setNorm(n-starts[i], field, value); // dispatch
}
Index: src/java/org/apache/lucene/index/SegmentMerger.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentMerger.java (revision 689935)
+++ src/java/org/apache/lucene/index/SegmentMerger.java (working copy)
@@ -17,17 +17,18 @@
* limitations under the License.
*/
-import java.util.Vector;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
import java.util.Iterator;
-import java.util.Collection;
-import java.io.IOException;
+import java.util.List;
+import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
-import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelectorResult;
import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.store.IndexInput;
/**
* The SegmentMerger class combines two or more Segments, represented by an IndexReader ({@link #add},
@@ -49,7 +50,7 @@
private String segment;
private int termIndexInterval = IndexWriter.DEFAULT_TERM_INDEX_INTERVAL;
- private Vector readers = new Vector();
+ private List readers = new ArrayList();
private FieldInfos fieldInfos;
private int mergedDocs;
@@ -93,7 +94,7 @@
* @param reader
*/
final void add(IndexReader reader) {
- readers.addElement(reader);
+ readers.add(reader);
}
/**
@@ -102,7 +103,7 @@
* @return The ith reader to be merged
*/
final IndexReader segmentReader(int i) {
- return (IndexReader) readers.elementAt(i);
+ return (IndexReader) readers.get(i);
}
/**
@@ -152,18 +153,18 @@
*/
final void closeReaders() throws IOException {
for (int i = 0; i < readers.size(); i++) { // close readers
- IndexReader reader = (IndexReader) readers.elementAt(i);
+ IndexReader reader = (IndexReader) readers.get(i);
reader.close();
}
}
- final Vector createCompoundFile(String fileName)
+ final List createCompoundFile(String fileName)
throws IOException {
CompoundFileWriter cfsWriter =
new CompoundFileWriter(directory, fileName, checkAbort);
- Vector files =
- new Vector(IndexFileNames.COMPOUND_EXTENSIONS.length + 1);
+ List files =
+ new ArrayList(IndexFileNames.COMPOUND_EXTENSIONS.length + 1);
// Basic files
for (int i = 0; i < IndexFileNames.COMPOUND_EXTENSIONS.length; i++) {
@@ -229,7 +230,7 @@
// FieldInfos, then we can do a bulk copy of the
// stored fields:
for (int i = 0; i < readers.size(); i++) {
- IndexReader reader = (IndexReader) readers.elementAt(i);
+ IndexReader reader = (IndexReader) readers.get(i);
if (reader instanceof SegmentReader) {
SegmentReader segmentReader = (SegmentReader) reader;
boolean same = true;
@@ -261,14 +262,14 @@
// name -> number mapping are the same. So, we start
// with the fieldInfos of the last segment in this
// case, to keep that numbering.
- final SegmentReader sr = (SegmentReader) readers.elementAt(readers.size()-1);
+ final SegmentReader sr = (SegmentReader) readers.get(readers.size()-1);
fieldInfos = (FieldInfos) sr.fieldInfos.clone();
} else {
fieldInfos = new FieldInfos(); // merge field names
}
for (int i = 0; i < readers.size(); i++) {
- IndexReader reader = (IndexReader) readers.elementAt(i);
+ IndexReader reader = (IndexReader) readers.get(i);
if (reader instanceof SegmentReader) {
SegmentReader segmentReader = (SegmentReader) reader;
for (int j = 0; j < segmentReader.getFieldInfos().size(); j++) {
@@ -307,7 +308,7 @@
try {
for (int i = 0; i < readers.size(); i++) {
- final IndexReader reader = (IndexReader) readers.elementAt(i);
+ final IndexReader reader = (IndexReader) readers.get(i);
final SegmentReader matchingSegmentReader = matchingSegmentReaders[i];
final FieldsReader matchingFieldsReader;
final boolean hasMatchingReader;
@@ -385,7 +386,7 @@
// are no deletions in any of these segments, so we
// just sum numDocs() of each segment to get total docCount
for (int i = 0; i < readers.size(); i++)
- docCount += ((IndexReader) readers.elementAt(i)).numDocs();
+ docCount += ((IndexReader) readers.get(i)).numDocs();
return docCount;
}
@@ -418,7 +419,7 @@
hasMatchingReader = false;
matchingVectorsReader = null;
}
- IndexReader reader = (IndexReader) readers.elementAt(r);
+ IndexReader reader = (IndexReader) readers.get(r);
final boolean hasDeletions = reader.hasDeletions();
int maxDoc = reader.maxDoc();
for (int docNum = 0; docNum < maxDoc;) {
@@ -510,7 +511,7 @@
int base = 0;
final int readerCount = readers.size();
for (int i = 0; i < readerCount; i++) {
- IndexReader reader = (IndexReader) readers.elementAt(i);
+ IndexReader reader = (IndexReader) readers.get(i);
TermEnum termEnum = reader.terms();
SegmentMergeInfo smi = new SegmentMergeInfo(base, termEnum, reader);
int[] docMap = smi.getDocMap();
@@ -750,7 +751,7 @@
output.writeBytes(NORMS_HEADER,NORMS_HEADER.length);
}
for (int j = 0; j < readers.size(); j++) {
- IndexReader reader = (IndexReader) readers.elementAt(j);
+ IndexReader reader = (IndexReader) readers.get(j);
int maxDoc = reader.maxDoc();
if (normBuffer == null || normBuffer.length < maxDoc) {
// the buffer is too small for the current segment
Index: src/java/org/apache/lucene/index/SegmentReader.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentReader.java (revision 689935)
+++ src/java/org/apache/lucene/index/SegmentReader.java (working copy)
@@ -18,14 +18,15 @@
*/
import java.io.IOException;
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
+import java.util.List;
import java.util.Map;
import java.util.Set;
-import java.util.Vector;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
@@ -695,8 +696,8 @@
undeleteAll = true;
}
- Vector files() throws IOException {
- return new Vector(si.files());
+ List files() throws IOException {
+ return new ArrayList(si.files());
}
public TermEnum terms() {
Index: src/java/org/apache/lucene/index/MultiSegmentReader.java
===================================================================
--- src/java/org/apache/lucene/index/MultiSegmentReader.java (revision 689935)
+++ src/java/org/apache/lucene/index/MultiSegmentReader.java (working copy)
@@ -17,26 +17,26 @@
* limitations under the License.
*/
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.FieldSelector;
-import org.apache.lucene.store.Directory;
-
import java.io.IOException;
import java.util.Collection;
+import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
-import java.util.Hashtable;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldSelector;
+import org.apache.lucene.store.Directory;
+
/**
* An IndexReader which reads indexes with multiple segments.
*/
class MultiSegmentReader extends DirectoryIndexReader {
protected SegmentReader[] subReaders;
private int[] starts; // 1st docno for each segment
- private Hashtable normsCache = new Hashtable();
+ private Map normsCache = new HashMap();
private int maxDoc = 0;
private int numDocs = -1;
private boolean hasDeletions = false;
@@ -149,17 +149,18 @@
// try to copy unchanged norms from the old normsCache to the new one
if (oldNormsCache != null) {
- Iterator it = oldNormsCache.keySet().iterator();
+ Iterator it = oldNormsCache.entrySet().iterator();
while (it.hasNext()) {
- String field = (String) it.next();
+ Map.Entry entry = (Map.Entry) it.next();
+ String field = (String) entry.getKey();
if (!hasNorms(field)) {
continue;
}
-
- byte[] oldBytes = (byte[]) oldNormsCache.get(field);
-
+
+ byte[] oldBytes = (byte[]) entry.getValue();
+
byte[] bytes = new byte[maxDoc()];
-
+
for (int i = 0; i < subReaders.length; i++) {
Integer oldReaderIndex = ((Integer) segmentReaders.get(subReaders[i].getSegmentName()));
@@ -175,7 +176,7 @@
subReaders[i].norms(field, bytes, starts[i]);
}
}
-
+
normsCache.put(field, bytes); // update cache
}
}
@@ -353,7 +354,9 @@
protected void doSetNorm(int n, String field, byte value)
throws CorruptIndexException, IOException {
- normsCache.remove(field); // clear cache
+ synchronized (normsCache) {
+ normsCache.remove(field); // clear cache
+ }
int i = readerIndex(n); // find segment num
subReaders[i].setNorm(n-starts[i], field, value); // dispatch
}
Index: src/java/org/apache/lucene/index/SegmentInfos.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentInfos.java (revision 689935)
+++ src/java/org/apache/lucene/index/SegmentInfos.java (working copy)
@@ -92,7 +92,7 @@
private static PrintStream infoStream;
public final SegmentInfo info(int i) {
- return (SegmentInfo) elementAt(i);
+ return (SegmentInfo) get(i);
}
/**
@@ -232,7 +232,7 @@
}
for (int i = input.readInt(); i > 0; i--) { // read segmentInfos
- addElement(new SegmentInfo(directory, format, input));
+ add(new SegmentInfo(directory, format, input));
}
if(format >= 0){ // in old format the version number may be at the end of the file
@@ -338,7 +338,7 @@
public Object clone() {
SegmentInfos sis = (SegmentInfos) super.clone();
for(int i=0;i 0 && conj == CONJ_AND) {
- BooleanClause c = (BooleanClause) clauses.elementAt(clauses.size()-1);
+ BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
if (!c.isProhibited())
c.setOccur(BooleanClause.Occur.MUST);
}
@@ -268,7 +275,7 @@
// unless it's prohibited (that means we leave -a OR b but +a OR b-->a OR b)
// notice if the input is a OR b, first term is parsed as required; without
// this modification a OR b would parsed as +a OR b
- BooleanClause c = (BooleanClause) clauses.elementAt(clauses.size()-1);
+ BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
if (!c.isProhibited())
c.setOccur(BooleanClause.Occur.SHOULD);
}
@@ -293,11 +300,11 @@
required = (!prohibited && conj != CONJ_OR);
}
if (required && !prohibited)
- clauses.addElement(new BooleanClause(q, BooleanClause.Occur.MUST));
+ clauses.add(new BooleanClause(q, BooleanClause.Occur.MUST));
else if (!required && !prohibited)
- clauses.addElement(new BooleanClause(q, BooleanClause.Occur.SHOULD));
+ clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
else if (!required && prohibited)
- clauses.addElement(new BooleanClause(q, BooleanClause.Occur.MUST_NOT));
+ clauses.add(new BooleanClause(q, BooleanClause.Occur.MUST_NOT));
else
throw new RuntimeException("Clause cannot be both required and prohibited");
}
@@ -310,7 +317,7 @@
// PhraseQuery, or nothing based on the term count
TokenStream source = analyzer.tokenStream(field, new StringReader(queryText));
- Vector v = new Vector();
+ List list = new ArrayList();
final org.apache.lucene.analysis.Token reusableToken = new org.apache.lucene.analysis.Token();
org.apache.lucene.analysis.Token nextToken;
int positionCount = 0;
@@ -325,7 +332,7 @@
}
if (nextToken == null)
break;
- v.addElement(nextToken.clone());
+ list.add(nextToken.clone());
if (nextToken.getPositionIncrement() == 1)
positionCount++;
else
@@ -338,18 +345,18 @@
// ignore
}
- if (v.size() == 0)
+ if (list.size() == 0)
return null;
- else if (v.size() == 1) {
- nextToken = (org.apache.lucene.analysis.Token) v.elementAt(0);
+ else if (list.size() == 1) {
+ nextToken = (org.apache.lucene.analysis.Token) list.get(0);
return new TermQuery(new Term(field, nextToken.term()));
} else {
if (severalTokensAtSamePosition) {
if (positionCount == 1) {
// no phrase query:
BooleanQuery q = new BooleanQuery();
- for (int i = 0; i < v.size(); i++) {
- nextToken = (org.apache.lucene.analysis.Token) v.elementAt(i);
+ for (int i = 0; i < list.size(); i++) {
+ nextToken = (org.apache.lucene.analysis.Token) list.get(i);
TermQuery currentQuery = new TermQuery(
new Term(field, nextToken.term()));
q.add(currentQuery, BooleanClause.Occur.SHOULD);
@@ -360,8 +367,8 @@
// phrase query:
MultiPhraseQuery mpq = new MultiPhraseQuery();
List multiTerms = new ArrayList();
- for (int i = 0; i < v.size(); i++) {
- nextToken = (org.apache.lucene.analysis.Token) v.elementAt(i);
+ for (int i = 0; i < list.size(); i++) {
+ nextToken = (org.apache.lucene.analysis.Token) list.get(i);
if (nextToken.getPositionIncrement() == 1 && multiTerms.size() > 0) {
mpq.add((Term[])multiTerms.toArray(new Term[0]));
multiTerms.clear();
@@ -375,10 +382,9 @@
else {
PhraseQuery q = new PhraseQuery();
q.setSlop(phraseSlop);
- for (int i = 0; i < v.size(); i++) {
+ for (int i = 0; i < list.size(); i++) {
q.add(new Term(field, ((org.apache.lucene.analysis.Token)
- v.elementAt(i)).term()));
-
+ list.get(i)).term()));
}
return q;
}
@@ -440,14 +446,33 @@
* Can be overridden by extending classes, to modify query being
* returned.
*
- * @param clauses Vector that contains {@link BooleanClause} instances
+ * @param clauses List that contains {@link BooleanClause} instances
* to join.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
+ * @deprecated use {@link #getBooleanQuery(List)} instead
*/
protected Query getBooleanQuery(Vector clauses) throws ParseException
{
+ return getBooleanQuery((List) clauses, false);
+ }
+
+ /**
+ * Factory method for generating query, given a set of clauses.
+ * By default creates a boolean query composed of clauses passed in.
+ *
+ * Can be overridden by extending classes, to modify query being
+ * returned.
+ *
+ * @param clauses List that contains {@link BooleanClause} instances
+ * to join.
+ *
+ * @return Resulting {@link Query} object.
+ * @exception ParseException throw in overridden method to disallow
+ */
+ protected Query getBooleanQuery(List clauses) throws ParseException
+ {
return getBooleanQuery(clauses, false);
}
@@ -458,22 +483,42 @@
* Can be overridden by extending classes, to modify query being
* returned.
*
- * @param clauses Vector that contains {@link BooleanClause} instances
+ * @param clauses List that contains {@link BooleanClause} instances
* to join.
* @param disableCoord true if coord scoring should be disabled.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
+ * @deprecated use {@link #getBooleanQuery(List, boolean)} instead
*/
protected Query getBooleanQuery(Vector clauses, boolean disableCoord)
throws ParseException
{
+ return getBooleanQuery((List) clauses, disableCoord);
+ }
+
+ /**
+ * Factory method for generating query, given a set of clauses.
+ * By default creates a boolean query composed of clauses passed in.
+ *
+ * Can be overridden by extending classes, to modify query being
+ * returned.
+ *
+ * @param clauses List that contains {@link BooleanClause} instances
+ * to join.
+ * @param disableCoord true if coord scoring should be disabled.
+ *
+ * @return Resulting {@link Query} object.
+ * @exception ParseException throw in overridden method to disallow
+ */
+ protected Query getBooleanQuery(List clauses, boolean disableCoord)
+ throws ParseException {
if (clauses == null || clauses.size() == 0)
return null;
BooleanQuery query = new BooleanQuery(disableCoord);
for (int i = 0; i < clauses.size(); i++) {
- query.add((BooleanClause)clauses.elementAt(i));
+ query.add((BooleanClause)clauses.get(i));
}
return query;
}
@@ -675,7 +720,7 @@
}
final public Query Query(String field) throws ParseException {
- Vector clauses = new Vector();
+ List clauses = new ArrayList();
Query q, firstQuery=null;
boolean orPresent = false;
int modifier;
@@ -727,7 +772,7 @@
}
final public Query andExpression(String field) throws ParseException {
- Vector clauses = new Vector();
+ List clauses = new ArrayList();
Query q, firstQuery=null;
int modifier;
q = Clause(field);
@@ -1191,7 +1236,7 @@
return (jj_ntk = jj_nt.kind);
}
- private java.util.Vector jj_expentries = new java.util.Vector();
+ private java.util.List jj_expentries = new java.util.ArrayList();
private int[] jj_expentry;
private int jj_kind = -1;
private int[] jj_lasttokens = new int[100];
@@ -1207,8 +1252,8 @@
jj_expentry[i] = jj_lasttokens[i];
}
boolean exists = false;
- for (java.util.Enumeration e = jj_expentries.elements(); e.hasMoreElements();) {
- int[] oldentry = (int[])(e.nextElement());
+ for (java.util.Iterator e = jj_expentries.iterator(); e.hasNext();) {
+ int[] oldentry = (int[])(e.next());
if (oldentry.length == jj_expentry.length) {
exists = true;
for (int i = 0; i < jj_expentry.length; i++) {
@@ -1220,13 +1265,13 @@
if (exists) break;
}
}
- if (!exists) jj_expentries.addElement(jj_expentry);
+ if (!exists) jj_expentries.add(jj_expentry);
if (pos != 0) jj_lasttokens[(jj_endpos = pos) - 1] = kind;
}
}
public ParseException generateParseException() {
- jj_expentries.removeAllElements();
+ jj_expentries.clear();
boolean[] la1tokens = new boolean[32];
for (int i = 0; i < 32; i++) {
la1tokens[i] = false;
@@ -1248,7 +1293,7 @@
if (la1tokens[i]) {
jj_expentry = new int[1];
jj_expentry[0] = i;
- jj_expentries.addElement(jj_expentry);
+ jj_expentries.add(jj_expentry);
}
}
jj_endpos = 0;
@@ -1256,7 +1301,7 @@
jj_add_error_token(0, 0);
int[][] exptokseq = new int[jj_expentries.size()][];
for (int i = 0; i < jj_expentries.size(); i++) {
- exptokseq[i] = (int[])jj_expentries.elementAt(i);
+ exptokseq[i] = (int[])jj_expentries.get(i);
}
return new ParseException(token, exptokseq, tokenImage);
}
Index: contrib/miscellaneous/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParser.jj
===================================================================
--- contrib/miscellaneous/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParser.jj (revision 689935)
+++ contrib/miscellaneous/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParser.jj (working copy)
@@ -99,7 +99,7 @@
* @author Tatu Saloranta
*/
-public class PrecedenceQueryParser {
+public class PrecedenceQueryParser implements PrecedenceQueryParserConstants {
private static final int CONJ_NONE = 0;
private static final int CONJ_AND = 1;
@@ -166,28 +166,28 @@
throw new ParseException("Too many boolean clauses");
}
}
-
+
/**
* @return Returns the analyzer.
*/
public Analyzer getAnalyzer() {
return analyzer;
}
-
+
/**
* @return Returns the field.
*/
public String getField() {
return field;
}
-
+
/**
* Get the minimal similarity for fuzzy queries.
*/
public float getFuzzyMinSim() {
return fuzzyMinSim;
}
-
+
/**
* Set the minimum similarity for fuzzy queries.
* Default is 0.5f.
@@ -195,7 +195,7 @@
public void setFuzzyMinSim(float fuzzyMinSim) {
this.fuzzyMinSim = fuzzyMinSim;
}
-
+
/**
* Get the prefix length for fuzzy queries.
* @return Returns the fuzzyPrefixLength.
@@ -203,7 +203,7 @@
public int getFuzzyPrefixLength() {
return fuzzyPrefixLength;
}
-
+
/**
* Set the prefix length for fuzzy queries. Default is 0.
* @param fuzzyPrefixLength The fuzzyPrefixLength to set.
@@ -276,13 +276,20 @@
return locale;
}
+ /**
+ * @deprecated use {@link #addClause(List, int, int, Query)} instead.
+ */
protected void addClause(Vector clauses, int conj, int modifier, Query q) {
+ addClause((List) clauses, conj, modifier, q);
+ }
+
+ protected void addClause(List clauses, int conj, int modifier, Query q) {
boolean required, prohibited;
// If this term is introduced by AND, make the preceding term required,
// unless it's already prohibited
if (clauses.size() > 0 && conj == CONJ_AND) {
- BooleanClause c = (BooleanClause) clauses.elementAt(clauses.size()-1);
+ BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
if (!c.isProhibited())
c.setOccur(BooleanClause.Occur.MUST);
}
@@ -292,7 +299,7 @@
// unless it's prohibited (that means we leave -a OR b but +a OR b-->a OR b)
// notice if the input is a OR b, first term is parsed as required; without
// this modification a OR b would parsed as +a OR b
- BooleanClause c = (BooleanClause) clauses.elementAt(clauses.size()-1);
+ BooleanClause c = (BooleanClause) clauses.get(clauses.size()-1);
if (!c.isProhibited())
c.setOccur(BooleanClause.Occur.SHOULD);
}
@@ -317,15 +324,15 @@
required = (!prohibited && conj != CONJ_OR);
}
if (required && !prohibited)
- clauses.addElement(new BooleanClause(q, BooleanClause.Occur.MUST));
+ clauses.add(new BooleanClause(q, BooleanClause.Occur.MUST));
else if (!required && !prohibited)
- clauses.addElement(new BooleanClause(q, BooleanClause.Occur.SHOULD));
+ clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
else if (!required && prohibited)
- clauses.addElement(new BooleanClause(q, BooleanClause.Occur.MUST_NOT));
+ clauses.add(new BooleanClause(q, BooleanClause.Occur.MUST_NOT));
else
throw new RuntimeException("Clause cannot be both required and prohibited");
}
-
+
/**
* @exception ParseException throw in overridden method to disallow
*/
@@ -334,7 +341,7 @@
// PhraseQuery, or nothing based on the term count
TokenStream source = analyzer.tokenStream(field, new StringReader(queryText));
- Vector v = new Vector();
+ List list = new ArrayList();
final org.apache.lucene.analysis.Token reusableToken = new org.apache.lucene.analysis.Token();
org.apache.lucene.analysis.Token nextToken;
int positionCount = 0;
@@ -349,7 +356,7 @@
}
if (nextToken == null)
break;
- v.addElement(nextToken.clone());
+ list.add(nextToken.clone());
if (nextToken.getPositionIncrement() == 1)
positionCount++;
else
@@ -362,18 +369,18 @@
// ignore
}
- if (v.size() == 0)
+ if (list.size() == 0)
return null;
- else if (v.size() == 1) {
- nextToken = (org.apache.lucene.analysis.Token) v.elementAt(0);
+ else if (list.size() == 1) {
+ nextToken = (org.apache.lucene.analysis.Token) list.get(0);
return new TermQuery(new Term(field, nextToken.term()));
} else {
if (severalTokensAtSamePosition) {
if (positionCount == 1) {
// no phrase query:
BooleanQuery q = new BooleanQuery();
- for (int i = 0; i < v.size(); i++) {
- nextToken = (org.apache.lucene.analysis.Token) v.elementAt(i);
+ for (int i = 0; i < list.size(); i++) {
+ nextToken = (org.apache.lucene.analysis.Token) list.get(i);
TermQuery currentQuery = new TermQuery(
new Term(field, nextToken.term()));
q.add(currentQuery, BooleanClause.Occur.SHOULD);
@@ -384,8 +391,8 @@
// phrase query:
MultiPhraseQuery mpq = new MultiPhraseQuery();
List multiTerms = new ArrayList();
- for (int i = 0; i < v.size(); i++) {
- nextToken = (org.apache.lucene.analysis.Token) v.elementAt(i);
+ for (int i = 0; i < list.size(); i++) {
+ nextToken = (org.apache.lucene.analysis.Token) list.get(i);
if (nextToken.getPositionIncrement() == 1 && multiTerms.size() > 0) {
mpq.add((Term[])multiTerms.toArray(new Term[0]));
multiTerms.clear();
@@ -399,16 +406,15 @@
else {
PhraseQuery q = new PhraseQuery();
q.setSlop(phraseSlop);
- for (int i = 0; i < v.size(); i++) {
- q.add(new Term(field, ((org.apache.lucene.analysis.Token)
- v.elementAt(i)).term()));
-
+ for (int i = 0; i < list.size(); i++) {
+ q.add(new Term(field, ((org.apache.lucene.analysis.Token)
+ list.get(i)).term()));
}
return q;
}
}
}
-
+
/**
* Base implementation delegates to {@link #getFieldQuery(String,String)}.
* This method may be overridden, for example, to return
@@ -416,8 +422,8 @@
*
* @exception ParseException throw in overridden method to disallow
*/
- protected Query getFieldQuery(String field, String queryText, int slop)
- throws ParseException {
+ protected Query getFieldQuery(String field, String queryText, int slop)
+ throws ParseException {
Query query = getFieldQuery(field, queryText);
if (query instanceof PhraseQuery) {
@@ -429,7 +435,7 @@
return query;
}
-
+
/**
* @exception ParseException throw in overridden method to disallow
*/
@@ -464,14 +470,33 @@
* Can be overridden by extending classes, to modify query being
* returned.
*
- * @param clauses Vector that contains {@link BooleanClause} instances
+ * @param clauses List that contains {@link BooleanClause} instances
* to join.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
+ * @deprecated use {@link #getBooleanQuery(List)} instead
*/
protected Query getBooleanQuery(Vector clauses) throws ParseException
{
+ return getBooleanQuery((List) clauses, false);
+ }
+
+ /**
+ * Factory method for generating query, given a set of clauses.
+ * By default creates a boolean query composed of clauses passed in.
+ *
+ * Can be overridden by extending classes, to modify query being
+ * returned.
+ *
+ * @param clauses List that contains {@link BooleanClause} instances
+ * to join.
+ *
+ * @return Resulting {@link Query} object.
+ * @exception ParseException throw in overridden method to disallow
+ */
+ protected Query getBooleanQuery(List clauses) throws ParseException
+ {
return getBooleanQuery(clauses, false);
}
@@ -482,22 +507,42 @@
* Can be overridden by extending classes, to modify query being
* returned.
*
- * @param clauses Vector that contains {@link BooleanClause} instances
+ * @param clauses List that contains {@link BooleanClause} instances
* to join.
* @param disableCoord true if coord scoring should be disabled.
*
* @return Resulting {@link Query} object.
* @exception ParseException throw in overridden method to disallow
+ * @deprecated use {@link #getBooleanQuery(List, boolean)} instead
*/
protected Query getBooleanQuery(Vector clauses, boolean disableCoord)
throws ParseException
{
+ return getBooleanQuery((List) clauses, disableCoord);
+ }
+
+ /**
+ * Factory method for generating query, given a set of clauses.
+ * By default creates a boolean query composed of clauses passed in.
+ *
+ * Can be overridden by extending classes, to modify query being
+ * returned.
+ *
+ * @param clauses List that contains {@link BooleanClause} instances
+ * to join.
+ * @param disableCoord true if coord scoring should be disabled.
+ *
+ * @return Resulting {@link Query} object.
+ * @exception ParseException throw in overridden method to disallow
+ */
+ protected Query getBooleanQuery(List clauses, boolean disableCoord)
+ throws ParseException {
if (clauses == null || clauses.size() == 0)
return null;
BooleanQuery query = new BooleanQuery(disableCoord);
for (int i = 0; i < clauses.size(); i++) {
- query.add((BooleanClause)clauses.elementAt(i));
+ query.add((BooleanClause)clauses.get(i));
}
return query;
}
@@ -563,7 +608,7 @@
Term t = new Term(field, termStr);
return new PrefixQuery(t);
}
-
+
/**
* Factory method for generating a query (similar to
* {@link #getWildcardQuery}). Called when parser parses
@@ -733,7 +778,7 @@
Query Query(String field) :
{
- Vector clauses = new Vector();
+ List clauses = new ArrayList();
Query q, firstQuery=null;
boolean orPresent = false;
int modifier;
@@ -760,7 +805,7 @@
Query andExpression(String field) :
{
- Vector clauses = new Vector();
+ List clauses = new ArrayList();
Query q, firstQuery=null;
int modifier;
}
Index: contrib/miscellaneous/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParserTokenManager.java
===================================================================
--- contrib/miscellaneous/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParserTokenManager.java (revision 689935)
+++ contrib/miscellaneous/src/java/org/apache/lucene/queryParser/precedence/PrecedenceQueryParserTokenManager.java (working copy)
@@ -1,5 +1,6 @@
/* Generated By:JavaCC: Do not edit this line. PrecedenceQueryParserTokenManager.java */
package org.apache.lucene.queryParser.precedence;
+
import java.io.IOException;
import java.io.StringReader;
import java.text.DateFormat;
@@ -7,7 +8,7 @@
import java.util.Date;
import java.util.List;
import java.util.Locale;
-import java.util.Vector;
+
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.DateTools;
Index: contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
===================================================================
--- contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java (revision 689935)
+++ contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java (working copy)
@@ -28,8 +28,9 @@
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import java.util.ArrayList;
-import java.util.Enumeration;
+import java.util.Iterator;
import java.util.LinkedHashSet;
+import java.util.List;
import java.util.Set;
import junit.framework.TestCase;
@@ -350,7 +351,7 @@
private String[] readLines(File file) throws Exception {
BufferedReader reader = new BufferedReader(new InputStreamReader(
new FileInputStream(file)));
- ArrayList lines = new ArrayList();
+ List lines = new ArrayList();
String line;
while ((line = reader.readLine()) != null) {
String t = line.trim();
@@ -373,9 +374,9 @@
private MemoryIndex createMemoryIndex(Document doc) {
MemoryIndex index = new MemoryIndex();
- Enumeration iter = doc.fields();
- while (iter.hasMoreElements()) {
- Field field = (Field) iter.nextElement();
+ Iterator iter = doc.getFields().iterator();
+ while (iter.hasNext()) {
+ Field field = (Field) iter.next();
index.addField(field.name(), field.stringValue(), analyzer);
}
return index;
Index: contrib/lucli/src/java/lucli/LuceneMethods.java
===================================================================
--- contrib/lucli/src/java/lucli/LuceneMethods.java (revision 689935)
+++ contrib/lucli/src/java/lucli/LuceneMethods.java (working copy)
@@ -57,16 +57,17 @@
import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
-
-import java.util.Hashtable;
-import java.util.Vector;
-import java.util.TreeMap;
-import java.util.Map.Entry;
-import java.util.Set;
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
+import java.util.Enumeration;
+import java.util.HashMap;
import java.util.Iterator;
-import java.util.Enumeration;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.Map.Entry;
import jline.ConsoleReader;
@@ -91,15 +92,15 @@
/**
* Various methods that interact with Lucene and provide info about the
- * index, search, etc. Parts addapted from Lucene demo.
+ * index, search, etc. Parts adapted from Lucene demo.
*/
class LuceneMethods {
private int numDocs;
private String indexName; //directory of this index
private java.util.Iterator fieldIterator;
- private Vector fields; //Fields as a vector
- private Vector indexedFields; //Fields as a vector
+ private List fields; //Fields as a list
+ private List indexedFields; //Fields as a list
private String fieldsArray[]; //Fields as an array
private Searcher searcher;
private Query query; //current query string
@@ -247,8 +248,8 @@
private void getFieldInfo() throws IOException {
IndexReader indexReader = IndexReader.open(indexName);
- fields = new Vector();
- indexedFields = new Vector();
+ fields = new ArrayList();
+ indexedFields = new ArrayList();
//get the list of all field names
fieldIterator = indexReader.getFieldNames(FieldOption.ALL).iterator();
@@ -274,14 +275,14 @@
private void invertDocument(Document doc)
throws IOException {
- Hashtable tokenHash = new Hashtable();
+ Map tokenMap = new HashMap();
final int maxFieldLength = 10000;
Analyzer analyzer = new StandardAnalyzer();
- Enumeration fields = doc.fields();
+ Iterator fields = doc.getFields().iterator();
final Token reusableToken = new Token();
- while (fields.hasMoreElements()) {
- Field field = (Field) fields.nextElement();
+ while (fields.hasNext()) {
+ Field field = (Field) fields.next();
String fieldName = field.name();
@@ -304,12 +305,12 @@
position += (nextToken.getPositionIncrement() - 1);
position++;
String name = nextToken.term();
- Integer Count = (Integer) tokenHash.get(name);
+ Integer Count = (Integer) tokenMap.get(name);
if (Count == null) { // not in there yet
- tokenHash.put(name, new Integer(1)); //first one
+ tokenMap.put(name, new Integer(1)); //first one
} else {
int count = Count.intValue();
- tokenHash.put(name, new Integer(count + 1));
+ tokenMap.put(name, new Integer(count + 1));
}
if (position > maxFieldLength) break;
}
@@ -320,7 +321,7 @@
}
}
- Entry[] sortedHash = getSortedHashtableEntries(tokenHash);
+ Entry[] sortedHash = getSortedMapEntries(tokenMap);
for (int ii = 0; ii < sortedHash.length && ii < 10; ii++) {
Entry currentEntry = sortedHash[ii];
message((ii + 1) + ":" + currentEntry.getKey() + " " + currentEntry.getValue());
@@ -353,17 +354,16 @@
indexReader.close();
}
- /** Sort Hashtable values
- * @param h the hashtable we're sorting
+ /** Sort Map values
+ * @param m the map we're sorting
* from http://developer.java.sun.com/developer/qow/archive/170/index.jsp
*/
-
public static Entry[]
- getSortedHashtableEntries(Hashtable h) {
- Set set = h.entrySet();
+ getSortedMapEntries(Map m) {
+ Set set = m.entrySet();
Entry[] entries =
(Entry[]) set.toArray(
- new Entry[set.size()]);
+ new Entry[set.size()]);
Arrays.sort(entries, new Comparator() {
public int compare(Object o1, Object o2) {
Object v1 = ((Entry) o1).getValue();
Index: contrib/analyzers/src/java/org/apache/lucene/analysis/ru/RussianAnalyzer.java
===================================================================
--- contrib/analyzers/src/java/org/apache/lucene/analysis/ru/RussianAnalyzer.java (revision 689935)
+++ contrib/analyzers/src/java/org/apache/lucene/analysis/ru/RussianAnalyzer.java (working copy)
@@ -19,7 +19,7 @@
import java.io.Reader;
import java.util.HashSet;
-import java.util.Hashtable;
+import java.util.Map;
import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
@@ -237,7 +237,7 @@
* Builds an analyzer with the given stop words.
* @todo create a Set version of this ctor
*/
- public RussianAnalyzer(char[] charset, Hashtable stopwords)
+ public RussianAnalyzer(char[] charset, Map stopwords)
{
this.charset = charset;
stopSet = new HashSet(stopwords.keySet());
Index: contrib/analyzers/src/java/org/apache/lucene/analysis/de/GermanAnalyzer.java
===================================================================
--- contrib/analyzers/src/java/org/apache/lucene/analysis/de/GermanAnalyzer.java (revision 689935)
+++ contrib/analyzers/src/java/org/apache/lucene/analysis/de/GermanAnalyzer.java (working copy)
@@ -22,7 +22,7 @@
import java.io.IOException;
import java.io.Reader;
import java.util.HashSet;
-import java.util.Hashtable;
+import java.util.Map;
import java.util.Set;
import org.apache.lucene.analysis.Analyzer;
@@ -91,7 +91,7 @@
/**
* Builds an analyzer with the given stop words.
*/
- public GermanAnalyzer(Hashtable stopwords) {
+ public GermanAnalyzer(Map stopwords) {
stopSet = new HashSet(stopwords.keySet());
}
@@ -112,7 +112,7 @@
/**
* Builds an exclusionlist from a Hashtable.
*/
- public void setStemExclusionTable(Hashtable exclusionlist) {
+ public void setStemExclusionTable(Map exclusionlist) {
exclusionSet = new HashSet(exclusionlist.keySet());
}
Index: contrib/analyzers/src/java/org/apache/lucene/analysis/el/GreekAnalyzer.java
===================================================================
--- contrib/analyzers/src/java/org/apache/lucene/analysis/el/GreekAnalyzer.java (revision 689935)
+++ contrib/analyzers/src/java/org/apache/lucene/analysis/el/GreekAnalyzer.java (working copy)
@@ -16,6 +16,7 @@
* limitations under the License.
*/
+
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
@@ -23,7 +24,7 @@
import java.io.Reader;
import java.util.HashSet;
-import java.util.Hashtable;
+import java.util.Map;
import java.util.Set;
/**
@@ -200,7 +201,7 @@
/**
* Builds an analyzer with the given stop words.
*/
- public GreekAnalyzer(char[] charset, Hashtable stopwords)
+ public GreekAnalyzer(char[] charset, Map stopwords)
{
this.charset = charset;
stopSet = new HashSet(stopwords.keySet());
Index: contrib/analyzers/src/java/org/apache/lucene/analysis/cn/ChineseFilter.java
===================================================================
--- contrib/analyzers/src/java/org/apache/lucene/analysis/cn/ChineseFilter.java (revision 689935)
+++ contrib/analyzers/src/java/org/apache/lucene/analysis/cn/ChineseFilter.java (working copy)
@@ -17,7 +17,8 @@
* limitations under the License.
*/
-import java.util.Hashtable;
+import java.util.HashMap;
+import java.util.Map;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenFilter;
@@ -54,12 +55,12 @@
};
- private Hashtable stopTable;
+ private Map stopTable;
public ChineseFilter(TokenStream in) {
super(in);
- stopTable = new Hashtable(STOP_WORDS.length);
+ stopTable = new HashMap(STOP_WORDS.length);
for (int i = 0; i < STOP_WORDS.length; i++)
stopTable.put(STOP_WORDS[i], STOP_WORDS[i]);
}
Index: contrib/analyzers/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java
===================================================================
--- contrib/analyzers/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java (revision 689935)
+++ contrib/analyzers/src/java/org/apache/lucene/analysis/br/BrazilianAnalyzer.java (working copy)
@@ -17,6 +17,13 @@
* limitations under the License.
*/
+import java.io.File;
+import java.io.IOException;
+import java.io.Reader;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.StopFilter;
@@ -24,12 +31,6 @@
import org.apache.lucene.analysis.WordlistLoader;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
-import java.io.File;
-import java.io.IOException;
-import java.io.Reader;
-import java.util.Hashtable;
-import java.util.HashSet;
-import java.util.Set;
/**
* Analyzer for Brazilian language. Supports an external list of stopwords (words that
@@ -92,7 +93,7 @@
/**
* Builds an analyzer with the given stop words.
*/
- public BrazilianAnalyzer( Hashtable stopwords ) {
+ public BrazilianAnalyzer( Map stopwords ) {
stoptable = new HashSet(stopwords.keySet());
}
@@ -112,7 +113,7 @@
/**
- * Builds an exclusionlist from a Hashtable.
+ * Builds an exclusionlist from a Map.
*/
- public void setStemExclusionTable( Hashtable exclusionlist ) {
+ public void setStemExclusionTable( Map exclusionlist ) {
excltable = new HashSet(exclusionlist.keySet());
}
/**
Index: contrib/analyzers/src/java/org/apache/lucene/analysis/br/BrazilianStemFilter.java
===================================================================
--- contrib/analyzers/src/java/org/apache/lucene/analysis/br/BrazilianStemFilter.java (revision 689935)
+++ contrib/analyzers/src/java/org/apache/lucene/analysis/br/BrazilianStemFilter.java (working copy)
@@ -23,7 +23,6 @@
import java.io.IOException;
import java.util.HashSet;
-import java.util.Hashtable;
import java.util.Set;
/**
Index: contrib/analyzers/src/java/org/apache/lucene/analysis/fr/FrenchStemFilter.java
===================================================================
--- contrib/analyzers/src/java/org/apache/lucene/analysis/fr/FrenchStemFilter.java (revision 689935)
+++ contrib/analyzers/src/java/org/apache/lucene/analysis/fr/FrenchStemFilter.java (working copy)
@@ -20,9 +20,10 @@
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
+
import java.io.IOException;
-import java.util.Hashtable;
import java.util.HashSet;
+import java.util.Map;
import java.util.Set;
/**
@@ -83,7 +84,7 @@
/**
* Set an alternative exclusion list for this filter.
*/
- public void setExclusionTable( Hashtable exclusiontable ) {
+ public void setExclusionTable( Map exclusiontable ) {
exclusions = new HashSet(exclusiontable.keySet());
}
}
Index: contrib/analyzers/src/java/org/apache/lucene/analysis/fr/FrenchAnalyzer.java
===================================================================
--- contrib/analyzers/src/java/org/apache/lucene/analysis/fr/FrenchAnalyzer.java (revision 689935)
+++ contrib/analyzers/src/java/org/apache/lucene/analysis/fr/FrenchAnalyzer.java (working copy)
@@ -29,7 +29,7 @@
import java.io.IOException;
import java.io.Reader;
import java.util.HashSet;
-import java.util.Hashtable;
+import java.util.Map;
import java.util.Set;
/**
@@ -111,9 +111,9 @@
}
/**
- * Builds an exclusionlist from a Hashtable.
+ * Builds an exclusionlist from a Map.
*/
- public void setStemExclusionTable(Hashtable exclusionlist) {
+ public void setStemExclusionTable(Map exclusionlist) {
excltable = new HashSet(exclusionlist.keySet());
}
Index: contrib/analyzers/src/java/org/apache/lucene/analysis/cz/CzechAnalyzer.java
===================================================================
--- contrib/analyzers/src/java/org/apache/lucene/analysis/cz/CzechAnalyzer.java (revision 689935)
+++ contrib/analyzers/src/java/org/apache/lucene/analysis/cz/CzechAnalyzer.java (working copy)
@@ -26,7 +26,6 @@
import org.apache.lucene.analysis.standard.StandardTokenizer;
import java.io.*;
-import java.util.Hashtable;
import java.util.HashSet;
import java.util.Set;