Index: CHANGES.txt
===================================================================
--- CHANGES.txt (revision 692811)
+++ CHANGES.txt (working copy)
@@ -149,6 +149,11 @@
which is equivalent to
getDirectory().fileModified(getSegmentsFileName()). (Mike McCandless)
+23. LUCENE-1366: Rename Field.Index options to be more accurate:
+ TOKENIZED becomes ANALYZED; UN_TOKENIZED becomes NOT_ANALYZED;
+ NO_NORMS becomes NOT_ANALYZED_NO_NORMS and a new ANALYZED_NO_NORMS
+ is added. (Mike McCandless)
+
Bug fixes
1. LUCENE-1134: Fixed BooleanQuery.rewrite to only optimize a single
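[Illustrative note, not part of the patch: the rename is a drop-in substitution at each call site. A minimal before/after sketch; the class and field names below are hypothetical:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;

    class FieldIndexRenameExample {
      static Document makeDoc(String title, String id) {
        Document doc = new Document();
        // was Field.Index.TOKENIZED: value is run through the Analyzer
        doc.add(new Field("title", title, Field.Store.YES, Field.Index.ANALYZED));
        // was Field.Index.UN_TOKENIZED: value is indexed as a single term
        doc.add(new Field("id", id, Field.Store.YES, Field.Index.NOT_ANALYZED));
        // was Field.Index.NO_NORMS: single term, norms omitted
        doc.add(new Field("sku", id, Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS));
        // new option added by this change: analyzed text, norms omitted
        doc.add(new Field("body", title, Field.Store.NO, Field.Index.ANALYZED_NO_NORMS));
        return doc;
      }
    }

The new names make explicit that the option controls whether the value passes through the Analyzer; the old UN_TOKENIZED was misleading, since such a field is still indexed, just as one token.]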
Index: src/test/org/apache/lucene/TestMergeSchedulerExternal.java
===================================================================
--- src/test/org/apache/lucene/TestMergeSchedulerExternal.java (revision 692811)
+++ src/test/org/apache/lucene/TestMergeSchedulerExternal.java (working copy)
@@ -88,7 +88,7 @@
dir.failOn(new FailOnlyOnMerge());
Document doc = new Document();
- Field idField = new Field("id", "", Field.Store.YES, Field.Index.UN_TOKENIZED);
+ Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
doc.add(idField);
IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
Index: src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java
===================================================================
--- src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java (revision 692811)
+++ src/test/org/apache/lucene/queryParser/TestMultiFieldQueryParser.java (working copy)
@@ -287,7 +287,7 @@
Directory ramDir = new RAMDirectory();
IndexWriter iw = new IndexWriter(ramDir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
- doc.add(new Field("body", "blah the footest blah", Field.Store.NO, Field.Index.TOKENIZED));
+ doc.add(new Field("body", "blah the footest blah", Field.Store.NO, Field.Index.ANALYZED));
iw.addDocument(doc);
iw.close();
Index: src/test/org/apache/lucene/queryParser/TestQueryParser.java
===================================================================
--- src/test/org/apache/lucene/queryParser/TestQueryParser.java (revision 692811)
+++ src/test/org/apache/lucene/queryParser/TestQueryParser.java (working copy)
@@ -917,10 +917,10 @@
private static void addDateDoc(String content, int year, int month,
int day, int hour, int minute, int second, IndexWriter iw) throws IOException {
Document d = new Document();
- d.add(new Field("f", content, Field.Store.YES, Field.Index.TOKENIZED));
+ d.add(new Field("f", content, Field.Store.YES, Field.Index.ANALYZED));
Calendar cal = Calendar.getInstance();
cal.set(year, month-1, day, hour, minute, second);
- d.add(new Field("date", DateField.dateToString(cal.getTime()), Field.Store.YES, Field.Index.UN_TOKENIZED));
+ d.add(new Field("date", DateField.dateToString(cal.getTime()), Field.Store.YES, Field.Index.NOT_ANALYZED));
iw.addDocument(d);
}
Index: src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java
===================================================================
--- src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java (revision 692811)
+++ src/test/org/apache/lucene/analysis/TestKeywordAnalyzer.java (working copy)
@@ -43,8 +43,8 @@
true, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
- doc.add(new Field("partnum", "Q36", Field.Store.YES, Field.Index.UN_TOKENIZED));
- doc.add(new Field("description", "Illidium Space Modulator", Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("partnum", "Q36", Field.Store.YES, Field.Index.NOT_ANALYZED));
+ doc.add(new Field("description", "Illidium Space Modulator", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
@@ -69,10 +69,10 @@
RAMDirectory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir,new KeywordAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
- doc.add(new Field("partnum", "Q36", Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("partnum", "Q36", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
doc = new Document();
- doc.add(new Field("partnum", "Q37", Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("partnum", "Q37", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
Index: src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java
===================================================================
--- src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java (revision 692811)
+++ src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java (working copy)
@@ -72,7 +72,7 @@
// Force frequent commits
writer.setMaxBufferedDocs(2);
Document doc = new Document();
- doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<7;i++)
writer.addDocument(doc);
IndexCommit cp = (IndexCommit) dp.snapshot();
@@ -115,7 +115,7 @@
final Thread t = new Thread() {
public void run() {
Document doc = new Document();
- doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
while(System.currentTimeMillis() < stopTime) {
for(int i=0;i<27;i++) {
try {
@@ -159,7 +159,7 @@
// final segment, so deletion policy has a chance to
// delete again:
Document doc = new Document();
- doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
// Make sure we don't have any leftover files in the
Index: src/test/org/apache/lucene/SearchTest.java
===================================================================
--- src/test/org/apache/lucene/SearchTest.java (revision 692811)
+++ src/test/org/apache/lucene/SearchTest.java (working copy)
@@ -44,7 +44,7 @@
};
for (int j = 0; j < docs.length; j++) {
Document d = new Document();
- d.add(new Field("contents", docs[j], Field.Store.YES, Field.Index.TOKENIZED));
+ d.add(new Field("contents", docs[j], Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(d);
}
writer.close();
Index: src/test/org/apache/lucene/TestSearch.java
===================================================================
--- src/test/org/apache/lucene/TestSearch.java (revision 692811)
+++ src/test/org/apache/lucene/TestSearch.java (working copy)
@@ -93,7 +93,7 @@
};
for (int j = 0; j < docs.length; j++) {
Document d = new Document();
- d.add(new Field("contents", docs[j], Field.Store.YES, Field.Index.TOKENIZED));
+ d.add(new Field("contents", docs[j], Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(d);
}
writer.close();
Index: src/test/org/apache/lucene/store/TestLockFactory.java
===================================================================
--- src/test/org/apache/lucene/store/TestLockFactory.java (revision 692811)
+++ src/test/org/apache/lucene/store/TestLockFactory.java (working copy)
@@ -557,7 +557,7 @@
private void addDoc(IndexWriter writer) throws IOException {
Document doc = new Document();
- doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.TOKENIZED));
+ doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}
Index: src/test/org/apache/lucene/store/TestWindowsMMap.java
===================================================================
--- src/test/org/apache/lucene/store/TestWindowsMMap.java (revision 692811)
+++ src/test/org/apache/lucene/store/TestWindowsMMap.java (working copy)
@@ -78,7 +78,7 @@
for(int dx = 0; dx < 1000; dx ++) {
String f = randomField();
Document doc = new Document();
- doc.add(new Field("data", f, Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("data", f, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
Index: src/test/org/apache/lucene/store/TestBufferedIndexInput.java
===================================================================
--- src/test/org/apache/lucene/store/TestBufferedIndexInput.java (revision 692811)
+++ src/test/org/apache/lucene/store/TestBufferedIndexInput.java (working copy)
@@ -163,8 +163,8 @@
writer.setUseCompoundFile(false);
for(int i=0;i<37;i++) {
Document doc = new Document();
- doc.add(new Field("content", "aaa bbb ccc ddd" + i, Field.Store.YES, Field.Index.TOKENIZED));
- doc.add(new Field("id", "" + i, Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("content", "aaa bbb ccc ddd" + i, Field.Store.YES, Field.Index.ANALYZED));
+ doc.add(new Field("id", "" + i, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
writer.close();
Index: src/test/org/apache/lucene/ThreadSafetyTest.java
===================================================================
--- src/test/org/apache/lucene/ThreadSafetyTest.java (revision 692811)
+++ src/test/org/apache/lucene/ThreadSafetyTest.java (working copy)
@@ -56,8 +56,8 @@
for (int i = 0; i < 1024*ITERATIONS; i++) {
Document d = new Document();
int n = RANDOM.nextInt();
- d.add(new Field("id", Integer.toString(n), Field.Store.YES, Field.Index.UN_TOKENIZED));
- d.add(new Field("contents", English.intToEnglish(n), Field.Store.NO, Field.Index.TOKENIZED));
+ d.add(new Field("id", Integer.toString(n), Field.Store.YES, Field.Index.NOT_ANALYZED));
+ d.add(new Field("contents", English.intToEnglish(n), Field.Store.NO, Field.Index.ANALYZED));
System.out.println("Adding " + n);
// Switch between single and multiple file segments
Index: src/test/org/apache/lucene/TestDemo.java
===================================================================
--- src/test/org/apache/lucene/TestDemo.java (revision 692811)
+++ src/test/org/apache/lucene/TestDemo.java (working copy)
@@ -56,7 +56,7 @@
Document doc = new Document();
String text = "This is the text to be indexed.";
doc.add(new Field("fieldname", text, Field.Store.YES,
- Field.Index.TOKENIZED));
+ Field.Index.ANALYZED));
iwriter.addDocument(doc);
iwriter.close();
Index: src/test/org/apache/lucene/search/TestNot.java
===================================================================
--- src/test/org/apache/lucene/search/TestNot.java (revision 692811)
+++ src/test/org/apache/lucene/search/TestNot.java (working copy)
@@ -41,7 +41,7 @@
IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
Document d1 = new Document();
- d1.add(new Field("field", "a b", Field.Store.YES, Field.Index.TOKENIZED));
+ d1.add(new Field("field", "a b", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(d1);
writer.optimize();
Index: src/test/org/apache/lucene/search/TestSearchHitsWithDeletions.java
===================================================================
--- src/test/org/apache/lucene/search/TestSearchHitsWithDeletions.java (revision 692811)
+++ src/test/org/apache/lucene/search/TestSearchHitsWithDeletions.java (working copy)
@@ -169,7 +169,7 @@
private static Document createDocument(int id) {
Document doc = new Document();
- doc.add(new Field(TEXT_FIELD, "text of document"+id, Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field(TEXT_FIELD, "text of document"+id, Field.Store.YES, Field.Index.ANALYZED));
return doc;
}
Index: src/test/org/apache/lucene/search/TestPrefixFilter.java
===================================================================
--- src/test/org/apache/lucene/search/TestPrefixFilter.java (revision 692811)
+++ src/test/org/apache/lucene/search/TestPrefixFilter.java (working copy)
@@ -42,7 +42,7 @@
IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < categories.length; i++) {
Document doc = new Document();
- doc.add(new Field("category", categories[i], Field.Store.YES, Field.Index.UN_TOKENIZED));
+ doc.add(new Field("category", categories[i], Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
}
writer.close();
Index: src/test/org/apache/lucene/search/TestPhraseQuery.java
===================================================================
--- src/test/org/apache/lucene/search/TestPhraseQuery.java (revision 692811)
+++ src/test/org/apache/lucene/search/TestPhraseQuery.java (working copy)
@@ -59,19 +59,19 @@
IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
- doc.add(new Field("field", "one two three four five", Field.Store.YES, Field.Index.TOKENIZED));
- doc.add(new Field("repeated", "this is a repeated field - first part", Field.Store.YES, Field.Index.TOKENIZED));
- Fieldable repeatedField = new Field("repeated", "second part of a repeated field", Field.Store.YES, Field.Index.TOKENIZED);
+ doc.add(new Field("field", "one two three four five", Field.Store.YES, Field.Index.ANALYZED));
+ doc.add(new Field("repeated", "this is a repeated field - first part", Field.Store.YES, Field.Index.ANALYZED));
+ Fieldable repeatedField = new Field("repeated", "second part of a repeated field", Field.Store.YES, Field.Index.ANALYZED);
doc.add(repeatedField);
- doc.add(new Field("palindrome", "one two three two one", Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("palindrome", "one two three two one", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
doc = new Document();
- doc.add(new Field("nonexist", "phrase exist notexist exist found", Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("nonexist", "phrase exist notexist exist found", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
doc = new Document();
- doc.add(new Field("nonexist", "phrase exist notexist exist found", Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("nonexist", "phrase exist notexist exist found", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.optimize();
@@ -205,7 +205,7 @@
IndexWriter writer = new IndexWriter(directory, stopAnalyzer, true,
IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
- doc.add(new Field("field", "the stop words are here", Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("field", "the stop words are here", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
@@ -238,12 +238,12 @@
IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
- doc.add(new Field("source", "marketing info", Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("source", "marketing info", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
doc = new Document();
- doc.add(new Field("contents", "foobar", Field.Store.YES, Field.Index.TOKENIZED));
- doc.add(new Field("source", "marketing info", Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("contents", "foobar", Field.Store.YES, Field.Index.ANALYZED));
+ doc.add(new Field("source", "marketing info", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.optimize();
@@ -273,15 +273,15 @@
writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true,
IndexWriter.MaxFieldLength.LIMITED);
doc = new Document();
- doc.add(new Field("contents", "map entry woo", Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("contents", "map entry woo", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
doc = new Document();
- doc.add(new Field("contents", "woo map entry", Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("contents", "woo map entry", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
doc = new Document();
- doc.add(new Field("contents", "map foobarword entry woo", Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("contents", "map foobarword entry woo", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.optimize();
@@ -324,15 +324,15 @@
IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
- doc.add(new Field("field", "foo firstname lastname foo", Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("field", "foo firstname lastname foo", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
Document doc2 = new Document();
- doc2.add(new Field("field", "foo firstname xxx lastname foo", Field.Store.YES, Field.Index.TOKENIZED));
+ doc2.add(new Field("field", "foo firstname xxx lastname foo", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc2);
Document doc3 = new Document();
- doc3.add(new Field("field", "foo firstname xxx yyy lastname foo", Field.Store.YES, Field.Index.TOKENIZED));
+ doc3.add(new Field("field", "foo firstname xxx yyy lastname foo", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc3);
writer.optimize();
Index: src/test/org/apache/lucene/search/TestPositionIncrement.java
===================================================================
--- src/test/org/apache/lucene/search/TestPositionIncrement.java (revision 692811)
+++ src/test/org/apache/lucene/search/TestPositionIncrement.java (working copy)
@@ -65,7 +65,7 @@
IndexWriter writer = new IndexWriter(store, analyzer, true,
IndexWriter.MaxFieldLength.LIMITED);
Document d = new Document();
- d.add(new Field("field", "bogus", Field.Store.YES, Field.Index.TOKENIZED));
+ d.add(new Field("field", "bogus", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(d);
writer.optimize();
writer.close();
Index: src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java
===================================================================
--- src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java (revision 692811)
+++ src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java (working copy)
@@ -65,10 +65,10 @@
for (int i = 0; i < data.length; i++) {
Document doc = new Document();
- doc.add(new Field("id", String.valueOf(i), Field.Store.YES, Field.Index.UN_TOKENIZED));//Field.Keyword("id",String.valueOf(i)));
- doc.add(new Field("all", "all", Field.Store.YES, Field.Index.UN_TOKENIZED));//Field.Keyword("all","all"));
+ doc.add(new Field("id", String.valueOf(i), Field.Store.YES, Field.Index.NOT_ANALYZED));//Field.Keyword("id",String.valueOf(i)));
+ doc.add(new Field("all", "all", Field.Store.YES, Field.Index.NOT_ANALYZED));//Field.Keyword("all","all"));
if (null != data[i]) {
- doc.add(new Field("data", data[i], Field.Store.YES, Field.Index.TOKENIZED));//Field.Text("data",data[i]));
+ doc.add(new Field("data", data[i], Field.Store.YES, Field.Index.ANALYZED));//Field.Text("data",data[i]));
}
writer.addDocument(doc);
}
Index: src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java
===================================================================
--- src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java (revision 692811)
+++ src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java (working copy)
@@ -137,7 +137,7 @@
private static Document makeDocument(String docText) {
Document doc = new Document();
- Field f = new Field("f", docText, Field.Store.NO, Field.Index.TOKENIZED);
+ Field f = new Field("f", docText, Field.Store.NO, Field.Index.ANALYZED);
f.setOmitNorms(true);
doc.add(f);
return doc;
Index: src/test/org/apache/lucene/search/TestBooleanOr.java
===================================================================
--- src/test/org/apache/lucene/search/TestBooleanOr.java (revision 692811)
+++ src/test/org/apache/lucene/search/TestBooleanOr.java (working copy)
@@ -148,12 +148,12 @@
FIELD_T,
"Optimize not deleting all files",
Field.Store.YES,
- Field.Index.TOKENIZED));
+ Field.Index.ANALYZED));
d.add(new Field(
FIELD_C,
"Deleted When I run an optimize in our production environment.",
Field.Store.YES,
- Field.Index.TOKENIZED));
+ Field.Index.ANALYZED));
//
writer.addDocument(d);
Index: src/test/org/apache/lucene/search/TestDateSort.java
===================================================================
--- src/test/org/apache/lucene/search/TestDateSort.java (revision 692811)
+++ src/test/org/apache/lucene/search/TestDateSort.java (working copy)
@@ -103,13 +103,13 @@
Document document = new Document();
// Add the text field.
- Field textField = new Field(TEXT_FIELD, text, Field.Store.YES, Field.Index.TOKENIZED);
+ Field textField = new Field(TEXT_FIELD, text, Field.Store.YES, Field.Index.ANALYZED);
document.add(textField);
// Add the date/time field.
String dateTimeString = DateTools.timeToString(time, DateTools.Resolution.SECOND);
Field dateTimeField = new Field(DATE_TIME_FIELD, dateTimeString, Field.Store.YES,
- Field.Index.UN_TOKENIZED);
+ Field.Index.NOT_ANALYZED);
document.add(dateTimeField);
return document;
Index: src/test/org/apache/lucene/search/TestSort.java
===================================================================
--- src/test/org/apache/lucene/search/TestSort.java (revision 692811)
+++ src/test/org/apache/lucene/search/TestSort.java (working copy)
@@ -125,16 +125,16 @@
if (((i%2)==0 && even) || ((i%2)==1 && odd)) {
Document doc = new Document();
doc.add (new Field ("tracer", data[i][0], Field.Store.YES, Field.Index.NO));
- doc.add (new Field ("contents", data[i][1], Field.Store.NO, Field.Index.TOKENIZED));
- if (data[i][2] != null) doc.add (new Field ("int", data[i][2], Field.Store.NO, Field.Index.UN_TOKENIZED));
- if (data[i][3] != null) doc.add (new Field ("float", data[i][3], Field.Store.NO, Field.Index.UN_TOKENIZED));
- if (data[i][4] != null) doc.add (new Field ("string", data[i][4], Field.Store.NO, Field.Index.UN_TOKENIZED));
- if (data[i][5] != null) doc.add (new Field ("custom", data[i][5], Field.Store.NO, Field.Index.UN_TOKENIZED));
- if (data[i][6] != null) doc.add (new Field ("i18n", data[i][6], Field.Store.NO, Field.Index.UN_TOKENIZED));
- if (data[i][7] != null) doc.add (new Field ("long", data[i][7], Field.Store.NO, Field.Index.UN_TOKENIZED));
- if (data[i][8] != null) doc.add (new Field ("double", data[i][8], Field.Store.NO, Field.Index.UN_TOKENIZED));
- if (data[i][8] != null) doc.add (new Field ("short", data[i][9], Field.Store.NO, Field.Index.UN_TOKENIZED));
- if (data[i][8] != null) doc.add (new Field ("byte", data[i][10], Field.Store.NO, Field.Index.UN_TOKENIZED));
+ doc.add (new Field ("contents", data[i][1], Field.Store.NO, Field.Index.ANALYZED));
+ if (data[i][2] != null) doc.add (new Field ("int", data[i][2], Field.Store.NO, Field.Index.NOT_ANALYZED));
+ if (data[i][3] != null) doc.add (new Field ("float", data[i][3], Field.Store.NO, Field.Index.NOT_ANALYZED));
+ if (data[i][4] != null) doc.add (new Field ("string", data[i][4], Field.Store.NO, Field.Index.NOT_ANALYZED));
+ if (data[i][5] != null) doc.add (new Field ("custom", data[i][5], Field.Store.NO, Field.Index.NOT_ANALYZED));
+ if (data[i][6] != null) doc.add (new Field ("i18n", data[i][6], Field.Store.NO, Field.Index.NOT_ANALYZED));
+ if (data[i][7] != null) doc.add (new Field ("long", data[i][7], Field.Store.NO, Field.Index.NOT_ANALYZED));
+ if (data[i][8] != null) doc.add (new Field ("double", data[i][8], Field.Store.NO, Field.Index.NOT_ANALYZED));
+ if (data[i][8] != null) doc.add (new Field ("short", data[i][9], Field.Store.NO, Field.Index.NOT_ANALYZED));
+ if (data[i][8] != null) doc.add (new Field ("byte", data[i][10], Field.Store.NO, Field.Index.NOT_ANALYZED));
doc.setBoost(2); // produce some scores above 1.0
writer.addDocument (doc);
}
Index: src/test/org/apache/lucene/search/TestExtendedFieldCache.java
===================================================================
--- src/test/org/apache/lucene/search/TestExtendedFieldCache.java (revision 692811)
+++ src/test/org/apache/lucene/search/TestExtendedFieldCache.java (working copy)
@@ -43,9 +43,9 @@
double theDouble = Double.MAX_VALUE;
for (int i = 0; i < NUM_DOCS; i++){
Document doc = new Document();
- doc.add(new Field("theLong", String.valueOf(theLong--), Field.Store.NO, Field.Index.UN_TOKENIZED));
- doc.add(new Field("theDouble", String.valueOf(theDouble--), Field.Store.NO, Field.Index.UN_TOKENIZED));
- doc.add(new Field("text", English.intToEnglish(i), Field.Store.NO, Field.Index.TOKENIZED));
+ doc.add(new Field("theLong", String.valueOf(theLong--), Field.Store.NO, Field.Index.NOT_ANALYZED));
+ doc.add(new Field("theDouble", String.valueOf(theDouble--), Field.Store.NO, Field.Index.NOT_ANALYZED));
+ doc.add(new Field("text", English.intToEnglish(i), Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}
writer.close();
Index: src/test/org/apache/lucene/search/payloads/TestBoostingTermQuery.java
===================================================================
--- src/test/org/apache/lucene/search/payloads/TestBoostingTermQuery.java (revision 692811)
+++ src/test/org/apache/lucene/search/payloads/TestBoostingTermQuery.java (working copy)
@@ -102,11 +102,11 @@
//writer.infoStream = System.out;
for (int i = 0; i < 1000; i++) {
Document doc = new Document();
- Field noPayloadField = new Field(PayloadHelper.NO_PAYLOAD_FIELD, English.intToEnglish(i), Field.Store.YES, Field.Index.TOKENIZED);
+ Field noPayloadField = new Field(PayloadHelper.NO_PAYLOAD_FIELD, English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED);
//noPayloadField.setBoost(0);
doc.add(noPayloadField);
- doc.add(new Field("field", English.intToEnglish(i), Field.Store.YES, Field.Index.TOKENIZED));
- doc.add(new Field("multiField", English.intToEnglish(i) + " " + English.intToEnglish(i), Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("field", English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED));
+ doc.add(new Field("multiField", English.intToEnglish(i) + " " + English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
writer.optimize();
Index: src/test/org/apache/lucene/search/payloads/PayloadHelper.java
===================================================================
--- src/test/org/apache/lucene/search/payloads/PayloadHelper.java (revision 692811)
+++ src/test/org/apache/lucene/search/payloads/PayloadHelper.java (working copy)
@@ -89,9 +89,9 @@
//writer.infoStream = System.out;
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
- doc.add(new Field(FIELD, English.intToEnglish(i), Field.Store.YES, Field.Index.TOKENIZED));
- doc.add(new Field(MULTI_FIELD, English.intToEnglish(i) + " " + English.intToEnglish(i), Field.Store.YES, Field.Index.TOKENIZED));
- doc.add(new Field(NO_PAYLOAD_FIELD, English.intToEnglish(i), Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field(FIELD, English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED));
+ doc.add(new Field(MULTI_FIELD, English.intToEnglish(i) + " " + English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED));
+ doc.add(new Field(NO_PAYLOAD_FIELD, English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
//writer.optimize();
Index: src/test/org/apache/lucene/search/TestBooleanScorer.java
===================================================================
--- src/test/org/apache/lucene/search/TestBooleanScorer.java (revision 692811)
+++ src/test/org/apache/lucene/search/TestBooleanScorer.java (working copy)
@@ -50,7 +50,7 @@
IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < values.length; i++) {
Document doc = new Document();
- doc.add(new Field(FIELD, values[i], Field.Store.YES, Field.Index.UN_TOKENIZED));
+ doc.add(new Field(FIELD, values[i], Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
}
writer.close();
Index: src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java
===================================================================
--- src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java (revision 692811)
+++ src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java (working copy)
@@ -54,7 +54,7 @@
IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < docFields.length; i++) {
Document doc = new Document();
- doc.add(new Field(FIELD, docFields[i], Field.Store.NO, Field.Index.TOKENIZED));
+ doc.add(new Field(FIELD, docFields[i], Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}
writer.close();
Index: src/test/org/apache/lucene/search/spans/TestBasics.java
===================================================================
--- src/test/org/apache/lucene/search/spans/TestBasics.java (revision 692811)
+++ src/test/org/apache/lucene/search/spans/TestBasics.java (working copy)
@@ -55,7 +55,7 @@
//writer.infoStream = System.out;
for (int i = 0; i < 1000; i++) {
Document doc = new Document();
- doc.add(new Field("field", English.intToEnglish(i), Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("field", English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
Index: src/test/org/apache/lucene/search/spans/TestSpans.java
===================================================================
--- src/test/org/apache/lucene/search/spans/TestSpans.java (revision 692811)
+++ src/test/org/apache/lucene/search/spans/TestSpans.java (working copy)
@@ -41,7 +41,7 @@
IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < docFields.length; i++) {
Document doc = new Document();
- doc.add(new Field(field, docFields[i], Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field(field, docFields[i], Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
writer.close();
Index: src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java
===================================================================
--- src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java (revision 692811)
+++ src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java (working copy)
@@ -83,8 +83,8 @@
protected void addDocument(final IndexWriter writer, final String id, final String text) throws IOException {
final Document document = new Document();
- document.add(new Field(FIELD_ID, id, Field.Store.YES, Field.Index.UN_TOKENIZED));
- document.add(new Field(FIELD_TEXT, text, Field.Store.YES, Field.Index.TOKENIZED));
+ document.add(new Field(FIELD_ID, id, Field.Store.YES, Field.Index.NOT_ANALYZED));
+ document.add(new Field(FIELD_TEXT, text, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(document);
}
Index: src/test/org/apache/lucene/search/spans/TestPayloadSpans.java
===================================================================
--- src/test/org/apache/lucene/search/spans/TestPayloadSpans.java (revision 692811)
+++ src/test/org/apache/lucene/search/spans/TestPayloadSpans.java (working copy)
@@ -224,7 +224,7 @@
IndexWriter writer = new IndexWriter(directory, analyzer, true);
writer.setSimilarity(similarity);
Document doc = new Document();
- doc.add(new Field(PayloadHelper.FIELD,"xx rr yy mm pp", Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field(PayloadHelper.FIELD,"xx rr yy mm pp", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
@@ -292,7 +292,7 @@
for(int i = 0; i < docs.length; i++) {
doc = new Document();
String docText = docs[i];
- doc.add(new Field(PayloadHelper.FIELD,docText, Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field(PayloadHelper.FIELD,docText, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
Index: src/test/org/apache/lucene/search/TestMultiSearcher.java
===================================================================
--- src/test/org/apache/lucene/search/TestMultiSearcher.java (revision 692811)
+++ src/test/org/apache/lucene/search/TestMultiSearcher.java (working copy)
@@ -65,23 +65,23 @@
// creating a document to store
Document lDoc = new Document();
- lDoc.add(new Field("fulltext", "Once upon a time.....", Field.Store.YES, Field.Index.TOKENIZED));
- lDoc.add(new Field("id", "doc1", Field.Store.YES, Field.Index.UN_TOKENIZED));
- lDoc.add(new Field("handle", "1", Field.Store.YES, Field.Index.UN_TOKENIZED));
+ lDoc.add(new Field("fulltext", "Once upon a time.....", Field.Store.YES, Field.Index.ANALYZED));
+ lDoc.add(new Field("id", "doc1", Field.Store.YES, Field.Index.NOT_ANALYZED));
+ lDoc.add(new Field("handle", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
// creating a document to store
Document lDoc2 = new Document();
lDoc2.add(new Field("fulltext", "in a galaxy far far away.....",
- Field.Store.YES, Field.Index.TOKENIZED));
- lDoc2.add(new Field("id", "doc2", Field.Store.YES, Field.Index.UN_TOKENIZED));
- lDoc2.add(new Field("handle", "1", Field.Store.YES, Field.Index.UN_TOKENIZED));
+ Field.Store.YES, Field.Index.ANALYZED));
+ lDoc2.add(new Field("id", "doc2", Field.Store.YES, Field.Index.NOT_ANALYZED));
+ lDoc2.add(new Field("handle", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
// creating a document to store
Document lDoc3 = new Document();
lDoc3.add(new Field("fulltext", "a bizarre bug manifested itself....",
- Field.Store.YES, Field.Index.TOKENIZED));
- lDoc3.add(new Field("id", "doc3", Field.Store.YES, Field.Index.UN_TOKENIZED));
- lDoc3.add(new Field("handle", "1", Field.Store.YES, Field.Index.UN_TOKENIZED));
+ Field.Store.YES, Field.Index.ANALYZED));
+ lDoc3.add(new Field("id", "doc3", Field.Store.YES, Field.Index.NOT_ANALYZED));
+ lDoc3.add(new Field("handle", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
// creating an index writer for the first index
IndexWriter writerA = new IndexWriter(indexStoreA, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
@@ -204,10 +204,10 @@
private static Document createDocument(String contents1, String contents2) {
Document document=new Document();
- document.add(new Field("contents", contents1, Field.Store.YES, Field.Index.UN_TOKENIZED));
- document.add(new Field("other", "other contents", Field.Store.YES, Field.Index.UN_TOKENIZED));
+ document.add(new Field("contents", contents1, Field.Store.YES, Field.Index.NOT_ANALYZED));
+ document.add(new Field("other", "other contents", Field.Store.YES, Field.Index.NOT_ANALYZED));
if (contents2!=null) {
- document.add(new Field("contents", contents2, Field.Store.YES, Field.Index.UN_TOKENIZED));
+ document.add(new Field("contents", contents2, Field.Store.YES, Field.Index.NOT_ANALYZED));
}
return document;
Index: src/test/org/apache/lucene/search/TestTimeLimitedCollector.java
===================================================================
--- src/test/org/apache/lucene/search/TestTimeLimitedCollector.java (revision 692811)
+++ src/test/org/apache/lucene/search/TestTimeLimitedCollector.java (working copy)
@@ -96,7 +96,7 @@
private void add(String value, IndexWriter iw) throws IOException {
Document d = new Document();
- d.add(new Field(FIELD_NAME, value, Field.Store.NO, Field.Index.TOKENIZED));
+ d.add(new Field(FIELD_NAME, value, Field.Store.NO, Field.Index.ANALYZED));
iw.addDocument(d);
}
Index: src/test/org/apache/lucene/search/TestTermVectors.java
===================================================================
--- src/test/org/apache/lucene/search/TestTermVectors.java (revision 692811)
+++ src/test/org/apache/lucene/search/TestTermVectors.java (working copy)
@@ -63,7 +63,7 @@
termVector = Field.TermVector.YES;
}
doc.add(new Field("field", English.intToEnglish(i),
- Field.Store.YES, Field.Index.TOKENIZED, termVector));
+ Field.Store.YES, Field.Index.ANALYZED, termVector));
writer.addDocument(doc);
}
writer.close();
@@ -95,10 +95,10 @@
Directory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
- doc.add(new Field("c", "some content here", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
- doc.add(new Field("a", "some content here", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
- doc.add(new Field("b", "some content here", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
- doc.add(new Field("x", "some content here", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ doc.add(new Field("c", "some content here", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ doc.add(new Field("a", "some content here", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ doc.add(new Field("b", "some content here", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ doc.add(new Field("x", "some content here", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.close();
IndexReader reader = IndexReader.open(dir);
@@ -342,9 +342,9 @@
private void setupDoc(Document doc, String text)
{
doc.add(new Field("field2", text, Field.Store.YES,
- Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("field", text, Field.Store.YES,
- Field.Index.TOKENIZED, Field.TermVector.YES));
+ Field.Index.ANALYZED, Field.TermVector.YES));
//System.out.println("Document: " + doc);
}
@@ -355,13 +355,13 @@
for(int i=0;i<100;i++) {
Document doc = new Document();
doc.add(new Field("field", English.intToEnglish(i),
- Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.NO));
+ Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
writer.addDocument(doc);
}
for(int i=0;i<10;i++) {
Document doc = new Document();
doc.add(new Field("field", English.intToEnglish(100+i),
- Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
}
@@ -386,15 +386,15 @@
IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
doc.add(new Field("field", "one",
- Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.NO));
+ Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO));
doc.add(new Field("field", "one",
- Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.YES));
+ Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES));
doc.add(new Field("field", "one",
- Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS));
+ Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
doc.add(new Field("field", "one",
- Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_OFFSETS));
+ Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_OFFSETS));
doc.add(new Field("field", "one",
- Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.close();
Index: src/test/org/apache/lucene/search/TestTermScorer.java
===================================================================
--- src/test/org/apache/lucene/search/TestTermScorer.java (revision 692811)
+++ src/test/org/apache/lucene/search/TestTermScorer.java (working copy)
@@ -55,7 +55,7 @@
for (int i = 0; i < values.length; i++)
{
Document doc = new Document();
- doc.add(new Field(FIELD, values[i], Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field(FIELD, values[i], Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
writer.close();
Index: src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java
===================================================================
--- src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java (revision 692811)
+++ src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java (working copy)
@@ -57,11 +57,11 @@
Document doc3 = new Document();
Document doc4 = new Document();
Document doc5 = new Document();
- doc1.add(new Field("body", "blueberry pie", Field.Store.YES, Field.Index.TOKENIZED));
- doc2.add(new Field("body", "blueberry strudel", Field.Store.YES, Field.Index.TOKENIZED));
- doc3.add(new Field("body", "blueberry pizza", Field.Store.YES, Field.Index.TOKENIZED));
- doc4.add(new Field("body", "blueberry chewing gum", Field.Store.YES, Field.Index.TOKENIZED));
- doc5.add(new Field("body", "piccadilly circus", Field.Store.YES, Field.Index.TOKENIZED));
+ doc1.add(new Field("body", "blueberry pie", Field.Store.YES, Field.Index.ANALYZED));
+ doc2.add(new Field("body", "blueberry strudel", Field.Store.YES, Field.Index.ANALYZED));
+ doc3.add(new Field("body", "blueberry pizza", Field.Store.YES, Field.Index.ANALYZED));
+ doc4.add(new Field("body", "blueberry chewing gum", Field.Store.YES, Field.Index.ANALYZED));
+ doc5.add(new Field("body", "piccadilly circus", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc1);
writer.addDocument(doc2);
writer.addDocument(doc3);
Index: src/test/org/apache/lucene/search/TestSetNorm.java
===================================================================
--- src/test/org/apache/lucene/search/TestSetNorm.java (revision 692811)
+++ src/test/org/apache/lucene/search/TestSetNorm.java (working copy)
@@ -40,7 +40,7 @@
IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
// add the same document four times
- Fieldable f1 = new Field("field", "word", Field.Store.YES, Field.Index.TOKENIZED);
+ Fieldable f1 = new Field("field", "word", Field.Store.YES, Field.Index.ANALYZED);
Document d1 = new Document();
d1.add(f1);
writer.addDocument(d1);
Index: src/test/org/apache/lucene/search/TestWildcard.java
===================================================================
--- src/test/org/apache/lucene/search/TestWildcard.java (revision 692811)
+++ src/test/org/apache/lucene/search/TestWildcard.java (working copy)
@@ -142,7 +142,7 @@
IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < contents.length; ++i) {
Document doc = new Document();
- doc.add(new Field(field, contents[i], Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field(field, contents[i], Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
writer.optimize();
@@ -200,7 +200,7 @@
IndexWriter iw = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < docs.length; i++) {
Document doc = new Document();
- doc.add(new Field(field,docs[i],Store.NO,Index.TOKENIZED));
+ doc.add(new Field(field,docs[i],Store.NO,Index.ANALYZED));
iw.addDocument(doc);
}
iw.close();
Index: src/test/org/apache/lucene/search/BaseTestRangeFilter.java
===================================================================
--- src/test/org/apache/lucene/search/BaseTestRangeFilter.java (revision 692811)
+++ src/test/org/apache/lucene/search/BaseTestRangeFilter.java (working copy)
@@ -80,7 +80,7 @@
for (int d = minId; d <= maxId; d++) {
Document doc = new Document();
- doc.add(new Field("id",pad(d), Field.Store.YES, Field.Index.UN_TOKENIZED));
+ doc.add(new Field("id",pad(d), Field.Store.YES, Field.Index.NOT_ANALYZED));
int r= rand.nextInt();
if (maxR < r) {
maxR = r;
@@ -88,8 +88,8 @@
if (r < minR) {
minR = r;
}
- doc.add(new Field("rand",pad(r), Field.Store.YES, Field.Index.UN_TOKENIZED));
- doc.add(new Field("body","body", Field.Store.YES, Field.Index.UN_TOKENIZED));
+ doc.add(new Field("rand",pad(r), Field.Store.YES, Field.Index.NOT_ANALYZED));
+ doc.add(new Field("body","body", Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
}
Index: src/test/org/apache/lucene/search/TestBoolean2.java
===================================================================
--- src/test/org/apache/lucene/search/TestBoolean2.java (revision 692811)
+++ src/test/org/apache/lucene/search/TestBoolean2.java (working copy)
@@ -49,7 +49,7 @@
IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < docFields.length; i++) {
Document doc = new Document();
- doc.add(new Field(field, docFields[i], Field.Store.NO, Field.Index.TOKENIZED));
+ doc.add(new Field(field, docFields[i], Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}
writer.close();
Index: src/test/org/apache/lucene/search/function/FunctionTestSetup.java
===================================================================
--- src/test/org/apache/lucene/search/function/FunctionTestSetup.java (revision 692811)
+++ src/test/org/apache/lucene/search/function/FunctionTestSetup.java (working copy)
@@ -111,19 +111,19 @@
Fieldable f;
int scoreAndID = i+1;
- f = new Field(ID_FIELD,id2String(scoreAndID),Field.Store.YES,Field.Index.UN_TOKENIZED); // for debug purposes
+ f = new Field(ID_FIELD,id2String(scoreAndID),Field.Store.YES,Field.Index.NOT_ANALYZED); // for debug purposes
f.setOmitNorms(true);
d.add(f);
- f = new Field(TEXT_FIELD,"text of doc"+scoreAndID+textLine(i),Field.Store.NO,Field.Index.TOKENIZED); // for regular search
+ f = new Field(TEXT_FIELD,"text of doc"+scoreAndID+textLine(i),Field.Store.NO,Field.Index.ANALYZED); // for regular search
f.setOmitNorms(true);
d.add(f);
- f = new Field(INT_FIELD,""+scoreAndID,Field.Store.NO,Field.Index.UN_TOKENIZED); // for function scoring
+ f = new Field(INT_FIELD,""+scoreAndID,Field.Store.NO,Field.Index.NOT_ANALYZED); // for function scoring
f.setOmitNorms(true);
d.add(f);
- f = new Field(FLOAT_FIELD,scoreAndID+".000",Field.Store.NO,Field.Index.UN_TOKENIZED); // for function scoring
+ f = new Field(FLOAT_FIELD,scoreAndID+".000",Field.Store.NO,Field.Index.NOT_ANALYZED); // for function scoring
f.setOmitNorms(true);
d.add(f);
Index: src/test/org/apache/lucene/search/TestExplanations.java
===================================================================
--- src/test/org/apache/lucene/search/TestExplanations.java (revision 692811)
+++ src/test/org/apache/lucene/search/TestExplanations.java (working copy)
@@ -69,7 +69,7 @@
IndexWriter.MaxFieldLength.LIMITED);
for (int i = 0; i < docFields.length; i++) {
Document doc = new Document();
- doc.add(new Field(FIELD, docFields[i], Field.Store.NO, Field.Index.TOKENIZED));
+ doc.add(new Field(FIELD, docFields[i], Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}
writer.close();
Index: src/test/org/apache/lucene/search/TestRemoteSearchable.java
===================================================================
--- src/test/org/apache/lucene/search/TestRemoteSearchable.java (revision 692811)
+++ src/test/org/apache/lucene/search/TestRemoteSearchable.java (working copy)
@@ -56,8 +56,8 @@
RAMDirectory indexStore = new RAMDirectory();
IndexWriter writer = new IndexWriter(indexStore,new SimpleAnalyzer(),true, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
- doc.add(new Field("test", "test text", Field.Store.YES, Field.Index.TOKENIZED));
- doc.add(new Field("other", "other test text", Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("test", "test text", Field.Store.YES, Field.Index.ANALYZED));
+ doc.add(new Field("other", "other test text", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.optimize();
writer.close();
Index: src/test/org/apache/lucene/search/TestThreadSafe.java
===================================================================
--- src/test/org/apache/lucene/search/TestThreadSafe.java (revision 692811)
+++ src/test/org/apache/lucene/search/TestThreadSafe.java (working copy)
@@ -120,7 +120,7 @@
while (sb.length() < flen) sb.append(' ').append(words[r.nextInt(words.length)]);
sb.append(" $");
Field.Store store = Field.Store.YES; // make random later
- Field.Index index = Field.Index.TOKENIZED; // make random later
+ Field.Index index = Field.Index.ANALYZED; // make random later
d.add(new Field("f"+i, sb.toString(), store, index));
}
iw.addDocument(d);
Index: src/test/org/apache/lucene/search/TestScorerPerf.java
===================================================================
--- src/test/org/apache/lucene/search/TestScorerPerf.java (revision 692811)
+++ src/test/org/apache/lucene/search/TestScorerPerf.java (working copy)
@@ -69,7 +69,7 @@
Document d = new Document();
for (int j=0; j<nTerms; j++) {
if (r.nextInt(freq[j]) == 0) {
- d.add(new Field("f", terms[j].text(), Field.Store.NO, Field.Index.UN_TOKENIZED));
+ d.add(new Field("f", terms[j].text(), Field.Store.NO, Field.Index.NOT_ANALYZED));
Index: src/test/org/apache/lucene/index/TestIndexReaderReopen.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexReaderReopen.java (revision 692811)
+++ src/test/org/apache/lucene/index/TestIndexReaderReopen.java (working copy)
@@ -884,11 +884,11 @@
Document doc = new Document();
sb.append("a");
sb.append(n);
- doc.add(new Field("field1", sb.toString(), Store.YES, Index.TOKENIZED));
+ doc.add(new Field("field1", sb.toString(), Store.YES, Index.ANALYZED));
sb.append(" b");
sb.append(n);
for (int i = 1; i < numFields; i++) {
- doc.add(new Field("field" + (i+1), sb.toString(), Store.YES, Index.TOKENIZED));
+ doc.add(new Field("field" + (i+1), sb.toString(), Store.YES, Index.ANALYZED));
}
return doc;
}
Index: src/test/org/apache/lucene/index/TestSegmentTermDocs.java
===================================================================
--- src/test/org/apache/lucene/index/TestSegmentTermDocs.java (revision 692811)
+++ src/test/org/apache/lucene/index/TestSegmentTermDocs.java (working copy)
@@ -258,7 +258,7 @@
private void addDoc(IndexWriter writer, String value) throws IOException
{
Document doc = new Document();
- doc.add(new Field("content", value, Field.Store.NO, Field.Index.TOKENIZED));
+ doc.add(new Field("content", value, Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}
}
Index: src/test/org/apache/lucene/index/TestIndexWriter.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriter.java (revision 692811)
+++ src/test/org/apache/lucene/index/TestIndexWriter.java (working copy)
@@ -129,15 +129,15 @@
private static void addDoc(IndexWriter writer) throws IOException
{
Document doc = new Document();
- doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.TOKENIZED));
+ doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
}
private void addDocWithIndex(IndexWriter writer, int index) throws IOException
{
Document doc = new Document();
- doc.add(new Field("content", "aaa " + index, Field.Store.YES, Field.Index.TOKENIZED));
- doc.add(new Field("id", "" + index, Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("content", "aaa " + index, Field.Store.YES, Field.Index.ANALYZED));
+ doc.add(new Field("id", "" + index, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
@@ -566,12 +566,12 @@
// Max length term is 16383, so this contents produces
// a too-long term:
String contents = "abc xyz x" + bigTerm + " another term";
- doc.add(new Field("content", contents, Field.Store.NO, Field.Index.TOKENIZED));
+ doc.add(new Field("content", contents, Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
// Make sure we can add another normal document
doc = new Document();
- doc.add(new Field("content", "abc bbb ccc", Field.Store.NO, Field.Index.TOKENIZED));
+ doc.add(new Field("content", "abc bbb ccc", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
@@ -599,7 +599,7 @@
// Make sure we can add a document with exactly the
// maximum length term, and search on that term:
doc = new Document();
- doc.add(new Field("content", bigTerm, Field.Store.NO, Field.Index.TOKENIZED));
+ doc.add(new Field("content", bigTerm, Field.Store.NO, Field.Index.ANALYZED));
StandardAnalyzer sa = new StandardAnalyzer();
sa.setMaxTokenLength(100000);
writer = new IndexWriter(dir, sa, IndexWriter.MaxFieldLength.LIMITED);
@@ -617,7 +617,7 @@
MockRAMDirectory dir = new MockRAMDirectory();
final Document doc = new Document();
- doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
for(int numDocs=38;numDocs<500;numDocs += 38) {
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
@@ -655,7 +655,7 @@
MockRAMDirectory dir = new MockRAMDirectory();
final Document doc = new Document();
- doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
LogDocMergePolicy ldmp = new LogDocMergePolicy();
@@ -1245,12 +1245,12 @@
writer.setMaxBufferedDocs(10);
for(int j=0;j<100;j++) {
Document doc = new Document();
- doc.add(new Field("a"+j, "aaa" + j, Field.Store.YES, Field.Index.TOKENIZED));
- doc.add(new Field("b"+j, "aaa" + j, Field.Store.YES, Field.Index.TOKENIZED));
- doc.add(new Field("c"+j, "aaa" + j, Field.Store.YES, Field.Index.TOKENIZED));
- doc.add(new Field("d"+j, "aaa", Field.Store.YES, Field.Index.TOKENIZED));
- doc.add(new Field("e"+j, "aaa", Field.Store.YES, Field.Index.TOKENIZED));
- doc.add(new Field("f"+j, "aaa", Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("a"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
+ doc.add(new Field("b"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
+ doc.add(new Field("c"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
+ doc.add(new Field("d"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
+ doc.add(new Field("e"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
+ doc.add(new Field("f"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
writer.close();
@@ -1277,7 +1277,7 @@
int lastNumFile = dir.list().length;
for(int j=0;j<9;j++) {
Document doc = new Document();
- doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
int numFile = dir.list().length;
// Verify that with a tiny RAM buffer we see new
@@ -1300,7 +1300,7 @@
int lastFlushCount = -1;
for(int j=1;j<52;j++) {
Document doc = new Document();
- doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
_TestUtil.syncConcurrentMerges(writer);
int flushCount = writer.getFlushCount();
@@ -1354,7 +1354,7 @@
for(int j=1;j<52;j++) {
Document doc = new Document();
- doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
@@ -1416,7 +1416,7 @@
for(int j=0;j<100;j++) {
Document doc = new Document();
for(int k=0;k<100;k++) {
- doc.add(new Field("field", Integer.toString(rand.nextInt()), Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("field", Integer.toString(rand.nextInt()), Field.Store.YES, Field.Index.ANALYZED));
}
writer.addDocument(doc);
}
@@ -1425,7 +1425,7 @@
// occurs (heavy on byte blocks)
for(int j=0;j<100;j++) {
Document doc = new Document();
- doc.add(new Field("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
@@ -1440,7 +1440,7 @@
String longTerm = b.toString();
Document doc = new Document();
- doc.add(new Field("field", longTerm, Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("field", longTerm, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
}
@@ -1461,7 +1461,7 @@
// Enable norms for only 1 doc, pre flush
for(int j=0;j<10;j++) {
Document doc = new Document();
- Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.TOKENIZED);
+ Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
if (j != 8) {
f.setOmitNorms(true);
}
@@ -1482,7 +1482,7 @@
// Enable norms for only 1 doc, post flush
for(int j=0;j<27;j++) {
Document doc = new Document();
- Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.TOKENIZED);
+ Field f = new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED);
if (j != 26) {
f.setOmitNorms(true);
}
@@ -1514,7 +1514,7 @@
b.append(" a a a a a a a a");
}
Document doc = new Document();
- doc.add(new Field("field", b.toString(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ doc.add(new Field("field", b.toString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.close();
@@ -1571,7 +1571,7 @@
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMaxBufferedDocs(2);
Document doc = new Document();
- doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<19;i++)
writer.addDocument(doc);
writer.flush(false, true, true);
@@ -1589,7 +1589,7 @@
Directory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
- doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.flush();
writer.addDocument(new Document());
@@ -1609,7 +1609,7 @@
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMergeScheduler(new ConcurrentMergeScheduler());
Document doc = new Document();
- doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ doc.add(new Field("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.setMaxBufferedDocs(2);
writer.setMergeFactor(101);
for(int i=0;i<200;i++)
@@ -1663,7 +1663,7 @@
IndexWriter ir = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
Document document = new Document();
- document.add(new Field("tvtest", "", Field.Store.NO, Field.Index.TOKENIZED,
+ document.add(new Field("tvtest", "", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
ir.addDocument(document);
ir.close();
@@ -1675,17 +1675,17 @@
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
Document document = new Document();
- document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.TOKENIZED,
+ document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
iw.addDocument(document);
document = new Document();
- document.add(new Field("tvtest", "x y z", Field.Store.NO, Field.Index.TOKENIZED,
+ document.add(new Field("tvtest", "x y z", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.NO));
iw.addDocument(document);
// Make first segment
iw.flush();
- document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.TOKENIZED,
+ document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
iw.addDocument(document);
// Make 2nd segment
@@ -1701,13 +1701,13 @@
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
Document document = new Document();
- document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.TOKENIZED,
+ document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
iw.addDocument(document);
iw.flush();
document = new Document();
- document.add(new Field("tvtest", "x y z", Field.Store.NO, Field.Index.TOKENIZED,
+ document.add(new Field("tvtest", "x y z", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.NO));
iw.addDocument(document);
// Make first segment
@@ -1715,7 +1715,7 @@
iw.optimize();
- document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.TOKENIZED,
+ document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
iw.addDocument(document);
// Make 2nd segment
@@ -1733,7 +1733,7 @@
MockRAMDirectory dir = new MockRAMDirectory();
IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
Document document = new Document();
- document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.TOKENIZED,
+ document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
iw.setMaxBufferedDocs(2);
iw.setMergeFactor(2);
@@ -1775,7 +1775,7 @@
iw.setMaxBufferedDocs(2);
iw.setMergeFactor(2);
Document document = new Document();
- document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.TOKENIZED,
+ document.add(new Field("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
Field.TermVector.YES));
for(int i=0;i<177;i++)
iw.addDocument(document);
@@ -1805,7 +1805,7 @@
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
doc.add(new Field("content", contents, Field.Store.NO,
- Field.Index.TOKENIZED));
+ Field.Index.ANALYZED));
try {
writer.addDocument(doc);
fail("did not hit expected exception");
@@ -1815,13 +1815,13 @@
// Make sure we can add another normal document
doc = new Document();
doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
- Field.Index.TOKENIZED));
+ Field.Index.ANALYZED));
writer.addDocument(doc);
// Make sure we can add another normal document
doc = new Document();
doc.add(new Field("content", "aa bb cc dd", Field.Store.NO,
- Field.Index.TOKENIZED));
+ Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
@@ -1887,7 +1887,7 @@
Document doc = new Document();
String contents = "aa bb cc dd ee ff gg hh ii jj kk";
doc.add(new Field("content", contents, Field.Store.NO,
- Field.Index.TOKENIZED));
+ Field.Index.ANALYZED));
boolean hitError = false;
for(int i=0;i<200;i++) {
try {
@@ -1939,13 +1939,13 @@
//writer.setInfoStream(System.out);
Document doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
- Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
doc.add(new Field("crash", "this should crash after 4 terms", Field.Store.YES,
- Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(new Field("other", "this will not get indexed", Field.Store.YES,
- Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
try {
writer.addDocument(doc);
fail("did not hit expected exception");
@@ -1955,7 +1955,7 @@
if (0 == i) {
doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
- Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
writer.addDocument(doc);
writer.addDocument(doc);
}
@@ -1982,7 +1982,7 @@
writer.setMaxBufferedDocs(10);
doc = new Document();
doc.add(new Field("contents", "here are some contents", Field.Store.YES,
- Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int j=0;j<17;j++)
writer.addDocument(doc);
writer.optimize();
Index: src/java/org/apache/lucene/search/Sort.java
===================================================================
--- src/java/org/apache/lucene/search/Sort.java (revision 692811)
+++ src/java/org/apache/lucene/search/Sort.java (working copy)
- * document.add (new Field ("byNumber", Integer.toString(x), Field.Store.NO, Field.Index.UN_TOKENIZED));
+ * document.add (new Field ("byNumber", Integer.toString(x), Field.Store.NO, Field.Index.NOT_ANALYZED));
*
*
* Valid Types of Values
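For reference, a minimal sketch of sorting on such a single-term field, assuming the Lucene 2.4 search API (names follow the javadoc example above):

    // Sort fields must contain exactly one term per document, which is
    // why the example indexes "byNumber" with NOT_ANALYZED:
    doc.add(new Field("byNumber", Integer.toString(x), Field.Store.NO,
                      Field.Index.NOT_ANALYZED));
    Hits hits = searcher.search(query, new Sort("byNumber"));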
Index: src/java/org/apache/lucene/index/FieldsReader.java
===================================================================
--- src/java/org/apache/lucene/index/FieldsReader.java (revision 692811)
+++ src/java/org/apache/lucene/index/FieldsReader.java (working copy)
@@ -405,9 +405,9 @@
private Field.Index getIndexType(FieldInfo fi, boolean tokenize) {
Field.Index index;
if (fi.isIndexed && tokenize)
- index = Field.Index.TOKENIZED;
+ index = Field.Index.ANALYZED;
else if (fi.isIndexed && !tokenize)
- index = Field.Index.UN_TOKENIZED;
+ index = Field.Index.NOT_ANALYZED;
else
index = Field.Index.NO;
return index;
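A standalone sketch of the mapping above (helper name hypothetical). Only ANALYZED, NOT_ANALYZED and NO can be reconstructed from these two flags; the *_NO_NORMS variants never come back out of getIndexType, since the omitNorms flag travels separately on the FieldInfo:

    static Field.Index toIndexType(boolean isIndexed, boolean tokenize) {
      if (isIndexed && tokenize) return Field.Index.ANALYZED;     // was TOKENIZED
      if (isIndexed)             return Field.Index.NOT_ANALYZED; // was UN_TOKENIZED
      return Field.Index.NO;
    }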
Index: src/java/org/apache/lucene/index/IndexModifier.java
===================================================================
--- src/java/org/apache/lucene/index/IndexModifier.java (revision 692811)
+++ src/java/org/apache/lucene/index/IndexModifier.java (working copy)
@@ -57,8 +57,8 @@
// create an index in /tmp/index, overwriting an existing one:
IndexModifier indexModifier = new IndexModifier("/tmp/index", analyzer, true);
Document doc = new Document();
- doc.add(new Field("id", "1", Field.Store.YES, Field.Index.UN_TOKENIZED));
- doc.add(new Field("body", "a simple test", Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
+ doc.add(new Field("body", "a simple test", Field.Store.YES, Field.Index.ANALYZED));
indexModifier.addDocument(doc);
int deleted = indexModifier.delete(new Term("id", "1"));
System.out.println("Deleted " + deleted + " document");
@@ -593,8 +593,8 @@
// create an index in /tmp/index, overwriting an existing one:
IndexModifier indexModifier = new IndexModifier("/tmp/index", analyzer, true);
Document doc = new Document();
- doc.add(new Fieldable("id", "1", Fieldable.Store.YES, Fieldable.Index.UN_TOKENIZED));
- doc.add(new Fieldable("body", "a simple test", Fieldable.Store.YES, Fieldable.Index.TOKENIZED));
+ doc.add(new Field("id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
+ doc.add(new Field("body", "a simple test", Field.Store.YES, Field.Index.ANALYZED));
indexModifier.addDocument(doc);
int deleted = indexModifier.delete(new Term("id", "1"));
System.out.println("Deleted " + deleted + " document");
Index: src/java/org/apache/lucene/document/Field.java
===================================================================
--- src/java/org/apache/lucene/document/Field.java (revision 692811)
+++ src/java/org/apache/lucene/document/Field.java (working copy)
@@ -69,30 +69,49 @@
* {@link Field.Store stored}. */
public static final Index NO = new Index("NO");
- /** Index the field's value so it can be searched. An Analyzer will be used
- * to tokenize and possibly further normalize the text before its
- * terms will be stored in the index. This is useful for common text.
- */
- public static final Index TOKENIZED = new Index("TOKENIZED");
+ /** Index the tokens produced by running the field's
+ * value through an Analyzer. This is useful for
+ * common text. */
+ public static final Index ANALYZED = new Index("ANALYZED");
+ /** @deprecated This has been renamed to {@link #ANALYZED} */
+ public static final Index TOKENIZED = ANALYZED;
+
/** Index the field's value without using an Analyzer, so it can be searched.
* As no analyzer is used the value will be stored as a single term. This is
* useful for unique Ids like product numbers.
*/
- public static final Index UN_TOKENIZED = new Index("UN_TOKENIZED");
+ public static final Index NOT_ANALYZED = new Index("NOT_ANALYZED");
- /** Index the field's value without an Analyzer, and disable
- * the storing of norms. No norms means that index-time boosting
- * and field length normalization will be disabled. The benefit is
- * less memory usage as norms take up one byte per indexed field
- * for every document in the index.
- * Note that once you index a given field with norms enabled,
- * disabling norms will have no effect. In other words, for NO_NORMS
- * to have the above described effect on a field, all instances of that
- * field must be indexed with NO_NORMS from the beginning.
- */
- public static final Index NO_NORMS = new Index("NO_NORMS");
+ /** @deprecated This has been renamed to {@link #NOT_ANALYZED} */
+ public static final Index UN_TOKENIZED = NOT_ANALYZED;
+ /** Expert: Index the field's value without an Analyzer,
+ * and also disable the storing of norms. Note that you
+ * can also separately enable/disable norms by calling
+ * {@link #setOmitNorms}. No norms means that
+ * index-time field and document boosting and field
+ * length normalization are disabled. The benefit is
+ * less memory usage as norms take up one byte of RAM
+ * per indexed field for every document in the index,
+ * during searching. Note that once you index a given
+ * field with norms enabled, disabling norms will
+ * have no effect. In other words, for this to have the
+ * above described effect on a field, all instances of
+ * that field must be indexed with NOT_ANALYZED_NO_NORMS
+ * from the beginning. */
+ public static final Index NOT_ANALYZED_NO_NORMS = new Index("NOT_ANALYZED_NO_NORMS");
+
+ /** @deprecated This has been renamed to
+ * {@link #NOT_ANALYZED_NO_NORMS} */
+ public static final Index NO_NORMS = NOT_ANALYZED_NO_NORMS;
+
+ /** Expert: Index the tokens produced by running the
+ * field's value through an Analyzer, and also
+ * separately disable the storing of norms. See
+ * {@link #NOT_ANALYZED_NO_NORMS} for what norms are
+ * and why you may want to disable them. */
+ public static final Index ANALYZED_NO_NORMS = new Index("ANALYZED_NO_NORMS");
}
/** Specifies whether and how a field should have term vectors. */
@@ -284,16 +303,20 @@
if (index == Index.NO) {
this.isIndexed = false;
this.isTokenized = false;
- } else if (index == Index.TOKENIZED) {
+ } else if (index == Index.ANALYZED) {
this.isIndexed = true;
this.isTokenized = true;
- } else if (index == Index.UN_TOKENIZED) {
+ } else if (index == Index.NOT_ANALYZED) {
this.isIndexed = true;
this.isTokenized = false;
- } else if (index == Index.NO_NORMS) {
+ } else if (index == Index.NOT_ANALYZED_NO_NORMS) {
this.isIndexed = true;
this.isTokenized = false;
this.omitNorms = true;
+ } else if (index == Index.ANALYZED_NO_NORMS) {
+ this.isIndexed = true;
+ this.isTokenized = true;
+ this.omitNorms = true;
} else {
throw new IllegalArgumentException("unknown index parameter " + index);
}
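Taken together, a short usage sketch of the renamed options (field names illustrative). Per the javadoc above, ANALYZED_NO_NORMS behaves like ANALYZED plus setOmitNorms(true), provided every instance of the field is indexed that way:

    Document doc = new Document();
    doc.add(new Field("body", "some text", Field.Store.NO, Field.Index.ANALYZED));          // was TOKENIZED
    doc.add(new Field("id", "42", Field.Store.YES, Field.Index.NOT_ANALYZED));              // was UN_TOKENIZED
    doc.add(new Field("sku", "ab-17", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS)); // was NO_NORMS

    // The newly added expert option...
    Field tags = new Field("tags", "red green", Field.Store.NO, Field.Index.ANALYZED_NO_NORMS);
    // ...is equivalent to enabling omitNorms by hand:
    Field tags2 = new Field("tags", "red green", Field.Store.NO, Field.Index.ANALYZED);
    tags2.setOmitNorms(true);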
Index: src/java/org/apache/lucene/document/AbstractField.java
===================================================================
--- src/java/org/apache/lucene/document/AbstractField.java (revision 692811)
+++ src/java/org/apache/lucene/document/AbstractField.java (working copy)
@@ -68,16 +68,20 @@
if (index == Field.Index.NO) {
this.isIndexed = false;
this.isTokenized = false;
- } else if (index == Field.Index.TOKENIZED) {
+ } else if (index == Field.Index.ANALYZED) {
this.isIndexed = true;
this.isTokenized = true;
- } else if (index == Field.Index.UN_TOKENIZED) {
+ } else if (index == Field.Index.NOT_ANALYZED) {
this.isIndexed = true;
this.isTokenized = false;
- } else if (index == Field.Index.NO_NORMS) {
+ } else if (index == Field.Index.NOT_ANALYZED_NO_NORMS) {
this.isIndexed = true;
this.isTokenized = false;
this.omitNorms = true;
+ } else if (index == Field.Index.ANALYZED_NO_NORMS) {
+ this.isIndexed = true;
+ this.isTokenized = true;
+ this.omitNorms = true;
} else {
throw new IllegalArgumentException("unknown index parameter " + index);
}
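Both constructors above dispatch on ==, which keeps working for callers passing the deprecated names only because those names are aliases of the very same Index singletons; a quick check, not part of the patch:

    assert Field.Index.TOKENIZED == Field.Index.ANALYZED;
    assert Field.Index.UN_TOKENIZED == Field.Index.NOT_ANALYZED;
    assert Field.Index.NO_NORMS == Field.Index.NOT_ANALYZED_NO_NORMS;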
Index: src/demo/org/apache/lucene/demo/HTMLDocument.java
===================================================================
--- src/demo/org/apache/lucene/demo/HTMLDocument.java (revision 692811)
+++ src/demo/org/apache/lucene/demo/HTMLDocument.java (working copy)
@@ -49,19 +49,19 @@
// Add the url as a field named "path". Use a field that is
// indexed (i.e. searchable), but don't tokenize the field into words.
doc.add(new Field("path", f.getPath().replace(dirSep, '/'), Field.Store.YES,
- Field.Index.UN_TOKENIZED));
+ Field.Index.NOT_ANALYZED));
// Add the last modified date of the file a field named "modified".
// Use a field that is indexed (i.e. searchable), but don't tokenize
// the field into words.
doc.add(new Field("modified",
DateTools.timeToString(f.lastModified(), DateTools.Resolution.MINUTE),
- Field.Store.YES, Field.Index.UN_TOKENIZED));
+ Field.Store.YES, Field.Index.NOT_ANALYZED));
// Add the uid as a field, so that index can be incrementally maintained.
// This field is not stored with document, it is indexed, but it is not
// tokenized prior to indexing.
- doc.add(new Field("uid", uid(f), Field.Store.NO, Field.Index.UN_TOKENIZED));
+ doc.add(new Field("uid", uid(f), Field.Store.NO, Field.Index.NOT_ANALYZED));
FileInputStream fis = new FileInputStream(f);
HTMLParser parser = new HTMLParser(fis);
@@ -75,7 +75,7 @@
doc.add(new Field("summary", parser.getSummary(), Field.Store.YES, Field.Index.NO));
// Add the title as a field that it can be searched and that is stored.
- doc.add(new Field("title", parser.getTitle(), Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("title", parser.getTitle(), Field.Store.YES, Field.Index.ANALYZED));
// return the document
return doc;
Index: src/demo/org/apache/lucene/demo/FileDocument.java
===================================================================
--- src/demo/org/apache/lucene/demo/FileDocument.java (revision 692811)
+++ src/demo/org/apache/lucene/demo/FileDocument.java (working copy)
@@ -47,14 +47,14 @@
// Add the path of the file as a field named "path". Use a field that is
// indexed (i.e. searchable), but don't tokenize the field into words.
- doc.add(new Field("path", f.getPath(), Field.Store.YES, Field.Index.UN_TOKENIZED));
+ doc.add(new Field("path", f.getPath(), Field.Store.YES, Field.Index.NOT_ANALYZED));
// Add the last modified date of the file a field named "modified". Use
// a field that is indexed (i.e. searchable), but don't tokenize the field
// into words.
doc.add(new Field("modified",
DateTools.timeToString(f.lastModified(), DateTools.Resolution.MINUTE),
- Field.Store.YES, Field.Index.UN_TOKENIZED));
+ Field.Store.YES, Field.Index.NOT_ANALYZED));
// Add the contents of the file to a field named "contents". Specify a Reader,
// so that the text of the file is tokenized and indexed, but not stored.
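The "contents" field mentioned in that comment takes a Reader, and Reader-valued fields have no Store/Index arguments at all; they are analyzed by the IndexWriter's analyzer and cannot be stored (sketch, assuming FileDocument's surrounding code):

    doc.add(new Field("contents", new FileReader(f)));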
Index: contrib/surround/src/test/org/apache/lucene/queryParser/surround/query/SingleFieldTestDb.java
===================================================================
--- contrib/surround/src/test/org/apache/lucene/queryParser/surround/query/SingleFieldTestDb.java (revision 692811)
+++ contrib/surround/src/test/org/apache/lucene/queryParser/surround/query/SingleFieldTestDb.java (working copy)
@@ -40,7 +40,7 @@
IndexWriter.MaxFieldLength.LIMITED);
for (int j = 0; j < docs.length; j++) {
Document d = new Document();
- d.add(new Field(fieldName, docs[j], Field.Store.NO, Field.Index.TOKENIZED));
+ d.add(new Field(fieldName, docs[j], Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(d);
}
writer.close();
Index: contrib/wordnet/src/java/org/apache/lucene/wordnet/Syns2Index.java
===================================================================
--- contrib/wordnet/src/java/org/apache/lucene/wordnet/Syns2Index.java (revision 692811)
+++ contrib/wordnet/src/java/org/apache/lucene/wordnet/Syns2Index.java (working copy)
@@ -256,7 +256,7 @@
int n = index(word2Nums, num2Words, g, doc);
if (n > 0)
{
- doc.add( new Field( F_WORD, g, Field.Store.YES, Field.Index.UN_TOKENIZED));
+ doc.add( new Field( F_WORD, g, Field.Store.YES, Field.Index.NOT_ANALYZED));
if ((++row % mod) == 0)
{
o.println("\trow=" + row + "/" + word2Nums.size() + " doc= " + doc);
Index: contrib/ant/src/java/org/apache/lucene/ant/HtmlDocument.java
===================================================================
--- contrib/ant/src/java/org/apache/lucene/ant/HtmlDocument.java (revision 692811)
+++ contrib/ant/src/java/org/apache/lucene/ant/HtmlDocument.java (working copy)
@@ -95,8 +95,8 @@
org.apache.lucene.document.Document luceneDoc =
new org.apache.lucene.document.Document();
- luceneDoc.add(new Field("title", htmlDoc.getTitle(), Field.Store.YES, Field.Index.TOKENIZED));
- luceneDoc.add(new Field("contents", htmlDoc.getBody(), Field.Store.YES, Field.Index.TOKENIZED));
+ luceneDoc.add(new Field("title", htmlDoc.getTitle(), Field.Store.YES, Field.Index.ANALYZED));
+ luceneDoc.add(new Field("contents", htmlDoc.getBody(), Field.Store.YES, Field.Index.ANALYZED));
return luceneDoc;
}
@@ -119,8 +119,8 @@
org.apache.lucene.document.Document luceneDoc =
new org.apache.lucene.document.Document();
- luceneDoc.add(new Field("title", htmlDoc.getTitle(), Field.Store.YES, Field.Index.TOKENIZED));
- luceneDoc.add(new Field("contents", htmlDoc.getBody(), Field.Store.YES, Field.Index.TOKENIZED));
+ luceneDoc.add(new Field("title", htmlDoc.getTitle(), Field.Store.YES, Field.Index.ANALYZED));
+ luceneDoc.add(new Field("contents", htmlDoc.getBody(), Field.Store.YES, Field.Index.ANALYZED));
String contents = null;
BufferedReader br =
Index: contrib/ant/src/java/org/apache/lucene/ant/TextDocument.java
===================================================================
--- contrib/ant/src/java/org/apache/lucene/ant/TextDocument.java (revision 692811)
+++ contrib/ant/src/java/org/apache/lucene/ant/TextDocument.java (working copy)
@@ -79,8 +79,8 @@
// make a new, empty document
Document doc = new Document();
- doc.add(new Field("title", f.getName(), Field.Store.YES, Field.Index.TOKENIZED));
- doc.add(new Field("contents", textDoc.getContents(), Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("title", f.getName(), Field.Store.YES, Field.Index.ANALYZED));
+ doc.add(new Field("contents", textDoc.getContents(), Field.Store.YES, Field.Index.ANALYZED));
doc.add(new Field("rawcontents", textDoc.getContents(), Field.Store.YES, Field.Index.NO));
// return the document
Index: contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java
===================================================================
--- contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java (revision 692811)
+++ contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java (working copy)
@@ -330,12 +330,12 @@
} else {
// Add the path of the file as a field named "path". Use a Keyword field, so
// that the index stores the path, and so that the path is searchable
- doc.add(new Field("path", file.getPath(), Field.Store.YES, Field.Index.UN_TOKENIZED));
+ doc.add(new Field("path", file.getPath(), Field.Store.YES, Field.Index.NOT_ANALYZED));
// Add the last modified date of the file a field named "modified". Use a
// Keyword field, so that it's searchable, but so that no attempt is made
// to tokenize the field into words.
- doc.add(new Field("modified", DateTools.timeToString(file.lastModified(), DateTools.Resolution.MILLISECOND), Field.Store.YES, Field.Index.UN_TOKENIZED));
+ doc.add(new Field("modified", DateTools.timeToString(file.lastModified(), DateTools.Resolution.MILLISECOND), Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
totalIndexed++;
Index: contrib/swing/src/java/org/apache/lucene/swing/models/TableSearcher.java
===================================================================
--- contrib/swing/src/java/org/apache/lucene/swing/models/TableSearcher.java (revision 692811)
+++ contrib/swing/src/java/org/apache/lucene/swing/models/TableSearcher.java (working copy)
@@ -173,14 +173,14 @@
//this will allow us to retrive the results later
//and map this table model's row to a row in the decorated
//table model
- document.add(new Field(ROW_NUMBER, "" + row, Field.Store.YES, Field.Index.TOKENIZED));
+ document.add(new Field(ROW_NUMBER, "" + row, Field.Store.YES, Field.Index.ANALYZED));
//iterate through all columns
//index the value keyed by the column name
//NOTE: there could be a problem with using column names with spaces
for (int column=0; column < tableModel.getColumnCount(); column++){
String columnName = tableModel.getColumnName(column);
String columnValue = String.valueOf(tableModel.getValueAt(row, column)).toLowerCase();
- document.add(new Field(columnName, columnValue, Field.Store.YES, Field.Index.TOKENIZED));
+ document.add(new Field(columnName, columnValue, Field.Store.YES, Field.Index.ANALYZED));
}
writer.addDocument(document);
}
Index: contrib/swing/src/java/org/apache/lucene/swing/models/ListSearcher.java
===================================================================
--- contrib/swing/src/java/org/apache/lucene/swing/models/ListSearcher.java (revision 692811)
+++ contrib/swing/src/java/org/apache/lucene/swing/models/ListSearcher.java (working copy)
@@ -122,9 +122,9 @@
//this will allow us to retrive the results later
//and map this list model's row to a row in the decorated
//list model
- document.add(new Field(ROW_NUMBER, "" + row, Field.Store.YES, Field.Index.TOKENIZED));
+ document.add(new Field(ROW_NUMBER, "" + row, Field.Store.YES, Field.Index.ANALYZED));
//add the string representation of the row to the index
- document.add(new Field(FIELD_NAME, String.valueOf(listModel.getElementAt(row)).toLowerCase(), Field.Store.YES, Field.Index.TOKENIZED));
+ document.add(new Field(FIELD_NAME, String.valueOf(listModel.getElementAt(row)).toLowerCase(), Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(document);
}
writer.optimize();
Index: contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java
===================================================================
--- contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java (revision 692811)
+++ contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java (working copy)
@@ -148,18 +148,18 @@
}
private void assembleDocument(Document document, int i) {
- document.add(new Field("a", i + " Do you really want to go and live in that house all winter?", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ document.add(new Field("a", i + " Do you really want to go and live in that house all winter?", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
if (i > 0) {
- document.add(new Field("b0", i + " All work and no play makes Jack a dull boy", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
- document.add(new Field("b1", i + " All work and no play makes Jack a dull boy", Field.Store.YES, Field.Index.NO_NORMS, Field.TermVector.NO));
- document.add(new Field("b2", i + " All work and no play makes Jack a dull boy", Field.Store.NO, Field.Index.UN_TOKENIZED, Field.TermVector.NO));
+ document.add(new Field("b0", i + " All work and no play makes Jack a dull boy", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ document.add(new Field("b1", i + " All work and no play makes Jack a dull boy", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS, Field.TermVector.NO));
+ document.add(new Field("b2", i + " All work and no play makes Jack a dull boy", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.NO));
document.add(new Field("b3", i + " All work and no play makes Jack a dull boy", Field.Store.YES, Field.Index.NO, Field.TermVector.NO));
if (i > 1) {
- document.add(new Field("c", i + " Redrum redrum", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ document.add(new Field("c", i + " Redrum redrum", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
if (i > 2) {
- document.add(new Field("d", i + " Hello Danny, come and play with us... forever and ever. and ever.", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ document.add(new Field("d", i + " Hello Danny, come and play with us... forever and ever. and ever.", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
if (i > 3) {
- Field f = new Field("e", i + " Heres Johnny!", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
+ Field f = new Field("e", i + " Heres Johnny!", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
f.setOmitNorms(true);
document.add(f);
if (i > 4) {
Index: contrib/benchmark/src/java/org/apache/lucene/benchmark/standard/StandardBenchmarker.java
===================================================================
--- contrib/benchmark/src/java/org/apache/lucene/benchmark/standard/StandardBenchmarker.java (revision 692811)
+++ contrib/benchmark/src/java/org/apache/lucene/benchmark/standard/StandardBenchmarker.java (working copy)
@@ -258,11 +258,11 @@
for (int i = 0; i < tags.length; i++)
{
doc.add(new Field("tag" + i, tags[i], stored == true ? Field.Store.YES : Field.Store.NO,
- tokenized == true ? Field.Index.TOKENIZED : Field.Index.UN_TOKENIZED, tfv == true ? Field.TermVector.YES : Field.TermVector.NO));
+ tokenized == true ? Field.Index.ANALYZED : Field.Index.NOT_ANALYZED, tfv == true ? Field.TermVector.YES : Field.TermVector.NO));
}
}
doc.add(new Field("file", in.getCanonicalPath(), stored == true ? Field.Store.YES : Field.Store.NO,
- tokenized == true ? Field.Index.TOKENIZED : Field.Index.UN_TOKENIZED, tfv == true ? Field.TermVector.YES : Field.TermVector.NO));
+ tokenized == true ? Field.Index.ANALYZED : Field.Index.NOT_ANALYZED, tfv == true ? Field.TermVector.YES : Field.TermVector.NO));
BufferedReader reader = new BufferedReader(new FileReader(in));
String line = null;
//First line is the date, 3rd is the title, rest is body
@@ -279,17 +279,17 @@
Date date = format.parse(dateStr.trim());
- doc.add(new Field("date", DateTools.dateToString(date, DateTools.Resolution.SECOND), Field.Store.YES, Field.Index.UN_TOKENIZED));
+ doc.add(new Field("date", DateTools.dateToString(date, DateTools.Resolution.SECOND), Field.Store.YES, Field.Index.NOT_ANALYZED));
if (title != null)
{
doc.add(new Field("title", title, stored == true ? Field.Store.YES : Field.Store.NO,
- tokenized == true ? Field.Index.TOKENIZED : Field.Index.UN_TOKENIZED, tfv == true ? Field.TermVector.YES : Field.TermVector.NO));
+ tokenized == true ? Field.Index.ANALYZED : Field.Index.NOT_ANALYZED, tfv == true ? Field.TermVector.YES : Field.TermVector.NO));
}
if (body.length() > 0)
{
doc.add(new Field("body", body.toString(), stored == true ? Field.Store.YES : Field.Store.NO,
- tokenized == true ? Field.Index.TOKENIZED : Field.Index.UN_TOKENIZED, tfv == true ? Field.TermVector.YES : Field.TermVector.NO));
+ tokenized == true ? Field.Index.ANALYZED : Field.Index.NOT_ANALYZED, tfv == true ? Field.TermVector.YES : Field.TermVector.NO));
}
return doc;
Index: contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/BasicDocMaker.java
===================================================================
--- contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/BasicDocMaker.java (revision 692811)
+++ contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/BasicDocMaker.java (working copy)
@@ -71,7 +71,7 @@
protected Config config;
protected Field.Store storeVal = Field.Store.NO;
- protected Field.Index indexVal = Field.Index.TOKENIZED;
+ protected Field.Index indexVal = Field.Index.ANALYZED;
protected Field.TermVector termVecVal = Field.TermVector.NO;
private synchronized int incrNumDocsCreated() {
@@ -196,7 +196,7 @@
boolean tokenized = config.get("doc.tokenized",true);
boolean termVec = config.get("doc.term.vector",false);
storeVal = (stored ? Field.Store.YES : Field.Store.NO);
- indexVal = (tokenized ? Field.Index.TOKENIZED : Field.Index.UN_TOKENIZED);
+ indexVal = (tokenized ? Field.Index.ANALYZED : Field.Index.NOT_ANALYZED);
boolean termVecPositions = config.get("doc.term.vector.positions",false);
boolean termVecOffsets = config.get("doc.term.vector.offsets",false);
if (termVecPositions && termVecOffsets)
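So the benchmark's index options come straight from its .alg properties; an illustrative configuration (doc.stored is assumed by analogy with the two properties read above):

    doc.stored=false        # Field.Store.NO
    doc.tokenized=true      # Field.Index.ANALYZED (was TOKENIZED)
    doc.term.vector=false   # Field.TermVector.NO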
Index: contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LineDocMaker.java
===================================================================
--- contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LineDocMaker.java (revision 692811)
+++ contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/LineDocMaker.java (working copy)
@@ -63,19 +63,19 @@
bodyField = new Field(BasicDocMaker.BODY_FIELD,
"",
storeVal,
- Field.Index.TOKENIZED,
+ Field.Index.ANALYZED,
termVecVal);
titleField = new Field(BasicDocMaker.TITLE_FIELD,
"",
storeVal,
- Field.Index.TOKENIZED,
+ Field.Index.ANALYZED,
termVecVal);
dateField = new Field(BasicDocMaker.DATE_FIELD,
"",
storeVal,
- Field.Index.TOKENIZED,
+ Field.Index.ANALYZED,
termVecVal);
- idField = new Field(BasicDocMaker.ID_FIELD, "", Field.Store.YES, Field.Index.NO_NORMS);
+ idField = new Field(BasicDocMaker.ID_FIELD, "", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
doc = new Document();
doc.add(bodyField);
@@ -111,17 +111,17 @@
Field localTitleField = new Field(BasicDocMaker.TITLE_FIELD,
title,
storeVal,
- Field.Index.TOKENIZED,
+ Field.Index.ANALYZED,
termVecVal);
Field localBodyField = new Field(BasicDocMaker.BODY_FIELD,
body,
storeVal,
- Field.Index.TOKENIZED,
+ Field.Index.ANALYZED,
termVecVal);
Field localDateField = new Field(BasicDocMaker.BODY_FIELD,
date,
storeVal,
- Field.Index.TOKENIZED,
+ Field.Index.ANALYZED,
termVecVal);
Document localDoc = new Document();
localDoc.add(localBodyField);
Index: contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java
===================================================================
--- contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java (revision 692811)
+++ contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java (working copy)
@@ -70,8 +70,8 @@
String date=line.substring(0,endOfDate).trim();
String content=line.substring(endOfDate).trim();
org.apache.lucene.document.Document doc =new org.apache.lucene.document.Document();
- doc.add(new Field("date",date,Field.Store.YES,Field.Index.TOKENIZED));
- doc.add(new Field("contents",content,Field.Store.YES,Field.Index.TOKENIZED));
+ doc.add(new Field("date",date,Field.Store.YES,Field.Index.ANALYZED));
+ doc.add(new Field("contents",content,Field.Store.YES,Field.Index.ANALYZED));
writer.addDocument(doc);
line=d.readLine();
}
Index: contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestQueryTemplateManager.java
===================================================================
--- contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestQueryTemplateManager.java (revision 692811)
+++ contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestQueryTemplateManager.java (working copy)
@@ -128,7 +128,7 @@
if(st.hasMoreTokens())
{
String value=st.nextToken().trim();
- result.add(new Field(name,value,Field.Store.YES,Field.Index.TOKENIZED));
+ result.add(new Field(name,value,Field.Store.YES,Field.Index.ANALYZED));
}
}
return result;
Index: contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
===================================================================
--- contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (revision 692811)
+++ contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (working copy)
@@ -1020,7 +1020,7 @@
RAMDirectory ramDir1 = new RAMDirectory();
IndexWriter writer1 = new IndexWriter(ramDir1, new StandardAnalyzer(), true);
Document d = new Document();
- Field f = new Field(FIELD_NAME, "multiOne", Field.Store.YES, Field.Index.TOKENIZED);
+ Field f = new Field(FIELD_NAME, "multiOne", Field.Store.YES, Field.Index.ANALYZED);
d.add(f);
writer1.addDocument(d);
writer1.optimize();
@@ -1031,7 +1031,7 @@
RAMDirectory ramDir2 = new RAMDirectory();
IndexWriter writer2 = new IndexWriter(ramDir2, new StandardAnalyzer(), true);
d = new Document();
- f = new Field(FIELD_NAME, "multiTwo", Field.Store.YES, Field.Index.TOKENIZED);
+ f = new Field(FIELD_NAME, "multiTwo", Field.Store.YES, Field.Index.ANALYZED);
d.add(f);
writer2.addDocument(d);
writer2.optimize();
@@ -1264,7 +1264,7 @@
private Document doc( String f, String v ){
Document doc = new Document();
- doc.add( new Field( f, v, Store.YES, Index.TOKENIZED ) );
+ doc.add( new Field( f, v, Store.YES, Index.ANALYZED ) );
return doc;
}
@@ -1394,7 +1394,7 @@
private void addDoc(IndexWriter writer, String text) throws IOException {
Document d = new Document();
- Field f = new Field(FIELD_NAME, text, Field.Store.YES, Field.Index.TOKENIZED);
+ Field f = new Field(FIELD_NAME, text, Field.Store.YES, Field.Index.ANALYZED);
d.add(f);
writer.addDocument(d);
Index: contrib/miscellaneous/src/test/org/apache/lucene/index/TestTermVectorAccessor.java
===================================================================
--- contrib/miscellaneous/src/test/org/apache/lucene/index/TestTermVectorAccessor.java (revision 692811)
+++ contrib/miscellaneous/src/test/org/apache/lucene/index/TestTermVectorAccessor.java (working copy)
@@ -34,33 +34,33 @@
Document doc;
doc = new Document();
- doc.add(new Field("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
- doc.add(new Field("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
- doc.add(new Field("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ doc.add(new Field("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ doc.add(new Field("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ doc.add(new Field("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
iw.addDocument(doc);
doc = new Document();
- doc.add(new Field("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS));
- doc.add(new Field("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS));
- doc.add(new Field("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS));
+ doc.add(new Field("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
+ doc.add(new Field("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
+ doc.add(new Field("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
iw.addDocument(doc);
doc = new Document();
- doc.add(new Field("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.YES));
- doc.add(new Field("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.YES));
- doc.add(new Field("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.YES));
+ doc.add(new Field("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES));
+ doc.add(new Field("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES));
+ doc.add(new Field("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES));
iw.addDocument(doc);
doc = new Document();
- doc.add(new Field("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.NO));
- doc.add(new Field("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.NO));
- doc.add(new Field("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.NO));
+ doc.add(new Field("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.NO));
+ doc.add(new Field("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.NO));
+ doc.add(new Field("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.NO));
iw.addDocument(doc);
doc = new Document();
- doc.add(new Field("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
- doc.add(new Field("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.NO));
- doc.add(new Field("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.YES));
+ doc.add(new Field("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+ doc.add(new Field("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.NO));
+ doc.add(new Field("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES));
iw.addDocument(doc);
iw.close();
Index: contrib/miscellaneous/src/test/org/apache/lucene/index/TestFieldNormModifier.java
===================================================================
--- contrib/miscellaneous/src/test/org/apache/lucene/index/TestFieldNormModifier.java (revision 692811)
+++ contrib/miscellaneous/src/test/org/apache/lucene/index/TestFieldNormModifier.java (working copy)
@@ -64,13 +64,13 @@
for (int i = 0; i < NUM_DOCS; i++) {
Document d = new Document();
- d.add(new Field("field", "word", Field.Store.YES, Field.Index.TOKENIZED));
- d.add(new Field("nonorm", "word", Field.Store.YES, Field.Index.NO_NORMS));
- d.add(new Field("untokfield", "20061212 20071212", Field.Store.YES, Field.Index.TOKENIZED));
+ d.add(new Field("field", "word", Field.Store.YES, Field.Index.ANALYZED));
+ d.add(new Field("nonorm", "word", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
+ d.add(new Field("untokfield", "20061212 20071212", Field.Store.YES, Field.Index.ANALYZED));
for (int j = 1; j <= i; j++) {
- d.add(new Field("field", "crap", Field.Store.YES, Field.Index.TOKENIZED));
- d.add(new Field("nonorm", "more words", Field.Store.YES, Field.Index.NO_NORMS));
+ d.add(new Field("field", "crap", Field.Store.YES, Field.Index.ANALYZED));
+ d.add(new Field("nonorm", "more words", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
}
writer.addDocument(d);
}
Index: contrib/miscellaneous/src/test/org/apache/lucene/misc/ChainedFilterTest.java
===================================================================
--- contrib/miscellaneous/src/test/org/apache/lucene/misc/ChainedFilterTest.java (revision 692811)
+++ contrib/miscellaneous/src/test/org/apache/lucene/misc/ChainedFilterTest.java (working copy)
@@ -58,9 +58,9 @@
for (int i = 0; i < MAX; i++) {
Document doc = new Document();
- doc.add(new Field("key", "" + (i + 1), Field.Store.YES, Field.Index.UN_TOKENIZED));
- doc.add(new Field("owner", (i < MAX / 2) ? "bob" : "sue", Field.Store.YES, Field.Index.UN_TOKENIZED));
- doc.add(new Field("date", cal.getTime().toString(), Field.Store.YES, Field.Index.UN_TOKENIZED));
+ doc.add(new Field("key", "" + (i + 1), Field.Store.YES, Field.Index.NOT_ANALYZED));
+ doc.add(new Field("owner", (i < MAX / 2) ? "bob" : "sue", Field.Store.YES, Field.Index.NOT_ANALYZED));
+ doc.add(new Field("date", cal.getTime().toString(), Field.Store.YES, Field.Index.NOT_ANALYZED));
writer.addDocument(doc);
cal.add(Calendar.DATE, 1);
Index: contrib/miscellaneous/src/test/org/apache/lucene/misc/TestLengthNormModifier.java
===================================================================
--- contrib/miscellaneous/src/test/org/apache/lucene/misc/TestLengthNormModifier.java (revision 692811)
+++ contrib/miscellaneous/src/test/org/apache/lucene/misc/TestLengthNormModifier.java (working copy)
@@ -65,15 +65,15 @@
for (int i = 0; i < NUM_DOCS; i++) {
Document d = new Document();
d.add(new Field("field", "word",
- Field.Store.YES, Field.Index.TOKENIZED));
+ Field.Store.YES, Field.Index.ANALYZED));
d.add(new Field("nonorm", "word",
- Field.Store.YES, Field.Index.NO_NORMS));
+ Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
for (int j = 1; j <= i; j++) {
d.add(new Field("field", "crap",
- Field.Store.YES, Field.Index.TOKENIZED));
+ Field.Store.YES, Field.Index.ANALYZED));
d.add(new Field("nonorm", "more words",
- Field.Store.YES, Field.Index.NO_NORMS));
+ Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
}
writer.addDocument(d);
}
Index: contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java
===================================================================
--- contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java (revision 692811)
+++ contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java (working copy)
@@ -53,23 +53,23 @@
Document doc;
doc = new Document();
- doc.add(new Field("aaa", "foo", Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("aaa", "foo", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
doc = new Document();
- doc.add(new Field("aaa", "foo", Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("aaa", "foo", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
doc = new Document();
- doc.add(new Field("contents", "Tom", Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("contents", "Tom", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
doc = new Document();
- doc.add(new Field("contents", "Jerry", Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("contents", "Jerry", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
doc = new Document();
- doc.add(new Field("zzz", "bar", Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("zzz", "bar", Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.optimize();
Index: contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestSpellChecker.java
===================================================================
--- contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestSpellChecker.java (revision 692811)
+++ contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestSpellChecker.java (working copy)
@@ -50,8 +50,8 @@
for (int i = 0; i < 1000; i++) {
Document doc = new Document();
- doc.add(new Field("field1", English.intToEnglish(i), Field.Store.YES, Field.Index.TOKENIZED));
- doc.add(new Field("field2", English.intToEnglish(i + 1), Field.Store.YES, Field.Index.TOKENIZED)); // + word thousand
+ doc.add(new Field("field1", English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED));
+ doc.add(new Field("field2", English.intToEnglish(i + 1), Field.Store.YES, Field.Index.ANALYZED)); // + word thousand
writer.addDocument(doc);
}
writer.close();
Index: contrib/spellchecker/src/java/org/apache/lucene/search/spell/SpellChecker.java
===================================================================
--- contrib/spellchecker/src/java/org/apache/lucene/search/spell/SpellChecker.java (revision 692811)
+++ contrib/spellchecker/src/java/org/apache/lucene/search/spell/SpellChecker.java (working copy)
@@ -384,7 +384,7 @@
private static Document createDocument(String text, int ng1, int ng2) {
Document doc = new Document();
- doc.add(new Field(F_WORD, text, Field.Store.YES, Field.Index.UN_TOKENIZED)); // orig term
+ doc.add(new Field(F_WORD, text, Field.Store.YES, Field.Index.NOT_ANALYZED)); // orig term
addGram(text, doc, ng1, ng2);
return doc;
}
@@ -396,14 +396,14 @@
String end = null;
for (int i = 0; i < len - ng + 1; i++) {
String gram = text.substring(i, i + ng);
- doc.add(new Field(key, gram, Field.Store.NO, Field.Index.UN_TOKENIZED));
+ doc.add(new Field(key, gram, Field.Store.NO, Field.Index.NOT_ANALYZED));
if (i == 0) {
- doc.add(new Field("start" + ng, gram, Field.Store.NO, Field.Index.UN_TOKENIZED));
+ doc.add(new Field("start" + ng, gram, Field.Store.NO, Field.Index.NOT_ANALYZED));
}
end = gram;
}
if (end != null) { // may not be present if len==ng1
- doc.add(new Field("end" + ng, end, Field.Store.NO, Field.Index.UN_TOKENIZED));
+ doc.add(new Field("end" + ng, end, Field.Store.NO, Field.Index.NOT_ANALYZED));
}
}
}
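Concretely, for the word "boat" with ng=3, the code above yields a document of single-term NOT_ANALYZED fields roughly like this (sketch; assumes F_WORD is "word" and the gram key is "gram" + ng):

    Document d = new Document();
    d.add(new Field("word", "boat", Field.Store.YES, Field.Index.NOT_ANALYZED)); // orig term
    d.add(new Field("gram3", "boa", Field.Store.NO, Field.Index.NOT_ANALYZED));
    d.add(new Field("gram3", "oat", Field.Store.NO, Field.Index.NOT_ANALYZED));
    d.add(new Field("start3", "boa", Field.Store.NO, Field.Index.NOT_ANALYZED));
    d.add(new Field("end3", "oat", Field.Store.NO, Field.Index.NOT_ANALYZED));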
Index: contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
===================================================================
--- contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java (revision 692811)
+++ contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java (working copy)
@@ -367,7 +367,7 @@
private Document createDocument(String content) {
Document doc = new Document();
- doc.add(new Field(FIELD_NAME, content, Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS));
+ doc.add(new Field(FIELD_NAME, content, Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));
return doc;
}
Index: contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
===================================================================
--- contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java (revision 692811)
+++ contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java (working copy)
@@ -226,7 +226,7 @@
* Convenience method; Tokenizes the given field text and adds the resulting
* terms to the index; Equivalent to adding an indexed non-keyword Lucene
* {@link org.apache.lucene.document.Field} that is
- * {@link org.apache.lucene.document.Field.Index#TOKENIZED tokenized},
+ * {@link org.apache.lucene.document.Field.Index#ANALYZED analyzed},
* {@link org.apache.lucene.document.Field.Store#NO not stored},
* {@link org.apache.lucene.document.Field.TermVector#WITH_POSITIONS termVectorStored with positions} (or
* {@link org.apache.lucene.document.Field.TermVector#WITH_POSITIONS termVectorStored with positions and offsets}),
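In other words, the convenience method indexes the same way as an explicitly constructed Field; a sketch of the two equivalent forms (analyzer and doc assumed from context):

    // In-memory convenience form:
    MemoryIndex index = new MemoryIndex();
    index.addField("content", "some text", analyzer);

    // Roughly the Field equivalent described by the javadoc above:
    doc.add(new Field("content", "some text", Field.Store.NO,
                      Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS));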
Index: contrib/analyzers/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java
===================================================================
--- contrib/analyzers/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java (revision 692811)
+++ contrib/analyzers/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java (working copy)
@@ -51,8 +51,8 @@
Document doc = new Document();
String variedFieldValue = variedFieldValues[i % variedFieldValues.length];
String repetitiveFieldValue = repetitiveFieldValues[i % repetitiveFieldValues.length];
- doc.add(new Field("variedField", variedFieldValue, Field.Store.YES, Field.Index.TOKENIZED));
- doc.add(new Field("repetitiveField", repetitiveFieldValue, Field.Store.YES, Field.Index.TOKENIZED));
+ doc.add(new Field("variedField", variedFieldValue, Field.Store.YES, Field.Index.ANALYZED));
+ doc.add(new Field("repetitiveField", repetitiveFieldValue, Field.Store.YES, Field.Index.ANALYZED));
writer.addDocument(doc);
}
writer.close();
Index: contrib/analyzers/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java
===================================================================
--- contrib/analyzers/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java (revision 692811)
+++ contrib/analyzers/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java (working copy)
@@ -65,17 +65,17 @@
Document doc;
doc = new Document();
doc.add(new Field("content", "please divide this sentence into shingles",
- Field.Store.YES,Field.Index.TOKENIZED));
+ Field.Store.YES,Field.Index.ANALYZED));
writer.addDocument(doc);
doc = new Document();
doc.add(new Field("content", "just another test sentence",
- Field.Store.YES,Field.Index.TOKENIZED));
+ Field.Store.YES,Field.Index.ANALYZED));
writer.addDocument(doc);
doc = new Document();
doc.add(new Field("content", "a sentence which contains no test",
- Field.Store.YES,Field.Index.TOKENIZED));
+ Field.Store.YES,Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
Index: contrib/regex/src/test/org/apache/lucene/search/regex/TestRegexQuery.java
===================================================================
--- contrib/regex/src/test/org/apache/lucene/search/regex/TestRegexQuery.java (revision 692811)
+++ contrib/regex/src/test/org/apache/lucene/search/regex/TestRegexQuery.java (working copy)
@@ -39,7 +39,7 @@
IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(), true,
IndexWriter.MaxFieldLength.LIMITED);
Document doc = new Document();
- doc.add(new Field(FN, "the quick brown fox jumps over the lazy dog", Field.Store.NO, Field.Index.TOKENIZED));
+ doc.add(new Field(FN, "the quick brown fox jumps over the lazy dog", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.optimize();
writer.close();
Index: contrib/regex/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java
===================================================================
--- contrib/regex/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java (revision 692811)
+++ contrib/regex/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java (working copy)
@@ -36,13 +36,13 @@
RAMDirectory directory = new RAMDirectory();
IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(), true);
Document doc = new Document();
-// doc.add(new Field("field", "the quick brown fox jumps over the lazy dog", Field.Store.NO, Field.Index.TOKENIZED));
+// doc.add(new Field("field", "the quick brown fox jumps over the lazy dog", Field.Store.NO, Field.Index.ANALYZED));
// writer.addDocument(doc);
// doc = new Document();
- doc.add(new Field("field", "auto update", Field.Store.NO, Field.Index.TOKENIZED));
+ doc.add(new Field("field", "auto update", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
doc = new Document();
- doc.add(new Field("field", "first auto update", Field.Store.NO, Field.Index.TOKENIZED));
+ doc.add(new Field("field", "first auto update", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.optimize();
writer.close();
Index: contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java
===================================================================
--- contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java (revision 692811)
+++ contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java (working copy)
@@ -70,9 +70,9 @@
private void addDoc(IndexWriter writer, String url, String text, String date) throws IOException
{
Document doc=new Document();
- doc.add(new Field(KEY_FIELD,url,Field.Store.YES,Field.Index.UN_TOKENIZED));
- doc.add(new Field("text",text,Field.Store.YES,Field.Index.TOKENIZED));
- doc.add(new Field("date",date,Field.Store.YES,Field.Index.TOKENIZED));
+ doc.add(new Field(KEY_FIELD,url,Field.Store.YES,Field.Index.NOT_ANALYZED));
+ doc.add(new Field("text",text,Field.Store.YES,Field.Index.ANALYZED));
+ doc.add(new Field("date",date,Field.Store.YES,Field.Index.ANALYZED));
writer.addDocument(doc);
}
Index: contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java
===================================================================
--- contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java (revision 692811)
+++ contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java (working copy)
@@ -43,7 +43,7 @@
{
Document doc=new Document();
int term=i*10; //terms are units of 10;
- doc.add(new Field(fieldName,""+term,Field.Store.YES,Field.Index.UN_TOKENIZED));
+ doc.add(new Field(fieldName,""+term,Field.Store.YES,Field.Index.NOT_ANALYZED));
w.addDocument(doc);
}
w.close();
Index: contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java
===================================================================
--- contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java (revision 692811)
+++ contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java (working copy)
@@ -60,10 +60,10 @@
private void addDoc(IndexWriter writer, String accessRights, String price, String date, String inStock) throws IOException
{
Document doc=new Document();
- doc.add(new Field("accessRights",accessRights,Field.Store.YES,Field.Index.TOKENIZED));
- doc.add(new Field("price",price,Field.Store.YES,Field.Index.TOKENIZED));
- doc.add(new Field("date",date,Field.Store.YES,Field.Index.TOKENIZED));
- doc.add(new Field("inStock",inStock,Field.Store.YES,Field.Index.TOKENIZED));
+ doc.add(new Field("accessRights",accessRights,Field.Store.YES,Field.Index.ANALYZED));
+ doc.add(new Field("price",price,Field.Store.YES,Field.Index.ANALYZED));
+ doc.add(new Field("date",date,Field.Store.YES,Field.Index.ANALYZED));
+ doc.add(new Field("inStock",inStock,Field.Store.YES,Field.Index.ANALYZED));
writer.addDocument(doc);
}