Index: lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java =================================================================== --- lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java (revision 1148022) +++ lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java (working copy) @@ -81,6 +81,23 @@ return ts; } + public static TokenStream getAnyTokenStream(IndexReader reader, int docId, + String field, org.apache.lucene.document2.Document doc, Analyzer analyzer) throws IOException { + TokenStream ts = null; + + TermFreqVector tfv = reader.getTermFreqVector(docId, field); + if (tfv != null) { + if (tfv instanceof TermPositionVector) { + ts = getTokenStream((TermPositionVector) tfv); + } + } + // No token info stored so fall back to analyzing raw content + if (ts == null) { + ts = getTokenStream(doc, field, analyzer); + } + return ts; + } + /** * A convenience method that tries a number of approaches to getting a token * stream. The cost of finding there are no termVectors in the index is @@ -283,6 +300,16 @@ return getTokenStream(field, contents, analyzer); } + public static TokenStream getTokenStream(org.apache.lucene.document2.Document doc, String field, + Analyzer analyzer) { + String contents = doc.get(field); + if (contents == null) { + throw new IllegalArgumentException("Field " + field + + " in document is not stored and cannot be analyzed"); + } + return getTokenStream(field, contents, analyzer); + } + // convenience method public static TokenStream getTokenStream(String field, String contents, Analyzer analyzer) { Index: lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java =================================================================== --- lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (revision 1148022) +++ lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (working copy) @@ -112,7 +112,7 @@ for (int i = 0; i < hits.scoreDocs.length; i++) { - org.apache.lucene.document.Document doc = searcher.doc(hits.scoreDocs[i].doc); + Document doc = searcher.doc2(hits.scoreDocs[i].doc); String storedField = doc.get(FIELD_NAME); TokenStream stream = TokenSources.getAnyTokenStream(searcher @@ -1568,7 +1568,7 @@ TopDocs hits = searcher.search(query, null, 10); for( int i = 0; i < hits.totalHits; i++ ){ - org.apache.lucene.document.Document doc = searcher.doc( hits.scoreDocs[i].doc ); + Document doc = searcher.doc2( hits.scoreDocs[i].doc ); String result = h.getBestFragment( a, "t_text1", doc.get( "t_text1" )); if (VERBOSE) System.out.println("result:" + result); assertEquals("more random words for second field", result); Index: lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java =================================================================== --- lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java (revision 1148022) +++ lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java (working copy) @@ -275,11 +275,21 @@ public Document document(int n) throws IOException { return getIndex().getDocumentsByNumber()[n].getDocument(); } + + @Override + public org.apache.lucene.document2.Document document2(int n) throws IOException { + return getIndex().getDocumentsByNumber()[n].getDocument2(); + } @Override public void document(int 
docID, StoredFieldVisitor visitor) throws IOException { throw new UnsupportedOperationException(); } + + @Override + public void document2(int docID, StoredFieldVisitor visitor) throws IOException { + throw new UnsupportedOperationException(); + } /** * never ever touch these values. it is the true values, unless norms have Index: lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java =================================================================== --- lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java (revision 1148022) +++ lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java (working copy) @@ -1229,6 +1229,12 @@ if (DEBUG) System.err.println("MemoryIndexReader.document"); // no-op: there are no stored fields } + + @Override + public void document2(int docID, StoredFieldVisitor visitor) { + if (DEBUG) System.err.println("MemoryIndexReader.document"); + // no-op: there are no stored fields + } @Override public boolean hasDeletions() { Index: lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java (revision 1148022) +++ lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java (working copy) @@ -152,7 +152,7 @@ writer.close(); IndexReader reader = IndexReader.open(dir, null, true, 1, new AppendingCodecProvider()); assertEquals(2, reader.numDocs()); - org.apache.lucene.document.Document doc2 = reader.document(0); + Document doc2 = reader.document2(0); assertEquals(text, doc2.get("f")); Fields fields = MultiFields.getFields(reader); Terms terms = fields.terms("f"); Index: lucene/contrib/misc/src/test/org/apache/lucene/store/TestNRTCachingDirectory.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/store/TestNRTCachingDirectory.java (revision 1148022) +++ lucene/contrib/misc/src/test/org/apache/lucene/store/TestNRTCachingDirectory.java (working copy) @@ -23,7 +23,7 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; Index: lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java =================================================================== --- lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java (revision 1148022) +++ lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java (working copy) @@ -93,7 +93,7 @@ ScoreDoc[] hits = searcher.search(tq,df, 1000).scoreDocs; for(int i=0;i0); for(int i=0;i0); for(int i=0;i0); for(int i=0;i0)); - org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc); + Document doc=searcher.doc2(sd[0].doc); assertEquals("Should match most similar not most rare variant", "2",doc.get("id")); } //Test multiple input words are having variants produced @@ -104,7 +104,7 @@ TopDocs topDocs = searcher.search(flt, 1); ScoreDoc[] sd = topDocs.scoreDocs; assertTrue("score docs must match 1 doc", (sd!=null)&&(sd.length>0)); - org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc); + Document doc=searcher.doc2(sd[0].doc); 
assertEquals("Should match most similar when using 2 words", "2",doc.get("id")); } //Test bug found when first query word does not match anything @@ -119,7 +119,7 @@ TopDocs topDocs = searcher.search(flt, 1); ScoreDoc[] sd = topDocs.scoreDocs; assertTrue("score docs must match 1 doc", (sd!=null)&&(sd.length>0)); - org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc); + Document doc=searcher.doc2(sd[0].doc); assertEquals("Should match most similar when using 2 words", "2",doc.get("id")); } Index: lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java =================================================================== --- lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java (revision 1148022) +++ lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java (working copy) @@ -98,7 +98,7 @@ TopDocs td = searcher.search(q, 10); ScoreDoc[] sd = td.scoreDocs; for (int i = 0; i < sd.length; i++) { - org.apache.lucene.document.Document doc = searcher.doc(sd[i].doc); + Document doc = searcher.doc2(sd[i].doc); String id = doc.get("id"); assertTrue(qString + "matched doc#" + id + " not expected", expecteds .contains(id)); Index: lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java =================================================================== --- lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java (revision 1148022) +++ lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java (working copy) @@ -284,7 +284,7 @@ assertEquals(2, results); double lastDistance = 0; for(int i =0 ; i < results; i++){ - org.apache.lucene.document.Document d = searcher.doc(scoreDocs[i].doc); + Document d = searcher.doc2(scoreDocs[i].doc); String name = d.get("name"); double rsLat = Double.parseDouble(d.get(latField)); @@ -380,7 +380,7 @@ assertEquals(18, results); double lastDistance = 0; for(int i =0 ; i < results; i++){ - org.apache.lucene.document.Document d = searcher.doc(scoreDocs[i].doc); + Document d = searcher.doc2(scoreDocs[i].doc); String name = d.get("name"); double rsLat = Double.parseDouble(d.get(latField)); double rsLng = Double.parseDouble(d.get(lngField)); @@ -475,7 +475,7 @@ assertEquals(expected[x], results); double lastDistance = 0; for(int i =0 ; i < results; i++){ - org.apache.lucene.document.Document d = searcher.doc(scoreDocs[i].doc); + Document d = searcher.doc2(scoreDocs[i].doc); String name = d.get("name"); double rsLat = Double.parseDouble(d.get(latField)); @@ -570,7 +570,7 @@ assertEquals(expected[x], results); for(int i =0 ; i < results; i++){ - org.apache.lucene.document.Document d = searcher.doc(scoreDocs[i].doc); + Document d = searcher.doc2(scoreDocs[i].doc); String name = d.get("name"); double rsLat = Double.parseDouble(d.get(latField)); Index: lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java =================================================================== --- lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java (revision 1148022) +++ lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java (working copy) @@ -233,7 +233,7 @@ ScoreDoc[] scoreDocs = hits.scoreDocs; for(int i=0;i fieldsToAdd; + + /** Load only fields named in the provided Set<String>. 
*/ + public Document2StoredFieldVisitor(Set<String> fieldsToAdd) { + this.fieldsToAdd = fieldsToAdd; + } + + /** Load only fields named in the provided Set<String>. */ + public Document2StoredFieldVisitor(String... fields) { + fieldsToAdd = new HashSet<String>(fields.length); + for(String field : fields) { + fieldsToAdd.add(field); + } + } + + /** Load all stored fields. */ + public Document2StoredFieldVisitor() { + this.fieldsToAdd = null; + } + + @Override + public boolean binaryField(FieldInfo fieldInfo, IndexInput in, int numBytes) throws IOException { + if (accept(fieldInfo)) { + final byte[] b = new byte[numBytes]; + in.readBytes(b, 0, b.length); + doc.add(new BinaryField(fieldInfo.name, b)); + } else { + in.seek(in.getFilePointer() + numBytes); + } + return false; + } + + @Override + public boolean stringField(FieldInfo fieldInfo, IndexInput in, int numUTF8Bytes) throws IOException { + if (accept(fieldInfo)) { + final byte[] b = new byte[numUTF8Bytes]; + in.readBytes(b, 0, b.length); + FieldType ft = new FieldType(TextField.TYPE_STORED); + ft.setStoreTermVectors(fieldInfo.storeTermVector); + ft.setStoreTermVectorPositions(fieldInfo.storePositionWithTermVector); + ft.setStoreTermVectorOffsets(fieldInfo.storeOffsetWithTermVector); + doc.add(new Field(fieldInfo.name, + false, + ft, + new String(b, "UTF-8"))); + } else { + in.seek(in.getFilePointer() + numUTF8Bytes); + } + return false; + } + + @Override + public boolean intField(FieldInfo fieldInfo, int value) { + if (accept(fieldInfo)) { + FieldType ft = new FieldType(NumericField.TYPE_STORED); + ft.setIndexed(fieldInfo.isIndexed); + doc.add(new NumericField(fieldInfo.name, ft).setIntValue(value)); + } + return false; + } + + @Override + public boolean longField(FieldInfo fieldInfo, long value) { + if (accept(fieldInfo)) { + FieldType ft = new FieldType(NumericField.TYPE_STORED); + ft.setIndexed(fieldInfo.isIndexed); + doc.add(new NumericField(fieldInfo.name, ft).setLongValue(value)); + } + return false; + } + + @Override + public boolean floatField(FieldInfo fieldInfo, float value) { + if (accept(fieldInfo)) { + FieldType ft = new FieldType(NumericField.TYPE_STORED); + ft.setIndexed(fieldInfo.isIndexed); + doc.add(new NumericField(fieldInfo.name, ft).setFloatValue(value)); + } + return false; + } + + @Override + public boolean doubleField(FieldInfo fieldInfo, double value) { + if (accept(fieldInfo)) { + FieldType ft = new FieldType(NumericField.TYPE_STORED); + ft.setIndexed(fieldInfo.isIndexed); + doc.add(new NumericField(fieldInfo.name, ft).setDoubleValue(value)); + } + return false; + } + + private boolean accept(FieldInfo fieldInfo) { + return fieldsToAdd == null || fieldsToAdd.contains(fieldInfo.name); + } + + public Document getDocument() { + return doc; + } +} \ No newline at end of file Index: lucene/src/java/org/apache/lucene/index/FilterIndexReader.java =================================================================== --- lucene/src/java/org/apache/lucene/index/FilterIndexReader.java (revision 1148022) +++ lucene/src/java/org/apache/lucene/index/FilterIndexReader.java (working copy) @@ -345,6 +345,12 @@ } @Override + public void document2(int docID, StoredFieldVisitor visitor) throws CorruptIndexException, IOException { + ensureOpen(); + in.document2(docID, visitor); + } + + @Override public boolean hasDeletions() { // Don't call ensureOpen() here (it could affect performance) return in.hasDeletions(); Index: lucene/src/java/org/apache/lucene/index/IndexReader.java
=================================================================== --- lucene/src/java/org/apache/lucene/index/IndexReader.java (revision 1148022) +++ lucene/src/java/org/apache/lucene/index/IndexReader.java (working copy) @@ -954,6 +954,8 @@ * #document(int)}. If you want to load a subset, use * {@link DocumentStoredFieldVisitor}. */ public abstract void document(int docID, StoredFieldVisitor visitor) throws CorruptIndexException, IOException; + + public abstract void document2(int docID, StoredFieldVisitor visitor) throws CorruptIndexException, IOException; // nocommit -- the new document(int docID) API should // clearly advertise that only field types/values are @@ -981,6 +983,13 @@ return visitor.getDocument(); } + public org.apache.lucene.document2.Document document2(int docID) throws CorruptIndexException, IOException { + ensureOpen(); + final Document2StoredFieldVisitor visitor = new Document2StoredFieldVisitor(); + document(docID, visitor); + return visitor.getDocument(); + } + /** Returns true if any documents have been deleted */ public abstract boolean hasDeletions(); Index: lucene/src/java/org/apache/lucene/index/MultiReader.java =================================================================== --- lucene/src/java/org/apache/lucene/index/MultiReader.java (revision 1148022) +++ lucene/src/java/org/apache/lucene/index/MultiReader.java (working copy) @@ -263,6 +263,13 @@ } @Override + public void document2(int docID, StoredFieldVisitor visitor) throws CorruptIndexException, IOException { + ensureOpen(); + int i = readerIndex(docID); // find segment num + subReaders[i].document2(docID - starts[i], visitor); // dispatch to segment reader + } + + @Override public boolean hasDeletions() { // Don't call ensureOpen() here (it could affect performance) return hasDeletions; Index: lucene/src/java/org/apache/lucene/index/ParallelReader.java =================================================================== --- lucene/src/java/org/apache/lucene/index/ParallelReader.java (revision 1148022) +++ lucene/src/java/org/apache/lucene/index/ParallelReader.java (working copy) @@ -348,6 +348,14 @@ reader.document(docID, visitor); } } + + @Override + public void document2(int docID, StoredFieldVisitor visitor) throws CorruptIndexException, IOException { + ensureOpen(); + for (final IndexReader reader: storedFieldReaders) { + reader.document2(docID, visitor); + } + } // get all vectors @Override Index: lucene/src/java/org/apache/lucene/index/SegmentReader.java =================================================================== --- lucene/src/java/org/apache/lucene/index/SegmentReader.java (revision 1148022) +++ lucene/src/java/org/apache/lucene/index/SegmentReader.java (working copy) @@ -460,6 +460,11 @@ getFieldsReader().visitDocument(docID, visitor); } + public void document2(int docID, StoredFieldVisitor visitor) throws CorruptIndexException, IOException { + ensureOpen(); + getFieldsReader().visitDocument(docID, visitor); + } + @Override public Fields fields() throws IOException { return core.fields; Index: lucene/src/java/org/apache/lucene/search/IndexSearcher.java =================================================================== --- lucene/src/java/org/apache/lucene/search/IndexSearcher.java (revision 1148022) +++ lucene/src/java/org/apache/lucene/search/IndexSearcher.java (working copy) @@ -243,10 +243,18 @@ return reader.document(docID); } + public org.apache.lucene.document2.Document doc2(int docID) throws CorruptIndexException, IOException { + return reader.document2(docID); + } + /* 
Sugar for .getIndexReader().document(docID, fieldVisitor) */ public void doc(int docID, StoredFieldVisitor fieldVisitor) throws CorruptIndexException, IOException { reader.document(docID, fieldVisitor); } + + public void doc2(int docID, StoredFieldVisitor fieldVisitor) throws CorruptIndexException, IOException { + reader.document2(docID, fieldVisitor); + } /** Expert: Set the SimilarityProvider implementation used by this Searcher. * Index: lucene/src/test-framework/org/apache/lucene/index/DocHelper.java =================================================================== --- lucene/src/test-framework/org/apache/lucene/index/DocHelper.java (revision 1148022) +++ lucene/src/test-framework/org/apache/lucene/index/DocHelper.java (working copy) @@ -297,8 +297,4 @@ public static int numFields(Document doc) { return doc.size(); } - - public static int numFields2(org.apache.lucene.document.Document doc) { - return doc.getFields().size(); - } } Index: lucene/src/test-framework/org/apache/lucene/util/LineFileDocs.java =================================================================== --- lucene/src/test-framework/org/apache/lucene/util/LineFileDocs.java (revision 1148022) +++ lucene/src/test-framework/org/apache/lucene/util/LineFileDocs.java (working copy) @@ -28,8 +28,11 @@ import java.util.zip.GZIPInputStream; import java.util.Random; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.StringField; +import org.apache.lucene.document2.TextField; /** Minimal port of contrib/benchmark's LneDocSource + * DocMaker, so tests can enum docs from a line file created @@ -117,19 +120,24 @@ public DocState() { doc = new Document(); - title = new Field("title", "", Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS); + title = new StringField("title", ""); doc.add(title); - titleTokenized = new Field("titleTokenized", "", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS); + FieldType ft = new FieldType(TextField.TYPE_STORED); + ft.setStoreTermVectors(true); + ft.setStoreTermVectorOffsets(true); + ft.setStoreTermVectorPositions(true); + + titleTokenized = new Field("titleTokenized", ft, ""); doc.add(titleTokenized); - body = new Field("body", "", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS); + body = new Field("body", ft, ""); doc.add(body); - id = new Field("docid", "", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS); + id = new Field("docid", StringField.TYPE_STORED, ""); doc.add(id); - date = new Field("date", "", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS); + date = new Field("date", StringField.TYPE_STORED, ""); doc.add(date); } } Index: lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java =================================================================== --- lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java (revision 1148022) +++ lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java (working copy) @@ -36,10 +36,6 @@ import java.util.regex.Pattern; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.Field.Index; -import org.apache.lucene.document.Field.Store; -import org.apache.lucene.document.Field.TermVector; import org.apache.lucene.document2.FieldType; import org.apache.lucene.index.*; import 
org.apache.lucene.index.codecs.Codec; @@ -1062,18 +1058,6 @@ return dir; } - /** Returns a new field instance. - * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */ - public static Field newField(String name, String value, Index index) { - return newField(random, name, value, index); - } - - /** Returns a new field instance. - * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */ - public static Field newField(String name, String value, Store store, Index index) { - return newField(random, name, value, store, index); - } - public static org.apache.lucene.document2.Field newField(String name, String value, FieldType type) { return newField(random, name, value, type); } @@ -1114,67 +1098,6 @@ return new org.apache.lucene.document2.Field(name, newType, value); } - /** - * Returns a new Field instance. Use this when the test does not - * care about some specific field settings (most tests) - *
<ul> - *
  <li>If the store value is set to Store.NO, sometimes the field will be randomly stored. - *
  <li>More term vector data than you ask for might be indexed, for example if you choose YES - * it might index term vectors with offsets too. - * </ul>
- */ - public static Field newField(String name, String value, Store store, Index index, TermVector tv) { - return newField(random, name, value, store, index, tv); - } - - /** Returns a new field instance, using the specified random. - * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */ - public static Field newField(Random random, String name, String value, Index index) { - return newField(random, name, value, Store.NO, index); - } - - /** Returns a new field instance, using the specified random. - * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */ - public static Field newField(Random random, String name, String value, Store store, Index index) { - return newField(random, name, value, store, index, TermVector.NO); - } - - /** Returns a new field instance, using the specified random. - * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */ - public static Field newField(Random random, String name, String value, Store store, Index index, TermVector tv) { - if (usually(random)) { - // most of the time, don't modify the params - return new Field(name, value, store, index, tv); - } - - if (!index.isIndexed()) - return new Field(name, value, store, index, tv); - - if (!store.isStored() && random.nextBoolean()) - store = Store.YES; // randomly store it - - tv = randomTVSetting(random, tv); - - return new Field(name, value, store, index, tv); - } - - static final TermVector tvSettings[] = { - TermVector.NO, TermVector.YES, TermVector.WITH_OFFSETS, - TermVector.WITH_POSITIONS, TermVector.WITH_POSITIONS_OFFSETS - }; - - private static TermVector randomTVSetting(Random random, TermVector minimum) { - switch(minimum) { - case NO: return tvSettings[_TestUtil.nextInt(random, 0, tvSettings.length-1)]; - case YES: return tvSettings[_TestUtil.nextInt(random, 1, tvSettings.length-1)]; - case WITH_OFFSETS: return random.nextBoolean() ? TermVector.WITH_OFFSETS - : TermVector.WITH_POSITIONS_OFFSETS; - case WITH_POSITIONS: return random.nextBoolean() ? 
TermVector.WITH_POSITIONS - : TermVector.WITH_POSITIONS_OFFSETS; - default: return TermVector.WITH_POSITIONS_OFFSETS; - } - } - /** return a random Locale from the available locales on the system */ public static Locale randomLocale(Random random) { Locale locales[] = Locale.getAvailableLocales(); Index: lucene/src/test/org/apache/lucene/TestDemo.java =================================================================== --- lucene/src/test/org/apache/lucene/TestDemo.java (revision 1148022) +++ lucene/src/test/org/apache/lucene/TestDemo.java (working copy) @@ -72,7 +72,7 @@ assertEquals(1, hits.totalHits); // Iterate through the results: for (int i = 0; i < hits.scoreDocs.length; i++) { - org.apache.lucene.document.Document hitDoc = isearcher.doc(hits.scoreDocs[i].doc); + Document hitDoc = isearcher.doc2(hits.scoreDocs[i].doc); assertEquals(text, hitDoc.get("fieldname")); } Index: lucene/src/test/org/apache/lucene/TestSearch.java =================================================================== --- lucene/src/test/org/apache/lucene/TestSearch.java (revision 1148022) +++ lucene/src/test/org/apache/lucene/TestSearch.java (working copy) @@ -127,7 +127,7 @@ out.println(hits.length + " total results"); for (int i = 0 ; i < hits.length && i < 10; i++) { - org.apache.lucene.document.Document d = searcher.doc(hits[i].doc); + Document d = searcher.doc2(hits[i].doc); out.println(i + " " + hits[i].score + " " + d.get("contents")); } } Index: lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java =================================================================== --- lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java (revision 1148022) +++ lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java (working copy) @@ -142,7 +142,7 @@ out.println(hits.length + " total results\n"); for (int i = 0 ; i < hits.length; i++) { if ( i < 10 || (i > 94 && i < 105) ) { - org.apache.lucene.document.Document d = searcher.doc(hits[i].doc); + Document d = searcher.doc2(hits[i].doc); out.println(i + " " + d.get(ID_FIELD)); } } @@ -152,7 +152,7 @@ assertEquals("total results", expectedCount, hits.length); for (int i = 0 ; i < hits.length; i++) { if (i < 10 || (i > 94 && i < 105) ) { - org.apache.lucene.document.Document d = searcher.doc(hits[i].doc); + Document d = searcher.doc2(hits[i].doc); assertEquals("check " + i, String.valueOf(i), d.get(ID_FIELD)); } } Index: lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (revision 1148022) +++ lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (working copy) @@ -31,7 +31,6 @@ import org.apache.lucene.document2.Field; import org.apache.lucene.document2.FieldType; import org.apache.lucene.document2.TextField; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.document2.NumericField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.search.DefaultSimilarity; @@ -287,12 +286,12 @@ for(int i=0;i<35;i++) { if (!delDocs.get(i)) { - org.apache.lucene.document.Document d = reader.document(i); - List fields = d.getFields(); + Document d = reader.document2(i); + List fields = d.getFields(); if (d.getField("content3") == null) { final int numFields = 5; assertEquals(numFields, fields.size()); - org.apache.lucene.document.Field f = d.getField("id"); + IndexableField f = d.getField("id"); assertEquals(""+i, f.stringValue()); f = 
d.getField("utf8"); @@ -320,7 +319,7 @@ // First document should be #21 since it's norm was // increased: - org.apache.lucene.document.Document d = searcher.getIndexReader().document(hits[0].doc); + Document d = searcher.getIndexReader().document2(hits[0].doc); assertEquals("didn't get the right document first", "21", d.get("id")); doTestHits(hits, 34, searcher.getIndexReader()); @@ -366,7 +365,7 @@ // make sure searching sees right # hits IndexSearcher searcher = new IndexSearcher(dir, true); ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs; - org.apache.lucene.document.Document d = searcher.getIndexReader().document(hits[0].doc); + Document d = searcher.getIndexReader().document2(hits[0].doc); assertEquals("wrong first document", "21", d.get("id")); doTestHits(hits, 44, searcher.getIndexReader()); searcher.close(); @@ -385,7 +384,7 @@ searcher = new IndexSearcher(dir, true); hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs; assertEquals("wrong number of hits", 43, hits.length); - d = searcher.doc(hits[0].doc); + d = searcher.doc2(hits[0].doc); assertEquals("wrong first document", "22", d.get("id")); doTestHits(hits, 43, searcher.getIndexReader()); searcher.close(); @@ -398,7 +397,7 @@ searcher = new IndexSearcher(dir, true); hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs; assertEquals("wrong number of hits", 43, hits.length); - d = searcher.doc(hits[0].doc); + d = searcher.doc2(hits[0].doc); doTestHits(hits, 43, searcher.getIndexReader()); assertEquals("wrong first document", "22", d.get("id")); searcher.close(); @@ -414,7 +413,7 @@ IndexSearcher searcher = new IndexSearcher(dir, true); ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs; assertEquals("wrong number of hits", 34, hits.length); - org.apache.lucene.document.Document d = searcher.doc(hits[0].doc); + Document d = searcher.doc2(hits[0].doc); assertEquals("wrong first document", "21", d.get("id")); searcher.close(); @@ -430,7 +429,7 @@ searcher = new IndexSearcher(dir, true); hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs; assertEquals("wrong number of hits", 33, hits.length); - d = searcher.doc(hits[0].doc); + d = searcher.doc2(hits[0].doc); assertEquals("wrong first document", "22", d.get("id")); doTestHits(hits, 33, searcher.getIndexReader()); searcher.close(); @@ -443,7 +442,7 @@ searcher = new IndexSearcher(dir, true); hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs; assertEquals("wrong number of hits", 33, hits.length); - d = searcher.doc(hits[0].doc); + d = searcher.doc2(hits[0].doc); assertEquals("wrong first document", "22", d.get("id")); doTestHits(hits, 33, searcher.getIndexReader()); searcher.close(); @@ -684,12 +683,12 @@ for (int id=10; id<15; id++) { ScoreDoc[] hits = searcher.search(NumericRangeQuery.newIntRange("trieInt", 4, Integer.valueOf(id), Integer.valueOf(id), true, true), 100).scoreDocs; assertEquals("wrong number of hits", 1, hits.length); - org.apache.lucene.document.Document d = searcher.doc(hits[0].doc); + Document d = searcher.doc2(hits[0].doc); assertEquals(String.valueOf(id), d.get("id")); hits = searcher.search(NumericRangeQuery.newLongRange("trieLong", 4, Long.valueOf(id), Long.valueOf(id), true, true), 100).scoreDocs; assertEquals("wrong number of hits", 1, hits.length); - d = searcher.doc(hits[0].doc); + d = searcher.doc2(hits[0].doc); 
assertEquals(String.valueOf(id), d.get("id")); } Index: lucene/src/test/org/apache/lucene/index/TestDirectoryReader.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestDirectoryReader.java (revision 1148022) +++ lucene/src/test/org/apache/lucene/index/TestDirectoryReader.java (working copy) @@ -81,12 +81,12 @@ sis.read(dir); IndexReader reader = openReader(); assertTrue(reader != null); - org.apache.lucene.document.Document newDoc1 = reader.document(0); + Document newDoc1 = reader.document2(0); assertTrue(newDoc1 != null); - assertTrue(DocHelper.numFields2(newDoc1) == DocHelper.numFields(doc1) - DocHelper.unstored.size()); - org.apache.lucene.document.Document newDoc2 = reader.document(1); + assertTrue(DocHelper.numFields(newDoc1) == DocHelper.numFields(doc1) - DocHelper.unstored.size()); + Document newDoc2 = reader.document2(1); assertTrue(newDoc2 != null); - assertTrue(DocHelper.numFields2(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size()); + assertTrue(DocHelper.numFields(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size()); TermFreqVector vector = reader.getTermFreqVector(0, DocHelper.TEXT_FIELD_2_KEY); assertTrue(vector != null); TestSegmentReader.checkNorms(reader); Index: lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java (revision 1148022) +++ lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java (working copy) @@ -28,7 +28,6 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.PayloadAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.document2.Document; import org.apache.lucene.document2.Field; import org.apache.lucene.document2.FieldType; @@ -69,19 +68,19 @@ //After adding the document, we should be able to read it back in SegmentReader reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR); assertTrue(reader != null); - org.apache.lucene.document.Document doc = reader.document(0); + Document doc = reader.document2(0); assertTrue(doc != null); //System.out.println("Document: " + doc); - Fieldable [] fields = doc.getFields("textField2"); + IndexableField [] fields = doc.getFields("textField2"); assertTrue(fields != null && fields.length == 1); assertTrue(fields[0].stringValue().equals(DocHelper.FIELD_2_TEXT)); - assertTrue(fields[0].isTermVectorStored()); + assertTrue(fields[0].storeTermVectors()); fields = doc.getFields("textField1"); assertTrue(fields != null && fields.length == 1); assertTrue(fields[0].stringValue().equals(DocHelper.FIELD_1_TEXT)); - assertFalse(fields[0].isTermVectorStored()); + assertFalse(fields[0].storeTermVectors()); fields = doc.getFields("keyField"); assertTrue(fields != null && fields.length == 1); Index: lucene/src/test/org/apache/lucene/index/TestFieldsReader.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestFieldsReader.java (revision 1148022) +++ lucene/src/test/org/apache/lucene/index/TestFieldsReader.java (working copy) @@ -22,9 +22,9 @@ import java.util.*; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.NumericField; -import 
org.apache.lucene.document.Fieldable; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.NumericField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.search.FieldCache; import org.apache.lucene.store.BufferedIndexInput; @@ -67,32 +67,32 @@ assertTrue(dir != null); assertTrue(fieldInfos != null); IndexReader reader = IndexReader.open(dir); - Document doc = reader.document(0); + Document doc = reader.document2(0); assertTrue(doc != null); assertTrue(doc.getField(DocHelper.TEXT_FIELD_1_KEY) != null); - Fieldable field = doc.getField(DocHelper.TEXT_FIELD_2_KEY); + Field field = (Field) doc.getField(DocHelper.TEXT_FIELD_2_KEY); assertTrue(field != null); - assertTrue(field.isTermVectorStored() == true); + assertTrue(field.storeTermVectors() == true); - assertTrue(field.isStoreOffsetWithTermVector() == true); - assertTrue(field.isStorePositionWithTermVector() == true); + assertTrue(field.storeTermVectorOffsets() == true); + assertTrue(field.storeTermVectorPositions() == true); - field = doc.getField(DocHelper.TEXT_FIELD_3_KEY); + field = (Field) doc.getField(DocHelper.TEXT_FIELD_3_KEY); assertTrue(field != null); - assertTrue(field.isTermVectorStored() == false); - assertTrue(field.isStoreOffsetWithTermVector() == false); - assertTrue(field.isStorePositionWithTermVector() == false); + assertTrue(field.storeTermVectors() == false); + assertTrue(field.storeTermVectorOffsets() == false); + assertTrue(field.storeTermVectorPositions() == false); - field = doc.getField(DocHelper.NO_TF_KEY); + field = (Field) doc.getField(DocHelper.NO_TF_KEY); assertTrue(field != null); - assertTrue(field.isTermVectorStored() == false); - assertTrue(field.isStoreOffsetWithTermVector() == false); - assertTrue(field.isStorePositionWithTermVector() == false); + assertTrue(field.storeTermVectors() == false); + assertTrue(field.storeTermVectorOffsets() == false); + assertTrue(field.storeTermVectorPositions() == false); - DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(DocHelper.TEXT_FIELD_3_KEY); - reader.document(0, visitor); - final List fields = visitor.getDocument().getFields(); + Document2StoredFieldVisitor visitor = new Document2StoredFieldVisitor(DocHelper.TEXT_FIELD_3_KEY); + reader.document2(0, visitor); + final List fields = visitor.getDocument().getFields(); assertEquals(1, fields.size()); assertEquals(DocHelper.TEXT_FIELD_3_KEY, fields.get(0).name()); @@ -229,25 +229,25 @@ RandomIndexWriter w = new RandomIndexWriter(random, dir); final int numDocs = atLeast(500); final Number[] answers = new Number[numDocs]; - final NumericField.DataType[] typeAnswers = new NumericField.DataType[numDocs]; + final org.apache.lucene.document.NumericField.DataType[] typeAnswers = new org.apache.lucene.document.NumericField.DataType[numDocs]; for(int id=0;id fieldable1 = doc1.getFields(); - List fieldable2 = doc2.getFields(); + Document doc1 = index1.document2(i); + Document doc2 = index2.document2(i); + List fieldable1 = doc1.getFields(); + List fieldable2 = doc2.getFields(); assertEquals("Different numbers of fields for doc " + i + ".", fieldable1.size(), fieldable2.size()); - Iterator itField1 = fieldable1.iterator(); - Iterator itField2 = fieldable2.iterator(); + Iterator itField1 = fieldable1.iterator(); + Iterator itField2 = fieldable2.iterator(); while (itField1.hasNext()) { - org.apache.lucene.document.Field curField1 = (org.apache.lucene.document.Field) itField1.next(); - 
org.apache.lucene.document.Field curField2 = (org.apache.lucene.document.Field) itField2.next(); + Field curField1 = (Field) itField1.next(); + Field curField2 = (Field) itField2.next(); assertEquals("Different fields names for doc " + i + ".", curField1.name(), curField2.name()); assertEquals("Different field values for doc " + i + ".", curField1.stringValue(), curField2.stringValue()); } Index: lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java (revision 1148022) +++ lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java (working copy) @@ -188,7 +188,7 @@ if (i>0) { int k = i-1; int n = j + k*M; - org.apache.lucene.document.Document prevItereationDoc = reader.document(n); + Document prevItereationDoc = reader.document2(n); assertNotNull(prevItereationDoc); String id = prevItereationDoc.get("id"); assertEquals(k+"_"+j, id); Index: lucene/src/test/org/apache/lucene/index/TestIndexWriter.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestIndexWriter.java (revision 1148022) +++ lucene/src/test/org/apache/lucene/index/TestIndexWriter.java (working copy) @@ -42,7 +42,6 @@ import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.document2.BinaryField; import org.apache.lucene.document2.Document; import org.apache.lucene.document2.Field; @@ -1012,8 +1011,8 @@ w.close(); IndexReader ir = IndexReader.open(dir, true); - org.apache.lucene.document.Document doc2 = ir.document(0); - org.apache.lucene.document.Field f2 = doc2.getField("binary"); + Document doc2 = ir.document2(0); + IndexableField f2 = doc2.getField("binary"); b = f2.binaryValue(null).bytes; assertTrue(b != null); assertEquals(17, b.length, 17); @@ -1283,8 +1282,8 @@ w.close(); IndexReader ir = IndexReader.open(dir, true); - org.apache.lucene.document.Document doc2 = ir.document(0); - org.apache.lucene.document.Field f3 = doc2.getField("binary"); + Document doc2 = ir.document2(0); + IndexableField f3 = doc2.getField("binary"); b = f3.binaryValue(null).bytes; assertTrue(b != null); assertEquals(17, b.length, 17); @@ -1325,20 +1324,20 @@ doc.add(newField("zzz", "1 2 3", customType)); w.addDocument(doc); IndexReader r = w.getReader(); - org.apache.lucene.document.Document doc2 = r.document(0); - Iterator it = doc2.getFields().iterator(); + Document doc2 = r.document2(0); + Iterator it = doc2.getFields().iterator(); assertTrue(it.hasNext()); - org.apache.lucene.document.Field f = (org.apache.lucene.document.Field) it.next(); + Field f = (Field) it.next(); assertEquals(f.name(), "zzz"); assertEquals(f.stringValue(), "a b c"); assertTrue(it.hasNext()); - f = (org.apache.lucene.document.Field) it.next(); + f = (Field) it.next(); assertEquals(f.name(), "aaa"); assertEquals(f.stringValue(), "a b c"); assertTrue(it.hasNext()); - f = (org.apache.lucene.document.Field) it.next(); + f = (Field) it.next(); assertEquals(f.name(), "zzz"); assertEquals(f.stringValue(), "1 2 3"); assertFalse(it.hasNext()); @@ -1669,7 +1668,7 @@ } TopDocs hits = s.search(new TermQuery(new Term("id", testID)), 1); assertEquals(1, hits.totalHits); - org.apache.lucene.document.Document doc = r.document(hits.scoreDocs[0].doc); + Document doc 
= r.document2(hits.scoreDocs[0].doc); Document docExp = docs.get(testID); for(int i=0;i docIDs = new ArrayList(); final SubDocs subDocs = new SubDocs(packID, docIDs); final List docsList = new ArrayList(); @@ -524,7 +525,7 @@ startDocID = docID; } lastDocID = docID; - final Document doc = s.doc(docID); + final Document doc = s.doc2(docID); assertEquals(subDocs.packID, doc.get("packID")); } Index: lucene/src/test/org/apache/lucene/index/TestParallelReader.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestParallelReader.java (revision 1148022) +++ lucene/src/test/org/apache/lucene/index/TestParallelReader.java (working copy) @@ -212,8 +212,8 @@ assertEquals(parallelHits.length, singleHits.length); for(int i = 0; i < parallelHits.length; i++) { assertEquals(parallelHits[i].score, singleHits[i].score, 0.001f); - org.apache.lucene.document.Document docParallel = parallel.doc(parallelHits[i].doc); - org.apache.lucene.document.Document docSingle = single.doc(singleHits[i].doc); + Document docParallel = parallel.doc2(parallelHits[i].doc); + Document docSingle = single.doc2(singleHits[i].doc); assertEquals(docParallel.get("f1"), docSingle.get("f1")); assertEquals(docParallel.get("f2"), docSingle.get("f2")); assertEquals(docParallel.get("f3"), docSingle.get("f3")); Index: lucene/src/test/org/apache/lucene/index/TestRollingUpdates.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestRollingUpdates.java (revision 1148022) +++ lucene/src/test/org/apache/lucene/index/TestRollingUpdates.java (working copy) @@ -40,14 +40,14 @@ IndexReader r = null; final int numUpdates = (int) (SIZE * (2+random.nextDouble())); for(int docIter=0;docIter= SIZE && random.nextInt(50) == 17) { Index: lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java (revision 1148022) +++ lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java (working copy) @@ -87,13 +87,13 @@ BufferedIndexInput.BUFFER_SIZE, true, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR); assertTrue(mergedReader != null); assertTrue(mergedReader.numDocs() == 2); - org.apache.lucene.document.Document newDoc1 = mergedReader.document(0); + Document newDoc1 = mergedReader.document2(0); assertTrue(newDoc1 != null); //There are 2 unstored fields on the document - assertTrue(DocHelper.numFields2(newDoc1) == DocHelper.numFields(doc1) - DocHelper.unstored.size()); - org.apache.lucene.document.Document newDoc2 = mergedReader.document(1); + assertTrue(DocHelper.numFields(newDoc1) == DocHelper.numFields(doc1) - DocHelper.unstored.size()); + Document newDoc2 = mergedReader.document2(1); assertTrue(newDoc2 != null); - assertTrue(DocHelper.numFields2(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size()); + assertTrue(DocHelper.numFields(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size()); DocsEnum termDocs = MultiFields.getTermDocsEnum(mergedReader, MultiFields.getDeletedDocs(mergedReader), Index: lucene/src/test/org/apache/lucene/index/TestSegmentReader.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestSegmentReader.java (revision 1148022) +++ lucene/src/test/org/apache/lucene/index/TestSegmentReader.java (working copy) @@ -25,7 +25,6 @@ import org.apache.lucene.util.LuceneTestCase; import 
org.apache.lucene.util.BytesRef; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.document2.Document; import org.apache.lucene.store.Directory; @@ -61,13 +60,13 @@ public void testDocument() throws IOException { assertTrue(reader.numDocs() == 1); assertTrue(reader.maxDoc() >= 1); - org.apache.lucene.document.Document result = reader.document(0); + Document result = reader.document2(0); assertTrue(result != null); //There are 2 unstored fields on the document that are not preserved across writing - assertTrue(DocHelper.numFields2(result) == DocHelper.numFields(testDoc) - DocHelper.unstored.size()); + assertTrue(DocHelper.numFields(result) == DocHelper.numFields(testDoc) - DocHelper.unstored.size()); - List fields = result.getFields(); - for (final Fieldable field : fields ) { + List fields = result.getFields(); + for (final IndexableField field : fields ) { assertTrue(field != null); assertTrue(DocHelper.nameValues.containsKey(field.name())); } Index: lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java (revision 1148022) +++ lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java (working copy) @@ -30,7 +30,7 @@ import junit.framework.Assert; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Fieldable; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.document2.Document; import org.apache.lucene.document2.Field; import org.apache.lucene.document2.FieldType; @@ -133,8 +133,8 @@ static Term idTerm = new Term("id",""); IndexingThread[] threads; - static Comparator fieldNameComparator = new Comparator() { - public int compare(Fieldable o1, Fieldable o2) { + static Comparator fieldNameComparator = new Comparator() { + public int compare(IndexableField o1, IndexableField o2) { return o1.name().compareTo(o2.name()); } }; @@ -294,7 +294,7 @@ Bits delDocs = sub.getDeletedDocs(); System.out.println(" " + ((SegmentReader) sub).getSegmentInfo()); for(int docID=0;docID0 && (searcher.getIndexReader().getSequentialSubReaders() == null || @@ -211,9 +211,9 @@ ScoreDoc[] sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count", count, sd.length ); - org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc); + Document doc=searcher.doc2(sd[0].doc); assertEquals("First doc", startOffset, Integer.parseInt(doc.get(field)) ); - doc=searcher.doc(sd[sd.length-1].doc); + doc=searcher.doc2(sd[sd.length-1].doc); assertEquals("Last doc", (count-1)*distance+startOffset, Integer.parseInt(doc.get(field)) ); q=NumericRangeQuery.newIntRange(field, precisionStep, null, upper, false, true); @@ -221,9 +221,9 @@ sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count", count, sd.length ); - doc=searcher.doc(sd[0].doc); + doc=searcher.doc2(sd[0].doc); assertEquals("First doc", startOffset, Integer.parseInt(doc.get(field)) ); - doc=searcher.doc(sd[sd.length-1].doc); + doc=searcher.doc2(sd[sd.length-1].doc); assertEquals("Last doc", (count-1)*distance+startOffset, Integer.parseInt(doc.get(field)) ); } @@ -252,9 +252,9 @@ ScoreDoc[] sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count", noDocs-count, sd.length ); - org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc); + Document doc=searcher.doc2(sd[0].doc); assertEquals("First doc", count*distance+startOffset, Integer.parseInt(doc.get(field)) ); - 
doc=searcher.doc(sd[sd.length-1].doc); + doc=searcher.doc2(sd[sd.length-1].doc); assertEquals("Last doc", (noDocs-1)*distance+startOffset, Integer.parseInt(doc.get(field)) ); q=NumericRangeQuery.newIntRange(field, precisionStep, lower, null, true, false); @@ -262,9 +262,9 @@ sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count", noDocs-count, sd.length ); - doc=searcher.doc(sd[0].doc); + doc=searcher.doc2(sd[0].doc); assertEquals("First doc", count*distance+startOffset, Integer.parseInt(doc.get(field)) ); - doc=searcher.doc(sd[sd.length-1].doc); + doc=searcher.doc2(sd[sd.length-1].doc); assertEquals("Last doc", (noDocs-1)*distance+startOffset, Integer.parseInt(doc.get(field)) ); } Index: lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java (revision 1148022) +++ lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java (working copy) @@ -150,9 +150,9 @@ ScoreDoc[] sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count"+type, count, sd.length ); - org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc); + Document doc=searcher.doc2(sd[0].doc); assertEquals("First doc"+type, 2*distance+startOffset, Long.parseLong(doc.get(field)) ); - doc=searcher.doc(sd[sd.length-1].doc); + doc=searcher.doc2(sd[sd.length-1].doc); assertEquals("Last doc"+type, (1+count)*distance+startOffset, Long.parseLong(doc.get(field)) ); if (i>0 && (searcher.getIndexReader().getSequentialSubReaders() == null || @@ -217,9 +217,9 @@ ScoreDoc[] sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count", count, sd.length ); - org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc); + Document doc=searcher.doc2(sd[0].doc); assertEquals("First doc", startOffset, Long.parseLong(doc.get(field)) ); - doc=searcher.doc(sd[sd.length-1].doc); + doc=searcher.doc2(sd[sd.length-1].doc); assertEquals("Last doc", (count-1)*distance+startOffset, Long.parseLong(doc.get(field)) ); q=NumericRangeQuery.newLongRange(field, precisionStep, null, upper, false, true); @@ -227,9 +227,9 @@ sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count", count, sd.length ); - doc=searcher.doc(sd[0].doc); + doc=searcher.doc2(sd[0].doc); assertEquals("First doc", startOffset, Long.parseLong(doc.get(field)) ); - doc=searcher.doc(sd[sd.length-1].doc); + doc=searcher.doc2(sd[sd.length-1].doc); assertEquals("Last doc", (count-1)*distance+startOffset, Long.parseLong(doc.get(field)) ); } @@ -263,9 +263,9 @@ ScoreDoc[] sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count", noDocs-count, sd.length ); - org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc); + Document doc=searcher.doc2(sd[0].doc); assertEquals("First doc", count*distance+startOffset, Long.parseLong(doc.get(field)) ); - doc=searcher.doc(sd[sd.length-1].doc); + doc=searcher.doc2(sd[sd.length-1].doc); assertEquals("Last doc", (noDocs-1)*distance+startOffset, Long.parseLong(doc.get(field)) ); q=NumericRangeQuery.newLongRange(field, precisionStep, lower, null, true, false); @@ -273,9 +273,9 @@ sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count", noDocs-count, sd.length ); - doc=searcher.doc(sd[0].doc); + doc=searcher.doc2(sd[0].doc); assertEquals("First doc", count*distance+startOffset, Long.parseLong(doc.get(field)) ); - doc=searcher.doc(sd[sd.length-1].doc); + doc=searcher.doc2(sd[sd.length-1].doc); 
assertEquals("Last doc", (noDocs-1)*distance+startOffset, Long.parseLong(doc.get(field)) ); } Index: lucene/src/test/org/apache/lucene/search/TestSort.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestSort.java (revision 1148022) +++ lucene/src/test/org/apache/lucene/search/TestSort.java (working copy) @@ -35,6 +35,7 @@ import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.MultiReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; @@ -350,18 +351,18 @@ int lastDocId = 0; boolean fail = false; for (int x = 0; x < n; ++x) { - org.apache.lucene.document.Document doc2 = searcher.doc(result[x].doc); - String[] v = doc2.getValues("tracer"); - String[] v2 = doc2.getValues("tracer2"); + Document doc2 = searcher.doc2(result[x].doc); + IndexableField[] v = doc2.getFields("tracer"); + IndexableField[] v2 = doc2.getFields("tracer2"); for (int j = 0; j < v.length; ++j) { if (last != null) { - int cmp = v[j].compareTo(last); + int cmp = v[j].stringValue().compareTo(last); if (!(cmp >= 0)) { // ensure first field is in order fail = true; System.out.println("fail:" + v[j] + " < " + last); } if (cmp == 0) { // ensure second field is in reverse order - cmp = v2[j].compareTo(lastSub); + cmp = v2[j].stringValue().compareTo(lastSub); if (cmp > 0) { fail = true; System.out.println("rev field fail:" + v2[j] + " > " + lastSub); @@ -373,8 +374,8 @@ } } } - last = v[j]; - lastSub = v2[j]; + last = v[j].stringValue(); + lastSub = v2[j].stringValue(); lastDocId = result[x].doc; buff.append(v[j] + "(" + v2[j] + ")(" + result[x].doc+") "); } @@ -956,10 +957,10 @@ StringBuilder buff = new StringBuilder(10); int n = result.length; for (int i=0; i(); // Initialize the map with the default fields. - fields.put(BODY_FIELD, new Field(BODY_FIELD, "", bodyStore, bodyIndex, termVector)); - fields.put(TITLE_FIELD, new Field(TITLE_FIELD, "", store, index, termVector)); - fields.put(DATE_FIELD, new Field(DATE_FIELD, "", store, index, termVector)); - fields.put(ID_FIELD, new Field(ID_FIELD, "", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS)); - fields.put(NAME_FIELD, new Field(NAME_FIELD, "", store, index, termVector)); + fields.put(BODY_FIELD, new Field(BODY_FIELD, bodyFt, "")); + fields.put(TITLE_FIELD, new Field(TITLE_FIELD, ft, "")); + fields.put(DATE_FIELD, new Field(DATE_FIELD, ft, "")); + fields.put(ID_FIELD, new Field(ID_FIELD, StringField.TYPE_STORED, "")); + fields.put(NAME_FIELD, new Field(NAME_FIELD, ft, "")); numericFields.put(DATE_MSEC_FIELD, new NumericField(DATE_MSEC_FIELD)); numericFields.put(TIME_SEC_FIELD, new NumericField(TIME_SEC_FIELD)); @@ -125,14 +125,14 @@ * reuseFields was set to true, then it attempts to reuse a * Field instance. If such a field does not exist, it creates a new one. 
    */
-  Field getField(String name, Store store, Index index, TermVector termVector) {
+  Field getField(String name, FieldType ft) {
     if (!reuseFields) {
-      return new Field(name, "", store, index, termVector);
+      return new Field(name, ft, "");
     }
 
     Field f = fields.get(name);
     if (f == null) {
-      f = new Field(name, "", store, index, termVector);
+      f = new Field(name, ft, "");
       fields.put(name, f);
     }
     return f;
   }
@@ -179,12 +179,9 @@
   protected Config config;
 
-  protected Store storeVal = Store.NO;
-  protected Store bodyStoreVal = Store.NO;
-  protected Index indexVal = Index.ANALYZED_NO_NORMS;
-  protected Index bodyIndexVal = Index.ANALYZED;
-  protected TermVector termVecVal = TermVector.NO;
-
+  protected FieldType valType;
+  protected FieldType bodyValType;
+
   protected ContentSource source;
   protected boolean reuseFields;
   protected boolean indexProperties;
@@ -196,6 +193,13 @@
 
   private int printNum = 0;
 
+  public DocMaker() {
+    valType = new FieldType(TextField.TYPE_UNSTORED);
+    valType.setOmitNorms(true);
+
+    bodyValType = new FieldType(TextField.TYPE_UNSTORED);
+  }
+
   // create a doc
   // use only part of the body, modify it to keep the rest (or use all if size==0).
   // reset the docdata properties so they are not added more than once.
@@ -206,7 +210,10 @@
     doc.getFields().clear();
 
     // Set ID_FIELD
-    Field idField = ds.getField(ID_FIELD, storeVal, Index.NOT_ANALYZED_NO_NORMS, termVecVal);
+    FieldType ft = new FieldType(valType);
+    ft.setIndexed(false);
+
+    Field idField = ds.getField(ID_FIELD, ft);
     int id;
     if (r != null) {
       id = r.nextInt(updateDocIDLimit);
@@ -223,7 +230,7 @@
     String name = docData.getName();
     if (name == null) name = "";
     name = cnt < 0 ? name : name + "_" + cnt;
-    Field nameField = ds.getField(NAME_FIELD, storeVal, indexVal, termVecVal);
+    Field nameField = ds.getField(NAME_FIELD, valType);
     nameField.setValue(name);
     doc.add(nameField);
 
@@ -242,7 +249,7 @@
     } else {
       dateString = "";
     }
-    Field dateStringField = ds.getField(DATE_FIELD, storeVal, indexVal, termVecVal);
+    Field dateStringField = ds.getField(DATE_FIELD, valType);
     dateStringField.setValue(dateString);
     doc.add(dateStringField);
 
@@ -264,7 +271,7 @@
     // Set TITLE_FIELD
     String title = docData.getTitle();
-    Field titleField = ds.getField(TITLE_FIELD, storeVal, indexVal, termVecVal);
+    Field titleField = ds.getField(TITLE_FIELD, valType);
     titleField.setValue(title == null ? "" : title);
     doc.add(titleField);
 
@@ -285,12 +292,12 @@
       bdy = body.substring(0, size); // use part
       docData.setBody(body.substring(size)); // some left
     }
-    Field bodyField = ds.getField(BODY_FIELD, bodyStoreVal, bodyIndexVal, termVecVal);
+    Field bodyField = ds.getField(BODY_FIELD, bodyValType);
     bodyField.setValue(bdy);
     doc.add(bodyField);
 
     if (storeBytes) {
-      Field bytesField = ds.getField(BYTES_FIELD, Store.YES, Index.NOT_ANALYZED_NO_NORMS, TermVector.NO);
+      Field bytesField = ds.getField(BYTES_FIELD, StringField.TYPE_STORED);
       bytesField.setValue(bdy.getBytes("UTF-8"));
       doc.add(bytesField);
     }
@@ -300,7 +307,7 @@
     Properties props = docData.getProps();
     if (props != null) {
       for (final Map.Entry entry : props.entrySet()) {
-        Field f = ds.getField((String) entry.getKey(), storeVal, indexVal, termVecVal);
+        Field f = ds.getField((String) entry.getKey(), valType);
         f.setValue((String) entry.getValue());
         doc.add(f);
       }
     }
@@ -319,7 +326,7 @@
   protected DocState getDocState() {
     DocState ds = docState.get();
     if (ds == null) {
-      ds = new DocState(reuseFields, storeVal, bodyStoreVal, indexVal, bodyIndexVal, termVecVal);
+      ds = new DocState(reuseFields, valType, bodyValType);
       docState.set(ds);
     }
     return ds;
   }
@@ -455,33 +462,23 @@
     boolean norms = config.get("doc.tokenized.norms", false);
     boolean bodyNorms = config.get("doc.body.tokenized.norms", true);
     boolean termVec = config.get("doc.term.vector", false);
-    storeVal = (stored ? Field.Store.YES : Field.Store.NO);
-    bodyStoreVal = (bodyStored ? Field.Store.YES : Field.Store.NO);
-    if (tokenized) {
-      indexVal = norms ? Index.ANALYZED : Index.ANALYZED_NO_NORMS;
-    } else {
-      indexVal = norms ? Index.NOT_ANALYZED : Index.NOT_ANALYZED_NO_NORMS;
-    }
-
-    if (bodyTokenized) {
-      bodyIndexVal = bodyNorms ? Index.ANALYZED : Index.ANALYZED_NO_NORMS;
-    } else {
-      bodyIndexVal = bodyNorms ? Index.NOT_ANALYZED : Index.NOT_ANALYZED_NO_NORMS;
-    }
-
     boolean termVecPositions = config.get("doc.term.vector.positions", false);
     boolean termVecOffsets = config.get("doc.term.vector.offsets", false);
-    if (termVecPositions && termVecOffsets) {
-      termVecVal = TermVector.WITH_POSITIONS_OFFSETS;
-    } else if (termVecPositions) {
-      termVecVal = TermVector.WITH_POSITIONS;
-    } else if (termVecOffsets) {
-      termVecVal = TermVector.WITH_OFFSETS;
-    } else if (termVec) {
-      termVecVal = TermVector.YES;
-    } else {
-      termVecVal = TermVector.NO;
-    }
+
+    valType.setStored(stored);
+    bodyValType.setStored(bodyStored);
+    valType.setTokenized(tokenized);
+    valType.setOmitNorms(!norms);
+    bodyValType.setTokenized(bodyTokenized);
+    bodyValType.setOmitNorms(!bodyNorms);
+
+    valType.setStoreTermVectors(termVec);
+    valType.setStoreTermVectorPositions(termVecPositions);
+    valType.setStoreTermVectorOffsets(termVecOffsets);
+    bodyValType.setStoreTermVectors(termVec);
+    bodyValType.setStoreTermVectorPositions(termVecPositions);
+    bodyValType.setStoreTermVectorOffsets(termVecOffsets);
+
     storeBytes = config.get("doc.store.body.bytes", false);
     reuseFields = config.get("doc.reuse.fields", true);
Index: modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AddDocTask.java
===================================================================
--- modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AddDocTask.java (revision 1148022)
+++ modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AddDocTask.java (working copy)
@@ -19,7 +19,7 @@
 import org.apache.lucene.benchmark.byTask.PerfRunData;
 import org.apache.lucene.benchmark.byTask.feeds.DocMaker;
-import org.apache.lucene.document.Document;
+import org.apache.lucene.document2.Document;
 
 /**
  * Add a document, optionally with of a certain size.
Index: modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/BenchmarkHighlighter.java
===================================================================
--- modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/BenchmarkHighlighter.java (revision 1148022)
+++ modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/BenchmarkHighlighter.java (working copy)
@@ -18,7 +18,7 @@
  */
 
 import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.document.Document;
+import org.apache.lucene.document2.Document;
 import org.apache.lucene.index.IndexReader;
 
 public abstract class BenchmarkHighlighter {
Index: modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java
===================================================================
--- modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java (revision 1148022)
+++ modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java (working copy)
@@ -27,9 +27,9 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.benchmark.byTask.PerfRunData;
 import org.apache.lucene.benchmark.byTask.feeds.QueryMaker;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.document2.Document;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.TopDocs;
@@ -99,7 +99,7 @@
       Bits delDocs = MultiFields.getDeletedDocs(reader);
       for (int m = 0; m < reader.maxDoc(); m++) {
         if (null == delDocs || !delDocs.get(m)) {
-          doc = reader.document(m);
+          doc = reader.document2(m);
           res += (doc == null ? 0 : 1);
         }
       }
@@ -144,7 +144,7 @@
       System.out.println("numDocs() = " + reader.numDocs());
      for(int i=0;i0 && sufficientFields[i];
Index: modules/benchmark/src/java/org/apache/lucene/benchmark/utils/ExtractWikipedia.java
===================================================================
--- modules/benchmark/src/java/org/apache/lucene/benchmark/utils/ExtractWikipedia.java (revision 1148022)
+++ modules/benchmark/src/java/org/apache/lucene/benchmark/utils/ExtractWikipedia.java (working copy)
@@ -25,7 +25,7 @@
 import org.apache.lucene.benchmark.byTask.feeds.DocMaker;
 import org.apache.lucene.benchmark.byTask.feeds.NoMoreDataException;
 import org.apache.lucene.benchmark.byTask.utils.Config;
-import org.apache.lucene.document.Document;
+import org.apache.lucene.document2.Document;
 
 /**
  * Extract the downloaded Wikipedia dump into separate files for indexing.
Index: modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/DocMakerTest.java
===================================================================
--- modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/DocMakerTest.java (revision 1148022)
+++ modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/DocMakerTest.java (working copy)
@@ -29,7 +29,7 @@
 import org.apache.lucene.benchmark.byTask.tasks.CreateIndexTask;
 import org.apache.lucene.benchmark.byTask.tasks.TaskSequence;
 import org.apache.lucene.benchmark.byTask.utils.Config;
-import org.apache.lucene.document.Document;
+import org.apache.lucene.document2.Document;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.TermQuery;
@@ -137,28 +137,28 @@
     // Don't set anything, use the defaults
     doc = createTestNormsDocument(false, false, false, false);
-    assertTrue(doc.getField(DocMaker.TITLE_FIELD).getOmitNorms());
-    assertFalse(doc.getField(DocMaker.BODY_FIELD).getOmitNorms());
+    assertTrue(doc.getField(DocMaker.TITLE_FIELD).omitNorms());
+    assertFalse(doc.getField(DocMaker.BODY_FIELD).omitNorms());
 
     // Set norms to false
     doc = createTestNormsDocument(true, false, false, false);
-    assertTrue(doc.getField(DocMaker.TITLE_FIELD).getOmitNorms());
-    assertFalse(doc.getField(DocMaker.BODY_FIELD).getOmitNorms());
+    assertTrue(doc.getField(DocMaker.TITLE_FIELD).omitNorms());
+    assertFalse(doc.getField(DocMaker.BODY_FIELD).omitNorms());
 
     // Set norms to true
     doc = createTestNormsDocument(true, true, false, false);
-    assertFalse(doc.getField(DocMaker.TITLE_FIELD).getOmitNorms());
-    assertFalse(doc.getField(DocMaker.BODY_FIELD).getOmitNorms());
+    assertFalse(doc.getField(DocMaker.TITLE_FIELD).omitNorms());
+    assertFalse(doc.getField(DocMaker.BODY_FIELD).omitNorms());
 
     // Set body norms to false
     doc = createTestNormsDocument(false, false, true, false);
-    assertTrue(doc.getField(DocMaker.TITLE_FIELD).getOmitNorms());
-    assertTrue(doc.getField(DocMaker.BODY_FIELD).getOmitNorms());
+    assertTrue(doc.getField(DocMaker.TITLE_FIELD).omitNorms());
+    assertTrue(doc.getField(DocMaker.BODY_FIELD).omitNorms());
 
     // Set body norms to true
     doc = createTestNormsDocument(false, false, true, true);
-    assertTrue(doc.getField(DocMaker.TITLE_FIELD).getOmitNorms());
-    assertFalse(doc.getField(DocMaker.BODY_FIELD).getOmitNorms());
+    assertTrue(doc.getField(DocMaker.TITLE_FIELD).omitNorms());
+    assertFalse(doc.getField(DocMaker.BODY_FIELD).omitNorms());
   }
 }
Index: modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/CountingHighlighterTestTask.java
===================================================================
--- modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/CountingHighlighterTestTask.java (revision 1148022)
+++ modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/CountingHighlighterTestTask.java (working copy)
@@ -26,7 +26,7 @@
 import org.apache.lucene.search.highlight.QueryScorer;
 import org.apache.lucene.search.highlight.TokenSources;
 import org.apache.lucene.search.Query;
-import org.apache.lucene.document.Document;
+import org.apache.lucene.document2.Document;
 import org.apache.lucene.index.IndexReader;
 
 import java.io.IOException;
@@ -45,7 +45,7 @@
   @Override
   protected Document retrieveDoc(IndexReader ir, int id) throws IOException {
-    Document document = ir.document(id);
+    Document document = ir.document2(id);
     if (document != null) {
       numDocsRetrieved++;
     }
Index: modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTaskTest.java
===================================================================
--- modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTaskTest.java (revision 1148022)
+++ modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTaskTest.java (working copy)
@@ -32,10 +32,9 @@
 import org.apache.lucene.benchmark.byTask.feeds.DocMaker;
 import org.apache.lucene.benchmark.byTask.utils.Config;
 import org.apache.lucene.benchmark.byTask.utils.StreamUtils.Type;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document2.Document;
+import org.apache.lucene.document2.Field;
+import org.apache.lucene.document2.StringField;
 
 /** Tests the functionality of {@link WriteLineDocTask}.
  */
 public class WriteLineDocTaskTest extends BenchmarkTestCase {
@@ -46,9 +45,9 @@
     @Override
     public Document makeDocument() throws Exception {
       Document doc = new Document();
-      doc.add(new Field(BODY_FIELD, "body", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
-      doc.add(new Field(TITLE_FIELD, "title", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
-      doc.add(new Field(DATE_FIELD, "date", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
+      doc.add(new StringField(BODY_FIELD, "body"));
+      doc.add(new StringField(TITLE_FIELD, "title"));
+      doc.add(new StringField(DATE_FIELD, "date"));
       return doc;
     }
@@ -60,9 +59,9 @@
     @Override
     public Document makeDocument() throws Exception {
       Document doc = new Document();
-      doc.add(new Field(BODY_FIELD, "body\r\ntext\ttwo", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
-      doc.add(new Field(TITLE_FIELD, "title\r\ntext", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
-      doc.add(new Field(DATE_FIELD, "date\r\ntext", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
+      doc.add(new StringField(BODY_FIELD, "body\r\ntext\ttwo"));
+      doc.add(new StringField(TITLE_FIELD, "title\r\ntext"));
+      doc.add(new StringField(DATE_FIELD, "date\r\ntext"));
       return doc;
     }
@@ -73,8 +72,8 @@
     @Override
     public Document makeDocument() throws Exception {
       Document doc = new Document();
-      doc.add(new Field(TITLE_FIELD, "title", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
-      doc.add(new Field(DATE_FIELD, "date", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
+      doc.add(new StringField(TITLE_FIELD, "title"));
+      doc.add(new StringField(DATE_FIELD, "date"));
       return doc;
     }
   }
@@ -84,8 +83,8 @@
     @Override
     public Document makeDocument() throws Exception {
       Document doc = new Document();
-      doc.add(new Field(BODY_FIELD, "body", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
-      doc.add(new Field(DATE_FIELD, "date", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
+      doc.add(new StringField(BODY_FIELD, "body"));
+      doc.add(new StringField(DATE_FIELD, "date"));
      return doc;
     }
   }
@@ -95,7 +94,7 @@
     @Override
     public Document makeDocument() throws Exception {
       Document doc = new Document();
-      doc.add(new Field(DATE_FIELD, "date", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
+      doc.add(new StringField(DATE_FIELD, "date"));
       return doc;
     }
   }
@@ -106,7 +105,7 @@
     @Override
     public Document makeDocument() throws Exception {
       Document doc = new Document();
-      doc.add(new Field(DATE_FIELD, "date", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
+      doc.add(new StringField(DATE_FIELD, "date"));
       return doc;
     }
   }
@@ -126,9 +125,9 @@
     public Document makeDocument() throws Exception {
       Document doc = new Document();
       String name = Thread.currentThread().getName();
-      doc.add(new Field(BODY_FIELD, "body_" + name, Store.NO, Index.NOT_ANALYZED_NO_NORMS));
-      doc.add(new Field(TITLE_FIELD, "title_" + name, Store.NO, Index.NOT_ANALYZED_NO_NORMS));
-      doc.add(new Field(DATE_FIELD, "date_" + name, Store.NO, Index.NOT_ANALYZED_NO_NORMS));
+      doc.add(new StringField(BODY_FIELD, "body_" + name));
+      doc.add(new StringField(TITLE_FIELD, "title_" + name));
+      doc.add(new StringField(DATE_FIELD, "date_" + name));
       return doc;
     }
Index: solr/src/test/org/apache/solr/BasicFunctionalityTest.java
===================================================================
--- solr/src/test/org/apache/solr/BasicFunctionalityTest.java (revision 1148022)
+++ solr/src/test/org/apache/solr/BasicFunctionalityTest.java (working copy)
@@ -26,6 +26,7 @@
 import javax.xml.parsers.DocumentBuilder;
 import javax.xml.parsers.DocumentBuilderFactory;
 
+import org.apache.lucene.document2.Document;
 import org.apache.lucene.document2.Field;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.LogMergePolicy;
@@ -560,10 +561,10 @@
     core.execute(core.getRequestHandler(req.getParams().get(CommonParams.QT)), req, rsp);
 
     DocList dl = ((ResultContext) rsp.getValues().get("response")).docs;
-    org.apache.lucene.document.Document d = req.getSearcher().doc(dl.iterator().nextDoc());
+    Document d = req.getSearcher().doc2(dl.iterator().nextDoc());
     // ensure field is not lazy, only works for Non-Numeric fields currently (if you change schema behind test, this may fail)
-    assertTrue( d.getFieldable("test_hlt") instanceof org.apache.lucene.document.Field );
-    assertTrue( d.getFieldable("title") instanceof org.apache.lucene.document.Field );
+    assertTrue( d.getField("test_hlt") instanceof Field );
+    assertTrue( d.getField("title") instanceof Field );
     req.close();
   }
@@ -583,10 +584,10 @@
     DocList dl = ((ResultContext) rsp.getValues().get("response")).docs;
     DocIterator di = dl.iterator();
-    org.apache.lucene.document.Document d = req.getSearcher().doc(di.nextDoc());
+    Document d = req.getSearcher().doc2(di.nextDoc());
     // ensure field is lazy
-    assertTrue( !( d.getFieldable("test_hlt") instanceof org.apache.lucene.document.Field ) );
-    assertTrue( d.getFieldable("title") instanceof org.apache.lucene.document.Field );
+    assertTrue( !( d.getField("test_hlt") instanceof Field ) );
+    assertTrue( d.getField("title") instanceof Field );
     req.close();
   }
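
Note (not part of the patch): the hunks above all apply the same two migration patterns — field construction moves from the old Field(name, value, Store, Index, TermVector) constructors to a FieldType (or a sugar class such as StringField) passed to Field(name, type, value), and stored-document access moves from IndexReader.document()/IndexSearcher.doc() to the document2()/doc2() variants, whose fields are read through IndexableField. The sketch below is illustrative only and assumes the org.apache.lucene.document2 and IndexableField APIs introduced by this patch; the class name, the reader variable, the field names, and the package locations of FieldType/TextField are assumptions, not code from the patch.

import org.apache.lucene.document2.Document;
import org.apache.lucene.document2.Field;
import org.apache.lucene.document2.FieldType;
import org.apache.lucene.document2.StringField;
import org.apache.lucene.document2.TextField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexableField;

// Hypothetical helper showing the before/after shape of the APIs this patch migrates to.
public class Document2MigrationSketch {

  // Old style (removed by this patch):
  //   new Field("body", text, Store.NO, Index.ANALYZED_NO_NORMS, TermVector.YES)
  // New style: describe the field once in a FieldType, then pass it to Field(name, type, value).
  static Document makeDocument(String id, String body) {
    FieldType bodyType = new FieldType(TextField.TYPE_UNSTORED); // tokenized, not stored
    bodyType.setOmitNorms(true);        // replaces Index.ANALYZED_NO_NORMS
    bodyType.setStoreTermVectors(true); // replaces TermVector.YES

    Document doc = new Document();
    doc.add(new StringField("id", id)); // replaces Index.NOT_ANALYZED_NO_NORMS
    doc.add(new Field("body", bodyType, body));
    return doc;
  }

  // Old style: reader.document(docID) and Document.getValues() returning String[].
  // New style: reader.document2(docID) and getFields() returning IndexableField[].
  static String firstBodyValue(IndexReader reader, int docID) throws java.io.IOException {
    Document doc = reader.document2(docID);
    IndexableField[] fields = doc.getFields("body");
    return fields.length == 0 ? null : fields[0].stringValue();
  }
}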