Index: lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java =================================================================== --- lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java (revision 1143083) +++ lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java (working copy) @@ -60,7 +60,7 @@ newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))); try { final Document document = new Document(); - FieldType customType = new FieldType(TextField.DEFAULT_TYPE); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); customType.setStoreTermVectorOffsets(true); customType.setStoreTermVectorPositions(true); customType.setStoreTermVectors(true); @@ -108,7 +108,7 @@ try { final Document document = new Document(); - FieldType customType = new FieldType(TextField.DEFAULT_TYPE); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); customType.setStoreTermVectorOffsets(true); customType.setStoreTermVectorPositions(true); customType.setStoreTermVectors(true); @@ -182,7 +182,7 @@ try { final Document document = new Document(); - FieldType customType = new FieldType(TextField.DEFAULT_TYPE); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); customType.setStoreTermVectorOffsets(true); customType.setStoreTermVectorPositions(true); customType.setStoreTermVectors(true); @@ -229,7 +229,7 @@ try { final Document document = new Document(); - FieldType customType = new FieldType(TextField.DEFAULT_TYPE); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); customType.setStored(true); customType.setStoreTermVectorOffsets(true); customType.setStoreTermVectors(true); @@ -273,7 +273,7 @@ newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false))); try { final Document document = new Document(); - FieldType customType = new 
FieldType(TextField.DEFAULT_TYPE); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); customType.setStoreTermVectorOffsets(true); customType.setStoreTermVectorPositions(true); customType.setStoreTermVectors(true); Index: lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java =================================================================== --- lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (revision 1143083) +++ lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (working copy) @@ -1531,7 +1531,7 @@ private Document doc( String f, String v ){ Document doc = new Document(); - FieldType customType = new FieldType(TextField.DEFAULT_TYPE); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); customType.setStored(true); doc.add( new Field( f, customType, v)); return doc; @@ -1658,7 +1658,7 @@ addDoc(writer, text); } Document doc = new Document(); - FieldType storedNumericType = new FieldType(NumericField.DEFAULT_TYPE); + FieldType storedNumericType = new FieldType(NumericField.TYPE_UNSTORED); storedNumericType.setStored(true); NumericField nfield = new NumericField(NUMERIC_FIELD_NAME, storedNumericType); nfield.setIntValue(1); @@ -1696,7 +1696,7 @@ private void addDoc(IndexWriter writer, String text) throws IOException { Document d = new Document(); - FieldType storedType = new FieldType(TextField.DEFAULT_TYPE); + FieldType storedType = new FieldType(TextField.TYPE_UNSTORED); storedType.setStored(true); Field f = new Field(FIELD_NAME, storedType, text); d.add(f); Index: lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java =================================================================== --- lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java (revision 1143083) +++ 
lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java (working copy) @@ -108,7 +108,7 @@ newIndexWriterConfig(TEST_VERSION_CURRENT, new OverlapAnalyzer())); try { final Document document = new Document(); - FieldType customType = new FieldType(TextField.DEFAULT_TYPE); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); customType.setStoreTermVectors(true); customType.setStoreTermVectorOffsets(true); document.add(new Field(FIELD, customType, new TokenStreamOverlap())); @@ -156,7 +156,7 @@ newIndexWriterConfig(TEST_VERSION_CURRENT, new OverlapAnalyzer())); try { final Document document = new Document(); - FieldType customType = new FieldType(TextField.DEFAULT_TYPE); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); customType.setStoreTermVectors(true); customType.setStoreTermVectorOffsets(true); customType.setStoreTermVectorPositions(true); @@ -205,7 +205,7 @@ newIndexWriterConfig(TEST_VERSION_CURRENT, new OverlapAnalyzer())); try { final Document document = new Document(); - FieldType customType = new FieldType(TextField.DEFAULT_TYPE); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); customType.setStoreTermVectors(true); customType.setStoreTermVectorOffsets(true); document.add(new Field(FIELD, customType, new TokenStreamOverlap())); @@ -254,7 +254,7 @@ newIndexWriterConfig(TEST_VERSION_CURRENT, new OverlapAnalyzer())); try { final Document document = new Document(); - FieldType customType = new FieldType(TextField.DEFAULT_TYPE); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); customType.setStoreTermVectors(true); customType.setStoreTermVectorOffsets(true); document.add(new Field(FIELD, customType, new TokenStreamOverlap())); Index: lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java =================================================================== --- 
lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java (revision 1143083) +++ lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java (working copy) @@ -331,7 +331,7 @@ IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( TEST_VERSION_CURRENT, analyzer).setOpenMode(OpenMode.CREATE)); Document doc = new Document(); - FieldType customType = new FieldType(TextField.DEFAULT_TYPE); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); customType.setStoreTermVectors(true); customType.setStoreTermVectorOffsets(true); customType.setStoreTermVectorPositions(true); @@ -350,7 +350,7 @@ IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( TEST_VERSION_CURRENT, analyzerK).setOpenMode(OpenMode.CREATE)); Document doc = new Document(); - FieldType customType = new FieldType(TextField.DEFAULT_TYPE); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); customType.setStoreTermVectors(true); customType.setStoreTermVectorOffsets(true); customType.setStoreTermVectorPositions(true); Index: lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java =================================================================== --- lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java (revision 1143083) +++ lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java (working copy) @@ -131,7 +131,7 @@ IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( TEST_VERSION_CURRENT, analyzerW).setOpenMode(OpenMode.CREATE)); Document doc = new Document(); - FieldType customType = new FieldType(TextField.DEFAULT_TYPE); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); customType.setStoreTermVectors(true); customType.setStoreTermVectorOffsets(true); customType.setStoreTermVectorPositions(true); Index: 
lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java =================================================================== --- lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java (revision 1143083) +++ lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java (working copy) @@ -206,7 +206,7 @@ private void assembleDocument(Document document, int i) { - FieldType customType = new FieldType(TextField.DEFAULT_TYPE); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); customType.setStored(true); customType.setStoreTermVectors(true); customType.setStoreTermVectorOffsets(true); @@ -218,19 +218,19 @@ document.add(new Field("b0", customType, i + " All work and no play makes Jack a dull boy")); //document.add(new Field("b1", i + " All work and no play makes Jack a dull boy", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS, Field.TermVector.NO)); - FieldType customType2 = new FieldType(TextField.DEFAULT_TYPE); + FieldType customType2 = new FieldType(TextField.TYPE_UNSTORED); customType2.setStored(true); customType2.setTokenized(false); customType2.setOmitNorms(true); document.add(new Field("b1", customType2, i + " All work and no play makes Jack a dull boy")); //document.add(new Field("b2", i + " All work and no play makes Jack a dull boy", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.NO)); - FieldType customType3 = new FieldType(TextField.DEFAULT_TYPE); + FieldType customType3 = new FieldType(TextField.TYPE_UNSTORED); customType3.setTokenized(false); document.add(new Field("b1", customType3, i + " All work and no play makes Jack a dull boy")); //document.add(new Field("b3", i + " All work and no play makes Jack a dull boy", Field.Store.YES, Field.Index.NO, Field.TermVector.NO)); - FieldType customType4 = new FieldType(TextField.DEFAULT_TYPE); + FieldType customType4 = new FieldType(TextField.TYPE_UNSTORED); 
customType4.setStored(true); customType4.setIndexed(false); customType4.setTokenized(false); @@ -244,7 +244,7 @@ if (i > 3) { //Field f = new Field("e", i + " Heres Johnny!", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS); //f.setOmitNorms(true); - FieldType customType5 = new FieldType(TextField.DEFAULT_TYPE); + FieldType customType5 = new FieldType(TextField.TYPE_UNSTORED); customType5.setOmitNorms(true); Field f = new Field("e", customType5, i + " Heres Johnny!"); document.add(f); Index: lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestUnoptimizedReaderOnConstructor.java =================================================================== --- lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestUnoptimizedReaderOnConstructor.java (revision 1143083) +++ lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestUnoptimizedReaderOnConstructor.java (working copy) @@ -24,8 +24,8 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.TextField; /** * @since 2009-mar-30 13:15:49 @@ -65,7 +65,7 @@ private void addDocument(IndexWriter iw, String text) throws IOException { Document doc = new Document(); - doc.add(new Field("field", text, Field.Store.NO, Field.Index.ANALYZED)); + doc.add(new TextField("field", text)); iw.addDocument(doc); } } Index: lucene/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java =================================================================== --- lucene/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java (revision 1143083) +++ lucene/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java (working copy) @@ -29,8 +29,9 @@ 
import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenFilter; import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.queryParser.QueryParser; @@ -108,8 +109,8 @@ IndexWriter writer = new IndexWriter(ramdir, new IndexWriterConfig(TEST_VERSION_CURRENT, analyzer).setCodecProvider(_TestUtil.alwaysCodec("Standard"))); Document doc = new Document(); - Field field1 = newField("foo", fooField.toString(), Field.Store.NO, Field.Index.ANALYZED); - Field field2 = newField("term", termField.toString(), Field.Store.NO, Field.Index.ANALYZED); + Field field1 = newField("foo", fooField.toString(), TextField.TYPE_UNSTORED); + Field field2 = newField("term", termField.toString(), TextField.TYPE_UNSTORED); doc.add(field1); doc.add(field2); writer.addDocument(doc); Index: lucene/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java (revision 1143083) +++ lucene/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java (working copy) @@ -21,8 +21,10 @@ import java.util.Arrays; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.search.Collector; import 
org.apache.lucene.search.DefaultSimilarity; @@ -65,13 +67,21 @@ for (int i = 0; i < NUM_DOCS; i++) { Document d = new Document(); - d.add(newField("field", "word", Field.Store.YES, Field.Index.ANALYZED)); - d.add(newField("nonorm", "word", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS)); - d.add(newField("untokfield", "20061212 20071212", Field.Store.YES, Field.Index.ANALYZED)); + FieldType storedTextType = new FieldType(TextField.TYPE_UNSTORED); + storedTextType.setStored(true); + d.add(newField("field", "word", storedTextType)); + + FieldType storedTextType2 = new FieldType(TextField.TYPE_UNSTORED); + storedTextType2.setStored(true); + storedTextType2.setTokenized(false); + storedTextType2.setOmitNorms(true); + d.add(newField("nonorm", "word", storedTextType2)); + d.add(newField("untokfield", "20061212 20071212", storedTextType)); + for (int j = 1; j <= i; j++) { - d.add(newField("field", "crap", Field.Store.YES, Field.Index.ANALYZED)); - d.add(newField("nonorm", "more words", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS)); + d.add(newField("field", "crap", storedTextType)); + d.add(newField("nonorm", "more words", storedTextType2)); } writer.addDocument(d); } Index: lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java (revision 1143083) +++ lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java (working copy) @@ -19,7 +19,7 @@ import java.io.File; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; Index: lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java 
=================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java (revision 1143083) +++ lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java (working copy) @@ -17,8 +17,10 @@ */ import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.BytesRef; @@ -36,8 +38,13 @@ Document doc; for (int i = 0; i < NUM_DOCS; i++) { doc = new Document(); - doc.add(newField("id", i + "", Field.Store.YES, Field.Index.NOT_ANALYZED)); - doc.add(newField("f", i + " " + i, Field.Store.YES, Field.Index.ANALYZED)); + FieldType storedTextType = new FieldType(TextField.TYPE_UNSTORED); + storedTextType.setStored(true); + storedTextType.setTokenized(false); + FieldType storedTextType2 = new FieldType(TextField.TYPE_UNSTORED); + storedTextType2.setStored(true); + doc.add(newField("id", i + "", storedTextType)); + doc.add(newField("f", i + " " + i, storedTextType2)); w.addDocument(doc); } w.close(); @@ -70,7 +77,7 @@ IndexReader ir; ir = IndexReader.open(dirs[0], true); assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); // rounding error - Document doc = ir.document(0); + org.apache.lucene.document.Document doc = ir.document(0); assertEquals("0", doc.get("id")); TermsEnum te = MultiFields.getTerms(ir, "id").iterator(); assertEquals(TermsEnum.SeekStatus.NOT_FOUND, te.seek(new BytesRef("1"))); @@ -115,7 +122,7 @@ IndexReader ir; ir = IndexReader.open(dirs[0], true); assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); - Document doc = ir.document(0); + org.apache.lucene.document.Document doc = ir.document(0); 
assertEquals("0", doc.get("id")); int start = ir.numDocs(); ir.close(); Index: lucene/contrib/misc/src/test/org/apache/lucene/index/TestTermVectorAccessor.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/index/TestTermVectorAccessor.java (revision 1143083) +++ lucene/contrib/misc/src/test/org/apache/lucene/index/TestTermVectorAccessor.java (working copy) @@ -1,8 +1,10 @@ package org.apache.lucene.index; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; @@ -30,33 +32,42 @@ Document doc; doc = new Document(); - doc.add(newField("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); - doc.add(newField("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); - doc.add(newField("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStoreTermVectors(true); + customType.setStoreTermVectorPositions(true); + customType.setStoreTermVectorOffsets(true); + doc.add(newField("a", "a b a c a d a e a f a g a h a", customType)); + doc.add(newField("b", "a b c b d b e b f b g b h b", customType)); + doc.add(newField("c", "a c b c d c e c f c g c h c", customType)); iw.addDocument(doc); doc = new Document(); - doc.add(newField("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS)); - doc.add(newField("b", "a b c b d b e b f b g b h b", 
Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS)); - doc.add(newField("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS)); + FieldType customType2 = new FieldType(TextField.TYPE_UNSTORED); + customType2.setStoreTermVectors(true); + customType2.setStoreTermVectorPositions(true); + doc.add(newField("a", "a b a c a d a e a f a g a h a", customType2)); + doc.add(newField("b", "a b c b d b e b f b g b h b", customType2)); + doc.add(newField("c", "a c b c d c e c f c g c h c", customType2)); iw.addDocument(doc); doc = new Document(); - doc.add(newField("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES)); - doc.add(newField("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES)); - doc.add(newField("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES)); + FieldType customType3 = new FieldType(TextField.TYPE_UNSTORED); + customType3.setStoreTermVectors(true); + doc.add(newField("a", "a b a c a d a e a f a g a h a", customType3)); + doc.add(newField("b", "a b c b d b e b f b g b h b", customType3)); + doc.add(newField("c", "a c b c d c e c f c g c h c", customType3)); iw.addDocument(doc); doc = new Document(); - doc.add(newField("a", "a b a c a d a e a f a g a h a", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.NO)); - doc.add(newField("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.NO)); - doc.add(newField("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.NO)); + doc.add(newField("a", "a b a c a d a e a f a g a h a", TextField.TYPE_UNSTORED)); + doc.add(newField("b", "a b c b d b e b f b g b h b", TextField.TYPE_UNSTORED)); + doc.add(newField("c", "a c b c d c e c f c g c h c", TextField.TYPE_UNSTORED)); iw.addDocument(doc); doc = new Document(); - doc.add(newField("a", "a b a 
c a d a e a f a g a h a", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); - doc.add(newField("b", "a b c b d b e b f b g b h b", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.NO)); - doc.add(newField("c", "a c b c d c e c f c g c h c", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES)); + doc.add(newField("a", "a b a c a d a e a f a g a h a", customType)); + doc.add(newField("b", "a b c b d b e b f b g b h b", TextField.TYPE_UNSTORED)); + doc.add(newField("c", "a c b c d c e c f c g c h c", customType3)); iw.addDocument(doc); iw.close(); Index: lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java (revision 1143083) +++ lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java (working copy) @@ -21,10 +21,9 @@ import java.util.Random; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field.Index; -import org.apache.lucene.document.Field.Store; -import org.apache.lucene.document.Field.TermVector; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.DocsEnum; import org.apache.lucene.index.Fields; import org.apache.lucene.index.IndexReader; @@ -140,7 +139,12 @@ ((TieredMergePolicy)cfg.getMergePolicy()).setUseCompoundFile(false); IndexWriter writer = new IndexWriter(dir, cfg); Document doc = new Document(); - doc.add(newField("f", text, Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS)); + FieldType storedTextType = new FieldType(TextField.TYPE_UNSTORED); + storedTextType.setStored(true); + storedTextType.setStoreTermVectors(true); + 
storedTextType.setStoreTermVectorPositions(true); + storedTextType.setStoreTermVectorOffsets(true); + doc.add(newField("f", text, storedTextType)); writer.addDocument(doc); writer.commit(); writer.addDocument(doc); @@ -148,8 +152,8 @@ writer.close(); IndexReader reader = IndexReader.open(dir, null, true, 1, new AppendingCodecProvider()); assertEquals(2, reader.numDocs()); - doc = reader.document(0); - assertEquals(text, doc.get("f")); + org.apache.lucene.document.Document doc2 = reader.document(0); + assertEquals(text, doc2.get("f")); Fields fields = MultiFields.getFields(reader); Terms terms = fields.terms("f"); assertNotNull(terms); Index: lucene/contrib/misc/src/test/org/apache/lucene/misc/TestHighFreqTerms.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/misc/TestHighFreqTerms.java (revision 1143083) +++ lucene/contrib/misc/src/test/org/apache/lucene/misc/TestHighFreqTerms.java (working copy) @@ -19,8 +19,10 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.store.Directory; @@ -199,13 +201,15 @@ /** * Generate 10 documents where term n has a docFreq of n and a totalTermFreq of n*2 (squared). 
*/ + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); for (int i = 1; i <= 10; i++) { Document doc = new Document(); String content = getContent(i); - doc.add(newField(random, "FIELD_1", content, Field.Store.YES,Field.Index.ANALYZED, Field.TermVector.NO)); + doc.add(newField(random, "FIELD_1", content, customType)); //add a different field - doc.add(newField(random, "different_field", "diff", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO)); + doc.add(newField(random, "different_field", "diff", customType)); writer.addDocument(doc); } @@ -213,7 +217,7 @@ //highest freq terms for a specific field. for (int i = 1; i <= 10; i++) { Document doc = new Document(); - doc.add(newField(random, "different_field", "diff", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO)); + doc.add(newField(random, "different_field", "diff", customType)); writer.addDocument(doc); } // add some docs where tf < df so we can see if sorting works @@ -224,7 +228,7 @@ for (int i = 0; i < highTF; i++) { content += "highTF "; } - doc.add(newField(random, "FIELD_1", content, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO)); + doc.add(newField(random, "FIELD_1", content, customType)); writer.addDocument(doc); // highTF medium df =5 int medium_df = 5; @@ -235,7 +239,7 @@ for (int j = 0; j < tf; j++) { newcontent += "highTFmedDF "; } - newdoc.add(newField(random, "FIELD_1", newcontent, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO)); + newdoc.add(newField(random, "FIELD_1", newcontent, customType)); writer.addDocument(newdoc); } // add a doc with high tf in field different_field @@ -245,7 +249,7 @@ for (int i = 0; i < targetTF; i++) { content += "TF150 "; } - doc.add(newField(random, "different_field", content, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO)); + doc.add(newField(random, "different_field", content, customType)); writer.addDocument(doc); writer.close(); Index: 
lucene/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java (revision 1143083) +++ lucene/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java (working copy) @@ -20,8 +20,10 @@ import java.io.IOException; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.FieldInvertState; import org.apache.lucene.index.FieldNormModifier; import org.apache.lucene.index.IndexReader; @@ -70,16 +72,17 @@ for (int i = 0; i < NUM_DOCS; i++) { Document d = new Document(); - d.add(newField("field", "word", - Field.Store.YES, Field.Index.ANALYZED)); - d.add(newField("nonorm", "word", - Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + d.add(newField("field", "word", customType)); + FieldType customType2 = new FieldType(TextField.TYPE_UNSTORED); + customType2.setStored(true); + customType2.setTokenized(false); customType2.setOmitNorms(true); + d.add(newField("nonorm", "word", customType2)); for (int j = 1; j <= i; j++) { - d.add(newField("field", "crap", - Field.Store.YES, Field.Index.ANALYZED)); - d.add(newField("nonorm", "more words", - Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS)); + d.add(newField("field", "crap", customType)); + d.add(newField("nonorm", "more words", customType2)); } writer.addDocument(d); } Index: lucene/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java =================================================================== --- 
lucene/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java (revision 1143083) +++ lucene/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java (working copy) @@ -21,8 +21,9 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.RandomIndexWriter; @@ -61,10 +62,12 @@ private void addDoc(RandomIndexWriter writer, String accessRights, String price, String date, String inStock) throws IOException { Document doc=new Document(); - doc.add(newField("accessRights",accessRights,Field.Store.YES,Field.Index.ANALYZED)); - doc.add(newField("price",price,Field.Store.YES,Field.Index.ANALYZED)); - doc.add(newField("date",date,Field.Store.YES,Field.Index.ANALYZED)); - doc.add(newField("inStock",inStock,Field.Store.YES,Field.Index.ANALYZED)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + doc.add(newField("accessRights",accessRights,customType)); + doc.add(newField("price",price,customType)); + doc.add(newField("date",date,customType)); + doc.add(newField("inStock",inStock,customType)); writer.addDocument(doc); } Index: lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java =================================================================== --- lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java (revision 1143083) +++ lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java (working copy) @@ -20,8 +20,9 @@ import java.util.Calendar; import java.util.GregorianCalendar; -import org.apache.lucene.document.Document; 
-import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; @@ -62,9 +63,12 @@ for (int i = 0; i < MAX; i++) { Document doc = new Document(); - doc.add(newField("key", "" + (i + 1), Field.Store.YES, Field.Index.NOT_ANALYZED)); - doc.add(newField("owner", (i < MAX / 2) ? "bob" : "sue", Field.Store.YES, Field.Index.NOT_ANALYZED)); - doc.add(newField("date", cal.getTime().toString(), Field.Store.YES, Field.Index.NOT_ANALYZED)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setTokenized(false); + doc.add(newField("key", "" + (i + 1), customType)); + doc.add(newField("owner", (i < MAX / 2) ? "bob" : "sue", customType)); + doc.add(newField("date", cal.getTime().toString(), customType)); writer.addDocument(doc); cal.add(Calendar.DATE, 1); Index: lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java =================================================================== --- lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java (revision 1143083) +++ lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java (working copy) @@ -21,8 +21,9 @@ import java.util.HashSet; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.DocsEnum; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.MultiFields; @@ -76,9 +77,12 @@ private void addDoc(RandomIndexWriter writer, String url, String text, String date) throws IOException { 
Document doc=new Document(); - doc.add(newField(KEY_FIELD,url,Field.Store.YES,Field.Index.NOT_ANALYZED)); - doc.add(newField("text",text,Field.Store.YES,Field.Index.ANALYZED)); - doc.add(newField("date",date,Field.Store.YES,Field.Index.ANALYZED)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setTokenized(false); + doc.add(newField(KEY_FIELD,url,customType)); + doc.add(newField("text",text,TextField.TYPE_UNSTORED)); + doc.add(newField("date",date,TextField.TYPE_UNSTORED)); writer.addDocument(doc); } @@ -89,7 +93,7 @@ ScoreDoc[] hits = searcher.search(tq,df, 1000).scoreDocs; for(int i=0;i0); for(int i=0;i0); for(int i=0;i0); for(int i=0;i0)); - Document doc=searcher.doc(sd[0].doc); + org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc); assertEquals("Should match most similar not most rare variant", "2",doc.get("id")); } //Test multiple input words are having variants produced @@ -101,7 +104,7 @@ TopDocs topDocs = searcher.search(flt, 1); ScoreDoc[] sd = topDocs.scoreDocs; assertTrue("score docs must match 1 doc", (sd!=null)&&(sd.length>0)); - Document doc=searcher.doc(sd[0].doc); + org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc); assertEquals("Should match most similar when using 2 words", "2",doc.get("id")); } //Test bug found when first query word does not match anything @@ -116,7 +119,7 @@ TopDocs topDocs = searcher.search(flt, 1); ScoreDoc[] sd = topDocs.scoreDocs; assertTrue("score docs must match 1 doc", (sd!=null)&&(sd.length>0)); - Document doc=searcher.doc(sd[0].doc); + org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc); assertEquals("Should match most similar when using 2 words", "2",doc.get("id")); } Index: lucene/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java =================================================================== --- lucene/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java (revision 1143083) +++ 
lucene/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java (working copy) @@ -18,8 +18,9 @@ */ import java.util.HashSet; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.RandomIndexWriter; @@ -56,7 +57,10 @@ for (int i = 0; i < 100; i++) { Document doc=new Document(); int term=i*10; //terms are units of 10; - doc.add(newField(fieldName,""+term,Field.Store.YES,Field.Index.NOT_ANALYZED)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setTokenized(false); + doc.add(newField(fieldName,""+term,customType)); w.addDocument(doc); } IndexReader reader = new SlowMultiReaderWrapper(w.getReader()); Index: lucene/contrib/queries/src/test/org/apache/lucene/search/TestSlowCollationMethods.java =================================================================== --- lucene/contrib/queries/src/test/org/apache/lucene/search/TestSlowCollationMethods.java (revision 1143083) +++ lucene/contrib/queries/src/test/org/apache/lucene/search/TestSlowCollationMethods.java (working copy) @@ -4,8 +4,10 @@ import java.text.Collator; import java.util.Locale; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.search.BooleanClause.Occur; @@ -55,7 +57,11 @@ for (int i = 0; i < numDocs; i++) { Document doc = new Document(); String value = _TestUtil.randomUnicodeString(random); - 
Field field = newField("field", value, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setOmitNorms(true); + customType.setTokenized(false); + Field field = newField("field", value, customType); doc.add(field); iw.addDocument(doc); } Index: lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestRegexQuery.java =================================================================== --- lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestRegexQuery.java (revision 1143083) +++ lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestRegexQuery.java (working copy) @@ -24,8 +24,9 @@ import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.TextField; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.index.TermsEnum; @@ -47,7 +48,7 @@ directory = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random, directory); Document doc = new Document(); - doc.add(newField(FN, "the quick brown fox jumps over the lazy dog", Field.Store.NO, Field.Index.ANALYZED)); + doc.add(newField(FN, "the quick brown fox jumps over the lazy dog", TextField.TYPE_UNSTORED)); writer.addDocument(doc); reader = writer.getReader(); writer.close(); Index: lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java =================================================================== --- lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java (revision 1143083) +++ lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java (working copy) @@ -20,8 +20,10 @@ import 
java.io.IOException; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.Term; @@ -62,12 +64,10 @@ // Field.Store.NO, Field.Index.ANALYZED)); // writer.addDocument(doc); // doc = new Document(); - doc.add(newField("field", "auto update", Field.Store.NO, - Field.Index.ANALYZED)); + doc.add(newField("field", "auto update", TextField.TYPE_UNSTORED)); writer.addDocument(doc); doc = new Document(); - doc.add(newField("field", "first auto update", Field.Store.NO, - Field.Index.ANALYZED)); + doc.add(newField("field", "first auto update", TextField.TYPE_UNSTORED)); writer.addDocument(doc); writer.optimize(); writer.close(); @@ -87,13 +87,13 @@ LockObtainFailedException, IOException { // creating a document to store Document lDoc = new Document(); - lDoc.add(newField("field", "a1 b1", Field.Store.NO, - Field.Index.ANALYZED_NO_NORMS)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setOmitNorms(true); + lDoc.add(newField("field", "a1 b1", customType)); // creating a document to store Document lDoc2 = new Document(); - lDoc2.add(newField("field", "a2 b2", Field.Store.NO, - Field.Index.ANALYZED_NO_NORMS)); + lDoc2.add(newField("field", "a2 b2", customType)); // creating first index writer IndexWriter writerA = new IndexWriter(indexStoreA, newIndexWriterConfig( Index: lucene/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java =================================================================== --- lucene/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java (revision 1143083) +++ 
lucene/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java (working copy) @@ -25,8 +25,10 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.search.BooleanClause; @@ -66,7 +68,9 @@ private void addDoc(RandomIndexWriter writer, String text) throws IOException { Document doc = new Document(); - doc.add(newField("text", text, Field.Store.YES, Field.Index.ANALYZED)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + doc.add(newField("text", text, customType)); writer.addDocument(doc); } Index: lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java =================================================================== --- lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java (revision 1143083) +++ lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java (working copy) @@ -21,8 +21,9 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.queryParser.QueryParser; import org.apache.lucene.search.IndexSearcher; @@ -97,7 +98,7 @@ TopDocs td = searcher.search(q, 10); ScoreDoc[] sd = 
td.scoreDocs; for (int i = 0; i < sd.length; i++) { - Document doc = searcher.doc(sd[i].doc); + org.apache.lucene.document.Document doc = searcher.doc(sd[i].doc); String id = doc.get("id"); assertTrue(qString + "matched doc#" + id + " not expected", expecteds .contains(id)); @@ -113,12 +114,12 @@ super.setUp(); rd = newDirectory(); IndexWriter w = new IndexWriter(rd, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); for (int i = 0; i < docsContent.length; i++) { Document doc = new Document(); - doc.add(newField("name", docsContent[i].name, Field.Store.YES, - Field.Index.ANALYZED)); - doc.add(newField("id", docsContent[i].id, Field.Store.YES, - Field.Index.ANALYZED)); + doc.add(newField("name", docsContent[i].name, customType)); + doc.add(newField("id", docsContent[i].id, customType)); w.addDocument(doc); } w.close(); Index: lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java =================================================================== --- lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java (revision 1143083) +++ lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java (working copy) @@ -24,8 +24,8 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.queryParser.core.QueryNodeException; import org.apache.lucene.queryParser.standard.config.DefaultOperatorAttribute.Operator; @@ -320,8 +320,7 @@ Directory ramDir = newDirectory(); IndexWriter iw = new IndexWriter(ramDir, 
newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)); Document doc = new Document(); - doc.add(newField("body", "blah the footest blah", Field.Store.NO, - Field.Index.ANALYZED)); + doc.add(newField("body", "blah the footest blah", TextField.TYPE_UNSTORED)); iw.addDocument(doc); iw.close(); Index: lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java =================================================================== --- lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java (revision 1143083) +++ lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java (working copy) @@ -40,8 +40,8 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.document.DateTools; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; @@ -1226,7 +1226,7 @@ Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new CannedAnalyzer())); Document doc = new Document(); - doc.add(newField("field", "", Field.Store.NO, Field.Index.ANALYZED)); + doc.add(newField("field", "", TextField.TYPE_UNSTORED)); w.addDocument(doc); IndexReader r = IndexReader.open(w, true); IndexSearcher s = newSearcher(r); Index: lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/surround/query/SingleFieldTestDb.java =================================================================== --- lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/surround/query/SingleFieldTestDb.java (revision 1143083) +++ 
lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/surround/query/SingleFieldTestDb.java (working copy) @@ -24,8 +24,9 @@ import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.Version; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; @@ -44,7 +45,7 @@ new MockAnalyzer(random))); for (int j = 0; j < docs.length; j++) { Document d = new Document(); - d.add(new Field(fieldName, docs[j], Field.Store.NO, Field.Index.ANALYZED)); + d.add(new Field(fieldName, TextField.TYPE_UNSTORED, docs[j])); writer.addDocument(d); } writer.close(); Index: lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java =================================================================== --- lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java (revision 1143083) +++ lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java (working copy) @@ -22,9 +22,11 @@ import java.util.Map; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.NumericField; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.NumericField; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; @@ -97,26 +99,30 @@ private void addPoint(IndexWriter writer, String name, double lat, double lng) throws IOException{ Document doc = new Document(); + + FieldType 
customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + doc.add(newField("name", name, customType)); - doc.add(newField("name", name,Field.Store.YES, Field.Index.ANALYZED)); - // convert the lat / long to lucene fields - doc.add(new NumericField(latField, Integer.MAX_VALUE, Field.Store.YES, true).setDoubleValue(lat)); - doc.add(new NumericField(lngField, Integer.MAX_VALUE, Field.Store.YES, true).setDoubleValue(lng)); + FieldType customType2 = new FieldType(NumericField.TYPE_UNSTORED); + customType2.setStored(true); + doc.add(new NumericField(latField, Integer.MAX_VALUE, customType2).setDoubleValue(lat)); + doc.add(new NumericField(lngField, Integer.MAX_VALUE, customType2).setDoubleValue(lng)); // add a default meta field to make searching all documents easy - doc.add(newField("metafile", "doc",Field.Store.YES, Field.Index.ANALYZED)); + doc.add(newField("metafile", "doc", customType)); int ctpsize = ctps.size(); + FieldType customType3 = new FieldType(TextField.TYPE_UNSTORED); + customType3.setStored(true); + customType3.setTokenized(false); + customType3.setOmitNorms(true); for (int i =0; i < ctpsize; i++){ CartesianTierPlotter ctp = ctps.get(i); - doc.add(new NumericField(ctp.getTierFieldName(), Integer.MAX_VALUE, - Field.Store.YES, - true).setDoubleValue(ctp.getTierBoxId(lat,lng))); + doc.add(new NumericField(ctp.getTierFieldName(), Integer.MAX_VALUE, customType2).setDoubleValue(ctp.getTierBoxId(lat,lng))); - doc.add(newField(geoHashPrefix, GeoHashUtils.encode(lat,lng), - Field.Store.YES, - Field.Index.NOT_ANALYZED_NO_NORMS)); + doc.add(newField(geoHashPrefix, GeoHashUtils.encode(lat,lng), customType3)); } writer.addDocument(doc); @@ -278,7 +284,7 @@ assertEquals(2, results); double lastDistance = 0; for(int i =0 ; i < results; i++){ - Document d = searcher.doc(scoreDocs[i].doc); + org.apache.lucene.document.Document d = searcher.doc(scoreDocs[i].doc); String name = d.get("name"); double rsLat = Double.parseDouble(d.get(latField)); @@ 
-374,7 +380,7 @@ assertEquals(18, results); double lastDistance = 0; for(int i =0 ; i < results; i++){ - Document d = searcher.doc(scoreDocs[i].doc); + org.apache.lucene.document.Document d = searcher.doc(scoreDocs[i].doc); String name = d.get("name"); double rsLat = Double.parseDouble(d.get(latField)); double rsLng = Double.parseDouble(d.get(lngField)); @@ -469,7 +475,7 @@ assertEquals(expected[x], results); double lastDistance = 0; for(int i =0 ; i < results; i++){ - Document d = searcher.doc(scoreDocs[i].doc); + org.apache.lucene.document.Document d = searcher.doc(scoreDocs[i].doc); String name = d.get("name"); double rsLat = Double.parseDouble(d.get(latField)); @@ -564,7 +570,7 @@ assertEquals(expected[x], results); for(int i =0 ; i < results; i++){ - Document d = searcher.doc(scoreDocs[i].doc); + org.apache.lucene.document.Document d = searcher.doc(scoreDocs[i].doc); String name = d.get("name"); double rsLat = Double.parseDouble(d.get(latField)); Index: lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestDistance.java =================================================================== --- lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestDistance.java (revision 1143083) +++ lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestDistance.java (working copy) @@ -19,9 +19,11 @@ import java.io.IOException; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.NumericField; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.NumericField; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.Term; @@ -62,15 +64,19 @@ private void addPoint(IndexWriter writer, 
String name, double lat, double lng) throws IOException{ Document doc = new Document(); + + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + doc.add(newField("name", name, customType)); - doc.add(newField("name", name,Field.Store.YES, Field.Index.ANALYZED)); - // convert the lat / long to lucene fields - doc.add(new NumericField(latField, Integer.MAX_VALUE, Field.Store.YES, true).setDoubleValue(lat)); - doc.add(new NumericField(lngField, Integer.MAX_VALUE,Field.Store.YES, true).setDoubleValue(lng)); + FieldType customType2 = new FieldType(NumericField.TYPE_UNSTORED); + customType2.setStored(true); + doc.add(new NumericField(latField, Integer.MAX_VALUE, customType2).setDoubleValue(lat)); + doc.add(new NumericField(lngField, Integer.MAX_VALUE, customType2).setDoubleValue(lng)); // add a default meta field to make searching all documents easy - doc.add(newField("metafile", "doc",Field.Store.YES, Field.Index.ANALYZED)); + doc.add(newField("metafile", "doc", customType)); writer.addDocument(doc); } Index: lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java =================================================================== --- lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java (revision 1143083) +++ lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java (working copy) @@ -9,8 +9,10 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenFilter; import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.NumericField; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.NumericField; +import org.apache.lucene.document2.TextField; +import org.apache.lucene.document2.Document; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import 
org.apache.lucene.search.IndexSearcher; @@ -41,200 +43,202 @@ */ public class TestParser extends LuceneTestCase { - private static CoreParser builder; - private static Directory dir; - private static IndexReader reader; - private static IndexSearcher searcher; + private static CoreParser builder; + private static Directory dir; + private static IndexReader reader; + private static IndexSearcher searcher; - @BeforeClass - public static void beforeClass() throws Exception { - // TODO: rewrite test (this needs to set QueryParser.enablePositionIncrements, too, for work with CURRENT): - Analyzer analyzer=new MockAnalyzer(random, MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET, false); + @BeforeClass + public static void beforeClass() throws Exception { + // TODO: rewrite test (this needs to set QueryParser.enablePositionIncrements, too, for work with CURRENT): + Analyzer analyzer=new MockAnalyzer(random, MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET, false); //initialize the parser - builder=new CorePlusExtensionsParser("contents",analyzer); - - BufferedReader d = new BufferedReader(new InputStreamReader(TestParser.class.getResourceAsStream("reuters21578.txt"))); - dir=newDirectory(); - IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(Version.LUCENE_40, analyzer)); - String line = d.readLine(); - while(line!=null) - { - int endOfDate=line.indexOf('\t'); - String date=line.substring(0,endOfDate).trim(); - String content=line.substring(endOfDate).trim(); - org.apache.lucene.document.Document doc =new org.apache.lucene.document.Document(); - doc.add(newField("date",date,Field.Store.YES,Field.Index.ANALYZED)); - doc.add(newField("contents",content,Field.Store.YES,Field.Index.ANALYZED)); - NumericField numericField = new NumericField("date2"); - numericField.setIntValue(Integer.valueOf(date)); - doc.add(numericField); - writer.addDocument(doc); - line=d.readLine(); - } - d.close(); + builder=new 
CorePlusExtensionsParser("contents",analyzer); + + BufferedReader d = new BufferedReader(new InputStreamReader(TestParser.class.getResourceAsStream("reuters21578.txt"))); + dir=newDirectory(); + IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(Version.LUCENE_40, analyzer)); + String line = d.readLine(); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + while(line!=null) + { + int endOfDate=line.indexOf('\t'); + String date=line.substring(0,endOfDate).trim(); + String content=line.substring(endOfDate).trim(); + Document doc = new Document(); + doc.add(newField("date",date,customType)); + doc.add(newField("contents",content,customType)); + NumericField numericField = new NumericField("date2"); + numericField.setIntValue(Integer.valueOf(date)); + doc.add(numericField); + writer.addDocument(doc); + line=d.readLine(); + } + d.close(); writer.close(); - reader=IndexReader.open(dir, true); - searcher=newSearcher(reader); - - } - - - - - @AfterClass - public static void afterClass() throws Exception { - reader.close(); - searcher.close(); - dir.close(); - reader = null; - searcher = null; - dir = null; - builder = null; - } - - public void testSimpleXML() throws ParserException, IOException - { - Query q=parse("TermQuery.xml"); - dumpResults("TermQuery", q, 5); - } - public void testSimpleTermsQueryXML() throws ParserException, IOException - { - Query q=parse("TermsQuery.xml"); - dumpResults("TermsQuery", q, 5); - } - public void testBooleanQueryXML() throws ParserException, IOException - { - Query q=parse("BooleanQuery.xml"); - dumpResults("BooleanQuery", q, 5); - } - public void testRangeFilterQueryXML() throws ParserException, IOException - { - Query q=parse("RangeFilterQuery.xml"); - dumpResults("RangeFilter", q, 5); - } - public void testUserQueryXML() throws ParserException, IOException - { - Query q=parse("UserInputQuery.xml"); - dumpResults("UserInput with Filter", q, 5); - } - - public void 
testCustomFieldUserQueryXML() throws ParserException, IOException - { - Query q=parse("UserInputQueryCustomField.xml"); - int h = searcher.search(q, null, 1000).totalHits; - assertEquals("UserInputQueryCustomField should produce 0 result ", 0,h); - } - - public void testLikeThisQueryXML() throws Exception - { - Query q=parse("LikeThisQuery.xml"); - dumpResults("like this", q, 5); - } - public void testBoostingQueryXML() throws Exception - { - Query q=parse("BoostingQuery.xml"); - dumpResults("boosting ",q, 5); - } - public void testFuzzyLikeThisQueryXML() throws Exception - { - Query q=parse("FuzzyLikeThisQuery.xml"); - //show rewritten fuzzyLikeThisQuery - see what is being matched on - if(VERBOSE) - { - System.out.println(q.rewrite(reader)); - } - dumpResults("FuzzyLikeThis", q, 5); - } - public void testTermsFilterXML() throws Exception - { - Query q=parse("TermsFilterQuery.xml"); - dumpResults("Terms Filter",q, 5); - } + reader=IndexReader.open(dir, true); + searcher=newSearcher(reader); + + } + + + + + @AfterClass + public static void afterClass() throws Exception { + reader.close(); + searcher.close(); + dir.close(); + reader = null; + searcher = null; + dir = null; + builder = null; + } + + public void testSimpleXML() throws ParserException, IOException + { + Query q=parse("TermQuery.xml"); + dumpResults("TermQuery", q, 5); + } + public void testSimpleTermsQueryXML() throws ParserException, IOException + { + Query q=parse("TermsQuery.xml"); + dumpResults("TermsQuery", q, 5); + } + public void testBooleanQueryXML() throws ParserException, IOException + { + Query q=parse("BooleanQuery.xml"); + dumpResults("BooleanQuery", q, 5); + } + public void testRangeFilterQueryXML() throws ParserException, IOException + { + Query q=parse("RangeFilterQuery.xml"); + dumpResults("RangeFilter", q, 5); + } + public void testUserQueryXML() throws ParserException, IOException + { + Query q=parse("UserInputQuery.xml"); + dumpResults("UserInput with Filter", q, 5); + } + + public 
void testCustomFieldUserQueryXML() throws ParserException, IOException + { + Query q=parse("UserInputQueryCustomField.xml"); + int h = searcher.search(q, null, 1000).totalHits; + assertEquals("UserInputQueryCustomField should produce 0 result ", 0,h); + } + + public void testLikeThisQueryXML() throws Exception + { + Query q=parse("LikeThisQuery.xml"); + dumpResults("like this", q, 5); + } + public void testBoostingQueryXML() throws Exception + { + Query q=parse("BoostingQuery.xml"); + dumpResults("boosting ",q, 5); + } + public void testFuzzyLikeThisQueryXML() throws Exception + { + Query q=parse("FuzzyLikeThisQuery.xml"); + //show rewritten fuzzyLikeThisQuery - see what is being matched on + if(VERBOSE) + { + System.out.println(q.rewrite(reader)); + } + dumpResults("FuzzyLikeThis", q, 5); + } + public void testTermsFilterXML() throws Exception + { + Query q=parse("TermsFilterQuery.xml"); + dumpResults("Terms Filter",q, 5); + } public void testBoostingTermQueryXML() throws Exception - { - Query q=parse("BoostingTermQuery.xml"); - dumpResults("BoostingTermQuery",q, 5); - } + { + Query q=parse("BoostingTermQuery.xml"); + dumpResults("BoostingTermQuery",q, 5); + } public void testSpanTermXML() throws Exception - { - Query q=parse("SpanQuery.xml"); - dumpResults("Span Query",q, 5); - } - public void testConstantScoreQueryXML() throws Exception - { - Query q=parse("ConstantScoreQuery.xml"); - dumpResults("ConstantScoreQuery",q, 5); - } - public void testMatchAllDocsPlusFilterXML() throws ParserException, IOException - { - Query q=parse("MatchAllDocsQuery.xml"); - dumpResults("MatchAllDocsQuery with range filter", q, 5); - } - public void testBooleanFilterXML() throws ParserException, IOException - { - Query q=parse("BooleanFilter.xml"); - dumpResults("Boolean filter", q, 5); - } - public void testNestedBooleanQuery() throws ParserException, IOException - { - Query q=parse("NestedBooleanQuery.xml"); - dumpResults("Nested Boolean query", q, 5); - } - public void 
testCachedFilterXML() throws ParserException, IOException - { - Query q=parse("CachedFilter.xml"); - dumpResults("Cached filter", q, 5); - } - public void testDuplicateFilterQueryXML() throws ParserException, IOException - { + { + Query q=parse("SpanQuery.xml"); + dumpResults("Span Query",q, 5); + } + public void testConstantScoreQueryXML() throws Exception + { + Query q=parse("ConstantScoreQuery.xml"); + dumpResults("ConstantScoreQuery",q, 5); + } + public void testMatchAllDocsPlusFilterXML() throws ParserException, IOException + { + Query q=parse("MatchAllDocsQuery.xml"); + dumpResults("MatchAllDocsQuery with range filter", q, 5); + } + public void testBooleanFilterXML() throws ParserException, IOException + { + Query q=parse("BooleanFilter.xml"); + dumpResults("Boolean filter", q, 5); + } + public void testNestedBooleanQuery() throws ParserException, IOException + { + Query q=parse("NestedBooleanQuery.xml"); + dumpResults("Nested Boolean query", q, 5); + } + public void testCachedFilterXML() throws ParserException, IOException + { + Query q=parse("CachedFilter.xml"); + dumpResults("Cached filter", q, 5); + } + public void testDuplicateFilterQueryXML() throws ParserException, IOException + { Assume.assumeTrue(searcher.getIndexReader().getSequentialSubReaders() == null || searcher.getIndexReader().getSequentialSubReaders().length == 1); - Query q=parse("DuplicateFilterQuery.xml"); - int h = searcher.search(q, null, 1000).totalHits; - assertEquals("DuplicateFilterQuery should produce 1 result ", 1,h); - } - - public void testNumericRangeFilterQueryXML() throws ParserException, IOException - { - Query q=parse("NumericRangeFilterQuery.xml"); - dumpResults("NumericRangeFilter", q, 5); - } - - public void testNumericRangeQueryQueryXML() throws ParserException, IOException - { - Query q=parse("NumericRangeQueryQuery.xml"); - dumpResults("NumericRangeQuery", q, 5); - } - + Query q=parse("DuplicateFilterQuery.xml"); + int h = searcher.search(q, null, 1000).totalHits; + 
assertEquals("DuplicateFilterQuery should produce 1 result ", 1,h); + } + + public void testNumericRangeFilterQueryXML() throws ParserException, IOException + { + Query q=parse("NumericRangeFilterQuery.xml"); + dumpResults("NumericRangeFilter", q, 5); + } + + public void testNumericRangeQueryQueryXML() throws ParserException, IOException + { + Query q=parse("NumericRangeQueryQuery.xml"); + dumpResults("NumericRangeQuery", q, 5); + } + - //================= Helper methods =================================== - private Query parse(String xmlFileName) throws ParserException, IOException - { - InputStream xmlStream=TestParser.class.getResourceAsStream(xmlFileName); - Query result=builder.parse(xmlStream); - xmlStream.close(); - return result; - } - private void dumpResults(String qType,Query q, int numDocs) throws IOException - { + //================= Helper methods =================================== + private Query parse(String xmlFileName) throws ParserException, IOException + { + InputStream xmlStream=TestParser.class.getResourceAsStream(xmlFileName); + Query result=builder.parse(xmlStream); + xmlStream.close(); + return result; + } + private void dumpResults(String qType,Query q, int numDocs) throws IOException + { if (VERBOSE) { System.out.println("TEST: query=" + q); } TopDocs hits = searcher.search(q, null, numDocs); - assertTrue(qType +" should produce results ", hits.totalHits>0); - if(VERBOSE) - { - System.out.println("========="+qType+"============"); - ScoreDoc[] scoreDocs = hits.scoreDocs; - for(int i=0;i0); + if(VERBOSE) + { + System.out.println("========="+qType+"============"); + ScoreDoc[] scoreDocs = hits.scoreDocs; + for(int i=0;iThis class also helps you to limit the resolution of your dates. Do not + * save dates with a finer resolution than you really need, as then + * RangeQuery and PrefixQuery will require more memory and become slower. + * + *

+ * Another approach is {@link NumericUtils}, which provides + * a sortable binary representation (prefix encoded) of numeric values, which + * date/time are. + * For indexing a {@link Date} or {@link Calendar}, just get the unix timestamp as + * long using {@link Date#getTime} or {@link Calendar#getTimeInMillis} and + * index this as a numeric value with {@link NumericField} + * and use {@link NumericRangeQuery} to query it. + */ +public class DateTools { + + final static TimeZone GMT = TimeZone.getTimeZone("GMT"); + + private static final ThreadLocal TL_CAL = new ThreadLocal() { + @Override + protected Calendar initialValue() { + return Calendar.getInstance(GMT, Locale.US); + } + }; + + //indexed by format length + private static final ThreadLocal TL_FORMATS = new ThreadLocal() { + @Override + protected SimpleDateFormat[] initialValue() { + SimpleDateFormat[] arr = new SimpleDateFormat[Resolution.MILLISECOND.formatLen+1]; + for (Resolution resolution : Resolution.values()) { + arr[resolution.formatLen] = (SimpleDateFormat)resolution.format.clone(); + } + return arr; + } + }; + + // cannot create, the class has static methods only + private DateTools() {} + + /** + * Converts a Date to a string suitable for indexing. + * + * @param date the date to be converted + * @param resolution the desired resolution, see + * {@link #round(Date, DateTools.Resolution)} + * @return a string in format yyyyMMddHHmmssSSS or shorter, + * depending on resolution; using GMT as timezone + */ + public static String dateToString(Date date, Resolution resolution) { + return timeToString(date.getTime(), resolution); + } + + /** + * Converts a millisecond time to a string suitable for indexing. 
+ * + * @param time the date expressed as milliseconds since January 1, 1970, 00:00:00 GMT + * @param resolution the desired resolution, see + * {@link #round(long, DateTools.Resolution)} + * @return a string in format yyyyMMddHHmmssSSS or shorter, + * depending on resolution; using GMT as timezone + */ + public static String timeToString(long time, Resolution resolution) { + final Date date = new Date(round(time, resolution)); + return TL_FORMATS.get()[resolution.formatLen].format(date); + } + + /** + * Converts a string produced by timeToString or + * dateToString back to a time, represented as the + * number of milliseconds since January 1, 1970, 00:00:00 GMT. + * + * @param dateString the date string to be converted + * @return the number of milliseconds since January 1, 1970, 00:00:00 GMT + * @throws ParseException if dateString is not in the + * expected format + */ + public static long stringToTime(String dateString) throws ParseException { + return stringToDate(dateString).getTime(); + } + + /** + * Converts a string produced by timeToString or + * dateToString back to a time, represented as a + * Date object. + * + * @param dateString the date string to be converted + * @return the parsed time as a Date object + * @throws ParseException if dateString is not in the + * expected format + */ + public static Date stringToDate(String dateString) throws ParseException { + try { + return TL_FORMATS.get()[dateString.length()].parse(dateString); + } catch (Exception e) { + throw new ParseException("Input is not a valid date string: " + dateString, 0); + } + } + + /** + * Limit a date's resolution. For example, the date 2004-09-21 13:50:11 + * will be changed to 2004-09-01 00:00:00 when using + * Resolution.MONTH. 
+ * + * @param resolution The desired resolution of the date to be returned + * @return the date with all values more precise than resolution + * set to 0 or 1 + */ + public static Date round(Date date, Resolution resolution) { + return new Date(round(date.getTime(), resolution)); + } + + /** + * Limit a date's resolution. For example, the date 1095767411000 + * (which represents 2004-09-21 13:50:11) will be changed to + * 1093989600000 (2004-09-01 00:00:00) when using + * Resolution.MONTH. + * + * @param resolution The desired resolution of the date to be returned + * @return the date with all values more precise than resolution + * set to 0 or 1, expressed as milliseconds since January 1, 1970, 00:00:00 GMT + */ + @SuppressWarnings("fallthrough") + public static long round(long time, Resolution resolution) { + final Calendar calInstance = TL_CAL.get(); + calInstance.setTimeInMillis(time); + + switch (resolution) { + //NOTE: switch statement fall-through is deliberate + case YEAR: + calInstance.set(Calendar.MONTH, 0); + case MONTH: + calInstance.set(Calendar.DAY_OF_MONTH, 1); + case DAY: + calInstance.set(Calendar.HOUR_OF_DAY, 0); + case HOUR: + calInstance.set(Calendar.MINUTE, 0); + case MINUTE: + calInstance.set(Calendar.SECOND, 0); + case SECOND: + calInstance.set(Calendar.MILLISECOND, 0); + case MILLISECOND: + // don't cut off anything + break; + default: + throw new IllegalArgumentException("unknown resolution " + resolution); + } + return calInstance.getTimeInMillis(); + } + + /** Specifies the time granularity. 
*/ + public static enum Resolution { + + YEAR(4), MONTH(6), DAY(8), HOUR(10), MINUTE(12), SECOND(14), MILLISECOND(17); + + final int formatLen; + final SimpleDateFormat format;//should be cloned before use, since it's not threadsafe + + Resolution(int formatLen) { + this.formatLen = formatLen; + // formatLen 10's place: 11111111 + // formatLen 1's place: 12345678901234567 + this.format = new SimpleDateFormat("yyyyMMddHHmmssSSS".substring(0,formatLen),Locale.US); + this.format.setTimeZone(GMT); + } + + /** this method returns the name of the resolution + * in lowercase (for backwards compatibility) */ + @Override + public String toString() { + return super.toString().toLowerCase(Locale.ENGLISH); + } + + } + +} Index: lucene/src/java/org/apache/lucene/document2/Document.java =================================================================== --- lucene/src/java/org/apache/lucene/document2/Document.java (revision 1143083) +++ lucene/src/java/org/apache/lucene/document2/Document.java (working copy) @@ -163,6 +163,18 @@ return fields.size(); } + public final List getFields() { + return fields; + } + + public final String get(String name) { + for (IndexableField field : fields) { + if (field.name().equals(name) && (field.binaryValue(null) == null)) + return field.stringValue(); + } + return null; + } + /** Prints the fields of a document for human consumption. 
*/ @Override public final String toString() { Index: lucene/src/java/org/apache/lucene/document2/NumericField.java =================================================================== --- lucene/src/java/org/apache/lucene/document2/NumericField.java (revision 1143083) +++ lucene/src/java/org/apache/lucene/document2/NumericField.java (working copy) @@ -19,8 +19,6 @@ import java.io.Reader; -import org.apache.lucene.document.NumericField.DataType; - import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.NumericTokenStream; import org.apache.lucene.util.NumericUtils; @@ -147,13 +145,24 @@ } */ - public static final FieldType DEFAULT_TYPE = new FieldType(); + public static final FieldType TYPE_UNSTORED = new FieldType(); + public static final FieldType TYPE_STORED = new FieldType(); static { - DEFAULT_TYPE.setIndexed(true); - DEFAULT_TYPE.setOmitNorms(true); - DEFAULT_TYPE.setOmitTermFreqAndPositions(true); - DEFAULT_TYPE.freeze(); + TYPE_UNSTORED.setIndexed(true); + TYPE_UNSTORED.setTokenized(true); + TYPE_UNSTORED.setOmitNorms(true); + TYPE_UNSTORED.setOmitTermFreqAndPositions(true); + TYPE_UNSTORED.freeze(); + + TYPE_STORED.setIndexed(true); + TYPE_STORED.setStored(true); + TYPE_STORED.setTokenized(true); + TYPE_STORED.setOmitNorms(true); + TYPE_STORED.setOmitTermFreqAndPositions(true); + TYPE_STORED.freeze(); } + + public static enum DataType { INT, LONG, FLOAT, DOUBLE } private DataType dataType; private transient NumericTokenStream numericTS; @@ -171,7 +180,7 @@ * the field name */ public NumericField(String name) { - this(name, NumericUtils.PRECISION_STEP_DEFAULT, NumericField.DEFAULT_TYPE); + this(name, NumericUtils.PRECISION_STEP_DEFAULT, NumericField.TYPE_UNSTORED); } /** @@ -208,7 +217,7 @@ * >precision step */ public NumericField(String name, int precisionStep) { - this(name, precisionStep, NumericField.DEFAULT_TYPE); + this(name, precisionStep, NumericField.TYPE_UNSTORED); } /** Index: 
lucene/src/java/org/apache/lucene/document2/StringField.java =================================================================== --- lucene/src/java/org/apache/lucene/document2/StringField.java (revision 1143083) +++ lucene/src/java/org/apache/lucene/document2/StringField.java (working copy) @@ -19,17 +19,23 @@ public final class StringField extends Field { + public static final FieldType TYPE_UNSTORED = new FieldType(); + public static final FieldType TYPE_STORED = new FieldType(); + static { + TYPE_UNSTORED.setIndexed(true); + TYPE_UNSTORED.setOmitNorms(true); + TYPE_UNSTORED.setOmitTermFreqAndPositions(true); + TYPE_UNSTORED.freeze(); - public static final FieldType DEFAULT_TYPE = new FieldType(); - static { - DEFAULT_TYPE.setIndexed(true); - DEFAULT_TYPE.setOmitNorms(true); - DEFAULT_TYPE.setOmitTermFreqAndPositions(true); - DEFAULT_TYPE.freeze(); + TYPE_STORED.setIndexed(true); + TYPE_STORED.setStored(true); + TYPE_STORED.setOmitNorms(true); + TYPE_STORED.setOmitTermFreqAndPositions(true); + TYPE_STORED.freeze(); } public StringField(String name, boolean internName, String value) { - super(name, StringField.DEFAULT_TYPE, value); + super(name, StringField.TYPE_UNSTORED, value); } public StringField(String name, String value) { Index: lucene/src/java/org/apache/lucene/document2/TextField.java =================================================================== --- lucene/src/java/org/apache/lucene/document2/TextField.java (revision 1143083) +++ lucene/src/java/org/apache/lucene/document2/TextField.java (working copy) @@ -23,23 +23,29 @@ public final class TextField extends Field { - public static final FieldType DEFAULT_TYPE = new FieldType(); + public static final FieldType TYPE_UNSTORED = new FieldType(); + public static final FieldType TYPE_STORED = new FieldType(); static { - DEFAULT_TYPE.setIndexed(true); - DEFAULT_TYPE.setTokenized(true); - DEFAULT_TYPE.freeze(); + TYPE_UNSTORED.setIndexed(true); + TYPE_UNSTORED.setTokenized(true); + TYPE_UNSTORED.freeze(); 
+ + TYPE_STORED.setIndexed(true); + TYPE_STORED.setStored(true); + TYPE_STORED.setTokenized(true); + TYPE_STORED.freeze(); } public TextField(String name, Reader reader) { - super(name, TextField.DEFAULT_TYPE, reader); + super(name, TextField.TYPE_UNSTORED, reader); } public TextField(String name, String value) { - super(name, TextField.DEFAULT_TYPE, value); + super(name, TextField.TYPE_UNSTORED, value); } public TextField(String name, TokenStream stream) { - super(name, TextField.DEFAULT_TYPE, stream); + super(name, TextField.TYPE_UNSTORED, stream); } public boolean isNumeric() { Index: lucene/src/test-framework/org/apache/lucene/index/DocHelper.java =================================================================== --- lucene/src/test-framework/org/apache/lucene/index/DocHelper.java (revision 1143083) +++ lucene/src/test-framework/org/apache/lucene/index/DocHelper.java (working copy) @@ -26,9 +26,11 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.Fieldable; +import org.apache.lucene.document2.BinaryField; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.search.SimilarityProvider; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; @@ -36,63 +38,110 @@ import static org.apache.lucene.util.LuceneTestCase.TEST_VERSION_CURRENT; class DocHelper { + + public static final FieldType customType; public static final String FIELD_1_TEXT = "field one text"; public static final String TEXT_FIELD_1_KEY = "textField1"; - public static Field textField1 = new Field(TEXT_FIELD_1_KEY, FIELD_1_TEXT, - Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO); - + public 
static Field textField1; + static { + customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + textField1 = new Field(TEXT_FIELD_1_KEY, customType, FIELD_1_TEXT); + } + + public static final FieldType customType2; public static final String FIELD_2_TEXT = "field field field two text"; //Fields will be lexicographically sorted. So, the order is: field, text, two public static final int [] FIELD_2_FREQS = {3, 1, 1}; public static final String TEXT_FIELD_2_KEY = "textField2"; - public static Field textField2 = new Field(TEXT_FIELD_2_KEY, FIELD_2_TEXT, Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS); + public static Field textField2; + static { + customType2 = new FieldType(TextField.TYPE_UNSTORED); + customType2.setStored(true); + customType2.setStoreTermVectors(true); + customType2.setStoreTermVectorPositions(true); + customType2.setStoreTermVectorOffsets(true); + textField2 = new Field(TEXT_FIELD_2_KEY, customType2, FIELD_2_TEXT); + } + public static final FieldType customType3; public static final String FIELD_3_TEXT = "aaaNoNorms aaaNoNorms bbbNoNorms"; public static final String TEXT_FIELD_3_KEY = "textField3"; - public static Field textField3 = new Field(TEXT_FIELD_3_KEY, FIELD_3_TEXT, Field.Store.YES, Field.Index.ANALYZED); - static { textField3.setOmitNorms(true); } + public static Field textField3; + + static { + customType3 = new FieldType(TextField.TYPE_UNSTORED); + customType3.setStored(true); + customType3.setOmitNorms(true); + textField3 = new Field(TEXT_FIELD_3_KEY, customType3, FIELD_3_TEXT); + } + public static final FieldType customType4; public static final String KEYWORD_TEXT = "Keyword"; public static final String KEYWORD_FIELD_KEY = "keyField"; - public static Field keyField = new Field(KEYWORD_FIELD_KEY, KEYWORD_TEXT, - Field.Store.YES, Field.Index.NOT_ANALYZED); + public static Field keyField; + static { + customType4 = new FieldType(TextField.TYPE_UNSTORED); + 
customType4.setStored(true); + customType4.setTokenized(false); + keyField = new Field(KEYWORD_FIELD_KEY, customType4, KEYWORD_TEXT); + } + public static final FieldType customType5; public static final String NO_NORMS_TEXT = "omitNormsText"; public static final String NO_NORMS_KEY = "omitNorms"; - public static Field noNormsField = new Field(NO_NORMS_KEY, NO_NORMS_TEXT, - Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS); + public static Field noNormsField; + static { + customType5 = new FieldType(TextField.TYPE_UNSTORED); + customType5.setOmitNorms(true); + customType5.setStored(true); + customType5.setTokenized(false); + noNormsField = new Field(NO_NORMS_KEY, customType5, NO_NORMS_TEXT); + } + public static final FieldType customType6; public static final String NO_TF_TEXT = "analyzed with no tf and positions"; public static final String NO_TF_KEY = "omitTermFreqAndPositions"; - public static Field noTFField = new Field(NO_TF_KEY, NO_TF_TEXT, - Field.Store.YES, Field.Index.ANALYZED); + public static Field noTFField; static { - noTFField.setOmitTermFreqAndPositions(true); + customType6 = new FieldType(TextField.TYPE_UNSTORED); + customType6.setOmitTermFreqAndPositions(true); + customType6.setStored(true); + noTFField = new Field(NO_TF_KEY, customType6, NO_TF_TEXT); } + public static final FieldType customType7; public static final String UNINDEXED_FIELD_TEXT = "unindexed field text"; public static final String UNINDEXED_FIELD_KEY = "unIndField"; - public static Field unIndField = new Field(UNINDEXED_FIELD_KEY, UNINDEXED_FIELD_TEXT, - Field.Store.YES, Field.Index.NO); + public static Field unIndField; + static { + customType7 = new FieldType(); + customType7.setStored(true); + unIndField = new Field(UNINDEXED_FIELD_KEY, customType7, UNINDEXED_FIELD_TEXT); + } public static final String UNSTORED_1_FIELD_TEXT = "unstored field text"; public static final String UNSTORED_FIELD_1_KEY = "unStoredField1"; - public static Field unStoredField1 = new 
Field(UNSTORED_FIELD_1_KEY, UNSTORED_1_FIELD_TEXT, - Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.NO); + public static Field unStoredField1 = new Field(UNSTORED_FIELD_1_KEY, TextField.TYPE_UNSTORED, UNSTORED_1_FIELD_TEXT); + public static final FieldType customType8; public static final String UNSTORED_2_FIELD_TEXT = "unstored field text"; public static final String UNSTORED_FIELD_2_KEY = "unStoredField2"; - public static Field unStoredField2 = new Field(UNSTORED_FIELD_2_KEY, UNSTORED_2_FIELD_TEXT, - Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.YES); + public static Field unStoredField2; + static { + customType8 = new FieldType(TextField.TYPE_UNSTORED); + customType8.setStoreTermVectors(true); + unStoredField2 = new Field(UNSTORED_FIELD_2_KEY, customType8, UNSTORED_2_FIELD_TEXT); + } public static final String LAZY_FIELD_BINARY_KEY = "lazyFieldBinary"; public static byte [] LAZY_FIELD_BINARY_BYTES; public static Field lazyFieldBinary; - + public static final String LAZY_FIELD_KEY = "lazyField"; public static final String LAZY_FIELD_TEXT = "These are some field bytes"; - public static Field lazyField = new Field(LAZY_FIELD_KEY, LAZY_FIELD_TEXT, Field.Store.YES, Field.Index.ANALYZED); + public static Field lazyField = new Field(LAZY_FIELD_KEY, customType, LAZY_FIELD_TEXT); public static final String LARGE_LAZY_FIELD_KEY = "largeLazyField"; public static String LARGE_LAZY_FIELD_TEXT; @@ -101,15 +150,13 @@ //From Issue 509 public static final String FIELD_UTF1_TEXT = "field one \u4e00text"; public static final String TEXT_FIELD_UTF1_KEY = "textField1Utf8"; - public static Field textUtfField1 = new Field(TEXT_FIELD_UTF1_KEY, FIELD_UTF1_TEXT, - Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO); + public static Field textUtfField1 = new Field(TEXT_FIELD_UTF1_KEY, customType, FIELD_UTF1_TEXT); public static final String FIELD_UTF2_TEXT = "field field field \u4e00two text"; //Fields will be lexicographically sorted. 
So, the order is: field, text, two public static final int [] FIELD_UTF2_FREQS = {3, 1, 1}; public static final String TEXT_FIELD_UTF2_KEY = "textField2Utf8"; - public static Field textUtfField2 = new Field(TEXT_FIELD_UTF2_KEY, FIELD_UTF2_TEXT, Field.Store.YES, - Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS); + public static Field textUtfField2 = new Field(TEXT_FIELD_UTF2_KEY, customType2, FIELD_UTF2_TEXT); @@ -135,16 +182,16 @@ largeLazyField//placeholder for large field, since this is null. It must always be last }; - public static Map all =new HashMap(); - public static Map indexed =new HashMap(); - public static Map stored =new HashMap(); - public static Map unstored=new HashMap(); - public static Map unindexed=new HashMap(); - public static Map termvector=new HashMap(); - public static Map notermvector=new HashMap(); - public static Map lazy= new HashMap(); - public static Map noNorms=new HashMap(); - public static Map noTf=new HashMap(); + public static Map all =new HashMap(); + public static Map indexed =new HashMap(); + public static Map stored =new HashMap(); + public static Map unstored=new HashMap(); + public static Map unindexed=new HashMap(); + public static Map termvector=new HashMap(); + public static Map notermvector=new HashMap(); + public static Map lazy= new HashMap(); + public static Map noNorms=new HashMap(); + public static Map noTf=new HashMap(); static { //Initialize the large Lazy Field @@ -158,28 +205,28 @@ LAZY_FIELD_BINARY_BYTES = "These are some binary field bytes".getBytes("UTF8"); } catch (UnsupportedEncodingException e) { } - lazyFieldBinary = new Field(LAZY_FIELD_BINARY_KEY, LAZY_FIELD_BINARY_BYTES); + lazyFieldBinary = new BinaryField(LAZY_FIELD_BINARY_KEY, LAZY_FIELD_BINARY_BYTES); fields[fields.length - 2] = lazyFieldBinary; LARGE_LAZY_FIELD_TEXT = buffer.toString(); - largeLazyField = new Field(LARGE_LAZY_FIELD_KEY, LARGE_LAZY_FIELD_TEXT, Field.Store.YES, Field.Index.ANALYZED); + largeLazyField = new 
Field(LARGE_LAZY_FIELD_KEY, customType, LARGE_LAZY_FIELD_TEXT); fields[fields.length - 1] = largeLazyField; for (int i=0; i map, Fieldable field) { + private static void add(Map map, IndexableField field) { map.put(field.name(), field); } @@ -248,6 +295,10 @@ } public static int numFields(Document doc) { + return doc.size(); + } + + public static int numFields2(org.apache.lucene.document.Document doc) { return doc.getFields().size(); } } Index: lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java =================================================================== --- lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java (revision 1143083) +++ lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java (working copy) @@ -1079,16 +1079,17 @@ } public static org.apache.lucene.document2.Field newField(Random random, String name, String value, FieldType type) { + FieldType newType = new FieldType(type); if (usually(random)) { // most of the time, don't modify the params - return new org.apache.lucene.document2.Field(name, type, value); + return new org.apache.lucene.document2.Field(name, newType, value); } - if (!type.stored() && random.nextBoolean()) { - type.setStored(true); // randomly store it + if (!newType.stored() && random.nextBoolean()) { + newType.setStored(true); // randomly store it } - return new org.apache.lucene.document2.Field(name, type, value); + return new org.apache.lucene.document2.Field(name, newType, value); } /** Index: lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java =================================================================== --- lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java (revision 1143083) +++ lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java (working copy) @@ -34,12 +34,12 @@ import java.util.zip.ZipEntry; import java.util.zip.ZipFile; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Fieldable; +import 
org.apache.lucene.document2.Document; import org.apache.lucene.index.CheckIndex; import org.apache.lucene.index.ConcurrentMergeScheduler; import org.apache.lucene.index.FieldInfos; import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.LogMergePolicy; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.MergeScheduler; @@ -372,10 +372,9 @@ /** Adds field info for a Document. */ public static void add(Document doc, FieldInfos fieldInfos) { - List fields = doc.getFields(); - for (Fieldable field : fields) { - fieldInfos.addOrUpdate(field.name(), field.isIndexed(), field.isTermVectorStored(), field.isStorePositionWithTermVector(), - field.isStoreOffsetWithTermVector(), field.getOmitNorms(), false, field.getOmitTermFreqAndPositions()); + for (IndexableField field : doc) { + fieldInfos.addOrUpdate(field.name(), field.indexed(), field.storeTermVectors(), field.storeTermVectorPositions(), + field.storeTermVectorOffsets(), field.omitNorms(), false, field.omitTermFreqAndPositions()); } } Index: lucene/src/test/org/apache/lucene/TestDemo.java =================================================================== --- lucene/src/test/org/apache/lucene/TestDemo.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/TestDemo.java (working copy) @@ -55,7 +55,7 @@ Document doc = new Document(); String longTerm = "longtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongtermlongterm"; String text = "This is the text to be indexed. 
" + longTerm; - FieldType textType = new FieldType(TextField.DEFAULT_TYPE); + FieldType textType = new FieldType(TextField.TYPE_UNSTORED); textType.setStored(true); doc.add(newField("fieldname", text, textType)); iwriter.addDocument(doc); Index: lucene/src/test/org/apache/lucene/TestExternalCodecs.java =================================================================== --- lucene/src/test/org/apache/lucene/TestExternalCodecs.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/TestExternalCodecs.java (working copy) @@ -20,7 +20,7 @@ import org.apache.lucene.util.*; import org.apache.lucene.util.Bits; import org.apache.lucene.index.*; -import org.apache.lucene.document.*; +import org.apache.lucene.document2.*; import org.apache.lucene.search.*; import org.apache.lucene.analysis.*; import org.apache.lucene.index.codecs.*; @@ -518,13 +518,13 @@ w.setInfoStream(VERBOSE ? System.out : null); Document doc = new Document(); // uses default codec: - doc.add(newField("field1", "this field uses the standard codec as the test", Field.Store.NO, Field.Index.ANALYZED)); + doc.add(newField("field1", "this field uses the standard codec as the test", TextField.TYPE_UNSTORED)); // uses pulsing codec: - Field field2 = newField("field2", "this field uses the pulsing codec as the test", Field.Store.NO, Field.Index.ANALYZED); + Field field2 = newField("field2", "this field uses the pulsing codec as the test", TextField.TYPE_UNSTORED); provider.setFieldCodec(field2.name(), "Pulsing"); doc.add(field2); - Field idField = newField("id", "", Field.Store.NO, Field.Index.NOT_ANALYZED); + Field idField = newField("id", "", StringField.TYPE_UNSTORED); provider.setFieldCodec(idField.name(), "Pulsing"); doc.add(idField); Index: lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java =================================================================== --- lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java (revision 1143083) +++ 
lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java (working copy) @@ -31,8 +31,9 @@ import org.apache.lucene.index.MergeScheduler; import org.apache.lucene.index.MergePolicy.OneMerge; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.StringField; /** * Holds tests cases to verify external APIs are accessible @@ -90,7 +91,7 @@ dir.failOn(new FailOnlyOnMerge()); Document doc = new Document(); - Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED); + Field idField = newField("id", "", StringField.TYPE_STORED); doc.add(idField); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( Index: lucene/src/test/org/apache/lucene/TestSearch.java =================================================================== --- lucene/src/test/org/apache/lucene/TestSearch.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/TestSearch.java (working copy) @@ -26,7 +26,7 @@ import junit.textui.TestRunner; import org.apache.lucene.store.*; -import org.apache.lucene.document.*; +import org.apache.lucene.document2.*; import org.apache.lucene.analysis.*; import org.apache.lucene.index.*; import org.apache.lucene.search.*; @@ -92,8 +92,8 @@ }; for (int j = 0; j < docs.length; j++) { Document d = new Document(); - d.add(newField("contents", docs[j], Field.Store.YES, Field.Index.ANALYZED)); - d.add(newField("id", ""+j, Field.Index.NOT_ANALYZED_NO_NORMS)); + d.add(newField("contents", docs[j], TextField.TYPE_STORED)); + d.add(newField("id", ""+j, StringField.TYPE_UNSTORED)); writer.addDocument(d); } writer.close(); @@ -127,7 +127,7 @@ out.println(hits.length + " total results"); for (int i = 0 ; i < hits.length && i < 10; i++) { - Document d = searcher.doc(hits[i].doc); + org.apache.lucene.document.Document d = 
searcher.doc(hits[i].doc); out.println(i + " " + hits[i].score + " " + d.get("contents")); } } Index: lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java =================================================================== --- lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java (working copy) @@ -23,7 +23,7 @@ import java.util.Random; import org.apache.lucene.store.*; -import org.apache.lucene.document.*; +import org.apache.lucene.document2.*; import org.apache.lucene.analysis.*; import org.apache.lucene.index.*; import org.apache.lucene.search.*; @@ -93,8 +93,8 @@ for (int j = 0; j < MAX_DOCS; j++) { Document d = new Document(); - d.add(newField(PRIORITY_FIELD, HIGH_PRIORITY, Field.Store.YES, Field.Index.ANALYZED)); - d.add(newField(ID_FIELD, Integer.toString(j), Field.Store.YES, Field.Index.ANALYZED)); + d.add(newField(PRIORITY_FIELD, HIGH_PRIORITY, TextField.TYPE_STORED)); + d.add(newField(ID_FIELD, Integer.toString(j), TextField.TYPE_STORED)); writer.addDocument(d); } writer.close(); @@ -142,7 +142,7 @@ out.println(hits.length + " total results\n"); for (int i = 0 ; i < hits.length; i++) { if ( i < 10 || (i > 94 && i < 105) ) { - Document d = searcher.doc(hits[i].doc); + org.apache.lucene.document.Document d = searcher.doc(hits[i].doc); out.println(i + " " + d.get(ID_FIELD)); } } @@ -152,7 +152,7 @@ assertEquals("total results", expectedCount, hits.length); for (int i = 0 ; i < hits.length; i++) { if (i < 10 || (i > 94 && i < 105) ) { - Document d = searcher.doc(hits[i].doc); + org.apache.lucene.document.Document d = searcher.doc(hits[i].doc); assertEquals("check " + i, String.valueOf(i), d.get(ID_FIELD)); } } Index: lucene/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java =================================================================== --- lucene/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java (revision 1143083) +++ 
lucene/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java (working copy) @@ -22,9 +22,8 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.Field.TermVector; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.MultiFields; import org.apache.lucene.index.DocsAndPositionsEnum; @@ -60,7 +59,7 @@ stream = new CachingTokenFilter(stream); - doc.add(new Field("preanalyzed", stream, TermVector.NO)); + doc.add(new TextField("preanalyzed", stream)); // 1) we consume all tokens twice before we add the doc to the index checkTokens(stream); Index: lucene/src/test/org/apache/lucene/index/Test2BTerms.java =================================================================== --- lucene/src/test/org/apache/lucene/index/Test2BTerms.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/Test2BTerms.java (working copy) @@ -22,7 +22,7 @@ import org.apache.lucene.search.*; import org.apache.lucene.analysis.*; import org.apache.lucene.analysis.tokenattributes.*; -import org.apache.lucene.document.*; +import org.apache.lucene.document2.*; import org.apache.lucene.index.codecs.CodecProvider; import java.io.File; import java.io.IOException; @@ -176,9 +176,12 @@ Document doc = new Document(); final MyTokenStream ts = new MyTokenStream(random, TERMS_PER_DOC); - Field field = new Field("field", ts); - field.setOmitTermFreqAndPositions(true); - field.setOmitNorms(true); + + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setOmitTermFreqAndPositions(true); + customType.setOmitNorms(true); + Field field = new Field("field", customType, ts); doc.add(field); //w.setInfoStream(System.out); final int 
numDocs = (int) (TERM_COUNT/TERMS_PER_DOC); Index: lucene/src/test/org/apache/lucene/index/TestAddIndexes.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestAddIndexes.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/TestAddIndexes.java (working copy) @@ -23,11 +23,11 @@ import java.util.List; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.Field.Index; -import org.apache.lucene.document.Field.Store; -import org.apache.lucene.document.Field.TermVector; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.StringField; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.index.codecs.CodecProvider; import org.apache.lucene.index.codecs.mocksep.MockSepCodec; @@ -164,11 +164,12 @@ // Adds 10 docs, then replaces them with another 10 // docs, so 10 pending deletes: + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setTokenized(false); for (int i = 0; i < 20; i++) { Document doc = new Document(); - doc.add(newField("id", "" + (i % 10), Field.Store.NO, Field.Index.NOT_ANALYZED)); - doc.add(newField("content", "bbb " + i, Field.Store.NO, - Field.Index.ANALYZED)); + doc.add(newField("id", "" + (i % 10), customType)); + doc.add(newField("content", "bbb " + i, TextField.TYPE_UNSTORED)); writer.updateDocument(new Term("id", "" + (i%10)), doc); } // Deletes one of the 10 added docs, leaving 9: @@ -200,10 +201,12 @@ // Adds 10 docs, then replaces them with another 10 // docs, so 10 pending deletes: + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setTokenized(false); for (int i = 0; i < 20; i++) { Document doc = new 
Document(); - doc.add(newField("id", "" + (i % 10), Field.Store.NO, Field.Index.NOT_ANALYZED)); - doc.add(newField("content", "bbb " + i, Field.Store.NO, Field.Index.ANALYZED)); + doc.add(newField("id", "" + (i % 10), customType)); + doc.add(newField("content", "bbb " + i, TextField.TYPE_UNSTORED)); writer.updateDocument(new Term("id", "" + (i%10)), doc); } @@ -238,11 +241,12 @@ // Adds 10 docs, then replaces them with another 10 // docs, so 10 pending deletes: + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setTokenized(false); for (int i = 0; i < 20; i++) { Document doc = new Document(); - doc.add(newField("id", "" + (i % 10), Field.Store.NO, Field.Index.NOT_ANALYZED)); - doc.add(newField("content", "bbb " + i, Field.Store.NO, - Field.Index.ANALYZED)); + doc.add(newField("id", "" + (i % 10), customType)); + doc.add(newField("content", "bbb " + i, TextField.TYPE_UNSTORED)); writer.updateDocument(new Term("id", "" + (i%10)), doc); } @@ -502,8 +506,7 @@ private void addDocs(IndexWriter writer, int numDocs) throws IOException { for (int i = 0; i < numDocs; i++) { Document doc = new Document(); - doc.add(newField("content", "aaa", Field.Store.NO, - Field.Index.ANALYZED)); + doc.add(newField("content", "aaa", TextField.TYPE_UNSTORED)); writer.addDocument(doc); } } @@ -511,8 +514,7 @@ private void addDocs2(IndexWriter writer, int numDocs) throws IOException { for (int i = 0; i < numDocs; i++) { Document doc = new Document(); - doc.add(newField("content", "bbb", Field.Store.NO, - Field.Index.ANALYZED)); + doc.add(newField("content", "bbb", TextField.TYPE_UNSTORED)); writer.addDocument(doc); } } @@ -581,20 +583,22 @@ .setMaxBufferedDocs(5).setMergePolicy(lmp)); Document doc = new Document(); - doc.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES, - Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + 
customType.setStoreTermVectors(true); + customType.setStoreTermVectorPositions(true); + customType.setStoreTermVectorOffsets(true); + doc.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType)); for(int i=0;i<60;i++) writer.addDocument(doc); Document doc2 = new Document(); - doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES, - Field.Index.NO)); - doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES, - Field.Index.NO)); - doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES, - Field.Index.NO)); - doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES, - Field.Index.NO)); + FieldType customType2 = new FieldType(); + customType2.setStored(true); + doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType2)); + doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType2)); + doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType2)); + doc2.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType2)); for(int i=0;i<10;i++) writer.addDocument(doc2); writer.close(); @@ -618,7 +622,7 @@ private void addDoc(IndexWriter writer) throws IOException { Document doc = new Document(); - doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED)); + doc.add(newField("content", "aaa", TextField.TYPE_UNSTORED)); writer.addDocument(doc); } @@ -943,7 +947,7 @@ IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)); IndexWriter writer = new IndexWriter(dirs[i], conf); Document doc = new Document(); - doc.add(new Field("id", "myid", Store.NO, Index.NOT_ANALYZED_NO_NORMS)); + doc.add(new StringField("id", "myid")); writer.addDocument(doc); writer.close(); } @@ -972,8 +976,10 @@ private void addDocs3(IndexWriter writer, int numDocs) throws IOException { for (int i = 0; i < numDocs; i++) { Document doc = new 
Document(); - doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED)); - doc.add(newField("id", "" + i, Field.Store.YES, Field.Index.ANALYZED)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + doc.add(newField("content", "aaa", TextField.TYPE_UNSTORED)); + doc.add(newField("id", "" + i, customType)); writer.addDocument(doc); } } @@ -1060,7 +1066,10 @@ dirs[i] = new RAMDirectory(); IndexWriter w = new IndexWriter(dirs[i], new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); Document d = new Document(); - d.add(new Field("c", "v", Store.YES, Index.ANALYZED, TermVector.YES)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setStoreTermVectors(true); + d.add(new Field("c", customType, "v")); w.addDocument(d); w.close(); } @@ -1098,10 +1107,12 @@ new MockAnalyzer(random)).setMergePolicy(lmp2); IndexWriter w2 = new IndexWriter(src, conf2); Document doc = new Document(); - doc.add(new Field("c", "some text", Store.YES, Index.ANALYZED)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + doc.add(new Field("c", customType, "some text")); w2.addDocument(doc); doc = new Document(); - doc.add(new Field("d", "delete", Store.NO, Index.NOT_ANALYZED_NO_NORMS)); + doc.add(new StringField("d", "delete")); w2.addDocument(doc); w2.commit(); w2.deleteDocuments(new Term("d", "delete")); @@ -1151,7 +1162,9 @@ conf.setCodecProvider(provider); IndexWriter w = new IndexWriter(toAdd, conf); Document doc = new Document(); - doc.add(newField("foo", "bar", Index.NOT_ANALYZED)); + FieldType customType = new FieldType(); + customType.setIndexed(true); + doc.add(newField("foo", "bar", customType)); w.addDocument(doc); w.close(); } Index: lucene/src/test/org/apache/lucene/index/TestAtomicUpdate.java =================================================================== --- 
lucene/src/test/org/apache/lucene/index/TestAtomicUpdate.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/TestAtomicUpdate.java (working copy) @@ -18,7 +18,7 @@ import org.apache.lucene.util.*; import org.apache.lucene.store.*; -import org.apache.lucene.document.*; +import org.apache.lucene.document2.*; import org.apache.lucene.analysis.MockAnalyzer; import java.util.Random; @@ -93,10 +93,12 @@ @Override public void doWork() throws Exception { // Update all 100 docs... + FieldType customType = new FieldType(StringField.TYPE_UNSTORED); + customType.setStored(true); for(int i=0; i<100; i++) { Document d = new Document(); - d.add(new Field("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED)); - d.add(new Field("contents", English.intToEnglish(i+10*count), Field.Store.NO, Field.Index.ANALYZED)); + d.add(new Field("id", customType, Integer.toString(i))); + d.add(new TextField("contents", English.intToEnglish(i+10*count))); writer.updateDocument(new Term("id", Integer.toString(i)), d); } } @@ -134,10 +136,12 @@ writer.setInfoStream(VERBOSE ? 
System.out : null); // Establish a base index of 100 docs: + FieldType customType = new FieldType(StringField.TYPE_UNSTORED); + customType.setStored(true); for(int i=0;i<100;i++) { Document d = new Document(); - d.add(newField("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED)); - d.add(newField("contents", English.intToEnglish(i), Field.Store.NO, Field.Index.ANALYZED)); + d.add(newField("id", Integer.toString(i), customType)); + d.add(newField("contents", English.intToEnglish(i), TextField.TYPE_UNSTORED)); if ((i-1)%7 == 0) { writer.commit(); } Index: lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (working copy) @@ -27,10 +27,12 @@ import java.util.Random; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.document.Fieldable; -import org.apache.lucene.document.NumericField; +import org.apache.lucene.document2.NumericField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.search.DefaultSimilarity; import org.apache.lucene.search.DocIdSetIterator; @@ -285,12 +287,12 @@ for(int i=0;i<35;i++) { if (!delDocs.get(i)) { - Document d = reader.document(i); + org.apache.lucene.document.Document d = reader.document(i); List fields = d.getFields(); if (d.getField("content3") == null) { final int numFields = 5; assertEquals(numFields, fields.size()); - Field f = d.getField("id"); + org.apache.lucene.document.Field f = d.getField("id"); assertEquals(""+i, f.stringValue()); f = 
d.getField("utf8"); @@ -318,7 +320,7 @@ // First document should be #21 since it's norm was // increased: - Document d = searcher.getIndexReader().document(hits[0].doc); + org.apache.lucene.document.Document d = searcher.getIndexReader().document(hits[0].doc); assertEquals("didn't get the right document first", "21", d.get("id")); doTestHits(hits, 34, searcher.getIndexReader()); @@ -364,7 +366,7 @@ // make sure searching sees right # hits IndexSearcher searcher = new IndexSearcher(dir, true); ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs; - Document d = searcher.getIndexReader().document(hits[0].doc); + org.apache.lucene.document.Document d = searcher.getIndexReader().document(hits[0].doc); assertEquals("wrong first document", "21", d.get("id")); doTestHits(hits, 44, searcher.getIndexReader()); searcher.close(); @@ -412,7 +414,7 @@ IndexSearcher searcher = new IndexSearcher(dir, true); ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs; assertEquals("wrong number of hits", 34, hits.length); - Document d = searcher.doc(hits[0].doc); + org.apache.lucene.document.Document d = searcher.doc(hits[0].doc); assertEquals("wrong first document", "21", d.get("id")); searcher.close(); @@ -582,12 +584,20 @@ private void addDoc(IndexWriter writer, int id) throws IOException { Document doc = new Document(); - doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED)); - doc.add(new Field("id", Integer.toString(id), Field.Store.YES, Field.Index.NOT_ANALYZED)); - doc.add(new Field("autf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); - doc.add(new Field("utf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); - doc.add(new Field("content2", "here is more content with aaa 
aaa aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); - doc.add(new Field("fie\u2C77ld", "field with non-ascii name", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); + doc.add(new TextField("content", "aaa")); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setTokenized(false); + doc.add(new Field("id", customType, Integer.toString(id))); + FieldType customType2 = new FieldType(TextField.TYPE_UNSTORED); + customType2.setStored(true); + customType2.setStoreTermVectors(true); + customType2.setStoreTermVectorPositions(true); + customType2.setStoreTermVectorOffsets(true); + doc.add(new Field("autf8", customType2, "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd")); + doc.add(new Field("utf8", customType2, "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd")); + doc.add(new Field("content2", customType2, "here is more content with aaa aaa aaa")); + doc.add(new Field("fie\u2C77ld", customType2, "field with non-ascii name")); // add numeric fields, to test if flex preserves encoding doc.add(new NumericField("trieInt", 4).setIntValue(id)); doc.add(new NumericField("trieLong", 4).setLongValue(id)); @@ -596,11 +606,15 @@ private void addNoProxDoc(IndexWriter writer) throws IOException { Document doc = new Document(); - Field f = new Field("content3", "aaa", Field.Store.YES, Field.Index.ANALYZED); - f.setOmitTermFreqAndPositions(true); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setOmitTermFreqAndPositions(true); + Field f = new Field("content3", customType, "aaa"); doc.add(f); - f = new Field("content4", "aaa", Field.Store.YES, Field.Index.NO); - f.setOmitTermFreqAndPositions(true); + FieldType customType2 = new FieldType(); + customType2.setStored(true); + customType2.setOmitTermFreqAndPositions(true); + f = new Field("content4", customType2, "aaa"); 
doc.add(f); writer.addDocument(doc); } @@ -670,7 +684,7 @@ for (int id=10; id<15; id++) { ScoreDoc[] hits = searcher.search(NumericRangeQuery.newIntRange("trieInt", 4, Integer.valueOf(id), Integer.valueOf(id), true, true), 100).scoreDocs; assertEquals("wrong number of hits", 1, hits.length); - Document d = searcher.doc(hits[0].doc); + org.apache.lucene.document.Document d = searcher.doc(hits[0].doc); assertEquals(String.valueOf(id), d.get("id")); hits = searcher.search(NumericRangeQuery.newLongRange("trieLong", 4, Long.valueOf(id), Long.valueOf(id), true, true), 100).scoreDocs; Index: lucene/src/test/org/apache/lucene/index/TestBinaryTerms.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestBinaryTerms.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/TestBinaryTerms.java (working copy) @@ -19,8 +19,10 @@ import java.io.IOException; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.codecs.CodecProvider; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.TermQuery; @@ -47,8 +49,10 @@ bytes.bytes[1] = (byte) (255 - i); bytes.length = 2; Document doc = new Document(); - doc.add(new Field("id", "" + i, Field.Store.YES, Field.Index.NO)); - doc.add(new Field("bytes", tokenStream)); + FieldType customType = new FieldType(); + customType.setStored(true); + doc.add(new Field("id", customType, "" + i)); + doc.add(new TextField("bytes", tokenStream)); iw.addDocument(doc); } Index: lucene/src/test/org/apache/lucene/index/TestCheckIndex.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestCheckIndex.java (revision 1143083) +++ 
lucene/src/test/org/apache/lucene/index/TestCheckIndex.java (working copy) @@ -26,8 +26,10 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.store.Directory; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.util.Constants; public class TestCheckIndex extends LuceneTestCase { @@ -36,7 +38,12 @@ Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2)); Document doc = new Document(); - doc.add(newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setStoreTermVectors(true); + customType.setStoreTermVectorPositions(true); + customType.setStoreTermVectorOffsets(true); + doc.add(newField("field", "aaa", customType)); for(int i=0;i<19;i++) { writer.addDocument(doc); } Index: lucene/src/test/org/apache/lucene/index/TestCodecs.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestCodecs.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/TestCodecs.java (working copy) @@ -22,9 +22,10 @@ import java.util.HashSet; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.Field.Store; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import 
org.apache.lucene.index.codecs.CodecProvider; import org.apache.lucene.index.codecs.FieldsConsumer; import org.apache.lucene.index.codecs.FieldsProducer; @@ -330,7 +331,9 @@ pq.add(new Term("content", "ccc")); final Document doc = new Document(); - doc.add(newField("content", "aaa bbb ccc ddd", Store.NO, Field.Index.ANALYZED_NO_NORMS)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setOmitNorms(true); + doc.add(newField("content", "aaa bbb ccc ddd", customType)); // add document and force commit for creating a first segment writer.addDocument(doc); Index: lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java (working copy) @@ -19,8 +19,10 @@ import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.util.LuceneTestCase; @@ -75,7 +77,10 @@ IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2)); writer.setInfoStream(VERBOSE ? 
System.out : null); Document doc = new Document(); - Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setTokenized(false); + Field idField = newField("id", "", customType); doc.add(idField); int extraCount = 0; @@ -135,7 +140,10 @@ writer.setInfoStream(VERBOSE ? System.out : null); Document doc = new Document(); - Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setTokenized(false); + Field idField = newField("id", "", customType); doc.add(idField); for(int i=0;i<10;i++) { if (VERBOSE) { @@ -180,7 +188,7 @@ for(int j=0;j<21;j++) { Document doc = new Document(); - doc.add(newField("content", "a b c", Field.Store.NO, Field.Index.ANALYZED)); + doc.add(newField("content", "a b c", TextField.TYPE_UNSTORED)); writer.addDocument(doc); } @@ -202,7 +210,10 @@ public void testNoWaitClose() throws IOException { MockDirectoryWrapper directory = newDirectory(); Document doc = new Document(); - Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setTokenized(false); + Field idField = newField("id", "", customType); doc.add(idField); IndexWriter writer = new IndexWriter( Index: lucene/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java (working copy) @@ -20,11 +20,11 @@ import java.io.IOException; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; 
-import org.apache.lucene.document.Field.Index; -import org.apache.lucene.document.Field.Store; -import org.apache.lucene.document.Field.TermVector; +import org.apache.lucene.document2.BinaryField; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; import org.junit.Test; @@ -38,8 +38,11 @@ IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(NoMergePolicy.COMPOUND_FILES)); Document d1 = new Document(); - d1.add(new Field("f1", "first field", Store.YES, Index.ANALYZED, TermVector.NO)); - d1.add(new Field("f2", "second field", Store.YES, Index.ANALYZED, TermVector.NO)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setTokenized(false); + d1.add(new Field("f1", customType, "first field")); + d1.add(new Field("f2", customType, "second field")); writer.addDocument(d1); if (i == 1) { @@ -50,10 +53,13 @@ } Document d2 = new Document(); - d2.add(new Field("f2", "second field", Store.YES, Index.ANALYZED, TermVector.NO)); - d2.add(new Field("f1", "first field", Store.YES, Index.ANALYZED, TermVector.YES)); - d2.add(new Field("f3", "third field", Store.YES, Index.ANALYZED, TermVector.NO)); - d2.add(new Field("f4", "fourth field", Store.YES, Index.ANALYZED, TermVector.NO)); + FieldType customType2 = new FieldType(TextField.TYPE_UNSTORED); + customType2.setStored(true); + customType2.setStoreTermVectors(true); + d2.add(new TextField("f2", "second field")); + d2.add(new Field("f1", customType2, "first field")); + d2.add(new TextField("f3", "third field")); + d2.add(new TextField("f4", "fourth field")); writer.addDocument(d2); writer.close(); @@ -99,18 +105,23 @@ IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( 
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(NoMergePolicy.COMPOUND_FILES)); Document d1 = new Document(); - d1.add(new Field("f1", "first field", Store.YES, Index.ANALYZED, TermVector.NO)); - d1.add(new Field("f2", "second field", Store.YES, Index.ANALYZED, TermVector.NO)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + d1.add(new Field("f1", customType, "first field")); + d1.add(new Field("f2", customType, "second field")); writer.addDocument(d1); writer.close(); writer = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(NoMergePolicy.COMPOUND_FILES)); Document d2 = new Document(); - d2.add(new Field("f2", "second field", Store.YES, Index.ANALYZED, TermVector.NO)); - d2.add(new Field("f1", "first field", Store.YES, Index.ANALYZED, TermVector.YES)); - d2.add(new Field("f3", "third field", Store.YES, Index.ANALYZED, TermVector.NO)); - d2.add(new Field("f4", "fourth field", Store.YES, Index.ANALYZED, TermVector.NO)); + FieldType customType2 = new FieldType(TextField.TYPE_UNSTORED); + customType2.setStored(true); + customType2.setStoreTermVectors(true); + d2.add(new Field("f2", customType, "second field")); + d2.add(new Field("f1", customType2, "first field")); + d2.add(new Field("f3", customType, "third field")); + d2.add(new Field("f4", customType, "fourth field")); writer.addDocument(d2); writer.close(); @@ -156,6 +167,8 @@ public void testFieldNumberGaps() throws IOException { int numIters = atLeast(13); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); for (int i = 0; i < numIters; i++) { Directory dir = newDirectory(); { @@ -163,10 +176,8 @@ TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy( NoMergePolicy.NO_COMPOUND_FILES)); Document d = new Document(); - d.add(new Field("f1", "d1 first field", Store.YES, Index.ANALYZED, - TermVector.NO)); - d.add(new Field("f2", "d1 second 
field", Store.YES, Index.ANALYZED, - TermVector.NO)); + d.add(new Field("f1", customType, "d1 first field")); + d.add(new Field("f2", customType, "d1 second field")); writer.addDocument(d); writer.close(); SegmentInfos sis = new SegmentInfos(); @@ -185,9 +196,8 @@ random.nextBoolean() ? NoMergePolicy.NO_COMPOUND_FILES : NoMergePolicy.COMPOUND_FILES)); Document d = new Document(); - d.add(new Field("f1", "d2 first field", Store.YES, Index.ANALYZED, - TermVector.NO)); - d.add(new Field("f3", new byte[] { 1, 2, 3 })); + d.add(new Field("f1", customType, "d2 first field")); + d.add(new BinaryField("f3", new byte[] { 1, 2, 3 })); writer.addDocument(d); writer.close(); SegmentInfos sis = new SegmentInfos(); @@ -210,11 +220,9 @@ random.nextBoolean() ? NoMergePolicy.NO_COMPOUND_FILES : NoMergePolicy.COMPOUND_FILES)); Document d = new Document(); - d.add(new Field("f1", "d3 first field", Store.YES, Index.ANALYZED, - TermVector.NO)); - d.add(new Field("f2", "d3 second field", Store.YES, Index.ANALYZED, - TermVector.NO)); - d.add(new Field("f3", new byte[] { 1, 2, 3, 4, 5 })); + d.add(new Field("f1", customType, "d3 first field")); + d.add(new Field("f2", customType, "d3 second field")); + d.add(new BinaryField("f3", new byte[] { 1, 2, 3, 4, 5 })); writer.addDocument(d); writer.close(); SegmentInfos sis = new SegmentInfos(); @@ -303,10 +311,10 @@ for (FieldInfo fi : fis) { Field expected = getField(Integer.parseInt(fi.name)); - assertEquals(expected.isIndexed(), fi.isIndexed); - assertEquals(expected.isTermVectorStored(), fi.storeTermVector); - assertEquals(expected.isStorePositionWithTermVector(), fi.storePositionWithTermVector); - assertEquals(expected.isStoreOffsetWithTermVector(), fi.storeOffsetWithTermVector); + assertEquals(expected.indexed(), fi.isIndexed); + assertEquals(expected.storeTermVectors(), fi.storeTermVector); + assertEquals(expected.storeTermVectorPositions(), fi.storePositionWithTermVector); + assertEquals(expected.storeTermVectorOffsets(), 
fi.storeOffsetWithTermVector); } } @@ -316,23 +324,99 @@ private Field getField(int number) { int mode = number % 16; String fieldName = "" + number; + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + + FieldType customType2 = new FieldType(TextField.TYPE_UNSTORED); + customType2.setStored(true); + customType2.setTokenized(false); + + FieldType customType3 = new FieldType(TextField.TYPE_UNSTORED); + customType3.setTokenized(false); + + FieldType customType4 = new FieldType(TextField.TYPE_UNSTORED); + customType4.setTokenized(false); + customType4.setStored(false); + customType4.setStoreTermVectors(true); + customType4.setStoreTermVectorOffsets(true); + + FieldType customType5 = new FieldType(TextField.TYPE_UNSTORED); + customType5.setStoreTermVectors(true); + customType5.setStoreTermVectorOffsets(true); + + FieldType customType6 = new FieldType(TextField.TYPE_UNSTORED); + customType6.setTokenized(false); + customType6.setStored(true); + customType6.setStoreTermVectors(true); + customType6.setStoreTermVectorOffsets(true); + + FieldType customType7 = new FieldType(TextField.TYPE_UNSTORED); + customType7.setTokenized(false); + customType7.setStoreTermVectors(true); + customType7.setStoreTermVectorOffsets(true); + + FieldType customType8 = new FieldType(TextField.TYPE_UNSTORED); + customType8.setTokenized(false); + customType8.setStored(true); + customType8.setStoreTermVectors(true); + customType8.setStoreTermVectorPositions(true); + + FieldType customType9 = new FieldType(TextField.TYPE_UNSTORED); + customType9.setStoreTermVectors(true); + customType9.setStoreTermVectorPositions(true); + + FieldType customType10 = new FieldType(TextField.TYPE_UNSTORED); + customType10.setTokenized(false); + customType10.setStored(true); + customType10.setStoreTermVectors(true); + customType10.setStoreTermVectorPositions(true); + + FieldType customType11 = new FieldType(TextField.TYPE_UNSTORED); + customType11.setTokenized(false); + 
customType11.setStoreTermVectors(true); + customType11.setStoreTermVectorPositions(true); + + FieldType customType12 = new FieldType(TextField.TYPE_UNSTORED); + customType12.setStored(true); + customType12.setStoreTermVectors(true); + customType12.setStoreTermVectorOffsets(true); + customType12.setStoreTermVectorPositions(true); + + FieldType customType13 = new FieldType(TextField.TYPE_UNSTORED); + customType13.setStoreTermVectors(true); + customType13.setStoreTermVectorOffsets(true); + customType13.setStoreTermVectorPositions(true); + + FieldType customType14 = new FieldType(TextField.TYPE_UNSTORED); + customType14.setStored(true); + customType14.setTokenized(false); + customType14.setStoreTermVectors(true); + customType14.setStoreTermVectorOffsets(true); + customType14.setStoreTermVectorPositions(true); + + FieldType customType15 = new FieldType(TextField.TYPE_UNSTORED); + customType15.setTokenized(false); + customType15.setStoreTermVectors(true); + customType15.setStoreTermVectorOffsets(true); + customType15.setStoreTermVectorPositions(true); + switch (mode) { - case 0: return new Field(fieldName, "some text", Store.YES, Index.ANALYZED, TermVector.NO); - case 1: return new Field(fieldName, "some text", Store.NO, Index.ANALYZED, TermVector.NO); - case 2: return new Field(fieldName, "some text", Store.YES, Index.NOT_ANALYZED, TermVector.NO); - case 3: return new Field(fieldName, "some text", Store.NO, Index.NOT_ANALYZED, TermVector.NO); - case 4: return new Field(fieldName, "some text", Store.YES, Index.ANALYZED, TermVector.WITH_OFFSETS); - case 5: return new Field(fieldName, "some text", Store.NO, Index.ANALYZED, TermVector.WITH_OFFSETS); - case 6: return new Field(fieldName, "some text", Store.YES, Index.NOT_ANALYZED, TermVector.WITH_OFFSETS); - case 7: return new Field(fieldName, "some text", Store.NO, Index.NOT_ANALYZED, TermVector.WITH_OFFSETS); - case 8: return new Field(fieldName, "some text", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS); - case 9: 
return new Field(fieldName, "some text", Store.NO, Index.ANALYZED, TermVector.WITH_POSITIONS); - case 10: return new Field(fieldName, "some text", Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS); - case 11: return new Field(fieldName, "some text", Store.NO, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS); - case 12: return new Field(fieldName, "some text", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS); - case 13: return new Field(fieldName, "some text", Store.NO, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS); - case 14: return new Field(fieldName, "some text", Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS); - case 15: return new Field(fieldName, "some text", Store.NO, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS); + case 0: return new Field(fieldName, customType, "some text"); + case 1: return new TextField(fieldName, "some text"); + case 2: return new Field(fieldName, customType2, "some text"); + case 3: return new Field(fieldName, customType3, "some text"); + case 4: return new Field(fieldName, customType4, "some text"); + case 5: return new Field(fieldName, customType5, "some text"); + case 6: return new Field(fieldName, customType6, "some text"); + case 7: return new Field(fieldName, customType7, "some text"); + case 8: return new Field(fieldName, customType8, "some text"); + case 9: return new Field(fieldName, customType9, "some text"); + case 10: return new Field(fieldName, customType10, "some text"); + case 11: return new Field(fieldName, customType11, "some text"); + case 12: return new Field(fieldName, customType12, "some text"); + case 13: return new Field(fieldName, customType13, "some text"); + case 14: return new Field(fieldName, customType14, "some text"); + case 15: return new Field(fieldName, customType15, "some text"); default: return null; } } Index: lucene/src/test/org/apache/lucene/index/TestCrash.java =================================================================== --- 
lucene/src/test/org/apache/lucene/index/TestCrash.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/TestCrash.java (working copy) @@ -24,8 +24,9 @@ import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.store.NoLockFactory; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; public class TestCrash extends LuceneTestCase { @@ -44,8 +45,10 @@ } Document doc = new Document(); - doc.add(newField("content", "aaa", Field.Store.YES, Field.Index.ANALYZED)); - doc.add(newField("id", "0", Field.Store.YES, Field.Index.ANALYZED)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(false); + doc.add(newField("content", "aaa", customType)); + doc.add(newField("id", "0", customType)); for(int i=0;i<157;i++) writer.addDocument(doc); Index: lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java (working copy) @@ -24,8 +24,9 @@ import java.util.Collection; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; @@ -841,7 +842,7 @@ private void addDoc(IndexWriter writer) throws IOException { Document doc = new Document(); - doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED)); + 
doc.add(newField("content", "aaa", TextField.TYPE_UNSTORED)); writer.addDocument(doc); } } Index: lucene/src/test/org/apache/lucene/index/TestDirectoryReader.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestDirectoryReader.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/TestDirectoryReader.java (working copy) @@ -20,8 +20,10 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; @@ -79,12 +81,12 @@ sis.read(dir); IndexReader reader = openReader(); assertTrue(reader != null); - Document newDoc1 = reader.document(0); + org.apache.lucene.document.Document newDoc1 = reader.document(0); assertTrue(newDoc1 != null); - assertTrue(DocHelper.numFields(newDoc1) == DocHelper.numFields(doc1) - DocHelper.unstored.size()); - Document newDoc2 = reader.document(1); + assertTrue(DocHelper.numFields2(newDoc1) == DocHelper.numFields(doc1) - DocHelper.unstored.size()); + org.apache.lucene.document.Document newDoc2 = reader.document(1); assertTrue(newDoc2 != null); - assertTrue(DocHelper.numFields(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size()); + assertTrue(DocHelper.numFields2(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size()); TermFreqVector vector = reader.getTermFreqVector(0, DocHelper.TEXT_FIELD_2_KEY); assertTrue(vector != null); TestSegmentReader.checkNorms(reader); @@ -202,7 +204,9 @@ new MockAnalyzer(random)).setOpenMode( create ? 
OpenMode.CREATE : OpenMode.APPEND)); Document doc = new Document(); - doc.add(newField("body", s, Field.Store.YES, Field.Index.ANALYZED)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(false); + doc.add(newField("body", s, customType)); iw.addDocument(doc); iw.close(); } Index: lucene/src/test/org/apache/lucene/index/TestDoc.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestDoc.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/TestDoc.java (working copy) @@ -30,8 +30,9 @@ import junit.textui.TestRunner; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.store.Directory; @@ -184,7 +185,7 @@ { File file = new File(workDir, fileName); Document doc = new Document(); - doc.add(new Field("contents", new FileReader(file))); + doc.add(new TextField("contents", new FileReader(file))); writer.addDocument(doc); writer.commit(); return writer.newestSegment(); Index: lucene/src/test/org/apache/lucene/index/TestDocTermOrds.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestDocTermOrds.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/TestDocTermOrds.java (working copy) @@ -25,9 +25,11 @@ import java.util.Set; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.NumericField; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import 
org.apache.lucene.document2.NumericField; +import org.apache.lucene.document2.StringField; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.DocTermOrds.TermOrdsIterator; import org.apache.lucene.index.codecs.BlockTermsReader; import org.apache.lucene.index.codecs.BlockTermsWriter; @@ -62,7 +64,7 @@ Directory dir = newDirectory(); final RandomIndexWriter w = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); Document doc = new Document(); - Field field = newField("field", "", Field.Index.ANALYZED); + Field field = newField("field", "", TextField.TYPE_UNSTORED); doc.add(field); field.setValue("a b c"); w.addDocument(doc); @@ -264,7 +266,7 @@ } for(int ord : ordsForDocSet) { ordsForDoc[upto++] = ord; - Field field = newField("field", termsArray[ord].utf8ToString(), Field.Index.NOT_ANALYZED_NO_NORMS); + Field field = newField("field", termsArray[ord].utf8ToString(), StringField.TYPE_UNSTORED); if (VERBOSE) { System.out.println(" f=" + termsArray[ord].utf8ToString()); } @@ -367,7 +369,7 @@ } for(int ord : ordsForDocSet) { ordsForDoc[upto++] = ord; - Field field = newField("field", termsArray[ord].utf8ToString(), Field.Index.NOT_ANALYZED_NO_NORMS); + Field field = newField("field", termsArray[ord].utf8ToString(), StringField.TYPE_UNSTORED); if (VERBOSE) { System.out.println(" f=" + termsArray[ord].utf8ToString()); } Index: lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java (working copy) @@ -21,8 +21,9 @@ import java.util.Arrays; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import 
org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.IndexReader.ReaderContext; import org.apache.lucene.store.Directory; @@ -49,9 +50,11 @@ newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); for (int i = 0; i < 39; i++) { Document doc = new Document(); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setOmitNorms(true); doc.add(newField(fieldName, "1 2 3 4 5 6 7 8 9 10 " + "1 2 3 4 5 6 7 8 9 10 " + "1 2 3 4 5 6 7 8 9 10 " - + "1 2 3 4 5 6 7 8 9 10", Field.Store.NO, Field.Index.ANALYZED_NO_NORMS)); + + "1 2 3 4 5 6 7 8 9 10", customType)); writer.addDocument(doc); } IndexReader reader = writer.getReader(); @@ -117,6 +120,8 @@ int max = 1051; int term = random.nextInt(max); Integer[][] positionsInDoc = new Integer[numDocs][]; + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setOmitNorms(true); for (int i = 0; i < numDocs; i++) { Document doc = new Document(); ArrayList positions = new ArrayList(); @@ -133,8 +138,7 @@ builder.append(term); positions.add(num); } - doc.add(newField(fieldName, builder.toString(), Field.Store.NO, - Field.Index.ANALYZED_NO_NORMS)); + doc.add(newField(fieldName, builder.toString(), customType)); positionsInDoc[i] = positions.toArray(new Integer[0]); writer.addDocument(doc); } @@ -199,6 +203,8 @@ int max = 15678; int term = random.nextInt(max); int[] freqInDoc = new int[numDocs]; + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setOmitNorms(true); for (int i = 0; i < numDocs; i++) { Document doc = new Document(); StringBuilder builder = new StringBuilder(); @@ -209,8 +215,7 @@ freqInDoc[i]++; } } - doc.add(newField(fieldName, builder.toString(), Field.Store.NO, - Field.Index.ANALYZED_NO_NORMS)); + doc.add(newField(fieldName, builder.toString(), customType)); writer.addDocument(doc); } @@ -275,6 +280,8 @@ 
RandomIndexWriter writer = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); int howMany = 1000; + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setOmitNorms(true); for (int i = 0; i < 39; i++) { Document doc = new Document(); StringBuilder builder = new StringBuilder(); @@ -285,8 +292,7 @@ builder.append("odd "); } } - doc.add(newField(fieldName, builder.toString(), Field.Store.NO, - Field.Index.ANALYZED_NO_NORMS)); + doc.add(newField(fieldName, builder.toString(), customType)); writer.addDocument(doc); } Index: lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java (working copy) @@ -28,12 +28,11 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.PayloadAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.Field.Index; -import org.apache.lucene.document.Field.Store; -import org.apache.lucene.document.Field.TermVector; import org.apache.lucene.document.Fieldable; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.store.Directory; import org.apache.lucene.util.AttributeSource; import org.apache.lucene.util.BytesRef; @@ -70,7 +69,7 @@ //After adding the document, we should be able to read it back in SegmentReader reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR); assertTrue(reader != null); - Document doc = reader.document(0); + 
org.apache.lucene.document.Document doc = reader.document(0); assertTrue(doc != null); //System.out.println("Document: " + doc); @@ -122,8 +121,10 @@ IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)); Document doc = new Document(); - doc.add(newField("repeated", "repeated one", Field.Store.YES, Field.Index.ANALYZED)); - doc.add(newField("repeated", "repeated two", Field.Store.YES, Field.Index.ANALYZED)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + doc.add(newField("repeated", "repeated one", customType)); + doc.add(newField("repeated", "repeated two", customType)); writer.addDocument(doc); writer.commit(); @@ -187,7 +188,9 @@ IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)); Document doc = new Document(); - doc.add(newField("f1", "a 5 a a", Field.Store.YES, Field.Index.ANALYZED)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + doc.add(newField("f1", "a 5 a a", customType)); writer.addDocument(doc); writer.commit(); @@ -213,8 +216,8 @@ IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); Document doc = new Document(); - - doc.add(new Field("preanalyzed", new TokenStream() { + + doc.add(new TextField("preanalyzed", new TokenStream() { private String[] tokens = new String[] {"term1", "term2", "term3", "term2"}; private int index = 0; @@ -231,7 +234,7 @@ } } - }, TermVector.NO)); + })); writer.addDocument(doc); writer.commit(); @@ -264,11 +267,20 @@ public void testMixedTermVectorSettingsSameField() throws Exception { Document doc = new Document(); // f1 first without tv then with tv - doc.add(newField("f1", "v1", Store.YES, Index.NOT_ANALYZED, TermVector.NO)); - doc.add(newField("f1", "v2", Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS)); + FieldType customType = new 
FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setTokenized(false); + doc.add(newField("f1", "v1", customType)); + FieldType customType2 = new FieldType(TextField.TYPE_UNSTORED); + customType2.setStored(true); + customType2.setTokenized(false); + customType2.setStoreTermVectors(true); + customType2.setStoreTermVectorOffsets(true); + customType2.setStoreTermVectorPositions(true); + doc.add(newField("f1", "v2", customType2)); // f2 first with tv then without tv - doc.add(newField("f2", "v1", Store.YES, Index.NOT_ANALYZED, TermVector.WITH_POSITIONS_OFFSETS)); - doc.add(newField("f2", "v2", Store.YES, Index.NOT_ANALYZED, TermVector.NO)); + doc.add(newField("f2", "v1", customType2)); + doc.add(newField("f2", "v2", customType)); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); @@ -297,13 +309,19 @@ public void testLUCENE_1590() throws Exception { Document doc = new Document(); // f1 has no norms - doc.add(newField("f1", "v1", Store.NO, Index.ANALYZED_NO_NORMS)); - doc.add(newField("f1", "v2", Store.YES, Index.NO)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setOmitNorms(true); + FieldType customType2 = new FieldType(); + customType2.setStored(true); + doc.add(newField("f1", "v1", customType)); + doc.add(newField("f1", "v2", customType2)); // f2 has no TF - Field f = newField("f2", "v1", Store.NO, Index.ANALYZED); - f.setOmitTermFreqAndPositions(true); + FieldType customType3 = new FieldType(TextField.TYPE_UNSTORED); + customType3.setStored(true); + customType3.setOmitTermFreqAndPositions(true); + Field f = newField("f2", "v1", customType3); doc.add(f); - doc.add(newField("f2", "v2", Store.YES, Index.NO)); + doc.add(newField("f2", "v2", customType2)); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); Index: lucene/src/test/org/apache/lucene/index/TestFieldInfos.java 
=================================================================== --- lucene/src/test/org/apache/lucene/index/TestFieldInfos.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/TestFieldInfos.java (working copy) @@ -19,7 +19,7 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util._TestUtil; -import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IndexOutput; Index: lucene/src/test/org/apache/lucene/index/TestFieldsReader.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestFieldsReader.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/TestFieldsReader.java (working copy) @@ -44,7 +44,7 @@ public class TestFieldsReader extends LuceneTestCase { private static Directory dir; - private static Document testDoc = new Document(); + private static org.apache.lucene.document2.Document testDoc = new org.apache.lucene.document2.Document(); private static FieldInfos fieldInfos = null; private final static String TEST_SEGMENT_NAME = "_0"; Index: lucene/src/test/org/apache/lucene/index/TestFilterIndexReader.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestFilterIndexReader.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/TestFilterIndexReader.java (working copy) @@ -22,8 +22,9 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Bits; @@ -128,16 +129,18 @@ Directory directory = newDirectory(); IndexWriter writer = new 
IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); Document d1 = new Document(); - d1.add(newField("default","one two", Field.Store.YES, Field.Index.ANALYZED)); + d1.add(newField("default","one two", customType)); writer.addDocument(d1); Document d2 = new Document(); - d2.add(newField("default","one three", Field.Store.YES, Field.Index.ANALYZED)); + d2.add(newField("default","one three", customType)); writer.addDocument(d2); Document d3 = new Document(); - d3.add(newField("default","two four", Field.Store.YES, Field.Index.ANALYZED)); + d3.add(newField("default","two four", customType)); writer.addDocument(d3); writer.close(); Index: lucene/src/test/org/apache/lucene/index/TestFlex.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestFlex.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/TestFlex.java (working copy) @@ -19,7 +19,7 @@ import org.apache.lucene.store.*; import org.apache.lucene.analysis.*; -import org.apache.lucene.document.*; +import org.apache.lucene.document2.*; import org.apache.lucene.util.*; public class TestFlex extends LuceneTestCase { @@ -39,10 +39,10 @@ for(int iter=0;iter<2;iter++) { if (iter == 0) { Document doc = new Document(); - doc.add(newField("field1", "this is field1", Field.Store.NO, Field.Index.ANALYZED)); - doc.add(newField("field2", "this is field2", Field.Store.NO, Field.Index.ANALYZED)); - doc.add(newField("field3", "aaa", Field.Store.NO, Field.Index.ANALYZED)); - doc.add(newField("field4", "bbb", Field.Store.NO, Field.Index.ANALYZED)); + doc.add(newField("field1", "this is field1", TextField.TYPE_UNSTORED)); + doc.add(newField("field2", "this is field2", TextField.TYPE_UNSTORED)); + doc.add(newField("field3", "aaa", TextField.TYPE_UNSTORED)); + doc.add(newField("field4", "bbb", TextField.TYPE_UNSTORED)); 
for(int i=0;i files = writer.getIndexFileNames(); @@ -115,6 +112,8 @@ public void testIndexReaderCommit() throws IOException { int num = atLeast(3); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); for (int i = 0; i < num; i++) { Directory dir = newDirectory(); { @@ -122,17 +121,14 @@ new MockAnalyzer(random)); IndexWriter writer = new IndexWriter(dir, config); Document d = new Document(); - d.add(new Field("f1", "d1 first field", Store.YES, Index.ANALYZED, - TermVector.NO)); - d.add(new Field("f2", "d1 second field", Store.YES, Index.ANALYZED, - TermVector.NO)); + d.add(new Field("f1", customType, "d1 first field")); + d.add(new Field("f2", customType, "d1 second field")); writer.addDocument(d); writer.commit(); assertFNXFiles(dir, "1.fnx"); d = new Document(); - d.add(new Field("f1", "d2 first field", Store.YES, Index.ANALYZED, - TermVector.NO)); - d.add(new Field("f3", new byte[] { 1, 2, 3 })); + d.add(new Field("f1", customType, "d2 first field")); + d.add(new BinaryField("f3", new byte[] { 1, 2, 3 })); writer.addDocument(d); writer.commit(); assertFNXFiles(dir, "2.fnx"); @@ -159,6 +155,8 @@ public void testGlobalFieldNumberFilesAcrossCommits() throws IOException { int num = atLeast(3); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); for (int i = 0; i < num; i++) { Directory dir = newDirectory(); { @@ -166,17 +164,14 @@ TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy( new KeepAllDeletionPolicy())); Document d = new Document(); - d.add(new Field("f1", "d1 first field", Store.YES, Index.ANALYZED, - TermVector.NO)); - d.add(new Field("f2", "d1 second field", Store.YES, Index.ANALYZED, - TermVector.NO)); + d.add(new Field("f1", customType, "d1 first field")); + d.add(new Field("f2", customType, "d1 second field")); writer.addDocument(d); writer.commit(); assertFNXFiles(dir, "1.fnx"); d = new Document(); - d.add(new Field("f1", "d2 first field", 
Store.YES, Index.ANALYZED, - TermVector.NO)); - d.add(new Field("f3", new byte[] { 1, 2, 3 })); + d.add(new Field("f1", customType, "d2 first field")); + d.add(new BinaryField("f3", new byte[] { 1, 2, 3 })); writer.addDocument(d); writer.commit(); writer.commit(); @@ -190,11 +185,9 @@ IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); Document d = new Document(); - d.add(new Field("f1", "d3 first field", Store.YES, Index.ANALYZED, - TermVector.NO)); - d.add(new Field("f2", "d3 second field", Store.YES, Index.ANALYZED, - TermVector.NO)); - d.add(new Field("f3", new byte[] { 1, 2, 3, 4, 5 })); + d.add(new Field("f1", customType, "d3 first field")); + d.add(new Field("f2", customType, "d3 second field")); + d.add(new BinaryField("f3", new byte[] { 1, 2, 3, 4, 5 })); writer.addDocument(d); writer.close(); assertFNXFiles(dir, "2.fnx"); @@ -211,23 +204,22 @@ public void testGlobalFieldNumberOnOldCommit() throws IOException { int num = atLeast(3); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); for (int i = 0; i < num; i++) { Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy( new KeepAllDeletionPolicy())); Document d = new Document(); - d.add(new Field("f1", "d1 first field", Store.YES, Index.ANALYZED, - TermVector.NO)); - d.add(new Field("f2", "d1 second field", Store.YES, Index.ANALYZED, - TermVector.NO)); + d.add(new Field("f1", customType, "d1 first field")); + d.add(new Field("f2", customType, "d1 second field")); writer.addDocument(d); writer.commit(); assertFNXFiles(dir, "1.fnx"); d = new Document(); - d.add(new Field("f1", "d2 first field", Store.YES, Index.ANALYZED, - TermVector.NO)); - d.add(new Field("f3", new byte[] { 1, 2, 3 })); + d.add(new Field("f1", customType, "d2 first field")); + d.add(new BinaryField("f3", new byte[] { 1, 2, 3 
})); writer.addDocument(d); assertFNXFiles(dir, "1.fnx"); writer.close(); @@ -240,9 +232,8 @@ new KeepAllDeletionPolicy()).setIndexCommit(listCommits.get(0))); d = new Document(); - d.add(new Field("f1", "d2 first field", Store.YES, Index.ANALYZED, - TermVector.NO)); - d.add(new Field("f3", new byte[] { 1, 2, 3 })); + d.add(new Field("f1", customType, "d2 first field")); + d.add(new BinaryField("f3", new byte[] { 1, 2, 3 })); writer.addDocument(d); writer.commit(); // now we have 3 files since f3 is not present in the first commit @@ -271,9 +262,13 @@ Document doc = new Document(); final int numFields = 1 + random.nextInt(fieldNames.length); for (int j = 0; j < numFields; j++) { + FieldType customType = new FieldType(); + customType.setIndexed(true); + customType.setTokenized(random.nextBoolean()); + customType.setOmitNorms(random.nextBoolean()); doc.add(newField(fieldNames[random.nextInt(fieldNames.length)], _TestUtil.randomRealisticUnicodeString(random), - Index.toIndex(true, random.nextBoolean(), random.nextBoolean()))); + customType)); } writer.addDocument(doc); @@ -322,9 +317,13 @@ TEST_VERSION_CURRENT, new MockAnalyzer(random))); Document doc = new Document(); for (String string : fieldNames) { + FieldType customType = new FieldType(); + customType.setIndexed(true); + customType.setTokenized(random.nextBoolean()); + customType.setOmitNorms(random.nextBoolean()); doc.add(newField(string, _TestUtil.randomRealisticUnicodeString(random), - Index.toIndex(true, random.nextBoolean(), random.nextBoolean()))); + customType)); } writer.addDocument(doc); @@ -419,8 +418,12 @@ String name = copySortedMap.get(nextField); assertNotNull(name); + FieldType customType = new FieldType(); + customType.setIndexed(true); + customType.setTokenized(random.nextBoolean()); + customType.setOmitNorms(random.nextBoolean()); doc.add(newField(name, _TestUtil.randomRealisticUnicodeString(random), - Index.toIndex(true, random.nextBoolean(), random.nextBoolean()))); + customType)); 
writer.addDocument(doc); if (random.nextInt(10) == 0) { writer.commit(); @@ -480,8 +483,9 @@ } Document d = new Document(); - d.add(new Field("f1", "d1 first field", Store.YES, Index.ANALYZED, - TermVector.NO)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + d.add(new Field("f1", customType, "d1 first field")); writer.addDocument(d); writer.prepareCommit(); // the fnx file should still be under control of the SIS Index: lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java (working copy) @@ -25,8 +25,9 @@ import org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import java.io.*; @@ -229,8 +230,11 @@ private void addDoc(IndexWriter writer, int id) throws IOException { Document doc = new Document(); - doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED)); - doc.add(newField("id", Integer.toString(id), Field.Store.YES, Field.Index.NOT_ANALYZED)); + doc.add(newField("content", "aaa", TextField.TYPE_UNSTORED)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setIndexed(true); + customType.setTokenized(false); + doc.add(newField("id", Integer.toString(id), customType)); writer.addDocument(doc); } } Index: lucene/src/test/org/apache/lucene/index/TestIndexReader.java =================================================================== --- 
lucene/src/test/org/apache/lucene/index/TestIndexReader.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/TestIndexReader.java (working copy) @@ -31,8 +31,12 @@ import java.util.SortedSet; import org.junit.Assume; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.BinaryField; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.StringField; +import org.apache.lucene.document2.TextField; import org.apache.lucene.document.FieldSelector; import org.apache.lucene.document.Fieldable; import org.apache.lucene.document.SetBasedFieldSelector; @@ -154,10 +158,20 @@ ); Document doc = new Document(); - doc.add(new Field("keyword","test1", Field.Store.YES, Field.Index.NOT_ANALYZED)); - doc.add(new Field("text","test1", Field.Store.YES, Field.Index.ANALYZED)); - doc.add(new Field("unindexed","test1", Field.Store.YES, Field.Index.NO)); - doc.add(new Field("unstored","test1", Field.Store.NO, Field.Index.ANALYZED)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setTokenized(false); + + FieldType customType2 = new FieldType(TextField.TYPE_UNSTORED); + customType2.setStored(true); + + FieldType customType3 = new FieldType(); + customType3.setStored(true); + + doc.add(new Field("keyword",customType,"test1")); + doc.add(new Field("text",customType2,"test1")); + doc.add(new Field("unindexed",customType3,"test1")); + doc.add(new TextField("unstored","test1")); writer.addDocument(doc); writer.close(); @@ -180,29 +194,49 @@ int mergeFactor = ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMergeFactor(); for (int i = 0; i < 5*mergeFactor; i++) { doc = new Document(); - doc.add(new Field("keyword","test1", Field.Store.YES, Field.Index.NOT_ANALYZED)); - doc.add(new 
Field("text","test1", Field.Store.YES, Field.Index.ANALYZED)); - doc.add(new Field("unindexed","test1", Field.Store.YES, Field.Index.NO)); - doc.add(new Field("unstored","test1", Field.Store.NO, Field.Index.ANALYZED)); + doc.add(new Field("keyword",customType,"test1")); + doc.add(new Field("text",customType2, "test1")); + doc.add(new Field("unindexed",customType3,"test1")); + doc.add(new TextField("unstored","test1")); writer.addDocument(doc); } // new fields are in some different segments (we hope) for (int i = 0; i < 5*mergeFactor; i++) { doc = new Document(); - doc.add(new Field("keyword2","test1", Field.Store.YES, Field.Index.NOT_ANALYZED)); - doc.add(new Field("text2","test1", Field.Store.YES, Field.Index.ANALYZED)); - doc.add(new Field("unindexed2","test1", Field.Store.YES, Field.Index.NO)); - doc.add(new Field("unstored2","test1", Field.Store.NO, Field.Index.ANALYZED)); + doc.add(new Field("keyword2",customType,"test1")); + doc.add(new Field("text2",customType2, "test1")); + doc.add(new Field("unindexed2",customType3,"test1")); + doc.add(new TextField("unstored2","test1")); writer.addDocument(doc); } // new termvector fields + + FieldType customType4 = new FieldType(TextField.TYPE_UNSTORED); + customType4.setStored(true); + FieldType customType5 = new FieldType(TextField.TYPE_UNSTORED); + customType5.setStored(true); + customType5.setStoreTermVectors(true); + FieldType customType6 = new FieldType(TextField.TYPE_UNSTORED); + customType6.setStored(true); + customType6.setStoreTermVectors(true); + customType6.setStoreTermVectorOffsets(true); + FieldType customType7 = new FieldType(TextField.TYPE_UNSTORED); + customType7.setStored(true); + customType7.setStoreTermVectors(true); + customType7.setStoreTermVectorPositions(true); + FieldType customType8 = new FieldType(TextField.TYPE_UNSTORED); + customType8.setStored(true); + customType8.setStoreTermVectors(true); + customType8.setStoreTermVectorOffsets(true); + customType8.setStoreTermVectorPositions(true); + for 
(int i = 0; i < 5*mergeFactor; i++) { doc = new Document(); - doc.add(new Field("tvnot","tvnot", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO)); - doc.add(new Field("termvector","termvector", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES)); - doc.add(new Field("tvoffset","tvoffset", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_OFFSETS)); - doc.add(new Field("tvposition","tvposition", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS)); - doc.add(newField("tvpositionoffset","tvpositionoffset", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); + doc.add(new Field("tvnot",customType4,"tvnot")); + doc.add(new Field("termvector",customType5,"termvector")); + doc.add(new Field("tvoffset",customType6,"tvoffset")); + doc.add(new Field("tvposition",customType7,"tvposition")); + doc.add(new Field("tvpositionoffset",customType8, "tvpositionoffset")); writer.addDocument(doc); } @@ -277,14 +311,32 @@ // want to get some more segments here // new termvector fields int mergeFactor = ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMergeFactor(); + FieldType customType4 = new FieldType(TextField.TYPE_UNSTORED); + customType4.setStored(true); + FieldType customType5 = new FieldType(TextField.TYPE_UNSTORED); + customType5.setStored(true); + customType5.setStoreTermVectors(true); + FieldType customType6 = new FieldType(TextField.TYPE_UNSTORED); + customType6.setStored(true); + customType6.setStoreTermVectors(true); + customType6.setStoreTermVectorOffsets(true); + FieldType customType7 = new FieldType(TextField.TYPE_UNSTORED); + customType7.setStored(true); + customType7.setStoreTermVectors(true); + customType7.setStoreTermVectorPositions(true); + FieldType customType8 = new FieldType(TextField.TYPE_UNSTORED); + customType8.setStored(true); + customType8.setStoreTermVectors(true); + customType8.setStoreTermVectorOffsets(true); + customType8.setStoreTermVectorPositions(true); for 
(int i = 0; i < 5 * mergeFactor; i++) { Document doc = new Document(); - doc.add(new Field("tvnot","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO)); - doc.add(new Field("termvector","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES)); - doc.add(new Field("tvoffset","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_OFFSETS)); - doc.add(new Field("tvposition","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS)); - doc.add(new Field("tvpositionoffset","one two two three three three", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); - + doc.add(new Field("tvnot",customType4,"one two two three three three")); + doc.add(new Field("termvector",customType5,"one two two three three three")); + doc.add(new Field("tvoffset",customType6,"one two two three three three")); + doc.add(new Field("tvposition",customType7,"one two two three three three")); + doc.add(new Field("tvpositionoffset",customType8, "one two two three three three")); + writer.addDocument(doc); } writer.close(); @@ -338,16 +390,16 @@ writer.close(); writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND).setMergePolicy(newLogMergePolicy())); Document doc = new Document(); - doc.add(new Field("bin1", bin)); - doc.add(new Field("junk", "junk text", Field.Store.NO, Field.Index.ANALYZED)); + doc.add(new BinaryField("bin1", bin)); + doc.add(new TextField("junk", "junk text")); writer.addDocument(doc); writer.close(); IndexReader reader = IndexReader.open(dir, false); - doc = reader.document(reader.maxDoc() - 1); - Field[] fields = doc.getFields("bin1"); + org.apache.lucene.document.Document doc2 = reader.document(reader.maxDoc() - 1); + org.apache.lucene.document.Field[] fields = doc2.getFields("bin1"); assertNotNull(fields); 
assertEquals(1, fields.length); - Field b1 = fields[0]; + org.apache.lucene.document.Field b1 = fields[0]; assertTrue(b1.isBinary()); BytesRef bytesRef = b1.binaryValue(null); assertEquals(bin.length, bytesRef.length); @@ -357,8 +409,8 @@ Set lazyFields = new HashSet(); lazyFields.add("bin1"); FieldSelector sel = new SetBasedFieldSelector(new HashSet(), lazyFields); - doc = reader.document(reader.maxDoc() - 1, sel); - Fieldable[] fieldables = doc.getFieldables("bin1"); + doc2 = reader.document(reader.maxDoc() - 1, sel); + Fieldable[] fieldables = doc2.getFieldables("bin1"); assertNotNull(fieldables); assertEquals(1, fieldables.length); Fieldable fb1 = fieldables[0]; @@ -377,8 +429,8 @@ writer.optimize(); writer.close(); reader = IndexReader.open(dir, false); - doc = reader.document(reader.maxDoc() - 1); - fields = doc.getFields("bin1"); + doc2 = reader.document(reader.maxDoc() - 1); + fields = doc2.getFields("bin1"); assertNotNull(fields); assertEquals(1, fields.length); b1 = fields[0]; @@ -778,38 +830,76 @@ static void addDocumentWithFields(IndexWriter writer) throws IOException { Document doc = new Document(); - doc.add(newField("keyword","test1", Field.Store.YES, Field.Index.NOT_ANALYZED)); - doc.add(newField("text","test1", Field.Store.YES, Field.Index.ANALYZED)); - doc.add(newField("unindexed","test1", Field.Store.YES, Field.Index.NO)); - doc.add(newField("unstored","test1", Field.Store.NO, Field.Index.ANALYZED)); + + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setTokenized(false); + + FieldType customType2 = new FieldType(TextField.TYPE_UNSTORED); + customType2.setStored(true); + + FieldType customType3 = new FieldType(); + customType3.setStored(true); + doc.add(newField("keyword", "test1", customType)); + doc.add(newField("text", "test1", customType2)); + doc.add(newField("unindexed", "test1", customType3)); + doc.add(new TextField("unstored","test1")); writer.addDocument(doc); } static void 
addDocumentWithDifferentFields(IndexWriter writer) throws IOException { - Document doc = new Document(); - doc.add(newField("keyword2","test1", Field.Store.YES, Field.Index.NOT_ANALYZED)); - doc.add(newField("text2","test1", Field.Store.YES, Field.Index.ANALYZED)); - doc.add(newField("unindexed2","test1", Field.Store.YES, Field.Index.NO)); - doc.add(newField("unstored2","test1", Field.Store.NO, Field.Index.ANALYZED)); - writer.addDocument(doc); + Document doc = new Document(); + + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setTokenized(false); + + FieldType customType2 = new FieldType(TextField.TYPE_UNSTORED); + customType2.setStored(true); + + FieldType customType3 = new FieldType(); + customType3.setStored(true); + doc.add(newField("keyword2", "test1", customType)); + doc.add(newField("text2", "test1", customType2)); + doc.add(newField("unindexed2", "test1", customType3)); + doc.add(new TextField("unstored2","test1")); + writer.addDocument(doc); } static void addDocumentWithTermVectorFields(IndexWriter writer) throws IOException { Document doc = new Document(); - doc.add(newField("tvnot","tvnot", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.NO)); - doc.add(newField("termvector","termvector", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.YES)); - doc.add(newField("tvoffset","tvoffset", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_OFFSETS)); - doc.add(newField("tvposition","tvposition", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS)); - doc.add(newField("tvpositionoffset","tvpositionoffset", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); + FieldType customType4 = new FieldType(TextField.TYPE_UNSTORED); + customType4.setStored(true); + FieldType customType5 = new FieldType(TextField.TYPE_UNSTORED); + customType5.setStored(true); + customType5.setStoreTermVectors(true); + FieldType customType6 = new 
FieldType(TextField.TYPE_UNSTORED); + customType6.setStored(true); + customType6.setStoreTermVectors(true); + customType6.setStoreTermVectorOffsets(true); + FieldType customType7 = new FieldType(TextField.TYPE_UNSTORED); + customType7.setStored(true); + customType7.setStoreTermVectors(true); + customType7.setStoreTermVectorPositions(true); + FieldType customType8 = new FieldType(TextField.TYPE_UNSTORED); + customType8.setStored(true); + customType8.setStoreTermVectors(true); + customType8.setStoreTermVectorOffsets(true); + customType8.setStoreTermVectorPositions(true); + doc.add(newField("tvnot","tvnot",customType4)); + doc.add(newField("termvector","termvector",customType5)); + doc.add(newField("tvoffset","tvoffset", customType6)); + doc.add(newField("tvposition","tvposition", customType7)); + doc.add(newField("tvpositionoffset","tvpositionoffset", customType8)); writer.addDocument(doc); } static void addDoc(IndexWriter writer, String value) throws IOException { Document doc = new Document(); - doc.add(newField("content", value, Field.Store.NO, Field.Index.ANALYZED)); + doc.add(newField("content", value, TextField.TYPE_UNSTORED)); writer.addDocument(doc); } @@ -860,16 +950,16 @@ // check stored fields for (int i = 0; i < index1.maxDoc(); i++) { if (delDocs1 == null || !delDocs1.get(i)) { - Document doc1 = index1.document(i); - Document doc2 = index2.document(i); + org.apache.lucene.document.Document doc1 = index1.document(i); + org.apache.lucene.document.Document doc2 = index2.document(i); List fieldable1 = doc1.getFields(); List fieldable2 = doc2.getFields(); assertEquals("Different numbers of fields for doc " + i + ".", fieldable1.size(), fieldable2.size()); Iterator itField1 = fieldable1.iterator(); Iterator itField2 = fieldable2.iterator(); while (itField1.hasNext()) { - Field curField1 = (Field) itField1.next(); - Field curField2 = (Field) itField2.next(); + org.apache.lucene.document.Field curField1 = (org.apache.lucene.document.Field) itField1.next(); + 
org.apache.lucene.document.Field curField2 = (org.apache.lucene.document.Field) itField2.next(); assertEquals("Different fields names for doc " + i + ".", curField1.name(), curField2.name()); assertEquals("Different field values for doc " + i + ".", curField1.stringValue(), curField2.stringValue()); } @@ -1047,7 +1137,12 @@ static Document createDocument(String id) { Document doc = new Document(); - doc.add(newField("id", id, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setTokenized(false); + customType.setOmitNorms(true); + + doc.add(newField("id", id, customType)); return doc; } @@ -1097,7 +1192,7 @@ Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); Document doc = new Document(); - doc.add(newField("number", "17", Field.Store.NO, Field.Index.NOT_ANALYZED)); + doc.add(newField("number", "17", StringField.TYPE_UNSTORED)); writer.addDocument(doc); writer.close(); @@ -1132,7 +1227,7 @@ setMergePolicy(newLogMergePolicy(10)) ); Document doc = new Document(); - doc.add(newField("number", "17", Field.Store.NO, Field.Index.NOT_ANALYZED)); + doc.add(newField("number", "17", StringField.TYPE_UNSTORED)); writer.addDocument(doc); writer.commit(); @@ -1164,8 +1259,8 @@ Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setCodecProvider(_TestUtil.alwaysCodec("Standard"))); Document doc = new Document(); - doc.add(newField("field", "a b c d e f g h i j k l m n o p q r s t u v w x y z", Field.Store.NO, Field.Index.ANALYZED)); - doc.add(newField("number", "0 1 2 3 4 5 6 7 8 9", Field.Store.NO, Field.Index.ANALYZED)); + doc.add(newField("field", "a b c d e f g h i j k l m n o p q r s t u v w x y z", TextField.TYPE_UNSTORED)); + doc.add(newField("number", "0 1 2 3 4 5 6 7 8 9", 
TextField.TYPE_UNSTORED)); writer.addDocument(doc); writer.addDocument(doc); writer.commit(); @@ -1197,8 +1292,8 @@ Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setCodecProvider(_TestUtil.alwaysCodec("Standard"))); Document doc = new Document(); - doc.add(newField("field", "a b c d e f g h i j k l m n o p q r s t u v w x y z", Field.Store.NO, Field.Index.ANALYZED)); - doc.add(newField("number", "0 1 2 3 4 5 6 7 8 9", Field.Store.NO, Field.Index.ANALYZED)); + doc.add(newField("field", "a b c d e f g h i j k l m n o p q r s t u v w x y z", TextField.TYPE_UNSTORED)); + doc.add(newField("number", "0 1 2 3 4 5 6 7 8 9", TextField.TYPE_UNSTORED)); writer.addDocument(doc); writer.addDocument(doc); writer.close(); @@ -1302,7 +1397,7 @@ Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))); Document d = new Document(); - d.add(newField("f", "a a b", Field.Index.ANALYZED)); + d.add(newField("f", "a a b", TextField.TYPE_UNSTORED)); writer.addDocument(d); IndexReader r = writer.getReader(); writer.close(); Index: lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java (working copy) @@ -21,8 +21,9 @@ import org.apache.lucene.search.DefaultSimilarity; import org.apache.lucene.search.Similarity; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.store.Directory; import 
org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.util.LuceneTestCase; @@ -500,7 +501,9 @@ setMergePolicy(newLogMergePolicy(false)) ); Document doc = new Document(); - doc.add(newField("field", "yes it's stored", Field.Store.YES, Field.Index.ANALYZED)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + doc.add(newField("field", "yes it's stored", customType)); w.addDocument(doc); w.close(); IndexReader r1 = IndexReader.open(dir, false); Index: lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java (working copy) @@ -24,10 +24,11 @@ import java.util.concurrent.atomic.AtomicInteger; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.Field.Index; -import org.apache.lucene.document.Field.Store; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.StringField; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.index.SegmentNorms; import org.apache.lucene.search.DefaultSimilarity; @@ -329,8 +330,11 @@ private Document newDoc() { Document d = new Document(); float boost = nextNorm("anyfield"); // in this test the same similarity is used for all fields so it does not matter what field is passed + + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setTokenized(false); for (int i = 0; i < 10; i++) { - Field f = newField("f" + i, "v" + i, Store.NO, Index.NOT_ANALYZED); + 
Field f = newField("f" + i, "v" + i, customType); f.setBoost(boost); d.add(f); } Index: lucene/src/test/org/apache/lucene/index/TestIndexReaderDelete.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestIndexReaderDelete.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/TestIndexReaderDelete.java (working copy) @@ -20,8 +20,9 @@ import java.io.IOException; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.Directory; import org.apache.lucene.store.MockDirectoryWrapper; @@ -276,11 +277,13 @@ Directory dir = newDirectory(); RandomIndexWriter w= new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); Document doc = new Document(); - doc.add(newField("f", "doctor", Field.Store.NO, Field.Index.NOT_ANALYZED)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setTokenized(false); + doc.add(newField("f", "doctor", customType)); w.addDocument(doc); doc = new Document(); w.commit(); - doc.add(newField("f", "who", Field.Store.NO, Field.Index.NOT_ANALYZED)); + doc.add(newField("f", "who", customType)); w.addDocument(doc); IndexReader r = new SlowMultiReaderWrapper(w.getReader()); w.close(); Index: lucene/src/test/org/apache/lucene/index/TestIndexReaderOnDiskFull.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestIndexReaderOnDiskFull.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/TestIndexReaderOnDiskFull.java (working copy) @@ -20,8 +20,9 @@ import java.io.IOException; import 
org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.search.DefaultSimilarity; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.ScoreDoc; @@ -50,10 +51,13 @@ System.out.println("TEST: create initial index"); writer.setInfoStream(System.out); } + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setTokenized(false); for(int i=0;i<157;i++) { Document d = new Document(); - d.add(newField("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED)); - d.add(newField("content", "aaa " + i, Field.Store.NO, Field.Index.ANALYZED)); + d.add(newField("id", Integer.toString(i), customType)); + d.add(newField("content", "aaa " + i, TextField.TYPE_UNSTORED)); writer.addDocument(d); if (0==i%10) writer.commit(); Index: lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java (working copy) @@ -30,10 +30,11 @@ import java.util.Set; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field.Index; -import org.apache.lucene.document.Field.Store; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.StringField; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.search.DefaultSimilarity; import 
org.apache.lucene.search.FieldCache; @@ -168,17 +169,26 @@ IndexReader reader = IndexReader.open(dir, false); try { int M = 3; + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setTokenized(false); + FieldType customType2 = new FieldType(TextField.TYPE_UNSTORED); + customType2.setStored(true); + customType2.setTokenized(false); + customType2.setOmitNorms(true); + FieldType customType3 = new FieldType(); + customType3.setStored(true); for (int i=0; i<4; i++) { for (int j=0; j0) { int k = i-1; int n = j + k*M; - Document prevItereationDoc = reader.document(n); + org.apache.lucene.document.Document prevItereationDoc = reader.document(n); assertNotNull(prevItereationDoc); String id = prevItereationDoc.get("id"); assertEquals(k+"_"+j, id); @@ -956,13 +966,21 @@ Document doc = new Document(); sb.append("a"); sb.append(n); - doc.add(new Field("field1", sb.toString(), Store.YES, Index.ANALYZED)); - doc.add(new Field("fielda", sb.toString(), Store.YES, Index.NOT_ANALYZED_NO_NORMS)); - doc.add(new Field("fieldb", sb.toString(), Store.YES, Index.NO)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + FieldType customType2 = new FieldType(TextField.TYPE_UNSTORED); + customType2.setStored(true); + customType2.setTokenized(false); + customType2.setOmitNorms(true); + FieldType customType3 = new FieldType(); + customType3.setStored(true); + doc.add(new Field("field1", customType, sb.toString())); + doc.add(new Field("fielda", customType2, sb.toString())); + doc.add(new Field("fieldb", customType3, sb.toString())); sb.append(" b"); sb.append(n); for (int i = 1; i < numFields; i++) { - doc.add(new Field("field" + (i+1), sb.toString(), Store.YES, Index.ANALYZED)); + doc.add(new Field("field" + (i+1), customType, sb.toString())); } return doc; } @@ -1177,7 +1195,7 @@ ); for(int i=0;i<4;i++) { Document doc = new Document(); - doc.add(newField("id", ""+i, Field.Store.NO, 
Field.Index.NOT_ANALYZED)); + doc.add(newField("id", ""+i, StringField.TYPE_UNSTORED)); writer.addDocument(doc); Map data = new HashMap(); data.put("index", i+""); @@ -1238,7 +1256,7 @@ setMergePolicy(newLogMergePolicy(10)) ); Document doc = new Document(); - doc.add(newField("number", "17", Field.Store.NO, Field.Index.NOT_ANALYZED)); + doc.add(newField("number", "17", StringField.TYPE_UNSTORED)); writer.addDocument(doc); writer.commit(); Index: lucene/src/test/org/apache/lucene/index/TestIndexWriter.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestIndexWriter.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/TestIndexWriter.java (working copy) @@ -42,12 +42,12 @@ import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field.Index; -import org.apache.lucene.document.Field.Store; -import org.apache.lucene.document.Field.TermVector; -import org.apache.lucene.document.Field; import org.apache.lucene.document.Fieldable; +import org.apache.lucene.document2.BinaryField; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldCache; @@ -77,6 +77,7 @@ public class TestIndexWriter extends LuceneTestCase { + private static final FieldType storedTextType = new FieldType(TextField.TYPE_UNSTORED); public void testDocCount() throws IOException { Directory dir = newDirectory(); @@ -137,15 +138,15 @@ static void addDoc(IndexWriter writer) throws IOException { Document doc = new Document(); - 
doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED)); + doc.add(newField("content", "aaa", TextField.TYPE_UNSTORED)); writer.addDocument(doc); } static void addDocWithIndex(IndexWriter writer, int index) throws IOException { Document doc = new Document(); - doc.add(newField("content", "aaa " + index, Field.Store.YES, Field.Index.ANALYZED)); - doc.add(newField("id", "" + index, Field.Store.YES, Field.Index.ANALYZED)); + doc.add(newField("content", "aaa " + index, storedTextType)); + doc.add(newField("id", "" + index, storedTextType)); writer.addDocument(doc); } @@ -255,12 +256,12 @@ IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(10)); for(int j=0;j<100;j++) { Document doc = new Document(); - doc.add(newField("a"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED)); - doc.add(newField("b"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED)); - doc.add(newField("c"+j, "aaa" + j, Field.Store.YES, Field.Index.ANALYZED)); - doc.add(newField("d"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED)); - doc.add(newField("e"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED)); - doc.add(newField("f"+j, "aaa", Field.Store.YES, Field.Index.ANALYZED)); + doc.add(newField("a"+j, "aaa" + j, storedTextType)); + doc.add(newField("b"+j, "aaa" + j, storedTextType)); + doc.add(newField("c"+j, "aaa" + j, storedTextType)); + doc.add(newField("d"+j, "aaa", storedTextType)); + doc.add(newField("e"+j, "aaa", storedTextType)); + doc.add(newField("f"+j, "aaa", storedTextType)); writer.addDocument(doc); } writer.close(); @@ -291,7 +292,7 @@ int lastNumFile = dir.listAll().length; for(int j=0;j<9;j++) { Document doc = new Document(); - doc.add(newField("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED)); + doc.add(newField("field", "aaa" + j, storedTextType)); writer.addDocument(doc); int numFile = dir.listAll().length; // Verify that with a tiny RAM buffer we see new @@ -314,7 +315,7 
@@ int lastFlushCount = -1; for(int j=1;j<52;j++) { Document doc = new Document(); - doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED)); + doc.add(new Field("field", storedTextType, "aaa" + j)); writer.addDocument(doc); _TestUtil.syncConcurrentMerges(writer); int flushCount = writer.getFlushCount(); @@ -368,7 +369,7 @@ for(int j=1;j<52;j++) { Document doc = new Document(); - doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED)); + doc.add(new Field("field", storedTextType, "aaa" + j)); writer.addDocument(doc); } @@ -429,7 +430,7 @@ for(int j=0;j<100;j++) { Document doc = new Document(); for(int k=0;k<100;k++) { - doc.add(newField("field", Integer.toString(random.nextInt()), Field.Store.YES, Field.Index.ANALYZED)); + doc.add(newField("field", Integer.toString(random.nextInt()), storedTextType)); } writer.addDocument(doc); } @@ -438,7 +439,7 @@ // occurs (heavy on byte blocks) for(int j=0;j<100;j++) { Document doc = new Document(); - doc.add(newField("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", Field.Store.YES, Field.Index.ANALYZED)); + doc.add(newField("field", "aaa aaa aaa aaa aaa aaa aaa aaa aaa aaa", storedTextType)); writer.addDocument(doc); } @@ -453,7 +454,7 @@ String longTerm = b.toString(); Document doc = new Document(); - doc.add(newField("field", longTerm, Field.Store.YES, Field.Index.ANALYZED)); + doc.add(newField("field", longTerm, storedTextType)); writer.addDocument(doc); } } @@ -471,12 +472,18 @@ MockDirectoryWrapper dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(10)); // Enable norms for only 1 doc, pre flush + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setOmitNorms(true); for(int j=0;j<10;j++) { Document doc = new Document(); - Field f = newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED); + Field f = null; if (j 
!= 8) { - f.setOmitNorms(true); + f = newField("field", "aaa", customType); } + else { + f = newField("field", "aaa", storedTextType); + } doc.add(f); writer.addDocument(doc); } @@ -494,10 +501,13 @@ // Enable norms for only 1 doc, post flush for(int j=0;j<27;j++) { Document doc = new Document(); - Field f = newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED); + Field f = null; if (j != 26) { - f.setOmitNorms(true); + f = newField("field", "aaa", customType); } + else { + f = newField("field", "aaa", storedTextType); + } doc.add(f); writer.addDocument(doc); } @@ -526,7 +536,12 @@ b.append(" a a a a a a a a"); } Document doc = new Document(); - doc.add(newField("field", b.toString(), Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setStoreTermVectors(true); + customType.setStoreTermVectorPositions(true); + customType.setStoreTermVectorOffsets(true); + doc.add(newField("field", b.toString(), customType)); writer.addDocument(doc); writer.close(); @@ -594,7 +609,12 @@ setMergePolicy(newLogMergePolicy(10)) ); Document doc = new Document(); - doc.add(newField("field", "aaa", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setStoreTermVectors(true); + customType.setStoreTermVectorPositions(true); + customType.setStoreTermVectorOffsets(true); + doc.add(newField("field", "aaa", customType)); for(int i=0;i<19;i++) writer.addDocument(doc); writer.flush(false, true); @@ -614,7 +634,12 @@ IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); writer.setInfoStream(VERBOSE ? 
System.out : null); Document doc = new Document(); - doc.add(newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setStoreTermVectors(true); + customType.setStoreTermVectorPositions(true); + customType.setStoreTermVectorOffsets(true); + doc.add(newField("field", "aaa", customType)); writer.addDocument(doc); writer.commit(); if (VERBOSE) { @@ -643,7 +668,9 @@ TEST_VERSION_CURRENT, new MockAnalyzer(random))); Document document = new Document(); - document.add(newField("tvtest", "", Store.NO, Index.ANALYZED, TermVector.YES)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStoreTermVectors(true); + document.add(newField("tvtest", "", customType)); iw.addDocument(document); iw.close(); dir.close(); @@ -660,8 +687,9 @@ ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2); IndexWriter iw = new IndexWriter(dir, conf); Document document = new Document(); - document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED, - Field.TermVector.YES)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStoreTermVectors(true); + document.add(newField("tvtest", "a b c", customType)); Thread.currentThread().setPriority(Thread.MAX_PRIORITY); for(int i=0;i<4;i++) iw.addDocument(document); @@ -687,24 +715,21 @@ Document doc = new Document(); String contents = "aa bb cc dd ee ff gg hh ii jj kk"; + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + FieldType type = null; if (i == 7) { // Add empty docs here - doc.add(newField("content3", "", Field.Store.NO, - Field.Index.ANALYZED)); + doc.add(newField("content3", "", TextField.TYPE_UNSTORED)); } else { - Field.Store storeVal; if (i%2 == 0) { - doc.add(newField("content4", contents, Field.Store.YES, - Field.Index.ANALYZED)); - storeVal = Field.Store.YES; 
+ doc.add(newField("content4", contents, customType)); + type = customType; } else - storeVal = Field.Store.NO; - doc.add(newField("content1", contents, storeVal, - Field.Index.ANALYZED)); - doc.add(newField("content3", "", Field.Store.YES, - Field.Index.ANALYZED)); - doc.add(newField("content5", "", storeVal, - Field.Index.ANALYZED)); + type = TextField.TYPE_UNSTORED; + doc.add(newField("content1", contents, TextField.TYPE_UNSTORED)); + doc.add(newField("content3", "", customType)); + doc.add(newField("content5", "", type)); } for(int j=0;j<4;j++) @@ -730,7 +755,11 @@ Directory directory = newDirectory(); final Document doc = new Document(); - Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setTokenized(false); + + Field idField = newField("id", "", customType); doc.add(idField); for(int pass=0;pass<2;pass++) { @@ -834,7 +863,7 @@ for(int i=0;i<10000;i++) b.append(" a"); b.append(" x"); - doc.add(newField("field", b.toString(), Field.Store.NO, Field.Index.ANALYZED)); + doc.add(newField("field", b.toString(), TextField.TYPE_UNSTORED)); writer.addDocument(doc); writer.close(); @@ -852,7 +881,7 @@ Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); Document doc = new Document(); - doc.add(newField("", "a b c", Field.Store.NO, Field.Index.ANALYZED)); + doc.add(newField("", "a b c", TextField.TYPE_UNSTORED)); writer.addDocument(doc); writer.close(); dir.close(); @@ -886,8 +915,9 @@ Directory dir = newDirectory(); MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); Document doc = new Document(); - doc.add(newField("field", "a field", Field.Store.YES, - Field.Index.ANALYZED)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + 
doc.add(newField("field", "a field", customType)); w.addDocument(doc); w.commit(); assertTrue(w.beforeWasCalled); @@ -930,7 +960,7 @@ Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); Document doc = new Document(); - doc.add(new Field("field", tokens)); + doc.add(new TextField("field", tokens)); w.addDocument(doc); w.commit(); @@ -971,7 +1001,7 @@ b[i] = (byte) (i+77); Document doc = new Document(); - Field f = new Field("binary", b, 10, 17); + Field f = new BinaryField("binary", b, 10, 17); byte[] bx = f.binaryValue(null).bytes; assertTrue(bx != null); assertEquals(50, bx.length); @@ -982,9 +1012,9 @@ w.close(); IndexReader ir = IndexReader.open(dir, true); - doc = ir.document(0); - f = doc.getField("binary"); - b = f.binaryValue(null).bytes; + org.apache.lucene.document.Document doc2 = ir.document(0); + org.apache.lucene.document.Field f2 = doc2.getField("binary"); + b = f2.binaryValue(null).bytes; assertTrue(b != null); assertEquals(17, b.length, 17); assertEquals(87, b[0]); @@ -1000,10 +1030,11 @@ IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer)); Document doc = new Document(); - Field f = newField("field", "", Field.Store.NO, - Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS); - Field f2 = newField("field", "crunch man", Field.Store.NO, - Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStoreTermVectors(true); + customType.setStoreTermVectorPositions(true); + Field f = newField("field", "", customType); + Field f2 = newField("field", "crunch man", customType); doc.add(f); doc.add(f2); w.addDocument(doc); @@ -1045,8 +1076,14 @@ Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2)); Document doc = new Document(); - 
doc.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES, - Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); + + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setStoreTermVectors(true); + customType.setStoreTermVectorPositions(true); + customType.setStoreTermVectorOffsets(true); + + doc.add(newField("content", "aaa bbb ccc ddd eee fff ggg hhh iii", customType)); writer.addDocument(doc); writer.addDocument(doc); writer.addDocument(doc); @@ -1098,7 +1135,7 @@ w = new IndexWriter(dir, conf); Document doc = new Document(); - doc.add(newField("field", "some text contents", Field.Store.YES, Field.Index.ANALYZED)); + doc.add(newField("field", "some text contents", storedTextType)); for(int i=0;i<100;i++) { w.addDocument(doc); if (i%10 == 0) { @@ -1212,9 +1249,18 @@ b[i] = (byte) (i+77); Document doc = new Document(); - Field f = new Field("binary", b, 10, 17); + + FieldType customType = new FieldType(BinaryField.TYPE_STORED); + customType.setTokenized(true); + customType.setIndexed(true); + + Field f = new Field("binary", customType, b, 10, 17); f.setTokenStream(new MockTokenizer(new StringReader("doc1field1"), MockTokenizer.WHITESPACE, false)); - Field f2 = newField("string", "value", Field.Store.YES,Field.Index.ANALYZED); + + FieldType customType2 = new FieldType(TextField.TYPE_UNSTORED); + customType2.setStored(true); + + Field f2 = newField("string", "value", customType2); f2.setTokenStream(new MockTokenizer(new StringReader("doc1field2"), MockTokenizer.WHITESPACE, false)); doc.add(f); doc.add(f2); @@ -1237,9 +1283,9 @@ w.close(); IndexReader ir = IndexReader.open(dir, true); - doc = ir.document(0); - f = doc.getField("binary"); - b = f.binaryValue(null).bytes; + org.apache.lucene.document.Document doc2 = ir.document(0); + org.apache.lucene.document.Field f3 = doc2.getField("binary"); + b = f3.binaryValue(null).bytes; assertTrue(b != null); assertEquals(17, b.length, 
17); assertEquals(87, b[0]); @@ -1271,25 +1317,28 @@ Directory d = newDirectory(); IndexWriter w = new IndexWriter(d, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); Document doc = new Document(); - doc.add(newField("zzz", "a b c", Field.Store.YES, Field.Index.NO)); - doc.add(newField("aaa", "a b c", Field.Store.YES, Field.Index.NO)); - doc.add(newField("zzz", "1 2 3", Field.Store.YES, Field.Index.NO)); + + FieldType customType = new FieldType(); + customType.setStored(true); + doc.add(newField("zzz", "a b c", customType)); + doc.add(newField("aaa", "a b c", customType)); + doc.add(newField("zzz", "1 2 3", customType)); w.addDocument(doc); IndexReader r = w.getReader(); - doc = r.document(0); - Iterator it = doc.getFields().iterator(); + org.apache.lucene.document.Document doc2 = r.document(0); + Iterator it = doc2.getFields().iterator(); assertTrue(it.hasNext()); - Field f = (Field) it.next(); + org.apache.lucene.document.Field f = (org.apache.lucene.document.Field) it.next(); assertEquals(f.name(), "zzz"); assertEquals(f.stringValue(), "a b c"); assertTrue(it.hasNext()); - f = (Field) it.next(); + f = (org.apache.lucene.document.Field) it.next(); assertEquals(f.name(), "aaa"); assertEquals(f.stringValue(), "a b c"); assertTrue(it.hasNext()); - f = (Field) it.next(); + f = (org.apache.lucene.document.Field) it.next(); assertEquals(f.name(), "zzz"); assertEquals(f.stringValue(), "1 2 3"); assertFalse(it.hasNext()); @@ -1321,7 +1370,7 @@ s.append(' ').append(i); } Document d = new Document(); - Field f = newField("field", s.toString(), Field.Store.NO, Field.Index.ANALYZED); + Field f = newField("field", s.toString(), TextField.TYPE_UNSTORED); d.add(f); w.addDocument(d); @@ -1353,7 +1402,7 @@ setMergePolicy(mergePolicy) ); Document doc = new Document(); - doc.add(newField("field", "go", Field.Store.NO, Field.Index.ANALYZED)); + doc.add(newField("field", "go", TextField.TYPE_UNSTORED)); w.addDocument(doc); IndexReader r; if (iter == 0) { @@ 
-1416,7 +1465,14 @@ // First commit Document doc = new Document(); - doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS)); + + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setStoreTermVectors(true); + customType.setStoreTermVectorPositions(true); + customType.setStoreTermVectorOffsets(true); + + doc.add(newField("c", "val", customType)); writer.addDocument(doc); writer.commit(); assertEquals(1, IndexReader.listCommits(dir).size()); @@ -1426,7 +1482,7 @@ // Second commit - now KeepOnlyLastCommit cannot delete the prev commit. doc = new Document(); - doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS)); + doc.add(newField("c", "val", customType)); writer.addDocument(doc); writer.commit(); assertEquals(2, IndexReader.listCommits(dir).size()); @@ -1473,14 +1529,19 @@ } Document doc = new Document(); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setStoreTermVectors(true); + customType.setStoreTermVectorPositions(true); + customType.setStoreTermVectorOffsets(true); // create as many files as possible - doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS)); + doc.add(newField("c", "val", customType)); writer.addDocument(doc); // Adding just one document does not call flush yet. assertEquals("only the stored and term vector files should exist in the directory", 5 + extraFileCount, dir.listAll().length); doc = new Document(); - doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS)); + doc.add(newField("c", "val", customType)); writer.addDocument(doc); // The second document should cause a flush. 
@@ -1503,7 +1564,12 @@ TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2)); Document doc = new Document(); - doc.add(newField("c", "val", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setStoreTermVectors(true); + customType.setStoreTermVectorPositions(true); + customType.setStoreTermVectorOffsets(true); + doc.add(newField("c", "val", customType)); w.addDocument(doc); w.addDocument(doc); IndexWriter w2 = new IndexWriter(dir, newIndexWriterConfig( @@ -1530,7 +1596,10 @@ final List fieldIDs = new ArrayList(); - Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setTokenized(false); + Field idField = newField("id", "", customType); for(int i=0;i { private final Document doc; private final int count; + + /* private field types */ + /* private field types */ + private static final FieldType custom = new FieldType(TextField.TYPE_UNSTORED); + private static final FieldType custom1 = new FieldType(TextField.TYPE_UNSTORED); + private static final FieldType custom2 = new FieldType(StringField.TYPE_UNSTORED); + private static final FieldType custom3 = new FieldType(); + private static final FieldType custom4 = new FieldType(StringField.TYPE_UNSTORED); + private static final FieldType custom5 = new FieldType(TextField.TYPE_UNSTORED); + + static { + custom.setStored(true); + + custom1.setStoreTermVectors(true); + custom1.setStoreTermVectorPositions(true); + custom1.setStoreTermVectorOffsets(true); + + custom2.setStored(true); + + custom3.setStored(true); + + custom4.setStoreTermVectors(true); + custom4.setStoreTermVectorPositions(true); + custom4.setStoreTermVectorOffsets(true); + + custom5.setStored(true); + custom5.setStoreTermVectors(true); + custom5.setStoreTermVectorPositions(true); + 
custom5.setStoreTermVectorOffsets(true); + } + public DocCopyIterator(Document doc, int count) { this.count = count; this.doc = doc; @@ -100,17 +134,17 @@ final Document doc = new Document(); - doc.add(newField("content1", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.ANALYZED)); - doc.add(newField("content6", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); - doc.add(newField("content2", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.NOT_ANALYZED)); - doc.add(newField("content3", "aaa bbb ccc ddd", Field.Store.YES, Field.Index.NO)); + doc.add(newField("content1", "aaa bbb ccc ddd", DocCopyIterator.custom)); + doc.add(newField("content6", "aaa bbb ccc ddd", DocCopyIterator.custom1)); + doc.add(newField("content2", "aaa bbb ccc ddd", DocCopyIterator.custom2)); + doc.add(newField("content3", "aaa bbb ccc ddd", DocCopyIterator.custom3)); - doc.add(newField("content4", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.ANALYZED)); - doc.add(newField("content5", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.NOT_ANALYZED)); + doc.add(newField("content4", "aaa bbb ccc ddd", TextField.TYPE_UNSTORED)); + doc.add(newField("content5", "aaa bbb ccc ddd", StringField.TYPE_UNSTORED)); - doc.add(newField("content7", "aaa bbb ccc ddd", Field.Store.NO, Field.Index.NOT_ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); + doc.add(newField("content7", "aaa bbb ccc ddd", DocCopyIterator.custom4)); - final Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED); + final Field idField = newField("id", "", DocCopyIterator.custom2); doc.add(idField); final long stopTime = System.currentTimeMillis() + 500; @@ -336,8 +370,7 @@ MockIndexWriter2 w = new MockIndexWriter2(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); w.setInfoStream(VERBOSE ? 
System.out : null); Document doc = new Document(); - doc.add(newField("field", "a field", Field.Store.YES, - Field.Index.ANALYZED)); + doc.add(newField("field", "a field", DocCopyIterator.custom)); w.addDocument(doc); w.doFail = true; try { @@ -356,8 +389,7 @@ MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2)); w.setInfoStream(VERBOSE ? System.out : null); Document doc = new Document(); - doc.add(newField("field", "a field", Field.Store.YES, - Field.Index.ANALYZED)); + doc.add(newField("field", "a field", DocCopyIterator.custom)); w.addDocument(doc); Analyzer analyzer = new Analyzer() { @@ -370,8 +402,7 @@ }; Document crashDoc = new Document(); - crashDoc.add(newField("crash", "do it on token 4", Field.Store.YES, - Field.Index.ANALYZED)); + crashDoc.add(newField("crash", "do it on token 4", DocCopyIterator.custom)); try { w.addDocument(crashDoc, analyzer); fail("did not hit expected exception"); @@ -412,8 +443,7 @@ MockIndexWriter3 w = new MockIndexWriter3(dir, conf); w.doFail = true; Document doc = new Document(); - doc.add(newField("field", "a field", Field.Store.YES, - Field.Index.ANALYZED)); + doc.add(newField("field", "a field", DocCopyIterator.custom)); for(int i=0;i<10;i++) try { w.addDocument(doc); @@ -456,8 +486,7 @@ Document doc = new Document(); String contents = "aa bb cc dd ee ff gg hh ii jj kk"; - doc.add(newField("content", contents, Field.Store.NO, - Field.Index.ANALYZED)); + doc.add(newField("content", contents, TextField.TYPE_UNSTORED)); try { writer.addDocument(doc); fail("did not hit expected exception"); @@ -466,14 +495,12 @@ // Make sure we can add another normal document doc = new Document(); - doc.add(newField("content", "aa bb cc dd", Field.Store.NO, - Field.Index.ANALYZED)); + doc.add(newField("content", "aa bb cc dd", TextField.TYPE_UNSTORED)); writer.addDocument(doc); // Make sure we can add another normal document doc = new Document(); - 
doc.add(newField("content", "aa bb cc dd", Field.Store.NO, - Field.Index.ANALYZED)); + doc.add(newField("content", "aa bb cc dd", TextField.TYPE_UNSTORED)); writer.addDocument(doc); writer.close(); @@ -544,8 +571,7 @@ IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2)); Document doc = new Document(); String contents = "aa bb cc dd ee ff gg hh ii jj kk"; - doc.add(newField("content", contents, Field.Store.NO, - Field.Index.ANALYZED)); + doc.add(newField("content", contents, TextField.TYPE_UNSTORED)); boolean hitError = false; for(int i=0;i<200;i++) { try { @@ -588,14 +614,11 @@ lmp.setMergeFactor(Math.max(lmp.getMergeFactor(), 5)); Document doc = new Document(); - doc.add(newField("contents", "here are some contents", Field.Store.YES, - Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); + doc.add(newField("contents", "here are some contents", DocCopyIterator.custom5)); writer.addDocument(doc); writer.addDocument(doc); - doc.add(newField("crash", "this should crash after 4 terms", Field.Store.YES, - Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); - doc.add(newField("other", "this will not get indexed", Field.Store.YES, - Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); + doc.add(newField("crash", "this should crash after 4 terms", DocCopyIterator.custom5)); + doc.add(newField("other", "this will not get indexed", DocCopyIterator.custom5)); try { writer.addDocument(doc); fail("did not hit expected exception"); @@ -608,8 +631,7 @@ if (0 == i) { doc = new Document(); - doc.add(newField("contents", "here are some contents", Field.Store.YES, - Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); + doc.add(newField("contents", "here are some contents", DocCopyIterator.custom5)); writer.addDocument(doc); writer.addDocument(doc); } @@ -641,8 +663,7 @@ writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, 
analyzer).setMaxBufferedDocs(10)); doc = new Document(); - doc.add(newField("contents", "here are some contents", Field.Store.YES, - Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); + doc.add(newField("contents", "here are some contents", DocCopyIterator.custom5)); for(int j=0;j<17;j++) writer.addDocument(doc); writer.optimize(); @@ -698,14 +719,11 @@ try { for(int iter=0;iter allTerms = new HashSet(); Index: lucene/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java (working copy) @@ -20,8 +20,9 @@ import java.io.IOException; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; import org.apache.lucene.store.MockDirectoryWrapper; @@ -54,7 +55,13 @@ public void run() { final Document doc = new Document(); - doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setStoreTermVectors(true); + customType.setStoreTermVectorPositions(true); + customType.setStoreTermVectorOffsets(true); + + doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", customType)); int idUpto = 0; int fullCount = 0; @@ -288,7 +295,12 @@ IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)) .setMaxBufferedDocs(2).setMergeScheduler(new 
ConcurrentMergeScheduler())); final Document doc = new Document(); - doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setStoreTermVectors(true); + customType.setStoreTermVectorPositions(true); + customType.setStoreTermVectorOffsets(true); + doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", customType)); for(int i=0;i<6;i++) writer.addDocument(doc); Index: lucene/src/test/org/apache/lucene/index/TestIsCurrent.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestIsCurrent.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/TestIsCurrent.java (working copy) @@ -17,9 +17,9 @@ * limitations under the License. */ -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field.Index; -import org.apache.lucene.document.Field.Store; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.util.*; import org.apache.lucene.store.*; @@ -43,7 +43,9 @@ // write document Document doc = new Document(); - doc.add(newField("UUID", "1", Store.YES, Index.ANALYZED)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + doc.add(newField("UUID", "1", customType)); writer.addDocument(doc); writer.commit(); } Index: lucene/src/test/org/apache/lucene/index/TestLazyBug.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestLazyBug.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/TestLazyBug.java (working copy) @@ -22,7 +22,11 @@ import java.util.Set; import org.apache.lucene.analysis.MockAnalyzer; -import 
org.apache.lucene.document.*; +import org.apache.lucene.document.FieldSelector; +import org.apache.lucene.document.FieldSelectorResult; +import org.apache.lucene.document.Fieldable; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.TextField; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; import org.junit.AfterClass; @@ -87,8 +91,7 @@ doc.add(newField("f"+f, data[f % data.length] + '#' + data[random.nextInt(data.length)], - Field.Store.NO, - Field.Index.ANALYZED)); + TextField.TYPE_UNSTORED)); } writer.addDocument(doc); } @@ -102,7 +105,7 @@ public void doTest(int[] docs) throws Exception { IndexReader reader = IndexReader.open(directory, true); for (int i = 0; i < docs.length; i++) { - Document d = reader.document(docs[i], SELECTOR); + org.apache.lucene.document.Document d = reader.document(docs[i], SELECTOR); d.get(MAGIC_FIELD); List fields = d.getFields(); Index: lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/TestLazyProxSkipping.java (working copy) @@ -24,9 +24,10 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.codecs.CodecProvider; -import org.apache.lucene.document.Field; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.ScoreDoc; @@ -84,6 +85,9 @@ setMaxBufferedDocs(10). 
setMergePolicy(newLogMergePolicy(false)) ); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + for (int i = 0; i < numDocs; i++) { Document doc = new Document(); String content; @@ -98,7 +102,7 @@ content = this.term3 + " " + this.term2; } - doc.add(newField(this.field, content, Field.Store.YES, Field.Index.ANALYZED)); + doc.add(newField(this.field, content, customType)); writer.addDocument(doc); } @@ -144,9 +148,11 @@ public void testSeek() throws IOException { Directory directory = newDirectory(); IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); for (int i = 0; i < 10; i++) { Document doc = new Document(); - doc.add(newField(this.field, "a b", Field.Store.YES, Field.Index.ANALYZED)); + doc.add(newField(this.field, "a b", customType)); writer.addDocument(doc); } Index: lucene/src/test/org/apache/lucene/index/TestLongPostings.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestLongPostings.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/index/TestLongPostings.java (working copy) @@ -25,8 +25,9 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.TextField; +import org.apache.lucene.document2.Field; import org.apache.lucene.index.codecs.CodecProvider; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; @@ -110,7 +111,7 @@ for(int idx=0;idx= 1); - Document result = reader.document(0); + org.apache.lucene.document.Document result = reader.document(0); 
assertTrue(result != null); //There are 2 unstored fields on the document that are not preserved across writing - assertTrue(DocHelper.numFields(result) == DocHelper.numFields(testDoc) - DocHelper.unstored.size()); + assertTrue(DocHelper.numFields2(result) == DocHelper.numFields(testDoc) - DocHelper.unstored.size()); List fields = result.getFields(); for (final Fieldable field : fields ) { @@ -174,9 +174,9 @@ public static void checkNorms(IndexReader reader) throws IOException { // test omit norms for (int i=0; i aDocs = new HashSet(); final Document doc = new Document(); - final Field f = newField("field", "", Field.Index.NOT_ANALYZED_NO_NORMS); + FieldType customType = new FieldType(StringField.TYPE_UNSTORED); + customType.setStored(true); + final Field f = newField("field", "", StringField.TYPE_UNSTORED); doc.add(f); - final Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS); + final Field idField = newField("id", "", customType); doc.add(idField); int num = atLeast(5000); for(int id=0;id fieldNameComparator = new Comparator() { - public int compare(Fieldable o1, Fieldable o2) { - return o1.name().compareTo(o2.name()); - } + public int compare(Fieldable o1, Fieldable o2) { + return o1.name().compareTo(o2.name()); + } }; + static Comparator fieldNameComparator2 = new Comparator() { + public int compare(IndexableField o1, IndexableField o2) { + return o1.name().compareTo(o2.name()); + } + }; // This test avoids using any extra synchronization in the multiple // indexing threads to test that IndexWriter does correctly synchronize @@ -249,13 +257,13 @@ Iterator iter = docs.values().iterator(); while (iter.hasNext()) { Document d = iter.next(); - ArrayList fields = new ArrayList(); + ArrayList fields = new ArrayList(); fields.addAll(d.getFields()); // put fields in same order each time - Collections.sort(fields, fieldNameComparator); + Collections.sort(fields, fieldNameComparator2); Document d1 = new Document(); - 
d1.setBoost(d.getBoost()); + //d1.setBoost(d.getBoost()); for (int i=0; i ff1 = d1.getFields(); List ff2 = d2.getFields(); @@ -520,8 +528,8 @@ for (int i=0; i fields = new ArrayList(); String idString = getIdString(); - Field idField = newField(idTerm.field(), idString, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS); + Field idField = newField(idTerm.field(), idString, customType1); fields.add(idField); int nFields = nextInt(maxFields); for (int i=0; i lastScore); lastScore = scores[i]; } Index: lucene/src/test/org/apache/lucene/search/TestDocIdSet.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestDocIdSet.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/TestDocIdSet.java (working copy) @@ -24,9 +24,9 @@ import junit.framework.Assert; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field.Index; -import org.apache.lucene.document.Field.Store; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.StringField; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.RandomIndexWriter; @@ -103,7 +103,7 @@ Directory dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random, dir); Document doc = new Document(); - doc.add(newField("c", "val", Store.NO, Index.NOT_ANALYZED_NO_NORMS)); + doc.add(newField("c", "val", StringField.TYPE_UNSTORED)); writer.addDocument(doc); IndexReader reader = writer.getReader(); writer.close(); Index: lucene/src/test/org/apache/lucene/search/TestElevationComparator.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestElevationComparator.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/TestElevationComparator.java (working copy) @@ -18,8 +18,9 @@ */ import 
org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.*; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.search.FieldValueHitQueue.Entry; @@ -124,7 +125,7 @@ private Document adoc(String[] vals) { Document doc = new Document(); for (int i = 0; i < vals.length - 2; i += 2) { - doc.add(newField(vals[i], vals[i + 1], Field.Store.YES, Field.Index.ANALYZED)); + doc.add(newField(vals[i], vals[i + 1], TextField.TYPE_STORED)); } return doc; } Index: lucene/src/test/org/apache/lucene/search/TestExplanations.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestExplanations.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/TestExplanations.java (working copy) @@ -20,8 +20,10 @@ import org.apache.lucene.queryParser.QueryParser; import org.apache.lucene.queryParser.ParseException; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.StringField; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; @@ -74,11 +76,11 @@ RandomIndexWriter writer= new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); for (int i = 0; i < docFields.length; i++) { Document doc = new Document(); - doc.add(newField(KEY, ""+i, Field.Store.NO, Field.Index.NOT_ANALYZED)); - Field f = newField(FIELD, docFields[i], 
Field.Store.NO, Field.Index.ANALYZED); + doc.add(newField(KEY, ""+i, StringField.TYPE_UNSTORED)); + Field f = newField(FIELD, docFields[i], TextField.TYPE_UNSTORED); f.setBoost(i); doc.add(f); - doc.add(newField(ALTFIELD, docFields[i], Field.Store.NO, Field.Index.ANALYZED)); + doc.add(newField(ALTFIELD, docFields[i], TextField.TYPE_UNSTORED)); writer.addDocument(doc); } reader = writer.getReader(); Index: lucene/src/test/org/apache/lucene/search/TestFieldCache.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestFieldCache.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/TestFieldCache.java (working copy) @@ -17,8 +17,9 @@ */ import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.StringField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.RandomIndexWriter; @@ -55,12 +56,12 @@ writer.w.setInfoStream(VERBOSE ? 
System.out : null); for (int i = 0; i < NUM_DOCS; i++){ Document doc = new Document(); - doc.add(newField("theLong", String.valueOf(theLong--), Field.Store.NO, Field.Index.NOT_ANALYZED)); - doc.add(newField("theDouble", String.valueOf(theDouble--), Field.Store.NO, Field.Index.NOT_ANALYZED)); - doc.add(newField("theByte", String.valueOf(theByte--), Field.Store.NO, Field.Index.NOT_ANALYZED)); - doc.add(newField("theShort", String.valueOf(theShort--), Field.Store.NO, Field.Index.NOT_ANALYZED)); - doc.add(newField("theInt", String.valueOf(theInt--), Field.Store.NO, Field.Index.NOT_ANALYZED)); - doc.add(newField("theFloat", String.valueOf(theFloat--), Field.Store.NO, Field.Index.NOT_ANALYZED)); + doc.add(newField("theLong", String.valueOf(theLong--), StringField.TYPE_UNSTORED)); + doc.add(newField("theDouble", String.valueOf(theDouble--), StringField.TYPE_UNSTORED)); + doc.add(newField("theByte", String.valueOf(theByte--), StringField.TYPE_UNSTORED)); + doc.add(newField("theShort", String.valueOf(theShort--), StringField.TYPE_UNSTORED)); + doc.add(newField("theInt", String.valueOf(theInt--), StringField.TYPE_UNSTORED)); + doc.add(newField("theFloat", String.valueOf(theFloat--), StringField.TYPE_UNSTORED)); // sometimes skip the field: if (random.nextInt(40) != 17) { @@ -77,7 +78,7 @@ s = _TestUtil.randomUnicodeString(random, 250); } unicodeStrings[i] = s; - doc.add(newField("theRandomUnicodeString", unicodeStrings[i], Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS)); + doc.add(newField("theRandomUnicodeString", unicodeStrings[i], StringField.TYPE_UNSTORED)); } writer.addDocument(doc); } Index: lucene/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java (working copy) @@ -24,8 +24,9 @@ import 
org.apache.lucene.index.Term; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.StringField; import org.apache.lucene.store.Directory; import org.junit.Test; @@ -535,8 +536,8 @@ for (int d = -20; d <= 20; d++) { Document doc = new Document(); - doc.add(newField("id",Integer.toString(d), Field.Store.NO, Field.Index.NOT_ANALYZED)); - doc.add(newField("body","body", Field.Store.NO, Field.Index.NOT_ANALYZED)); + doc.add(newField("id",Integer.toString(d), StringField.TYPE_UNSTORED)); + doc.add(newField("body","body", StringField.TYPE_UNSTORED)); writer.addDocument(doc); } Index: lucene/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java (working copy) @@ -19,8 +19,9 @@ import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.StringField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.store.Directory; @@ -41,7 +42,7 @@ for (int i = 0; i < 100; i++) { Document doc = new Document(); int term = i * 10; //terms are units of 10; - doc.add(newField(fieldName, "" + term, Field.Store.YES, Field.Index.NOT_ANALYZED)); + doc.add(newField(fieldName, "" + term, StringField.TYPE_STORED)); w.addDocument(doc); } IndexReader reader = w.getReader(); Index: lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java 
=================================================================== --- lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java (working copy) @@ -20,8 +20,9 @@ import java.util.BitSet; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; @@ -54,23 +55,23 @@ RandomIndexWriter writer = new RandomIndexWriter (random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); Document doc = new Document(); - doc.add (newField("field", "one two three four five", Field.Store.YES, Field.Index.ANALYZED)); - doc.add (newField("sorter", "b", Field.Store.YES, Field.Index.ANALYZED)); + doc.add (newField("field", "one two three four five", TextField.TYPE_STORED)); + doc.add (newField("sorter", "b", TextField.TYPE_STORED)); writer.addDocument (doc); doc = new Document(); - doc.add (newField("field", "one two three four", Field.Store.YES, Field.Index.ANALYZED)); - doc.add (newField("sorter", "d", Field.Store.YES, Field.Index.ANALYZED)); + doc.add (newField("field", "one two three four", TextField.TYPE_STORED)); + doc.add (newField("sorter", "d", TextField.TYPE_STORED)); writer.addDocument (doc); doc = new Document(); - doc.add (newField("field", "one two three y", Field.Store.YES, Field.Index.ANALYZED)); - doc.add (newField("sorter", "a", Field.Store.YES, Field.Index.ANALYZED)); + doc.add (newField("field", "one two three y", TextField.TYPE_STORED)); + doc.add (newField("sorter", "a", TextField.TYPE_STORED)); writer.addDocument (doc); doc = new Document(); - 
doc.add (newField("field", "one two x", Field.Store.YES, Field.Index.ANALYZED)); - doc.add (newField("sorter", "c", Field.Store.YES, Field.Index.ANALYZED)); + doc.add (newField("field", "one two x", TextField.TYPE_STORED)); + doc.add (newField("sorter", "c", TextField.TYPE_STORED)); writer.addDocument (doc); // tests here require single segment (eg try seed Index: lucene/src/test/org/apache/lucene/search/TestFilteredSearch.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestFilteredSearch.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/TestFilteredSearch.java (working copy) @@ -21,8 +21,8 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.StringField; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.IndexWriter; @@ -64,7 +64,7 @@ try { for (int i = 0; i < 60; i++) {//Simple docs Document doc = new Document(); - doc.add(newField(FIELD, Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED)); + doc.add(newField(FIELD, Integer.toString(i), StringField.TYPE_STORED)); writer.addDocument(doc); } if(optimize) Index: lucene/src/test/org/apache/lucene/search/TestFuzzyQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestFuzzyQuery.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/TestFuzzyQuery.java (working copy) @@ -22,8 +22,9 @@ import java.io.IOException; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import 
org.apache.lucene.document2.Field; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.MultiReader; import org.apache.lucene.index.RandomIndexWriter; @@ -472,7 +473,7 @@ private void addDoc(String text, RandomIndexWriter writer) throws IOException { Document doc = new Document(); - doc.add(newField("field", text, Field.Store.YES, Field.Index.ANALYZED)); + doc.add(newField("field", text, TextField.TYPE_STORED)); writer.addDocument(doc); } } Index: lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java (working copy) @@ -23,8 +23,9 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; @@ -85,7 +86,7 @@ RandomIndexWriter writer = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false)).setMergePolicy(newLogMergePolicy())); Document doc = new Document(); - Field field = newField("field", "", Field.Store.NO, Field.Index.ANALYZED); + Field field = newField("field", "", TextField.TYPE_UNSTORED); doc.add(field); for (int i = 0; i < terms; i++) { Index: lucene/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java (revision 1143083) +++ 
lucene/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java (working copy) @@ -20,8 +20,9 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.index.IndexReader; @@ -124,7 +125,7 @@ private void addDoc(String text, IndexWriter iw, float boost) throws IOException { Document doc = new Document(); - Field f = newField("key", text, Field.Store.YES, Field.Index.ANALYZED); + Field f = newField("key", text, TextField.TYPE_STORED); f.setBoost(boost); doc.add(f); iw.addDocument(doc); Index: lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java (working copy) @@ -33,8 +33,10 @@ import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.StringField; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.store.RAMDirectory; @@ -166,7 +168,7 @@ private void add(String s, RandomIndexWriter writer) throws IOException { Document doc = new Document(); - doc.add(newField("body", s, Field.Store.YES, Field.Index.ANALYZED)); + 
doc.add(newField("body", s, TextField.TYPE_STORED)); writer.addDocument(doc); } @@ -289,8 +291,8 @@ private void add(String s, String type, RandomIndexWriter writer) throws IOException { Document doc = new Document(); - doc.add(newField("body", s, Field.Store.YES, Field.Index.ANALYZED)); - doc.add(newField("type", type, Field.Store.YES, Field.Index.NOT_ANALYZED)); + doc.add(newField("body", s, TextField.TYPE_STORED)); + doc.add(newField("type", type, StringField.TYPE_UNSTORED)); writer.addDocument(doc); } @@ -403,7 +405,7 @@ RandomIndexWriter writer = new RandomIndexWriter(random, dir, new CannedAnalyzer(tokens)); Document doc = new Document(); - doc.add(new Field("field", "", Field.Store.NO, Field.Index.ANALYZED)); + doc.add(new TextField("field", "")); writer.addDocument(doc); writer.addDocument(doc); IndexReader r = writer.getReader(); @@ -494,7 +496,7 @@ IndexWriterConfig cfg = newIndexWriterConfig(TEST_VERSION_CURRENT, new CannedAnalyzer(INCR_0_DOC_TOKENS)); IndexWriter writer = new IndexWriter(dir, cfg); Document doc = new Document(); - doc.add(new Field("field", "", Field.Store.NO, Field.Index.ANALYZED)); + doc.add(new TextField("field", "")); writer.addDocument(doc); IndexReader r = IndexReader.open(writer,false); writer.close(); Index: lucene/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java (working copy) @@ -19,8 +19,10 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.StringField; +import org.apache.lucene.document2.TextField; import 
org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.IndexWriterConfig; @@ -64,16 +66,14 @@ newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setMergePolicy(newLogMergePolicy())); + FieldType customType = new FieldType(TextField.TYPE_STORED); + customType.setTokenized(false); for (int i = 0; i < data.length; i++) { Document doc = new Document(); - doc.add(newField("id", String.valueOf(i), Field.Store.YES, - Field.Index.NOT_ANALYZED));// Field.Keyword("id",String.valueOf(i))); - doc - .add(newField("all", "all", Field.Store.YES, - Field.Index.NOT_ANALYZED));// Field.Keyword("all","all")); + doc.add(newField("id", String.valueOf(i), customType));// Field.Keyword("id",String.valueOf(i))); + doc.add(newField("all", "all", customType));// Field.Keyword("all","all")); if (null != data[i]) { - doc.add(newField("data", data[i], Field.Store.YES, - Field.Index.ANALYZED));// Field.Text("data",data[i])); + doc.add(newField("data", data[i], TextField.TYPE_STORED));// Field.Text("data",data[i])); } writer.addDocument(doc); } Index: lucene/src/test/org/apache/lucene/search/TestMultiTermQueryRewrites.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestMultiTermQueryRewrites.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/TestMultiTermQueryRewrites.java (working copy) @@ -18,8 +18,9 @@ */ import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.StringField; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.MultiReader; @@ -53,7 +54,7 @@ for (int i = 0; i < 10; i++) { Document doc = new 
Document(); - doc.add(newField("data", Integer.toString(i), Field.Store.NO, Field.Index.NOT_ANALYZED)); + doc.add(newField("data", Integer.toString(i), StringField.TYPE_UNSTORED)); writer.addDocument(doc); ((i % 2 == 0) ? swriter1 : swriter2).addDocument(doc); } Index: lucene/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java (working copy) @@ -20,7 +20,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.*; +import org.apache.lucene.document2.*; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.TermFreqVector; @@ -41,9 +41,13 @@ IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); //writer.setUseCompoundFile(false); //writer.infoStream = System.out; + FieldType customType = new FieldType(TextField.TYPE_STORED); + customType.setStored(true); + customType.setTokenized(false); + customType.setStoreTermVectors(true); for (int i = 0; i < numDocs; i++) { Document doc = new Document(); - Fieldable fld = newField("field", English.intToEnglish(i), Field.Store.YES, Field.Index.NOT_ANALYZED, Field.TermVector.YES); + Field fld = newField("field", English.intToEnglish(i), customType); doc.add(fld); writer.addDocument(doc); } Index: lucene/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java (revision 1143083) +++ 
lucene/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java (working copy) @@ -22,9 +22,9 @@ import java.text.DecimalFormatSymbols; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.NumericField; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.NumericField; +import org.apache.lucene.document2.StringField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.store.Directory; @@ -51,8 +51,8 @@ Document doc = new Document(); for (int m=0, c=random.nextInt(10); m<=c; m++) { int value = random.nextInt(Integer.MAX_VALUE); - doc.add(newField("asc", format.format(value), Field.Store.NO, Field.Index.NOT_ANALYZED)); - doc.add(new NumericField("trie", Field.Store.NO, true).setIntValue(value)); + doc.add(newField("asc", format.format(value), StringField.TYPE_UNSTORED)); + doc.add(new NumericField("trie").setIntValue(value)); } writer.addDocument(doc); } Index: lucene/src/test/org/apache/lucene/search/TestNot.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestNot.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/TestNot.java (working copy) @@ -24,8 +24,8 @@ import org.apache.lucene.queryParser.QueryParser; import org.apache.lucene.store.Directory; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.TextField; /** Similarity unit test. 
* @@ -38,7 +38,7 @@ RandomIndexWriter writer = new RandomIndexWriter(random, store); Document d1 = new Document(); - d1.add(newField("field", "a b", Field.Store.YES, Field.Index.ANALYZED)); + d1.add(newField("field", "a b", TextField.TYPE_STORED)); writer.addDocument(d1); IndexReader reader = writer.getReader(); Index: lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java (working copy) @@ -18,9 +18,9 @@ */ import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.NumericField; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.NumericField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.IndexWriter; @@ -59,13 +59,13 @@ .setMergePolicy(newLogMergePolicy())); NumericField - field8 = new NumericField("field8", 8, Field.Store.YES, true), - field4 = new NumericField("field4", 4, Field.Store.YES, true), - field2 = new NumericField("field2", 2, Field.Store.YES, true), - fieldNoTrie = new NumericField("field"+Integer.MAX_VALUE, Integer.MAX_VALUE, Field.Store.YES, true), - ascfield8 = new NumericField("ascfield8", 8, Field.Store.NO, true), - ascfield4 = new NumericField("ascfield4", 4, Field.Store.NO, true), - ascfield2 = new NumericField("ascfield2", 2, Field.Store.NO, true); + field8 = new NumericField("field8", 8, NumericField.TYPE_STORED), + field4 = new NumericField("field4", 4, NumericField.TYPE_STORED), + field2 = new NumericField("field2", 2, NumericField.TYPE_STORED), + fieldNoTrie = new NumericField("field"+Integer.MAX_VALUE, Integer.MAX_VALUE, 
NumericField.TYPE_STORED), + ascfield8 = new NumericField("ascfield8", 8, NumericField.TYPE_UNSTORED), + ascfield4 = new NumericField("ascfield4", 4, NumericField.TYPE_UNSTORED), + ascfield2 = new NumericField("ascfield2", 2, NumericField.TYPE_UNSTORED); Document doc = new Document(); // add fields, that have a distance to test general functionality @@ -150,7 +150,7 @@ ScoreDoc[] sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count"+type, count, sd.length ); - Document doc=searcher.doc(sd[0].doc); + org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc); assertEquals("First doc"+type, 2*distance+startOffset, Integer.parseInt(doc.get(field)) ); doc=searcher.doc(sd[sd.length-1].doc); assertEquals("Last doc"+type, (1+count)*distance+startOffset, Integer.parseInt(doc.get(field)) ); @@ -211,7 +211,7 @@ ScoreDoc[] sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count", count, sd.length ); - Document doc=searcher.doc(sd[0].doc); + org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc); assertEquals("First doc", startOffset, Integer.parseInt(doc.get(field)) ); doc=searcher.doc(sd[sd.length-1].doc); assertEquals("Last doc", (count-1)*distance+startOffset, Integer.parseInt(doc.get(field)) ); @@ -252,7 +252,7 @@ ScoreDoc[] sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count", noDocs-count, sd.length ); - Document doc=searcher.doc(sd[0].doc); + org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc); assertEquals("First doc", count*distance+startOffset, Integer.parseInt(doc.get(field)) ); doc=searcher.doc(sd[sd.length-1].doc); assertEquals("Last doc", (noDocs-1)*distance+startOffset, Integer.parseInt(doc.get(field)) ); Index: lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java (revision 1143083) +++ 
lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java (working copy) @@ -18,9 +18,9 @@ */ import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.NumericField; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.NumericField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.RandomIndexWriter; @@ -56,15 +56,15 @@ .setMergePolicy(newLogMergePolicy())); NumericField - field8 = new NumericField("field8", 8, Field.Store.YES, true), - field6 = new NumericField("field6", 6, Field.Store.YES, true), - field4 = new NumericField("field4", 4, Field.Store.YES, true), - field2 = new NumericField("field2", 2, Field.Store.YES, true), - fieldNoTrie = new NumericField("field"+Integer.MAX_VALUE, Integer.MAX_VALUE, Field.Store.YES, true), - ascfield8 = new NumericField("ascfield8", 8, Field.Store.NO, true), - ascfield6 = new NumericField("ascfield6", 6, Field.Store.NO, true), - ascfield4 = new NumericField("ascfield4", 4, Field.Store.NO, true), - ascfield2 = new NumericField("ascfield2", 2, Field.Store.NO, true); + field8 = new NumericField("field8", 8, NumericField.TYPE_STORED), + field6 = new NumericField("field6", 6, NumericField.TYPE_STORED), + field4 = new NumericField("field4", 4, NumericField.TYPE_STORED), + field2 = new NumericField("field2", 2, NumericField.TYPE_STORED), + fieldNoTrie = new NumericField("field"+Integer.MAX_VALUE, Integer.MAX_VALUE, NumericField.TYPE_STORED), + ascfield8 = new NumericField("ascfield8", 8, NumericField.TYPE_UNSTORED), + ascfield6 = new NumericField("ascfield6", 6, NumericField.TYPE_UNSTORED), + ascfield4 = new NumericField("ascfield4", 4, NumericField.TYPE_UNSTORED), + ascfield2 = new NumericField("ascfield2", 2, NumericField.TYPE_UNSTORED); Document doc = new Document(); // 
add fields, that have a distance to test general functionality @@ -150,7 +150,7 @@ ScoreDoc[] sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count"+type, count, sd.length ); - Document doc=searcher.doc(sd[0].doc); + org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc); assertEquals("First doc"+type, 2*distance+startOffset, Long.parseLong(doc.get(field)) ); doc=searcher.doc(sd[sd.length-1].doc); assertEquals("Last doc"+type, (1+count)*distance+startOffset, Long.parseLong(doc.get(field)) ); @@ -217,7 +217,7 @@ ScoreDoc[] sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count", count, sd.length ); - Document doc=searcher.doc(sd[0].doc); + org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc); assertEquals("First doc", startOffset, Long.parseLong(doc.get(field)) ); doc=searcher.doc(sd[sd.length-1].doc); assertEquals("Last doc", (count-1)*distance+startOffset, Long.parseLong(doc.get(field)) ); @@ -263,7 +263,7 @@ ScoreDoc[] sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count", noDocs-count, sd.length ); - Document doc=searcher.doc(sd[0].doc); + org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc); assertEquals("First doc", count*distance+startOffset, Long.parseLong(doc.get(field)) ); doc=searcher.doc(sd[sd.length-1].doc); assertEquals("Last doc", (noDocs-1)*distance+startOffset, Long.parseLong(doc.get(field)) ); Index: lucene/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java (working copy) @@ -18,8 +18,8 @@ */ import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.TextField; import 
org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.index.IndexReader; @@ -47,16 +47,11 @@ Document doc3 = new Document(); Document doc4 = new Document(); Document doc5 = new Document(); - doc1.add(newField("body", "blueberry pie", Field.Store.YES, - Field.Index.ANALYZED)); - doc2.add(newField("body", "blueberry strudel", Field.Store.YES, - Field.Index.ANALYZED)); - doc3.add(newField("body", "blueberry pizza", Field.Store.YES, - Field.Index.ANALYZED)); - doc4.add(newField("body", "blueberry chewing gum", Field.Store.YES, - Field.Index.ANALYZED)); - doc5.add(newField("body", "piccadilly circus", Field.Store.YES, - Field.Index.ANALYZED)); + doc1.add(newField("body", "blueberry pie", TextField.TYPE_STORED)); + doc2.add(newField("body", "blueberry strudel", TextField.TYPE_STORED)); + doc3.add(newField("body", "blueberry pizza", TextField.TYPE_STORED)); + doc4.add(newField("body", "blueberry chewing gum", TextField.TYPE_STORED)); + doc5.add(newField("body", "piccadilly circus", TextField.TYPE_STORED)); writer.addDocument(doc1); writer.addDocument(doc2); writer.addDocument(doc3); Index: lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java (working copy) @@ -20,7 +20,7 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.analysis.*; import org.apache.lucene.analysis.tokenattributes.*; -import org.apache.lucene.document.*; +import org.apache.lucene.document2.*; import org.apache.lucene.index.*; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.queryParser.QueryParser; @@ -69,19 +69,19 @@ RandomIndexWriter writer = new RandomIndexWriter(random, directory, analyzer); Document doc = new Document(); - doc.add(newField("field", "one two three 
four five", Field.Store.YES, Field.Index.ANALYZED)); - doc.add(newField("repeated", "this is a repeated field - first part", Field.Store.YES, Field.Index.ANALYZED)); - Fieldable repeatedField = newField("repeated", "second part of a repeated field", Field.Store.YES, Field.Index.ANALYZED); + doc.add(newField("field", "one two three four five", TextField.TYPE_STORED)); + doc.add(newField("repeated", "this is a repeated field - first part", TextField.TYPE_STORED)); + IndexableField repeatedField = newField("repeated", "second part of a repeated field", TextField.TYPE_STORED); doc.add(repeatedField); - doc.add(newField("palindrome", "one two three two one", Field.Store.YES, Field.Index.ANALYZED)); + doc.add(newField("palindrome", "one two three two one", TextField.TYPE_STORED)); writer.addDocument(doc); doc = new Document(); - doc.add(newField("nonexist", "phrase exist notexist exist found", Field.Store.YES, Field.Index.ANALYZED)); + doc.add(newField("nonexist", "phrase exist notexist exist found", TextField.TYPE_STORED)); writer.addDocument(doc); doc = new Document(); - doc.add(newField("nonexist", "phrase exist notexist exist found", Field.Store.YES, Field.Index.ANALYZED)); + doc.add(newField("nonexist", "phrase exist notexist exist found", TextField.TYPE_STORED)); writer.addDocument(doc); reader = writer.getReader(); @@ -224,7 +224,7 @@ RandomIndexWriter writer = new RandomIndexWriter(random, directory, newIndexWriterConfig( Version.LUCENE_40, stopAnalyzer)); Document doc = new Document(); - doc.add(newField("field", "the stop words are here", Field.Store.YES, Field.Index.ANALYZED)); + doc.add(newField("field", "the stop words are here", TextField.TYPE_STORED)); writer.addDocument(doc); IndexReader reader = writer.getReader(); writer.close(); @@ -259,12 +259,12 @@ RandomIndexWriter writer = new RandomIndexWriter(random, directory); Document doc = new Document(); - doc.add(newField("source", "marketing info", Field.Store.YES, Field.Index.ANALYZED)); + 
doc.add(newField("source", "marketing info", TextField.TYPE_STORED)); writer.addDocument(doc); doc = new Document(); - doc.add(newField("contents", "foobar", Field.Store.YES, Field.Index.ANALYZED)); - doc.add(newField("source", "marketing info", Field.Store.YES, Field.Index.ANALYZED)); + doc.add(newField("contents", "foobar", TextField.TYPE_STORED)); + doc.add(newField("source", "marketing info", TextField.TYPE_STORED)); writer.addDocument(doc); IndexReader reader = writer.getReader(); @@ -295,15 +295,15 @@ writer = new RandomIndexWriter(random, directory, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE)); doc = new Document(); - doc.add(newField("contents", "map entry woo", Field.Store.YES, Field.Index.ANALYZED)); + doc.add(newField("contents", "map entry woo", TextField.TYPE_STORED)); writer.addDocument(doc); doc = new Document(); - doc.add(newField("contents", "woo map entry", Field.Store.YES, Field.Index.ANALYZED)); + doc.add(newField("contents", "woo map entry", TextField.TYPE_STORED)); writer.addDocument(doc); doc = new Document(); - doc.add(newField("contents", "map foobarword entry woo", Field.Store.YES, Field.Index.ANALYZED)); + doc.add(newField("contents", "map foobarword entry woo", TextField.TYPE_STORED)); writer.addDocument(doc); reader = writer.getReader(); @@ -346,15 +346,15 @@ RandomIndexWriter writer = new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); Document doc = new Document(); - doc.add(newField("field", "foo firstname lastname foo", Field.Store.YES, Field.Index.ANALYZED)); + doc.add(newField("field", "foo firstname lastname foo", TextField.TYPE_STORED)); writer.addDocument(doc); Document doc2 = new Document(); - doc2.add(newField("field", "foo firstname zzz lastname foo", Field.Store.YES, Field.Index.ANALYZED)); + doc2.add(newField("field", "foo firstname zzz lastname foo", 
TextField.TYPE_STORED)); writer.addDocument(doc2); Document doc3 = new Document(); - doc3.add(newField("field", "foo firstname zzz yyy lastname foo", Field.Store.YES, Field.Index.ANALYZED)); + doc3.add(newField("field", "foo firstname zzz yyy lastname foo", TextField.TYPE_STORED)); writer.addDocument(doc3); IndexReader reader = writer.getReader(); @@ -609,7 +609,7 @@ RandomIndexWriter w = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer).setMergePolicy(newLogMergePolicy())); List> docs = new ArrayList>(); Document d = new Document(); - Field f = newField("f", "", Field.Store.NO, Field.Index.ANALYZED); + Field f = newField("f", "", TextField.TYPE_UNSTORED); d.add(f); Random r = random; Index: lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java (working copy) @@ -29,8 +29,8 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.MultiFields; import org.apache.lucene.index.DocsAndPositionsEnum; import org.apache.lucene.index.IndexReader; @@ -89,7 +89,7 @@ Directory store = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random, store, analyzer); Document d = new Document(); - d.add(newField("field", "bogus", Field.Store.YES, Field.Index.ANALYZED)); + d.add(newField("field", "bogus", TextField.TYPE_STORED)); writer.addDocument(d); IndexReader reader = writer.getReader(); writer.close(); @@ -239,7 +239,7 @@ 
Directory dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random, dir, new MockPayloadAnalyzer()); Document doc = new Document(); - doc.add(new Field("content", new StringReader( + doc.add(new TextField("content", new StringReader( "a a b c d e a f g h i j a b k k"))); writer.addDocument(doc); Index: lucene/src/test/org/apache/lucene/search/TestPrefixFilter.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestPrefixFilter.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/TestPrefixFilter.java (working copy) @@ -22,8 +22,8 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.StringField; /** * Tests {@link PrefixFilter} class. @@ -40,7 +40,7 @@ RandomIndexWriter writer = new RandomIndexWriter(random, directory); for (int i = 0; i < categories.length; i++) { Document doc = new Document(); - doc.add(newField("category", categories[i], Field.Store.YES, Field.Index.NOT_ANALYZED)); + doc.add(newField("category", categories[i], StringField.TYPE_STORED)); writer.addDocument(doc); } IndexReader reader = writer.getReader(); Index: lucene/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java (working copy) @@ -18,8 +18,9 @@ */ import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import 
org.apache.lucene.document2.StringField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; @@ -50,8 +51,7 @@ RandomIndexWriter writer = new RandomIndexWriter(random, directory); Document doc = new Document(); - Field field = newField(FIELD, "meaninglessnames", Field.Store.NO, - Field.Index.NOT_ANALYZED_NO_NORMS); + Field field = newField(FIELD, "meaninglessnames", StringField.TYPE_UNSTORED); doc.add(field); for (int i = 0; i < 5137; ++i) { Index: lucene/src/test/org/apache/lucene/search/TestPrefixQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestPrefixQuery.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/TestPrefixQuery.java (working copy) @@ -24,8 +24,8 @@ import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.StringField; /** * Tests {@link PrefixQuery} class. 
@@ -41,7 +41,7 @@ RandomIndexWriter writer = new RandomIndexWriter(random, directory); for (int i = 0; i < categories.length; i++) { Document doc = new Document(); - doc.add(newField("category", categories[i], Field.Store.YES, Field.Index.NOT_ANALYZED)); + doc.add(newField("category", categories[i], StringField.TYPE_STORED)); writer.addDocument(doc); } IndexReader reader = writer.getReader(); Index: lucene/src/test/org/apache/lucene/search/TestPrefixRandom.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestPrefixRandom.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/TestPrefixRandom.java (working copy) @@ -21,8 +21,9 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.StringField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; @@ -53,7 +54,7 @@ .setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000))); Document doc = new Document(); - Field field = newField("field", "", Field.Store.NO, Field.Index.NOT_ANALYZED); + Field field = newField("field", "", StringField.TYPE_UNSTORED); doc.add(field); // we generate aweful prefixes: good for testing. Index: lucene/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java (working copy) @@ -17,9 +17,8 @@ * limitations under the License. 
*/ -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field.Index; -import org.apache.lucene.document.Field.Store; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; @@ -33,7 +32,7 @@ Directory dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random, dir); Document doc = new Document(); - doc.add(newField("field", "value", Store.NO, Index.ANALYZED)); + doc.add(newField("field", "value", TextField.TYPE_UNSTORED)); writer.addDocument(doc); IndexReader reader = writer.getReader(); writer.close(); Index: lucene/src/test/org/apache/lucene/search/TestRegexpQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestRegexpQuery.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/TestRegexpQuery.java (working copy) @@ -20,8 +20,8 @@ import java.io.IOException; import java.util.Arrays; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; @@ -50,7 +50,7 @@ Document doc = new Document(); doc.add(newField(FN, "the quick brown fox jumps over the lazy ??? 
dog 493432 49344", - Field.Store.NO, Field.Index.ANALYZED)); + TextField.TYPE_UNSTORED)); writer.addDocument(doc); reader = writer.getReader(); writer.close(); Index: lucene/src/test/org/apache/lucene/search/TestRegexpRandom.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestRegexpRandom.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/TestRegexpRandom.java (working copy) @@ -23,8 +23,10 @@ import java.util.Locale; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; @@ -51,7 +53,9 @@ .setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000))); Document doc = new Document(); - Field field = newField("field", "", Field.Store.NO, Field.Index.ANALYZED_NO_NORMS); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setOmitNorms(true); + Field field = newField("field", "", customType); doc.add(field); NumberFormat df = new DecimalFormat("000", new DecimalFormatSymbols(Locale.ENGLISH)); Index: lucene/src/test/org/apache/lucene/search/TestRegexpRandom2.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestRegexpRandom2.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/TestRegexpRandom2.java (working copy) @@ -24,8 +24,9 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import 
org.apache.lucene.document2.Field; +import org.apache.lucene.document2.StringField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.MultiFields; import org.apache.lucene.index.Term; @@ -63,7 +64,7 @@ newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.KEYWORD, false)) .setMaxBufferedDocs(_TestUtil.nextInt(random, 50, 1000))); Document doc = new Document(); - Field field = newField("field", "", Field.Store.NO, Field.Index.NOT_ANALYZED); + Field field = newField("field", "", StringField.TYPE_UNSTORED); doc.add(field); List terms = new ArrayList(); int num = atLeast(200); Index: lucene/src/test/org/apache/lucene/search/TestScorerPerf.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestScorerPerf.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/TestScorerPerf.java (working copy) @@ -12,8 +12,8 @@ import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.Directory; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.StringField; /** * Licensed to the Apache Software Foundation (ASF) under one or more @@ -64,7 +64,7 @@ Document d = new Document(); for (int j=0; j 1) { tester.values[i] = 10 + random.nextInt( 20 ); // get some field overlap - doc.add(newField(tester.field, String.valueOf(tester.values[i]), - Field.Store.NO, Field.Index.NOT_ANALYZED )); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setTokenized(false); + doc.add(newField(tester.field, String.valueOf(tester.values[i]), customType)); } } writer.addDocument(doc); Index: lucene/src/test/org/apache/lucene/search/function/FunctionTestSetup.java =================================================================== --- 
lucene/src/test/org/apache/lucene/search/function/FunctionTestSetup.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/function/FunctionTestSetup.java (working copy) @@ -19,9 +19,10 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.Fieldable; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.store.Directory; @@ -116,23 +117,29 @@ private static void addDoc(RandomIndexWriter iw, int i) throws Exception { Document d = new Document(); - Fieldable f; + Field f; int scoreAndID = i + 1; - f = newField(ID_FIELD, id2String(scoreAndID), Field.Store.YES, Field.Index.NOT_ANALYZED); // for debug purposes - f.setOmitNorms(true); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + customType.setTokenized(false); + customType.setOmitNorms(true); + + f = newField(ID_FIELD, id2String(scoreAndID), customType); // for debug purposes d.add(f); - f = newField(TEXT_FIELD, "text of doc" + scoreAndID + textLine(i), Field.Store.NO, Field.Index.ANALYZED); // for regular search - f.setOmitNorms(true); + FieldType customType2 = new FieldType(TextField.TYPE_UNSTORED); + customType2.setOmitNorms(true); + f = newField(TEXT_FIELD, "text of doc" + scoreAndID + textLine(i), customType2); // for regular search d.add(f); - f = newField(INT_FIELD, "" + scoreAndID, Field.Store.NO, Field.Index.NOT_ANALYZED); // for function scoring - f.setOmitNorms(true); + FieldType customType3 = new FieldType(TextField.TYPE_UNSTORED); + customType3.setTokenized(false); + customType3.setOmitNorms(true); + f = newField(INT_FIELD, "" + scoreAndID, customType3); // for function scoring d.add(f); - f = newField(FLOAT_FIELD, scoreAndID + ".000", Field.Store.NO, 
Field.Index.NOT_ANALYZED); // for function scoring - f.setOmitNorms(true); + f = newField(FLOAT_FIELD, scoreAndID + ".000", customType3); // for function scoring d.add(f); iw.addDocument(d); Index: lucene/src/test/org/apache/lucene/search/function/TestValueSource.java =================================================================== --- lucene/src/test/org/apache/lucene/search/function/TestValueSource.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/function/TestValueSource.java (working copy) @@ -23,7 +23,7 @@ import org.apache.lucene.analysis.*; import org.apache.lucene.index.*; import org.apache.lucene.index.IndexReader.AtomicReaderContext; -import org.apache.lucene.document.*; +import org.apache.lucene.document2.*; public class TestValueSource extends LuceneTestCase { @@ -32,7 +32,9 @@ IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); ((LogMergePolicy) w.getConfig().getMergePolicy()).setMergeFactor(10); Document doc = new Document(); - Field f = newField("field", "", Field.Store.NO, Field.Index.NOT_ANALYZED); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setTokenized(false); + Field f = newField("field", "", customType); doc.add(f); for(int i=0;i<17;i++) { Index: lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java (working copy) @@ -24,8 +24,9 @@ import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.PayloadAttribute; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import 
org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.FieldInvertState; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Payload; @@ -112,9 +113,11 @@ //writer.infoStream = System.out; for (int i = 0; i < 1000; i++) { Document doc = new Document(); - doc.add(newField("field", English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + doc.add(newField("field", English.intToEnglish(i), customType)); String txt = English.intToEnglish(i) +' '+English.intToEnglish(i+1); - doc.add(newField("field2", txt, Field.Store.YES, Field.Index.ANALYZED)); + doc.add(newField("field2", txt, customType)); writer.addDocument(doc); } reader = writer.getReader(); Index: lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java (working copy) @@ -43,8 +43,10 @@ import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.store.Directory; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; +import org.apache.lucene.document2.Field; import java.io.Reader; import java.io.IOException; @@ -115,13 +117,15 @@ newIndexWriterConfig(TEST_VERSION_CURRENT, new PayloadAnalyzer()) .setSimilarityProvider(similarityProvider).setMergePolicy(newLogMergePolicy())); //writer.infoStream = System.out; + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); for (int i = 0; i < 1000; i++) { Document doc 
= new Document(); - Field noPayloadField = newField(PayloadHelper.NO_PAYLOAD_FIELD, English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED); + Field noPayloadField = newField(PayloadHelper.NO_PAYLOAD_FIELD, English.intToEnglish(i), customType); //noPayloadField.setBoost(0); doc.add(noPayloadField); - doc.add(newField("field", English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED)); - doc.add(newField("multiField", English.intToEnglish(i) + " " + English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED)); + doc.add(newField("field", English.intToEnglish(i), customType)); + doc.add(newField("multiField", English.intToEnglish(i) + " " + English.intToEnglish(i), customType)); writer.addDocument(doc); } reader = writer.getReader(); Index: lucene/src/test/org/apache/lucene/search/spans/TestBasics.java =================================================================== --- lucene/src/test/org/apache/lucene/search/spans/TestBasics.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/spans/TestBasics.java (working copy) @@ -31,8 +31,9 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.PayloadAttribute; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Payload; import org.apache.lucene.index.RandomIndexWriter; @@ -120,7 +121,9 @@ //writer.infoStream = System.out; for (int i = 0; i < 2000; i++) { Document doc = new Document(); - doc.add(newField("field", English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + doc.add(newField("field", English.intToEnglish(i), 
customType)); writer.addDocument(doc); } reader = writer.getReader(); Index: lucene/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java (working copy) @@ -21,8 +21,9 @@ import java.util.Set; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; @@ -46,7 +47,7 @@ } protected static Field field(String name, String value) { - return newField(name, value, Field.Store.NO, Field.Index.ANALYZED); + return newField(name, value, TextField.TYPE_UNSTORED); } protected static IndexSearcher searcher; Index: lucene/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java =================================================================== --- lucene/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java (working copy) @@ -18,8 +18,8 @@ */ import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.IndexReader.ReaderContext; @@ -60,7 +60,7 @@ RandomIndexWriter writer= new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new 
MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); for (int i = 0; i < docFields.length; i++) { Document doc = new Document(); - doc.add(newField(FIELD, docFields[i], Field.Store.NO, Field.Index.ANALYZED)); + doc.add(newField(FIELD, docFields[i], TextField.TYPE_UNSTORED)); writer.addDocument(doc); } reader = writer.getReader(); Index: lucene/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java =================================================================== --- lucene/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java (working copy) @@ -30,8 +30,9 @@ import org.apache.lucene.analysis.tokenattributes.PayloadAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.IndexReader; @@ -113,8 +114,9 @@ newIndexWriterConfig(TEST_VERSION_CURRENT, new PayloadAnalyzer()).setSimilarityProvider(similarity)); Document doc = new Document(); - doc.add(newField(PayloadHelper.FIELD, "one two three one four three", - Field.Store.YES, Field.Index.ANALYZED)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + doc.add(newField(PayloadHelper.FIELD, "one two three one four three", customType)); writer.addDocument(doc); IndexReader reader = writer.getReader(); writer.close(); @@ -261,7 +263,7 @@ newIndexWriterConfig(TEST_VERSION_CURRENT, new TestPayloadAnalyzer())); Document doc = new Document(); - doc.add(new Field("content", new StringReader("a b c d e f g h i j a 
k"))); + doc.add(new TextField("content", new StringReader("a b c d e f g h i j a k"))); writer.addDocument(doc); IndexReader reader = writer.getReader(); @@ -300,7 +302,7 @@ newIndexWriterConfig(TEST_VERSION_CURRENT, new TestPayloadAnalyzer())); Document doc = new Document(); - doc.add(new Field("content", new StringReader("a b a d k f a h i k a k"))); + doc.add(new TextField("content", new StringReader("a b a d k f a h i k a k"))); writer.addDocument(doc); IndexReader reader = writer.getReader(); IndexSearcher is = newSearcher(reader); @@ -337,7 +339,7 @@ newIndexWriterConfig(TEST_VERSION_CURRENT, new TestPayloadAnalyzer())); Document doc = new Document(); - doc.add(new Field("content", new StringReader("j k a l f k k p a t a k l k t a"))); + doc.add(new TextField("content", new StringReader("j k a l f k k p a t a k l k t a"))); writer.addDocument(doc); IndexReader reader = writer.getReader(); IndexSearcher is = newSearcher(reader); @@ -379,7 +381,9 @@ newIndexWriterConfig(TEST_VERSION_CURRENT, new PayloadAnalyzer()).setSimilarityProvider(similarity)); Document doc = new Document(); - doc.add(newField(PayloadHelper.FIELD,"xx rr yy mm pp", Field.Store.YES, Field.Index.ANALYZED)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + doc.add(newField(PayloadHelper.FIELD,"xx rr yy mm pp", customType)); writer.addDocument(doc); IndexReader reader = writer.getReader(); @@ -440,10 +444,12 @@ newIndexWriterConfig(TEST_VERSION_CURRENT, new PayloadAnalyzer()).setSimilarityProvider(similarity)); Document doc = null; + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); for(int i = 0; i < docs.length; i++) { doc = new Document(); String docText = docs[i]; - doc.add(newField(PayloadHelper.FIELD,docText, Field.Store.YES, Field.Index.ANALYZED)); + doc.add(newField(PayloadHelper.FIELD,docText, customType)); writer.addDocument(doc); } Index: 
lucene/src/test/org/apache/lucene/search/spans/TestSpanFirstQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/spans/TestSpanFirstQuery.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/spans/TestSpanFirstQuery.java (working copy) @@ -20,8 +20,8 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; @@ -41,10 +41,10 @@ RandomIndexWriter writer = new RandomIndexWriter(random, dir, analyzer); Document doc = new Document(); - doc.add(newField("field", "the quick brown fox", Field.Index.ANALYZED)); + doc.add(newField("field", "the quick brown fox", TextField.TYPE_UNSTORED)); writer.addDocument(doc); Document doc2 = new Document(); - doc2.add(newField("field", "quick brown fox", Field.Index.ANALYZED)); + doc2.add(newField("field", "quick brown fox", TextField.TYPE_UNSTORED)); writer.addDocument(doc2); IndexReader reader = writer.getReader(); Index: lucene/src/test/org/apache/lucene/search/spans/TestSpanMultiTermQueryWrapper.java =================================================================== --- lucene/src/test/org/apache/lucene/search/spans/TestSpanMultiTermQueryWrapper.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/spans/TestSpanMultiTermQueryWrapper.java (working copy) @@ -17,8 +17,9 @@ * limitations under the License. 
*/ -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.TextField; +import org.apache.lucene.document2.Field; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; @@ -42,7 +43,7 @@ directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random, directory); Document doc = new Document(); - Field field = newField("field", "", Field.Store.NO, Field.Index.ANALYZED); + Field field = newField("field", "", TextField.TYPE_UNSTORED); doc.add(field); field.setValue("quick brown fox"); Index: lucene/src/test/org/apache/lucene/search/spans/TestSpans.java =================================================================== --- lucene/src/test/org/apache/lucene/search/spans/TestSpans.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/spans/TestSpans.java (working copy) @@ -37,8 +37,10 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.StringField; +import org.apache.lucene.document2.TextField; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.ReaderUtil; @@ -56,9 +58,11 @@ super.setUp(); directory = newDirectory(); RandomIndexWriter writer= new RandomIndexWriter(random, directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); for (int i = 0; i < docFields.length; i++) { Document doc = new Document(); - doc.add(newField(field, docFields[i], Field.Store.YES, 
Field.Index.ANALYZED)); + doc.add(newField(field, docFields[i], customType)); writer.addDocument(doc); } reader = writer.getReader(); @@ -452,8 +456,12 @@ // LUCENE-1404 private void addDoc(IndexWriter writer, String id, String text) throws IOException { final Document doc = new Document(); - doc.add( newField("id", id, Field.Store.YES, Field.Index.NOT_ANALYZED) ); - doc.add( newField("text", text, Field.Store.YES, Field.Index.ANALYZED) ); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + customType.setStored(true); + FieldType customType2 = new FieldType(StringField.TYPE_UNSTORED); + customType2.setStored(true); + doc.add( newField("id", id, customType2) ); + doc.add( newField("text", text, customType) ); writer.addDocument(doc); } Index: lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java =================================================================== --- lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java (working copy) @@ -24,8 +24,10 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenFilter; import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.StringField; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; @@ -90,10 +92,12 @@ final String text) throws IOException { final Document document = new Document(); - document.add(newField(FIELD_ID, id, Field.Store.YES, - Field.Index.NOT_ANALYZED)); - document.add(newField(FIELD_TEXT, text, Field.Store.YES, - Field.Index.ANALYZED)); + FieldType customType = new FieldType(TextField.TYPE_UNSTORED); + 
customType.setStored(true); + FieldType customType2 = new FieldType(StringField.TYPE_UNSTORED); + customType2.setStored(true); + document.add(newField(FIELD_ID, id, customType2)); + document.add(newField(FIELD_TEXT, text, customType)); writer.addDocument(document); } @@ -161,7 +165,7 @@ int id = topdocs.scoreDocs[i].doc; float score = topdocs.scoreDocs[i].score; - Document doc = s.doc(id); + org.apache.lucene.document.Document doc = s.doc(id); assertEquals(expectedIds[i], doc.get(FIELD_ID)); boolean scoreEq = Math.abs(expectedScores[i] - score) < tolerance; if (!scoreEq) { Index: lucene/src/test/org/apache/lucene/store/TestBufferedIndexInput.java =================================================================== --- lucene/src/test/org/apache/lucene/store/TestBufferedIndexInput.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/store/TestBufferedIndexInput.java (working copy) @@ -27,8 +27,8 @@ import java.util.Random; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; @@ -251,8 +251,8 @@ ); for(int i=0;i<37;i++) { Document doc = new Document(); - doc.add(newField("content", "aaa bbb ccc ddd" + i, Field.Store.YES, Field.Index.ANALYZED)); - doc.add(newField("id", "" + i, Field.Store.YES, Field.Index.ANALYZED)); + doc.add(newField("content", "aaa bbb ccc ddd" + i, TextField.TYPE_STORED)); + doc.add(newField("id", "" + i, TextField.TYPE_STORED)); writer.addDocument(doc); } writer.close(); Index: lucene/src/test/org/apache/lucene/store/TestLockFactory.java =================================================================== --- lucene/src/test/org/apache/lucene/store/TestLockFactory.java (revision 1143083) +++ 
lucene/src/test/org/apache/lucene/store/TestLockFactory.java (working copy) @@ -24,8 +24,8 @@ import java.util.Map; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; @@ -415,7 +415,7 @@ private void addDoc(IndexWriter writer) throws IOException { Document doc = new Document(); - doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED)); + doc.add(newField("content", "aaa", TextField.TYPE_UNSTORED)); writer.addDocument(doc); } } Index: lucene/src/test/org/apache/lucene/store/TestMultiMMap.java =================================================================== --- lucene/src/test/org/apache/lucene/store/TestMultiMMap.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/store/TestMultiMMap.java (working copy) @@ -21,8 +21,9 @@ import java.util.Random; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.StringField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.util.LuceneTestCase; @@ -62,8 +63,8 @@ dir.setUseUnmap(true); RandomIndexWriter writer = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy())); Document doc = new Document(); - Field docid = newField("docid", "0", Field.Store.YES, Field.Index.NOT_ANALYZED); - Field junk = newField("junk", "", Field.Store.YES, Field.Index.NOT_ANALYZED); + Field docid = newField("docid", "0", StringField.TYPE_STORED); + Field junk = 
newField("junk", "", StringField.TYPE_STORED); doc.add(docid); doc.add(junk); Index: lucene/src/test/org/apache/lucene/store/TestRAMDirectory.java =================================================================== --- lucene/src/test/org/apache/lucene/store/TestRAMDirectory.java (revision 1143083) +++ lucene/src/test/org/apache/lucene/store/TestRAMDirectory.java (working copy) @@ -26,8 +26,9 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util._TestUtil; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.StringField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; @@ -59,7 +60,7 @@ Document doc = null; for (int i = 0; i < docsToAdd; i++) { doc = new Document(); - doc.add(newField("content", English.intToEnglish(i).trim(), Field.Store.YES, Field.Index.NOT_ANALYZED)); + doc.add(newField("content", English.intToEnglish(i).trim(), StringField.TYPE_STORED)); writer.addDocument(doc); } assertEquals(docsToAdd, writer.maxDoc()); @@ -87,7 +88,7 @@ // search for all documents for (int i = 0; i < docsToAdd; i++) { - Document doc = searcher.doc(i); + org.apache.lucene.document.Document doc = searcher.doc(i); assertTrue(doc.getField("content") != null); } @@ -119,7 +120,7 @@ public void run() { for (int j=1; j