Index: lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java =================================================================== --- lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (revision 1150855) +++ lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (working copy) @@ -185,7 +185,7 @@ Highlighter highlighter = new Highlighter(scorer); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); highlighter.setTextFragmenter(new SimpleFragmenter(40)); @@ -234,7 +234,7 @@ Highlighter highlighter = new Highlighter(this, scorer); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); highlighter.setTextFragmenter(new SimpleFragmenter(40)); @@ -256,7 +256,7 @@ highlighter = new Highlighter(this, scorer); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); highlighter.setTextFragmenter(new SimpleFragmenter(40)); @@ -278,7 +278,7 @@ highlighter = new Highlighter(this, scorer); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); highlighter.setTextFragmenter(new SimpleFragmenter(40)); @@ -303,7 +303,7 @@ Highlighter 
highlighter = new Highlighter(this, scorer); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); highlighter.setTextFragmenter(new SimpleFragmenter(40)); @@ -327,7 +327,7 @@ Highlighter highlighter = new Highlighter(this, scorer); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); highlighter.setTextFragmenter(new SimpleFragmenter(40)); @@ -352,7 +352,7 @@ Highlighter highlighter = new Highlighter(this, scorer); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(NUMERIC_FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(NUMERIC_FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); highlighter.setTextFragmenter(new SimpleFragmenter(40)); @@ -375,7 +375,7 @@ highlighter.setTextFragmenter(new SimpleFragmenter(40)); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); String result = highlighter.getBestFragments(tokenStream, text, maxNumFragmentsRequired, @@ -393,7 +393,7 @@ int maxNumFragmentsRequired = 2; for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); QueryScorer scorer = new QueryScorer(query, FIELD_NAME); 
Highlighter highlighter = new Highlighter(this, scorer); @@ -418,7 +418,7 @@ Highlighter highlighter = new Highlighter(this, scorer); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer, 5)); @@ -437,7 +437,7 @@ highlighter = new Highlighter(this, scorer); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer, 20)); @@ -459,7 +459,7 @@ Highlighter highlighter = new Highlighter(this,scorer); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME,new StringReader(text)); highlighter.setTextFragmenter(new SimpleFragmenter(40)); @@ -530,7 +530,7 @@ highlighter.setTextFragmenter(new SimpleFragmenter(40)); int maxNumFragmentsRequired = 2; for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); String result = highlighter.getBestFragments(tokenStream, text, maxNumFragmentsRequired, @@ -688,7 +688,7 @@ hits = searcher.search(query, null, 1000); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME); + String text = 
searcher.doc2(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME); int maxNumFragmentsRequired = 2; String fragmentSeparator = "..."; QueryScorer scorer; @@ -716,7 +716,7 @@ numHighlights = 0; for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME); int maxNumFragmentsRequired = 2; String fragmentSeparator = "..."; QueryScorer scorer; @@ -744,7 +744,7 @@ numHighlights = 0; for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME); int maxNumFragmentsRequired = 2; String fragmentSeparator = "..."; QueryScorer scorer; @@ -908,7 +908,7 @@ doSearching("Kennedy"); numHighlights = 0; for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream, @@ -922,7 +922,7 @@ numHighlights = 0; for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream, HighlighterTest.this); @@ -933,7 +933,7 @@ numHighlights = 0; for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); Highlighter highlighter = getHighlighter(query, 
FIELD_NAME, tokenStream, @@ -1041,7 +1041,7 @@ // new Highlighter(HighlighterTest.this, new QueryTermScorer(query)); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream, HighlighterTest.this); @@ -1064,7 +1064,7 @@ doSearching("Kennedy"); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream, @@ -1216,7 +1216,7 @@ int maxNumFragmentsRequired = 3; for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream, HighlighterTest.this, false); @@ -1596,7 +1596,7 @@ * QueryFragmentScorer(query)); * * for (int i = 0; i < hits.totalHits; i++) { String text = - * searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream + * searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream * tokenStream=bigramAnalyzer.tokenStream(FIELD_NAME,new StringReader(text)); * String highlightedText = highlighter.getBestFragment(tokenStream,text); * System.out.println(highlightedText); } } @@ -1631,7 +1631,7 @@ public void assertExpectedHighlightCount(final int maxNumFragmentsRequired, final int expectedHighlights) throws Exception { for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + 
String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); QueryScorer scorer = new QueryScorer(query, FIELD_NAME); Highlighter highlighter = new Highlighter(this, scorer); @@ -1864,7 +1864,7 @@ throws Exception { for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME); int maxNumFragmentsRequired = 2; String fragmentSeparator = "..."; Scorer scorer = null; Index: lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java =================================================================== --- lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java (revision 1150855) +++ lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java (working copy) @@ -189,7 +189,7 @@ } else { InstantiatedDocument document = new InstantiatedDocument(); // copy stored fields from source reader - org.apache.lucene.document.Document sourceDocument = sourceIndexReader.document(i); + Document sourceDocument = sourceIndexReader.document2(i); for (IndexableField field : sourceDocument) { if (fields == null || fields.contains(field.name())) { document.getDocument2().add(field); Index: lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java =================================================================== --- lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java (revision 1150855) +++ lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java (working copy) @@ -29,7 +29,7 @@ import java.util.Set; import java.util.Comparator; -import org.apache.lucene.document.Document; +import 
org.apache.lucene.document2.Document; import org.apache.lucene.index.*; import org.apache.lucene.index.IndexReader.ReaderContext; import org.apache.lucene.store.Directory; @@ -272,12 +272,7 @@ */ @Override - public Document document(int n) throws IOException { - return getIndex().getDocumentsByNumber()[n].getDocument(); - } - - @Override - public org.apache.lucene.document2.Document document2(int n) throws IOException { + public Document document2(int n) throws IOException { return getIndex().getDocumentsByNumber()[n].getDocument2(); } Index: lucene/contrib/misc/src/test/org/apache/lucene/index/TestContribFieldsReader.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/index/TestContribFieldsReader.java (revision 1150855) +++ lucene/contrib/misc/src/test/org/apache/lucene/index/TestContribFieldsReader.java (working copy) @@ -22,14 +22,13 @@ import java.util.*; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.FieldSelector; -import org.apache.lucene.document.FieldSelectorResult; -import org.apache.lucene.document.FieldSelectorVisitor; -import org.apache.lucene.document.Fieldable; -import org.apache.lucene.document.LoadFirstFieldSelector; -import org.apache.lucene.document.SetBasedFieldSelector; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldSelector; +import org.apache.lucene.document2.FieldSelectorResult; +import org.apache.lucene.document2.FieldSelectorVisitor; +import org.apache.lucene.document2.LoadFirstFieldSelector; +import org.apache.lucene.document2.SetBasedFieldSelector; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; @@ -87,28 +86,28 @@ SetBasedFieldSelector fieldSelector = new 
SetBasedFieldSelector(loadFieldNames, lazyFieldNames); Document doc = getDocument(reader, 0, fieldSelector); assertTrue("doc is null and it shouldn't be", doc != null); - Fieldable field = doc.getFieldable(DocHelper.LAZY_FIELD_KEY); + IndexableField field = doc.getField(DocHelper.LAZY_FIELD_KEY); assertTrue("field is null and it shouldn't be", field != null); - assertTrue("field is not lazy and it should be", field.isLazy()); + assertTrue("field is not lazy and it should be", ((Field) field).lazy()); String value = field.stringValue(); assertTrue("value is null and it shouldn't be", value != null); assertTrue(value + " is not equal to " + DocHelper.LAZY_FIELD_TEXT, value.equals(DocHelper.LAZY_FIELD_TEXT) == true); assertTrue("calling stringValue() twice should give same reference", field.stringValue() == field.stringValue()); - field = doc.getFieldable(DocHelper.TEXT_FIELD_1_KEY); + field = doc.getField(DocHelper.TEXT_FIELD_1_KEY); assertTrue("field is null and it shouldn't be", field != null); - assertTrue("Field is lazy and it should not be", field.isLazy() == false); - field = doc.getFieldable(DocHelper.TEXT_FIELD_UTF1_KEY); + assertTrue("Field is lazy and it should not be", ((Field) field).lazy() == false); + field = doc.getField(DocHelper.TEXT_FIELD_UTF1_KEY); assertTrue("field is null and it shouldn't be", field != null); - assertTrue("Field is lazy and it should not be", field.isLazy() == false); + assertTrue("Field is lazy and it should not be", ((Field) field).lazy() == false); assertTrue(field.stringValue() + " is not equal to " + DocHelper.FIELD_UTF1_TEXT, field.stringValue().equals(DocHelper.FIELD_UTF1_TEXT) == true); - field = doc.getFieldable(DocHelper.TEXT_FIELD_UTF2_KEY); + field = doc.getField(DocHelper.TEXT_FIELD_UTF2_KEY); assertTrue("field is null and it shouldn't be", field != null); - assertTrue("Field is lazy and it should not be", field.isLazy() == true); + assertTrue("Field is lazy and it should not be", ((Field) field).lazy() == true); 
assertTrue(field.stringValue() + " is not equal to " + DocHelper.FIELD_UTF2_TEXT, field.stringValue().equals(DocHelper.FIELD_UTF2_TEXT) == true); - field = doc.getFieldable(DocHelper.LAZY_FIELD_BINARY_KEY); + field = doc.getField(DocHelper.LAZY_FIELD_BINARY_KEY); assertTrue("field is null and it shouldn't be", field != null); assertTrue("stringValue isn't null for lazy binary field", field.stringValue() == null); @@ -152,32 +151,32 @@ Document doc = getDocument(reader, 0, fieldSelector); assertTrue("doc is null and it shouldn't be", doc != null); - Fieldable field = doc.getFieldable(DocHelper.LAZY_FIELD_KEY); + IndexableField field = doc.getField(DocHelper.LAZY_FIELD_KEY); assertTrue("field is null and it shouldn't be", field != null); - assertTrue("field is not lazy and it should be", field.isLazy()); + assertTrue("field is not lazy and it should be", ((Field) field).lazy()); String value = field.stringValue(); assertTrue("value is null and it shouldn't be", value != null); assertTrue(value + " is not equal to " + DocHelper.LAZY_FIELD_TEXT, value.equals(DocHelper.LAZY_FIELD_TEXT) == true); assertTrue("calling stringValue() twice should give different references", field.stringValue() != field.stringValue()); - field = doc.getFieldable(DocHelper.TEXT_FIELD_1_KEY); + field = doc.getField(DocHelper.TEXT_FIELD_1_KEY); assertTrue("field is null and it shouldn't be", field != null); - assertTrue("Field is lazy and it should not be", field.isLazy() == false); + assertTrue("Field is lazy and it should not be", ((Field) field).lazy() == false); assertTrue("calling stringValue() twice should give same reference", field.stringValue() == field.stringValue()); - field = doc.getFieldable(DocHelper.TEXT_FIELD_UTF1_KEY); + field = doc.getField(DocHelper.TEXT_FIELD_UTF1_KEY); assertTrue("field is null and it shouldn't be", field != null); - assertTrue("Field is lazy and it should not be", field.isLazy() == false); + assertTrue("Field is lazy and it should not be", ((Field) 
field).lazy() == false); assertTrue(field.stringValue() + " is not equal to " + DocHelper.FIELD_UTF1_TEXT, field.stringValue().equals(DocHelper.FIELD_UTF1_TEXT) == true); assertTrue("calling stringValue() twice should give same reference", field.stringValue() == field.stringValue()); - field = doc.getFieldable(DocHelper.TEXT_FIELD_UTF2_KEY); + field = doc.getField(DocHelper.TEXT_FIELD_UTF2_KEY); assertTrue("field is null and it shouldn't be", field != null); - assertTrue("Field is lazy and it should not be", field.isLazy() == true); + assertTrue("Field is lazy and it should not be", ((Field) field).lazy() == true); assertTrue(field.stringValue() + " is not equal to " + DocHelper.FIELD_UTF2_TEXT, field.stringValue().equals(DocHelper.FIELD_UTF2_TEXT) == true); assertTrue("calling stringValue() twice should give different references", field.stringValue() != field.stringValue()); - field = doc.getFieldable(DocHelper.LAZY_FIELD_BINARY_KEY); + field = doc.getField(DocHelper.LAZY_FIELD_BINARY_KEY); assertTrue("field is null and it shouldn't be", field != null); assertTrue("stringValue isn't null for lazy binary field", field.stringValue() == null); assertTrue("calling binaryValue() twice should give different references", field.binaryValue(null).bytes != field.binaryValue(null).bytes); @@ -200,9 +199,9 @@ Document doc = getDocument(reader, 0, fieldSelector); assertTrue("doc is null and it shouldn't be", doc != null); int count = 0; - List l = doc.getFields(); - for (final Fieldable fieldable : l ) { - Field field = (Field) fieldable; + List l = doc.getFields(); + for (final IndexableField indexableField : l ) { + Field field = (Field) indexableField; assertTrue("field is null and it shouldn't be", field != null); String sv = field.stringValue(); @@ -244,11 +243,11 @@ IndexReader reader = IndexReader.open(tmpDir); Document doc; - doc = reader.document(0);//Load all of them + doc = reader.document2(0);//Load all of them assertTrue("doc is null and it shouldn't be", doc != 
null); - Fieldable field = doc.getFieldable(DocHelper.LARGE_LAZY_FIELD_KEY); + IndexableField field = doc.getField(DocHelper.LARGE_LAZY_FIELD_KEY); assertTrue("field is null and it shouldn't be", field != null); - assertTrue("field is lazy", field.isLazy() == false); + assertTrue("field is lazy", ((Field) field).lazy() == false); String value; long start; long finish; @@ -265,8 +264,8 @@ System.gc(); reader = IndexReader.open(tmpDir); doc = getDocument(reader, 0, fieldSelector); - field = doc.getFieldable(DocHelper.LARGE_LAZY_FIELD_KEY); - assertTrue("field is not lazy", field.isLazy() == true); + field = doc.getField(DocHelper.LARGE_LAZY_FIELD_KEY); + assertTrue("field is not lazy", ((Field) field).lazy() == true); start = System.currentTimeMillis(); //On my machine this took around 50 - 70ms value = field.stringValue(); @@ -298,12 +297,12 @@ return FieldSelectorResult.NO_LOAD; } }); - Fieldable f1 = doc.getFieldable(DocHelper.TEXT_FIELD_1_KEY); - Fieldable f3 = doc.getFieldable(DocHelper.TEXT_FIELD_3_KEY); - Fieldable fb = doc.getFieldable(DocHelper.LAZY_FIELD_BINARY_KEY); - assertTrue(f1.isBinary()); - assertTrue(!f3.isBinary()); - assertTrue(fb.isBinary()); + IndexableField f1 = doc.getField(DocHelper.TEXT_FIELD_1_KEY); + IndexableField f3 = doc.getField(DocHelper.TEXT_FIELD_3_KEY); + IndexableField fb = doc.getField(DocHelper.LAZY_FIELD_BINARY_KEY); + assertTrue(f1.binaryValue(null)!=null); + assertTrue(f3.binaryValue(null)==null); + assertTrue(fb.binaryValue(null)!=null); assertSizeEquals(2*DocHelper.FIELD_1_TEXT.length(), f1.binaryValue(null).bytes); assertEquals(DocHelper.FIELD_3_TEXT, f3.stringValue()); assertSizeEquals(DocHelper.LAZY_FIELD_BINARY_BYTES.length, fb.binaryValue(null).bytes); Index: lucene/contrib/misc/src/test/org/apache/lucene/index/TestContribIndexReader.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/index/TestContribIndexReader.java (revision 1150855) +++ 
lucene/contrib/misc/src/test/org/apache/lucene/index/TestContribIndexReader.java (working copy) @@ -23,10 +23,10 @@ import java.util.Set; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Fieldable; -import org.apache.lucene.document.FieldSelector; -import org.apache.lucene.document.FieldSelectorVisitor; -import org.apache.lucene.document.SetBasedFieldSelector; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldSelector; +import org.apache.lucene.document2.FieldSelectorVisitor; +import org.apache.lucene.document2.SetBasedFieldSelector; import org.apache.lucene.document2.BinaryField; import org.apache.lucene.document2.Document; import org.apache.lucene.document2.FieldType; @@ -37,7 +37,7 @@ import org.apache.lucene.util.LuceneTestCase; public class TestContribIndexReader extends LuceneTestCase { - private org.apache.lucene.document.Document getDocument(IndexReader ir, int docID, FieldSelector selector) throws IOException { + private Document getDocument(IndexReader ir, int docID, FieldSelector selector) throws IOException { final FieldSelectorVisitor visitor = new FieldSelectorVisitor(selector); ir.document(docID, visitor); return visitor.getDocument(); @@ -137,11 +137,11 @@ writer.addDocument(doc); writer.close(); IndexReader reader = IndexReader.open(dir, false); - org.apache.lucene.document.Document doc2 = reader.document(reader.maxDoc() - 1); - org.apache.lucene.document.Field[] fields = doc2.getFields("bin1"); + Document doc2 = reader.document2(reader.maxDoc() - 1); + IndexableField[] fields = doc2.getFields("bin1"); assertNotNull(fields); assertEquals(1, fields.length); - org.apache.lucene.document.Field b1 = fields[0]; + Field b1 = (Field) fields[0]; assertTrue(b1.isBinary()); BytesRef bytesRef = b1.binaryValue(null); assertEquals(bin.length, bytesRef.length); @@ -152,11 +152,11 @@ lazyFields.add("bin1"); FieldSelector sel = new SetBasedFieldSelector(new HashSet(), lazyFields); doc2 = 
getDocument(reader, reader.maxDoc() - 1, sel); - Fieldable[] fieldables = doc2.getFieldables("bin1"); + IndexableField[] fieldables = doc2.getFields("bin1"); assertNotNull(fieldables); assertEquals(1, fieldables.length); - Fieldable fb1 = fieldables[0]; - assertTrue(fb1.isBinary()); + IndexableField fb1 = fieldables[0]; + assertTrue(fb1.binaryValue(null)!=null); bytesRef = fb1.binaryValue(null); assertEquals(bin.length, bytesRef.bytes.length); assertEquals(bin.length, bytesRef.length); @@ -171,11 +171,11 @@ writer.optimize(); writer.close(); reader = IndexReader.open(dir, false); - doc2 = reader.document(reader.maxDoc() - 1); + doc2 = reader.document2(reader.maxDoc() - 1); fields = doc2.getFields("bin1"); assertNotNull(fields); assertEquals(1, fields.length); - b1 = fields[0]; + b1 = (Field) fields[0]; assertTrue(b1.isBinary()); bytesRef = b1.binaryValue(null); assertEquals(bin.length, bytesRef.length); Index: lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java (revision 1150855) +++ lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java (working copy) @@ -77,7 +77,7 @@ IndexReader ir; ir = IndexReader.open(dirs[0], true); assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); // rounding error - org.apache.lucene.document.Document doc = ir.document(0); + Document doc = ir.document2(0); assertEquals("0", doc.get("id")); TermsEnum te = MultiFields.getTerms(ir, "id").iterator(); assertEquals(TermsEnum.SeekStatus.NOT_FOUND, te.seek(new BytesRef("1"))); @@ -85,7 +85,7 @@ ir.close(); ir = IndexReader.open(dirs[1], true); assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); - doc = ir.document(0); + doc = ir.document2(0); assertEquals("1", doc.get("id")); te = MultiFields.getTerms(ir, "id").iterator(); assertEquals(TermsEnum.SeekStatus.NOT_FOUND, te.seek(new 
BytesRef("0"))); @@ -94,7 +94,7 @@ ir.close(); ir = IndexReader.open(dirs[2], true); assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); - doc = ir.document(0); + doc = ir.document2(0); assertEquals("2", doc.get("id")); te = MultiFields.getTerms(ir, "id").iterator(); @@ -122,19 +122,19 @@ IndexReader ir; ir = IndexReader.open(dirs[0], true); assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); - org.apache.lucene.document.Document doc = ir.document(0); + Document doc = ir.document2(0); assertEquals("0", doc.get("id")); int start = ir.numDocs(); ir.close(); ir = IndexReader.open(dirs[1], true); assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); - doc = ir.document(0); + doc = ir.document2(0); assertEquals(start + "", doc.get("id")); start += ir.numDocs(); ir.close(); ir = IndexReader.open(dirs[2], true); assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); - doc = ir.document(0); + doc = ir.document2(0); assertEquals(start + "", doc.get("id")); // make sure the deleted doc is not here TermsEnum te = MultiFields.getTerms(ir, "id").iterator(); Index: lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java =================================================================== --- lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java (revision 1150855) +++ lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java (working copy) @@ -33,8 +33,9 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermFreqVector; import org.apache.lucene.search.BooleanClause; @@ -800,7 +801,7 @@ o.println(); ScoreDoc[] scoreDocs = hits.scoreDocs; for (int i = 0; i < Math.min(25, 
len); i++) { - Document d = searcher.doc(scoreDocs[i].doc); + Document d = searcher.doc2(scoreDocs[i].doc); String summary = d.get( "summary"); o.println("score : " + scoreDocs[i].score); o.println("url : " + d.get("url")); @@ -824,12 +825,12 @@ // field does not store term vector info if (vector == null) { - Document d=ir.document(docNum); - String text[]=d.getValues(fieldName); + Document d=ir.document2(docNum); + IndexableField text[]=d.getFields(fieldName); if(text!=null) { for (int j = 0; j < text.length; j++) { - addTermFrequencies(new StringReader(text[j]), termFreqMap, fieldName); + addTermFrequencies(new StringReader(text[j].stringValue()), termFreqMap, fieldName); } } } Index: lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java =================================================================== --- lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java (revision 1150855) +++ lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java (working copy) @@ -131,13 +131,13 @@ TopDocs hits = searcher.search(query, chain, 1000); numHits = hits.totalHits; assertEquals(MAX / 2, numHits); - assertEquals("bob", searcher.doc(hits.scoreDocs[0].doc).get("owner")); + assertEquals("bob", searcher.doc2(hits.scoreDocs[0].doc).get("owner")); chain = getChainedFilter(new Filter[] {bobFilter}, new int[] {ChainedFilter.ANDNOT}); hits = searcher.search(query, chain, 1000); numHits = hits.totalHits; assertEquals(MAX / 2, numHits); - assertEquals("sue", searcher.doc(hits.scoreDocs[0].doc).get("owner")); + assertEquals("sue", searcher.doc2(hits.scoreDocs[0].doc).get("owner")); } public void testOR() throws Exception { @@ -154,7 +154,7 @@ TopDocs hits = searcher.search(query, chain, 1000); assertEquals("AND matches just bob", MAX / 2, hits.totalHits); - assertEquals("bob", searcher.doc(hits.scoreDocs[0].doc).get("owner")); + assertEquals("bob", searcher.doc2(hits.scoreDocs[0].doc).get("owner")); } public void 
testXOR() throws Exception { @@ -163,7 +163,7 @@ TopDocs hits = searcher.search(query, chain, 1000); assertEquals("XOR matches sue", MAX / 2, hits.totalHits); - assertEquals("sue", searcher.doc(hits.scoreDocs[0].doc).get("owner")); + assertEquals("sue", searcher.doc2(hits.scoreDocs[0].doc).get("owner")); } public void testANDNOT() throws Exception { @@ -174,7 +174,7 @@ TopDocs hits = searcher.search(query, chain, 1000); assertEquals("ANDNOT matches just bob", MAX / 2, hits.totalHits); - assertEquals("bob", searcher.doc(hits.scoreDocs[0].doc).get("owner")); + assertEquals("bob", searcher.doc2(hits.scoreDocs[0].doc).get("owner")); chain = getChainedFilter( new Filter[]{bobFilter, bobFilter}, @@ -183,7 +183,7 @@ hits = searcher.search(query, chain, 1000); assertEquals("ANDNOT bob ANDNOT bob matches all sues", MAX / 2, hits.totalHits); - assertEquals("sue", searcher.doc(hits.scoreDocs[0].doc).get("owner")); + assertEquals("sue", searcher.doc2(hits.scoreDocs[0].doc).get("owner")); } /* Index: lucene/contrib/queries/src/test/org/apache/lucene/search/TestSlowCollationMethods.java =================================================================== --- lucene/contrib/queries/src/test/org/apache/lucene/search/TestSlowCollationMethods.java (revision 1150855) +++ lucene/contrib/queries/src/test/org/apache/lucene/search/TestSlowCollationMethods.java (working copy) @@ -92,7 +92,7 @@ TopFieldDocs docs = searcher.search(new MatchAllDocsQuery(), null, numDocs, new Sort(sf)); String prev = ""; for (ScoreDoc doc : docs.scoreDocs) { - String value = reader.document(doc.doc).get("field"); + String value = reader.document2(doc.doc).get("field"); assertTrue(collator.compare(value, prev) >= 0); prev = value; } @@ -102,7 +102,7 @@ // positive test TopDocs docs = searcher.search(query, numDocs); for (ScoreDoc doc : docs.scoreDocs) { - String value = reader.document(doc.doc).get("field"); + String value = reader.document2(doc.doc).get("field"); assertTrue(collator.compare(value, startPoint) 
>= 0); assertTrue(collator.compare(value, endPoint) <= 0); } @@ -113,7 +113,7 @@ bq.add(query, Occur.MUST_NOT); docs = searcher.search(bq, numDocs); for (ScoreDoc doc : docs.scoreDocs) { - String value = reader.document(doc.doc).get("field"); + String value = reader.document2(doc.doc).get("field"); assertTrue(collator.compare(value, startPoint) < 0 || collator.compare(value, endPoint) > 0); } } Index: lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java =================================================================== --- lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java (revision 1150855) +++ lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java (working copy) @@ -30,8 +30,9 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.search.BooleanClause; @@ -147,11 +148,11 @@ @Override public void collect(int doc) throws IOException { - Document d = reader.document(doc); - String[] values = d.getValues( Syns2Index.F_SYN); + Document d = reader.document2(doc); + IndexableField[] values = d.getFields( Syns2Index.F_SYN); for ( int j = 0; j < values.length; j++) { - String syn = values[ j]; + String syn = values[ j].stringValue(); if ( already.add( syn)) // avoid dups of top level words and synonyms { TermQuery tq = new TermQuery( new Term( field, syn)); Index: lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynLookup.java =================================================================== --- lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynLookup.java 
(revision 1150855) +++ lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynLookup.java (working copy) @@ -29,8 +29,9 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.search.BooleanClause; @@ -90,12 +91,12 @@ ScoreDoc[] hits = searcher.search(query, countingCollector.numHits).scoreDocs; for (int i = 0; i < hits.length; i++) { - Document doc = searcher.doc(hits[i].doc); + Document doc = searcher.doc2(hits[i].doc); - String[] values = doc.getValues(Syns2Index.F_SYN); + IndexableField[] values = doc.getFields(Syns2Index.F_SYN); for (int j = 0; j < values.length; j++) { - System.out.println(values[j]); + System.out.println(values[j].stringValue()); } } @@ -154,11 +155,11 @@ @Override public void collect(int doc) throws IOException { - Document d = reader.document(doc); - String[] values = d.getValues( Syns2Index.F_SYN); + Document d = reader.document2(doc); + IndexableField[] values = d.getFields( Syns2Index.F_SYN); for ( int j = 0; j < values.length; j++) { - String syn = values[ j]; + String syn = values[ j].stringValue(); if ( already.add( syn)) { TermQuery tq = new TermQuery( new Term( field, syn)); Index: lucene/src/java/org/apache/lucene/document2/CompressionTools.java =================================================================== --- lucene/src/java/org/apache/lucene/document2/CompressionTools.java (revision 0) +++ lucene/src/java/org/apache/lucene/document2/CompressionTools.java (revision 0) @@ -0,0 +1,127 @@ +package org.apache.lucene.document2; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * 
contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.util.zip.Deflater; +import java.util.zip.Inflater; +import java.util.zip.DataFormatException; +import java.io.ByteArrayOutputStream; + +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.CharsRef; +import org.apache.lucene.util.UnicodeUtil; + +/** Simple utility class providing static methods to + * compress and decompress binary data for stored fields. + * This class uses java.util.zip.Deflater and Inflater + * classes to compress and decompress. + */ + +public class CompressionTools { + + // Export only static methods + private CompressionTools() {} + + /** Compresses the specified byte range using the + * specified compressionLevel (constants are defined in + * java.util.zip.Deflater). */ + public static byte[] compress(byte[] value, int offset, int length, int compressionLevel) { + + /* Create an expandable byte array to hold the compressed data. + * You cannot use an array that's the same size as the orginal because + * there is no guarantee that the compressed data will be smaller than + * the uncompressed data. 
*/ + ByteArrayOutputStream bos = new ByteArrayOutputStream(length); + + Deflater compressor = new Deflater(); + + try { + compressor.setLevel(compressionLevel); + compressor.setInput(value, offset, length); + compressor.finish(); + + // Compress the data + final byte[] buf = new byte[1024]; + while (!compressor.finished()) { + int count = compressor.deflate(buf); + bos.write(buf, 0, count); + } + } finally { + compressor.end(); + } + + return bos.toByteArray(); + } + + /** Compresses the specified byte range, with default BEST_COMPRESSION level */ + public static byte[] compress(byte[] value, int offset, int length) { + return compress(value, offset, length, Deflater.BEST_COMPRESSION); + } + + /** Compresses all bytes in the array, with default BEST_COMPRESSION level */ + public static byte[] compress(byte[] value) { + return compress(value, 0, value.length, Deflater.BEST_COMPRESSION); + } + + /** Compresses the String value, with default BEST_COMPRESSION level */ + public static byte[] compressString(String value) { + return compressString(value, Deflater.BEST_COMPRESSION); + } + + /** Compresses the String value using the specified + * compressionLevel (constants are defined in + * java.util.zip.Deflater). 
*/ + public static byte[] compressString(String value, int compressionLevel) { + BytesRef result = new BytesRef(); + UnicodeUtil.UTF16toUTF8(value, 0, value.length(), result); + return compress(result.bytes, 0, result.length, compressionLevel); + } + + /** Decompress the byte array previously returned by + * compress */ + public static byte[] decompress(byte[] value) throws DataFormatException { + // Create an expandable byte array to hold the decompressed data + ByteArrayOutputStream bos = new ByteArrayOutputStream(value.length); + + Inflater decompressor = new Inflater(); + + try { + decompressor.setInput(value); + + // Decompress the data + final byte[] buf = new byte[1024]; + while (!decompressor.finished()) { + int count = decompressor.inflate(buf); + bos.write(buf, 0, count); + } + } finally { + decompressor.end(); + } + + return bos.toByteArray(); + } + + /** Decompress the byte array previously returned by + * compressString back into a String */ + public static String decompressString(byte[] value) throws DataFormatException { + final byte[] bytes = decompress(value); + CharsRef result = new CharsRef(bytes.length); + UnicodeUtil.UTF8toUTF16(bytes, 0, bytes.length, result); + return new String(result.chars, 0, result.length); + } +} Index: lucene/src/java/org/apache/lucene/document2/Field.java =================================================================== --- lucene/src/java/org/apache/lucene/document2/Field.java (revision 1150855) +++ lucene/src/java/org/apache/lucene/document2/Field.java (working copy) @@ -94,10 +94,20 @@ } public Field(String name, boolean internName, FieldType type, String value) { - if (name == null) + if (name == null) { throw new IllegalArgumentException("name cannot be null"); - if (value == null) + } + if (value == null) { throw new IllegalArgumentException("value cannot be null"); + } + if (!type.stored() && !type.indexed()) { + throw new IllegalArgumentException("it doesn't make sense to have a field that " + + "is neither 
indexed nor stored"); + } + if (!type.indexed() && !type.tokenized() && (type.storeTermVectors())) { + throw new IllegalArgumentException("cannot store term vector information " + + "for a field that is not indexed"); + } this.type = type; this.name = name; Index: lucene/src/java/org/apache/lucene/index/CheckIndex.java =================================================================== --- lucene/src/java/org/apache/lucene/index/CheckIndex.java (revision 1150855) +++ lucene/src/java/org/apache/lucene/index/CheckIndex.java (working copy) @@ -23,8 +23,7 @@ import org.apache.lucene.store.FSDirectory; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IndexInput; -import org.apache.lucene.document.AbstractField; // for javadocs -import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; import org.apache.lucene.index.codecs.CodecProvider; import org.apache.lucene.index.codecs.DefaultSegmentInfosWriter; import org.apache.lucene.util.Bits; @@ -898,7 +897,7 @@ for (int j = 0; j < info.docCount; ++j) { if (delDocs == null || !delDocs.get(j)) { status.docCount++; - Document doc = reader.document(j); + Document doc = reader.document2(j); status.totFields += doc.getFields().size(); } } Index: lucene/src/java/org/apache/lucene/index/IndexReader.java =================================================================== --- lucene/src/java/org/apache/lucene/index/IndexReader.java (revision 1150855) +++ lucene/src/java/org/apache/lucene/index/IndexReader.java (working copy) @@ -974,12 +974,14 @@ * @throws CorruptIndexException if the index is corrupt * @throws IOException if there is a low-level IO error */ + /* public Document document(int docID) throws CorruptIndexException, IOException { ensureOpen(); final DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(); document(docID, visitor); return visitor.getDocument(); } + */ public org.apache.lucene.document2.Document document2(int docID) throws 
CorruptIndexException, IOException { ensureOpen(); Index: lucene/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java =================================================================== --- lucene/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java (revision 1150855) +++ lucene/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java (working copy) @@ -23,11 +23,9 @@ import java.util.Map; import java.util.Map.Entry; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.Fieldable; -import org.apache.lucene.document.Field.Index; -import org.apache.lucene.document.Field.Store; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.Directory; import org.apache.lucene.store.LockObtainFailedException; @@ -70,13 +68,13 @@ int numDocs = r.numDocs(); // index is allowed to have exactly one document or 0. 
if (numDocs == 1) { - Document doc = r.document(r.maxDoc() - 1); - Field sid = doc.getField(SNAPSHOTS_ID); + Document doc = r.document2(r.maxDoc() - 1); + Field sid = (Field) doc.getField(SNAPSHOTS_ID); if (sid == null) { throw new IllegalStateException("directory is not a valid snapshots store!"); } doc.removeField(SNAPSHOTS_ID); - for (Fieldable f : doc.getFields()) { + for (IndexableField f : doc) { snapshots.put(f.name(), f.stringValue()); } } else if (numDocs != 0) { @@ -189,12 +187,14 @@ private void persistSnapshotInfos(String id, String segment) throws IOException { writer.deleteAll(); Document d = new Document(); - d.add(new Field(SNAPSHOTS_ID, "", Store.YES, Index.NO)); + FieldType ft = new FieldType(); + ft.setStored(true); + d.add(new Field(SNAPSHOTS_ID, ft, "")); for (Entry e : super.getSnapshots().entrySet()) { - d.add(new Field(e.getKey(), e.getValue(), Store.YES, Index.NO)); + d.add(new Field(e.getKey(), ft, e.getValue())); } if (id != null) { - d.add(new Field(id, segment, Store.YES, Index.NO)); + d.add(new Field(id, ft, segment)); } writer.addDocument(d); writer.commit(); Index: lucene/src/java/org/apache/lucene/index/SegmentMerger.java =================================================================== --- lucene/src/java/org/apache/lucene/index/SegmentMerger.java (revision 1150855) +++ lucene/src/java/org/apache/lucene/index/SegmentMerger.java (working copy) @@ -23,7 +23,7 @@ import java.util.Collection; import java.util.List; -import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; import org.apache.lucene.index.IndexReader.FieldOption; import org.apache.lucene.index.MergePolicy.MergeAbortedException; import org.apache.lucene.index.codecs.Codec; @@ -316,7 +316,7 @@ // on the fly? 
// NOTE: it's very important to first assign to doc then pass it to // termVectorsWriter.addAllDocVectors; see LUCENE-1282 - Document doc = reader.document(j); + Document doc = reader.document2(j); fieldsWriter.addDocument(doc, fieldInfos); docCount++; checkAbort.work(300); @@ -343,7 +343,7 @@ for (; docCount < maxDoc; docCount++) { // NOTE: it's very important to first assign to doc then pass it to // termVectorsWriter.addAllDocVectors; see LUCENE-1282 - Document doc = reader.document(docCount); + Document doc = reader.document2(docCount); fieldsWriter.addDocument(doc, fieldInfos); checkAbort.work(300); } Index: lucene/src/java/org/apache/lucene/search/IndexSearcher.java =================================================================== --- lucene/src/java/org/apache/lucene/search/IndexSearcher.java (revision 1150855) +++ lucene/src/java/org/apache/lucene/search/IndexSearcher.java (working copy) @@ -239,9 +239,11 @@ } /* Sugar for .getIndexReader().document(docID) */ + /* public Document doc(int docID) throws CorruptIndexException, IOException { return reader.document(docID); } + */ public org.apache.lucene.document2.Document doc2(int docID) throws CorruptIndexException, IOException { return reader.document2(docID); Index: lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java =================================================================== --- lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java (revision 1150855) +++ lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java (working copy) @@ -1073,7 +1073,7 @@ newType.setStored(true); // randomly store it } - if (!newType.storeTermVectors()) { + if (newType.indexed() && !newType.storeTermVectors()) { newType.setStoreTermVectors(random.nextBoolean()); } Index: lucene/src/test/org/apache/lucene/document/TestBinaryDocument.java =================================================================== --- lucene/src/test/org/apache/lucene/document/TestBinaryDocument.java 
(revision 1150855) +++ lucene/src/test/org/apache/lucene/document/TestBinaryDocument.java (working copy) @@ -1,107 +0,0 @@ -package org.apache.lucene.document; - -import org.apache.lucene.util.LuceneTestCase; - -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.RandomIndexWriter; -import org.apache.lucene.store.Directory; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Tests {@link Document} class. 
- */ -public class TestBinaryDocument extends LuceneTestCase { - - String binaryValStored = "this text will be stored as a byte array in the index"; - String binaryValCompressed = "this text will be also stored and compressed as a byte array in the index"; - - public void testBinaryFieldInIndex() - throws Exception - { - Fieldable binaryFldStored = new Field("binaryStored", binaryValStored.getBytes()); - Fieldable stringFldStored = new Field("stringStored", binaryValStored, Field.Store.YES, Field.Index.NO, Field.TermVector.NO); - - Document doc = new Document(); - - doc.add(binaryFldStored); - - doc.add(stringFldStored); - - /** test for field count */ - assertEquals(2, doc.fields.size()); - - /** add the doc to a ram index */ - Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); - writer.addDocument(doc); - - /** open a reader and fetch the document */ - IndexReader reader = writer.getReader(); - Document docFromReader = reader.document(0); - assertTrue(docFromReader != null); - - /** fetch the binary stored field and compare it's content with the original one */ - String binaryFldStoredTest = new String(docFromReader.getBinaryValue("binaryStored")); - assertTrue(binaryFldStoredTest.equals(binaryValStored)); - - /** fetch the string field and compare it's content with the original one */ - String stringFldStoredTest = docFromReader.get("stringStored"); - assertTrue(stringFldStoredTest.equals(binaryValStored)); - - writer.close(); - reader.close(); - - reader = IndexReader.open(dir, false); - /** delete the document from index */ - reader.deleteDocument(0); - assertEquals(0, reader.numDocs()); - - reader.close(); - dir.close(); - } - - public void testCompressionTools() throws Exception { - Fieldable binaryFldCompressed = new Field("binaryCompressed", CompressionTools.compress(binaryValCompressed.getBytes())); - Fieldable stringFldCompressed = new Field("stringCompressed", 
CompressionTools.compressString(binaryValCompressed)); - - Document doc = new Document(); - - doc.add(binaryFldCompressed); - doc.add(stringFldCompressed); - - /** add the doc to a ram index */ - Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); - writer.addDocument(doc); - - /** open a reader and fetch the document */ - IndexReader reader = writer.getReader(); - Document docFromReader = reader.document(0); - assertTrue(docFromReader != null); - - /** fetch the binary compressed field and compare it's content with the original one */ - String binaryFldCompressedTest = new String(CompressionTools.decompress(docFromReader.getBinaryValue("binaryCompressed"))); - assertTrue(binaryFldCompressedTest.equals(binaryValCompressed)); - assertTrue(CompressionTools.decompressString(docFromReader.getBinaryValue("stringCompressed")).equals(binaryValCompressed)); - - writer.close(); - reader.close(); - dir.close(); - } -} Index: lucene/src/test/org/apache/lucene/document/TestDateTools.java =================================================================== --- lucene/src/test/org/apache/lucene/document/TestDateTools.java (revision 1150855) +++ lucene/src/test/org/apache/lucene/document/TestDateTools.java (working copy) @@ -1,199 +0,0 @@ -package org.apache.lucene.document; - -import java.text.ParseException; -import java.text.SimpleDateFormat; -import java.util.Calendar; -import java.util.Date; -import java.util.GregorianCalendar; -import java.util.TimeZone; -import java.util.Locale; - -import org.apache.lucene.util.LuceneTestCase; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -public class TestDateTools extends LuceneTestCase { - - public void testStringToDate() throws ParseException { - - Date d = null; - d = DateTools.stringToDate("2004"); - assertEquals("2004-01-01 00:00:00:000", isoFormat(d)); - d = DateTools.stringToDate("20040705"); - assertEquals("2004-07-05 00:00:00:000", isoFormat(d)); - d = DateTools.stringToDate("200407050910"); - assertEquals("2004-07-05 09:10:00:000", isoFormat(d)); - d = DateTools.stringToDate("20040705091055990"); - assertEquals("2004-07-05 09:10:55:990", isoFormat(d)); - - try { - d = DateTools.stringToDate("97"); // no date - fail(); - } catch(ParseException e) { /* expected exception */ } - try { - d = DateTools.stringToDate("200401011235009999"); // no date - fail(); - } catch(ParseException e) { /* expected exception */ } - try { - d = DateTools.stringToDate("aaaa"); // no date - fail(); - } catch(ParseException e) { /* expected exception */ } - - } - - public void testStringtoTime() throws ParseException { - long time = DateTools.stringToTime("197001010000"); - Calendar cal = new GregorianCalendar(); - cal.clear(); - cal.set(1970, 0, 1, // year=1970, month=january, day=1 - 0, 0, 0); // hour, minute, second - cal.set(Calendar.MILLISECOND, 0); - cal.setTimeZone(TimeZone.getTimeZone("GMT")); - assertEquals(cal.getTime().getTime(), time); - cal.set(1980, 1, 2, // year=1980, month=february, day=2 - 11, 5, 0); // hour, minute, second - cal.set(Calendar.MILLISECOND, 0); - time = DateTools.stringToTime("198002021105"); - assertEquals(cal.getTime().getTime(), time); - } - - public void 
testDateAndTimetoString() throws ParseException { - Calendar cal = new GregorianCalendar(); - cal.clear(); - cal.setTimeZone(TimeZone.getTimeZone("GMT")); - cal.set(2004, 1, 3, // year=2004, month=february(!), day=3 - 22, 8, 56); // hour, minute, second - cal.set(Calendar.MILLISECOND, 333); - - String dateString; - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.YEAR); - assertEquals("2004", dateString); - assertEquals("2004-01-01 00:00:00:000", isoFormat(DateTools.stringToDate(dateString))); - - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.MONTH); - assertEquals("200402", dateString); - assertEquals("2004-02-01 00:00:00:000", isoFormat(DateTools.stringToDate(dateString))); - - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.DAY); - assertEquals("20040203", dateString); - assertEquals("2004-02-03 00:00:00:000", isoFormat(DateTools.stringToDate(dateString))); - - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.HOUR); - assertEquals("2004020322", dateString); - assertEquals("2004-02-03 22:00:00:000", isoFormat(DateTools.stringToDate(dateString))); - - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.MINUTE); - assertEquals("200402032208", dateString); - assertEquals("2004-02-03 22:08:00:000", isoFormat(DateTools.stringToDate(dateString))); - - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.SECOND); - assertEquals("20040203220856", dateString); - assertEquals("2004-02-03 22:08:56:000", isoFormat(DateTools.stringToDate(dateString))); - - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.MILLISECOND); - assertEquals("20040203220856333", dateString); - assertEquals("2004-02-03 22:08:56:333", isoFormat(DateTools.stringToDate(dateString))); - - // date before 1970: - cal.set(1961, 2, 5, // year=1961, month=march(!), day=5 - 23, 9, 51); // hour, minute, second - cal.set(Calendar.MILLISECOND, 444); - 
dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.MILLISECOND); - assertEquals("19610305230951444", dateString); - assertEquals("1961-03-05 23:09:51:444", isoFormat(DateTools.stringToDate(dateString))); - - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.HOUR); - assertEquals("1961030523", dateString); - assertEquals("1961-03-05 23:00:00:000", isoFormat(DateTools.stringToDate(dateString))); - - // timeToString: - cal.set(1970, 0, 1, // year=1970, month=january, day=1 - 0, 0, 0); // hour, minute, second - cal.set(Calendar.MILLISECOND, 0); - dateString = DateTools.timeToString(cal.getTime().getTime(), - DateTools.Resolution.MILLISECOND); - assertEquals("19700101000000000", dateString); - - cal.set(1970, 0, 1, // year=1970, month=january, day=1 - 1, 2, 3); // hour, minute, second - cal.set(Calendar.MILLISECOND, 0); - dateString = DateTools.timeToString(cal.getTime().getTime(), - DateTools.Resolution.MILLISECOND); - assertEquals("19700101010203000", dateString); - } - - public void testRound() { - Calendar cal = new GregorianCalendar(); - cal.clear(); - cal.setTimeZone(TimeZone.getTimeZone("GMT")); - cal.set(2004, 1, 3, // year=2004, month=february(!), day=3 - 22, 8, 56); // hour, minute, second - cal.set(Calendar.MILLISECOND, 333); - Date date = cal.getTime(); - assertEquals("2004-02-03 22:08:56:333", isoFormat(date)); - - Date dateYear = DateTools.round(date, DateTools.Resolution.YEAR); - assertEquals("2004-01-01 00:00:00:000", isoFormat(dateYear)); - - Date dateMonth = DateTools.round(date, DateTools.Resolution.MONTH); - assertEquals("2004-02-01 00:00:00:000", isoFormat(dateMonth)); - - Date dateDay = DateTools.round(date, DateTools.Resolution.DAY); - assertEquals("2004-02-03 00:00:00:000", isoFormat(dateDay)); - - Date dateHour = DateTools.round(date, DateTools.Resolution.HOUR); - assertEquals("2004-02-03 22:00:00:000", isoFormat(dateHour)); - - Date dateMinute = DateTools.round(date, DateTools.Resolution.MINUTE); - 
assertEquals("2004-02-03 22:08:00:000", isoFormat(dateMinute)); - - Date dateSecond = DateTools.round(date, DateTools.Resolution.SECOND); - assertEquals("2004-02-03 22:08:56:000", isoFormat(dateSecond)); - - Date dateMillisecond = DateTools.round(date, DateTools.Resolution.MILLISECOND); - assertEquals("2004-02-03 22:08:56:333", isoFormat(dateMillisecond)); - - // long parameter: - long dateYearLong = DateTools.round(date.getTime(), DateTools.Resolution.YEAR); - assertEquals("2004-01-01 00:00:00:000", isoFormat(new Date(dateYearLong))); - - long dateMillisecondLong = DateTools.round(date.getTime(), DateTools.Resolution.MILLISECOND); - assertEquals("2004-02-03 22:08:56:333", isoFormat(new Date(dateMillisecondLong))); - } - - private String isoFormat(Date date) { - SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss:SSS", Locale.US); - sdf.setTimeZone(TimeZone.getTimeZone("GMT")); - return sdf.format(date); - } - - public void testDateToolsUTC() throws Exception { - // Sun, 30 Oct 2005 00:00:00 +0000 -- the last second of 2005's DST in Europe/London - long time = 1130630400; - try { - TimeZone.setDefault(TimeZone.getTimeZone(/* "GMT" */ "Europe/London")); - String d1 = DateTools.dateToString(new Date(time*1000), DateTools.Resolution.MINUTE); - String d2 = DateTools.dateToString(new Date((time+3600)*1000), DateTools.Resolution.MINUTE); - assertFalse("different times", d1.equals(d2)); - assertEquals("midnight", DateTools.stringToTime(d1), time*1000); - assertEquals("later", DateTools.stringToTime(d2), (time+3600)*1000); - } finally { - TimeZone.setDefault(null); - } - } - -} \ No newline at end of file Index: lucene/src/test/org/apache/lucene/document/TestDocument.java =================================================================== --- lucene/src/test/org/apache/lucene/document/TestDocument.java (revision 1150855) +++ lucene/src/test/org/apache/lucene/document/TestDocument.java (working copy) @@ -1,281 +0,0 @@ -package org.apache.lucene.document; - 
-import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.RandomIndexWriter; -import org.apache.lucene.index.Term; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.ScoreDoc; -import org.apache.lucene.search.TermQuery; -import org.apache.lucene.store.Directory; -import org.apache.lucene.util.LuceneTestCase; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Tests {@link Document} class. 
- */ -public class TestDocument extends LuceneTestCase { - - String binaryVal = "this text will be stored as a byte array in the index"; - String binaryVal2 = "this text will be also stored as a byte array in the index"; - - public void testBinaryField() throws Exception { - Document doc = new Document(); - Fieldable stringFld = new Field("string", binaryVal, Field.Store.YES, - Field.Index.NO); - Fieldable binaryFld = new Field("binary", binaryVal.getBytes()); - Fieldable binaryFld2 = new Field("binary", binaryVal2.getBytes()); - - doc.add(stringFld); - doc.add(binaryFld); - - assertEquals(2, doc.fields.size()); - - assertTrue(binaryFld.isBinary()); - assertTrue(binaryFld.isStored()); - assertFalse(binaryFld.isIndexed()); - assertFalse(binaryFld.isTokenized()); - - String binaryTest = new String(doc.getBinaryValue("binary")); - assertTrue(binaryTest.equals(binaryVal)); - - String stringTest = doc.get("string"); - assertTrue(binaryTest.equals(stringTest)); - - doc.add(binaryFld2); - - assertEquals(3, doc.fields.size()); - - byte[][] binaryTests = doc.getBinaryValues("binary"); - - assertEquals(2, binaryTests.length); - - binaryTest = new String(binaryTests[0]); - String binaryTest2 = new String(binaryTests[1]); - - assertFalse(binaryTest.equals(binaryTest2)); - - assertTrue(binaryTest.equals(binaryVal)); - assertTrue(binaryTest2.equals(binaryVal2)); - - doc.removeField("string"); - assertEquals(2, doc.fields.size()); - - doc.removeFields("binary"); - assertEquals(0, doc.fields.size()); - } - - /** - * Tests {@link Document#removeField(String)} method for a brand new Document - * that has not been indexed yet. 
- * - * @throws Exception on error - */ - public void testRemoveForNewDocument() throws Exception { - Document doc = makeDocumentWithFields(); - assertEquals(8, doc.fields.size()); - doc.removeFields("keyword"); - assertEquals(6, doc.fields.size()); - doc.removeFields("doesnotexists"); // removing non-existing fields is - // siltenlty ignored - doc.removeFields("keyword"); // removing a field more than once - assertEquals(6, doc.fields.size()); - doc.removeField("text"); - assertEquals(5, doc.fields.size()); - doc.removeField("text"); - assertEquals(4, doc.fields.size()); - doc.removeField("text"); - assertEquals(4, doc.fields.size()); - doc.removeField("doesnotexists"); // removing non-existing fields is - // siltenlty ignored - assertEquals(4, doc.fields.size()); - doc.removeFields("unindexed"); - assertEquals(2, doc.fields.size()); - doc.removeFields("unstored"); - assertEquals(0, doc.fields.size()); - doc.removeFields("doesnotexists"); // removing non-existing fields is - // siltenlty ignored - assertEquals(0, doc.fields.size()); - } - - public void testConstructorExceptions() { - new Field("name", "value", Field.Store.YES, Field.Index.NO); // okay - new Field("name", "value", Field.Store.NO, Field.Index.NOT_ANALYZED); // okay - try { - new Field("name", "value", Field.Store.NO, Field.Index.NO); - fail(); - } catch (IllegalArgumentException e) { - // expected exception - } - new Field("name", "value", Field.Store.YES, Field.Index.NO, - Field.TermVector.NO); // okay - try { - new Field("name", "value", Field.Store.YES, Field.Index.NO, - Field.TermVector.YES); - fail(); - } catch (IllegalArgumentException e) { - // expected exception - } - } - - /** - * Tests {@link Document#getValues(String)} method for a brand new Document - * that has not been indexed yet. 
- * - * @throws Exception on error - */ - public void testGetValuesForNewDocument() throws Exception { - doAssert(makeDocumentWithFields(), false); - } - - /** - * Tests {@link Document#getValues(String)} method for a Document retrieved - * from an index. - * - * @throws Exception on error - */ - public void testGetValuesForIndexedDocument() throws Exception { - Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); - writer.addDocument(makeDocumentWithFields()); - IndexReader reader = writer.getReader(); - - IndexSearcher searcher = newSearcher(reader); - - // search for something that does exists - Query query = new TermQuery(new Term("keyword", "test1")); - - // ensure that queries return expected results without DateFilter first - ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; - assertEquals(1, hits.length); - - doAssert(searcher.doc(hits[0].doc), true); - writer.close(); - searcher.close(); - reader.close(); - dir.close(); - } - - private Document makeDocumentWithFields() { - Document doc = new Document(); - doc.add(new Field("keyword", "test1", Field.Store.YES, - Field.Index.NOT_ANALYZED)); - doc.add(new Field("keyword", "test2", Field.Store.YES, - Field.Index.NOT_ANALYZED)); - doc.add(new Field("text", "test1", Field.Store.YES, Field.Index.ANALYZED)); - doc.add(new Field("text", "test2", Field.Store.YES, Field.Index.ANALYZED)); - doc.add(new Field("unindexed", "test1", Field.Store.YES, Field.Index.NO)); - doc.add(new Field("unindexed", "test2", Field.Store.YES, Field.Index.NO)); - doc - .add(new Field("unstored", "test1", Field.Store.NO, - Field.Index.ANALYZED)); - doc - .add(new Field("unstored", "test2", Field.Store.NO, - Field.Index.ANALYZED)); - return doc; - } - - private void doAssert(Document doc, boolean fromIndex) { - String[] keywordFieldValues = doc.getValues("keyword"); - String[] textFieldValues = doc.getValues("text"); - String[] unindexedFieldValues = doc.getValues("unindexed"); - 
String[] unstoredFieldValues = doc.getValues("unstored"); - - assertTrue(keywordFieldValues.length == 2); - assertTrue(textFieldValues.length == 2); - assertTrue(unindexedFieldValues.length == 2); - // this test cannot work for documents retrieved from the index - // since unstored fields will obviously not be returned - if (!fromIndex) { - assertTrue(unstoredFieldValues.length == 2); - } - - assertTrue(keywordFieldValues[0].equals("test1")); - assertTrue(keywordFieldValues[1].equals("test2")); - assertTrue(textFieldValues[0].equals("test1")); - assertTrue(textFieldValues[1].equals("test2")); - assertTrue(unindexedFieldValues[0].equals("test1")); - assertTrue(unindexedFieldValues[1].equals("test2")); - // this test cannot work for documents retrieved from the index - // since unstored fields will obviously not be returned - if (!fromIndex) { - assertTrue(unstoredFieldValues[0].equals("test1")); - assertTrue(unstoredFieldValues[1].equals("test2")); - } - } - - public void testFieldSetValue() throws Exception { - - Field field = new Field("id", "id1", Field.Store.YES, - Field.Index.NOT_ANALYZED); - Document doc = new Document(); - doc.add(field); - doc.add(new Field("keyword", "test", Field.Store.YES, - Field.Index.NOT_ANALYZED)); - - Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); - writer.addDocument(doc); - field.setValue("id2"); - writer.addDocument(doc); - field.setValue("id3"); - writer.addDocument(doc); - - IndexReader reader = writer.getReader(); - IndexSearcher searcher = newSearcher(reader); - - Query query = new TermQuery(new Term("keyword", "test")); - - // ensure that queries return expected results without DateFilter first - ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; - assertEquals(3, hits.length); - int result = 0; - for (int i = 0; i < 3; i++) { - Document doc2 = searcher.doc(hits[i].doc); - Field f = doc2.getField("id"); - if (f.stringValue().equals("id1")) result |= 1; - else if 
(f.stringValue().equals("id2")) result |= 2; - else if (f.stringValue().equals("id3")) result |= 4; - else fail("unexpected id field"); - } - writer.close(); - searcher.close(); - reader.close(); - dir.close(); - assertEquals("did not see all IDs", 7, result); - } - - public void testFieldSetValueChangeBinary() { - Field field1 = new Field("field1", new byte[0]); - Field field2 = new Field("field2", "", Field.Store.YES, - Field.Index.ANALYZED); - try { - field1.setValue("abc"); - fail("did not hit expected exception"); - } catch (IllegalArgumentException iae) { - // expected - } - try { - field2.setValue(new byte[0]); - fail("did not hit expected exception"); - } catch (IllegalArgumentException iae) { - // expected - } - } -} Index: lucene/src/test/org/apache/lucene/document2/TestBinaryDocument.java =================================================================== --- lucene/src/test/org/apache/lucene/document2/TestBinaryDocument.java (revision 0) +++ lucene/src/test/org/apache/lucene/document2/TestBinaryDocument.java (working copy) @@ -1,8 +1,9 @@ -package org.apache.lucene.document; +package org.apache.lucene.document2; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.store.Directory; @@ -34,8 +35,10 @@ public void testBinaryFieldInIndex() throws Exception { - Fieldable binaryFldStored = new Field("binaryStored", binaryValStored.getBytes()); - Fieldable stringFldStored = new Field("stringStored", binaryValStored, Field.Store.YES, Field.Index.NO, Field.TermVector.NO); + FieldType ft = new FieldType(); + ft.setStored(true); + IndexableField binaryFldStored = new BinaryField("binaryStored", binaryValStored.getBytes()); + IndexableField stringFldStored = new Field("stringStored", ft, binaryValStored); Document doc = new Document(); @@ -53,7 +56,7 @@ /** open a reader and fetch the document */ IndexReader 
reader = writer.getReader(); - Document docFromReader = reader.document(0); + Document docFromReader = reader.document2(0); assertTrue(docFromReader != null); /** fetch the binary stored field and compare it's content with the original one */ @@ -77,8 +80,8 @@ } public void testCompressionTools() throws Exception { - Fieldable binaryFldCompressed = new Field("binaryCompressed", CompressionTools.compress(binaryValCompressed.getBytes())); - Fieldable stringFldCompressed = new Field("stringCompressed", CompressionTools.compressString(binaryValCompressed)); + IndexableField binaryFldCompressed = new BinaryField("binaryCompressed", CompressionTools.compress(binaryValCompressed.getBytes())); + IndexableField stringFldCompressed = new BinaryField("stringCompressed", CompressionTools.compressString(binaryValCompressed)); Document doc = new Document(); @@ -92,7 +95,7 @@ /** open a reader and fetch the document */ IndexReader reader = writer.getReader(); - Document docFromReader = reader.document(0); + Document docFromReader = reader.document2(0); assertTrue(docFromReader != null); /** fetch the binary compressed field and compare it's content with the original one */ Index: lucene/src/test/org/apache/lucene/document2/TestDateTools.java =================================================================== --- lucene/src/test/org/apache/lucene/document2/TestDateTools.java (revision 0) +++ lucene/src/test/org/apache/lucene/document2/TestDateTools.java (working copy) @@ -1,4 +1,4 @@ -package org.apache.lucene.document; +package org.apache.lucene.document2; import java.text.ParseException; import java.text.SimpleDateFormat; Index: lucene/src/test/org/apache/lucene/document2/TestDocument.java =================================================================== --- lucene/src/test/org/apache/lucene/document2/TestDocument.java (revision 0) +++ lucene/src/test/org/apache/lucene/document2/TestDocument.java (working copy) @@ -1,6 +1,7 @@ -package org.apache.lucene.document; +package 
org.apache.lucene.document2; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; @@ -37,20 +38,22 @@ public void testBinaryField() throws Exception { Document doc = new Document(); - Fieldable stringFld = new Field("string", binaryVal, Field.Store.YES, - Field.Index.NO); - Fieldable binaryFld = new Field("binary", binaryVal.getBytes()); - Fieldable binaryFld2 = new Field("binary", binaryVal2.getBytes()); + FieldType ft = new FieldType(); + ft.setStored(true); + IndexableField stringFld = new Field("string", ft, binaryVal); + IndexableField binaryFld = new BinaryField("binary", binaryVal.getBytes()); + IndexableField binaryFld2 = new BinaryField("binary", binaryVal2.getBytes()); + doc.add(stringFld); doc.add(binaryFld); assertEquals(2, doc.fields.size()); - assertTrue(binaryFld.isBinary()); - assertTrue(binaryFld.isStored()); - assertFalse(binaryFld.isIndexed()); - assertFalse(binaryFld.isTokenized()); + assertTrue(binaryFld.binaryValue(null) != null); + assertTrue(binaryFld.stored()); + assertFalse(binaryFld.indexed()); + assertFalse(binaryFld.tokenized()); String binaryTest = new String(doc.getBinaryValue("binary")); assertTrue(binaryTest.equals(binaryVal)); @@ -115,19 +118,22 @@ } public void testConstructorExceptions() { - new Field("name", "value", Field.Store.YES, Field.Index.NO); // okay - new Field("name", "value", Field.Store.NO, Field.Index.NOT_ANALYZED); // okay + FieldType ft = new FieldType(); + ft.setStored(true); + new Field("name", ft, "value"); // okay + new StringField("name", "value"); // okay try { - new Field("name", "value", Field.Store.NO, Field.Index.NO); + new Field("name", new FieldType(), "value"); fail(); } catch (IllegalArgumentException e) { // expected exception } - new Field("name", "value", Field.Store.YES, Field.Index.NO, - Field.TermVector.NO); // okay + new 
Field("name", ft, "value"); // okay try { - new Field("name", "value", Field.Store.YES, Field.Index.NO, - Field.TermVector.YES); + FieldType ft2 = new FieldType(); + ft2.setStored(true); + ft2.setStoreTermVectors(true); + new Field("name", ft2, "value"); fail(); } catch (IllegalArgumentException e) { // expected exception @@ -165,7 +171,7 @@ ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - doAssert(searcher.doc(hits[0].doc), true); + doAssert(searcher.doc2(hits[0].doc), true); writer.close(); searcher.close(); reader.close(); @@ -174,28 +180,26 @@ private Document makeDocumentWithFields() { Document doc = new Document(); - doc.add(new Field("keyword", "test1", Field.Store.YES, - Field.Index.NOT_ANALYZED)); - doc.add(new Field("keyword", "test2", Field.Store.YES, - Field.Index.NOT_ANALYZED)); - doc.add(new Field("text", "test1", Field.Store.YES, Field.Index.ANALYZED)); - doc.add(new Field("text", "test2", Field.Store.YES, Field.Index.ANALYZED)); - doc.add(new Field("unindexed", "test1", Field.Store.YES, Field.Index.NO)); - doc.add(new Field("unindexed", "test2", Field.Store.YES, Field.Index.NO)); + FieldType stored = new FieldType(); + stored.setStored(true); + doc.add(new Field("keyword", StringField.TYPE_STORED, "test1")); + doc.add(new Field("keyword", StringField.TYPE_STORED, "test2")); + doc.add(new Field("text", TextField.TYPE_STORED, "test1")); + doc.add(new Field("text", TextField.TYPE_STORED, "test2")); + doc.add(new Field("unindexed", stored, "test1")); + doc.add(new Field("unindexed", stored, "test2")); doc - .add(new Field("unstored", "test1", Field.Store.NO, - Field.Index.ANALYZED)); + .add(new TextField("unstored", "test1")); doc - .add(new Field("unstored", "test2", Field.Store.NO, - Field.Index.ANALYZED)); + .add(new TextField("unstored", "test2")); return doc; } private void doAssert(Document doc, boolean fromIndex) { - String[] keywordFieldValues = doc.getValues("keyword"); - String[] textFieldValues = 
doc.getValues("text"); - String[] unindexedFieldValues = doc.getValues("unindexed"); - String[] unstoredFieldValues = doc.getValues("unstored"); + IndexableField[] keywordFieldValues = doc.getFields("keyword"); + IndexableField[] textFieldValues = doc.getFields("text"); + IndexableField[] unindexedFieldValues = doc.getFields("unindexed"); + IndexableField[] unstoredFieldValues = doc.getFields("unstored"); assertTrue(keywordFieldValues.length == 2); assertTrue(textFieldValues.length == 2); @@ -206,28 +210,26 @@ assertTrue(unstoredFieldValues.length == 2); } - assertTrue(keywordFieldValues[0].equals("test1")); - assertTrue(keywordFieldValues[1].equals("test2")); - assertTrue(textFieldValues[0].equals("test1")); - assertTrue(textFieldValues[1].equals("test2")); - assertTrue(unindexedFieldValues[0].equals("test1")); - assertTrue(unindexedFieldValues[1].equals("test2")); + assertTrue(keywordFieldValues[0].stringValue().equals("test1")); + assertTrue(keywordFieldValues[1].stringValue().equals("test2")); + assertTrue(textFieldValues[0].stringValue().equals("test1")); + assertTrue(textFieldValues[1].stringValue().equals("test2")); + assertTrue(unindexedFieldValues[0].stringValue().equals("test1")); + assertTrue(unindexedFieldValues[1].stringValue().equals("test2")); // this test cannot work for documents retrieved from the index // since unstored fields will obviously not be returned if (!fromIndex) { - assertTrue(unstoredFieldValues[0].equals("test1")); - assertTrue(unstoredFieldValues[1].equals("test2")); + assertTrue(unstoredFieldValues[0].stringValue().equals("test1")); + assertTrue(unstoredFieldValues[1].stringValue().equals("test2")); } } public void testFieldSetValue() throws Exception { - Field field = new Field("id", "id1", Field.Store.YES, - Field.Index.NOT_ANALYZED); + Field field = new Field("id", StringField.TYPE_STORED, "id1"); Document doc = new Document(); doc.add(field); - doc.add(new Field("keyword", "test", Field.Store.YES, - Field.Index.NOT_ANALYZED)); 
+ doc.add(new Field("keyword", StringField.TYPE_STORED, "test")); Directory dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random, dir); @@ -247,8 +249,8 @@ assertEquals(3, hits.length); int result = 0; for (int i = 0; i < 3; i++) { - Document doc2 = searcher.doc(hits[i].doc); - Field f = doc2.getField("id"); + Document doc2 = searcher.doc2(hits[i].doc); + Field f = (Field) doc2.getField("id"); if (f.stringValue().equals("id1")) result |= 1; else if (f.stringValue().equals("id2")) result |= 2; else if (f.stringValue().equals("id3")) result |= 4; @@ -262,9 +264,8 @@ } public void testFieldSetValueChangeBinary() { - Field field1 = new Field("field1", new byte[0]); - Field field2 = new Field("field2", "", Field.Store.YES, - Field.Index.ANALYZED); + Field field1 = new BinaryField("field1", new byte[0]); + Field field2 = new Field("field2", TextField.TYPE_STORED, ""); try { field1.setValue("abc"); fail("did not hit expected exception"); Index: lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (revision 1150855) +++ lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (working copy) @@ -267,7 +267,7 @@ final int hitCount = hits.length; assertEquals("wrong number of hits", expectedCount, hitCount); for(int i=0;i 0) { - searcher.doc(hits[0].doc); + searcher.doc2(hits[0].doc); } searcher.close(); if (refreshed != r) { @@ -1105,7 +1105,7 @@ assertTrue(r1 != r3); r1.close(); try { - r1.document(2); + r1.document2(2); fail("did not hit exception"); } catch (AlreadyClosedException ace) { // expected Index: lucene/src/test/org/apache/lucene/index/TestIndexWriter.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestIndexWriter.java (revision 1150855) +++ 
lucene/src/test/org/apache/lucene/index/TestIndexWriter.java (working copy) @@ -1289,13 +1289,13 @@ assertEquals(17, b.length, 17); assertEquals(87, b[0]); - assertTrue(ir.document(0).getFieldable("binary").isBinary()); - assertTrue(ir.document(1).getFieldable("binary").isBinary()); - assertTrue(ir.document(2).getFieldable("binary").isBinary()); + assertTrue(ir.document2(0).getField("binary").binaryValue(null)!=null); + assertTrue(ir.document2(1).getField("binary").binaryValue(null)!=null); + assertTrue(ir.document2(2).getField("binary").binaryValue(null)!=null); - assertEquals("value", ir.document(0).get("string")); - assertEquals("value", ir.document(1).get("string")); - assertEquals("value", ir.document(2).get("string")); + assertEquals("value", ir.document2(0).get("string")); + assertEquals("value", ir.document2(1).get("string")); + assertEquals("value", ir.document2(2).get("string")); // test that the terms were indexed. Index: lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java (revision 1150855) +++ lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java (working copy) @@ -60,21 +60,20 @@ /* private field types */ /* private field types */ - private static final FieldType custom = new FieldType(TextField.TYPE_UNSTORED); private static final FieldType custom1 = new FieldType(TextField.TYPE_UNSTORED); - private static final FieldType custom2 = new FieldType(StringField.TYPE_UNSTORED); + private static final FieldType custom2 = new FieldType(); private static final FieldType custom3 = new FieldType(); private static final FieldType custom4 = new FieldType(StringField.TYPE_UNSTORED); private static final FieldType custom5 = new FieldType(TextField.TYPE_UNSTORED); static { - custom.setStored(true); custom1.setStoreTermVectors(true); custom1.setStoreTermVectorPositions(true); 
custom1.setStoreTermVectorOffsets(true); - + custom2.setStored(true); + custom2.setIndexed(true); custom3.setStored(true); @@ -134,7 +133,7 @@ final Document doc = new Document(); - doc.add(newField("content1", "aaa bbb ccc ddd", DocCopyIterator.custom)); + doc.add(newField("content1", "aaa bbb ccc ddd", TextField.TYPE_STORED)); doc.add(newField("content6", "aaa bbb ccc ddd", DocCopyIterator.custom1)); doc.add(newField("content2", "aaa bbb ccc ddd", DocCopyIterator.custom2)); doc.add(newField("content3", "aaa bbb ccc ddd", DocCopyIterator.custom3)); @@ -370,7 +369,7 @@ MockIndexWriter2 w = new MockIndexWriter2(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); w.setInfoStream(VERBOSE ? System.out : null); Document doc = new Document(); - doc.add(newField("field", "a field", DocCopyIterator.custom)); + doc.add(newField("field", "a field", TextField.TYPE_STORED)); w.addDocument(doc); w.doFail = true; try { @@ -389,7 +388,7 @@ MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2)); w.setInfoStream(VERBOSE ? 
System.out : null); Document doc = new Document(); - doc.add(newField("field", "a field", DocCopyIterator.custom)); + doc.add(newField("field", "a field", TextField.TYPE_STORED)); w.addDocument(doc); Analyzer analyzer = new Analyzer() { @@ -402,7 +401,7 @@ }; Document crashDoc = new Document(); - crashDoc.add(newField("crash", "do it on token 4", DocCopyIterator.custom)); + crashDoc.add(newField("crash", "do it on token 4", TextField.TYPE_STORED)); try { w.addDocument(crashDoc, analyzer); fail("did not hit expected exception"); @@ -443,7 +442,7 @@ MockIndexWriter3 w = new MockIndexWriter3(dir, conf); w.doFail = true; Document doc = new Document(); - doc.add(newField("field", "a field", DocCopyIterator.custom)); + doc.add(newField("field", "a field", TextField.TYPE_STORED)); for(int i=0;i<10;i++) try { w.addDocument(doc); @@ -652,7 +651,7 @@ if (delDocs.get(j)) numDel++; else { - reader.document(j); + reader.document2(j); reader.getTermFreqVectors(j); } } @@ -676,7 +675,7 @@ int numDel = 0; assertNull(MultiFields.getDeletedDocs(reader)); for(int j=0;j? 
" + score1, score > score1); @@ -394,10 +394,10 @@ float score2 = h[2].score; float score3 = h[3].score; - String doc0 = s.doc(h[0].doc).get("id"); - String doc1 = s.doc(h[1].doc).get("id"); - String doc2 = s.doc(h[2].doc).get("id"); - String doc3 = s.doc(h[3].doc).get("id"); + String doc0 = s.doc2(h[0].doc).get("id"); + String doc1 = s.doc2(h[1].doc).get("id"); + String doc2 = s.doc2(h[2].doc).get("id"); + String doc3 = s.doc2(h[3].doc).get("id"); assertTrue("doc0 should be d2 or d4: " + doc0, doc0.equals("d2") || doc0.equals("d4")); @@ -448,10 +448,10 @@ float score2 = h[2].score; float score3 = h[3].score; - String doc0 = s.doc(h[0].doc).get("id"); - String doc1 = s.doc(h[1].doc).get("id"); - String doc2 = s.doc(h[2].doc).get("id"); - String doc3 = s.doc(h[3].doc).get("id"); + String doc0 = s.doc2(h[0].doc).get("id"); + String doc1 = s.doc2(h[1].doc).get("id"); + String doc2 = s.doc2(h[2].doc).get("id"); + String doc3 = s.doc2(h[3].doc).get("id"); assertEquals("doc0 should be d4: ", "d4", doc0); assertEquals("doc1 should be d3: ", "d3", doc1); Index: lucene/src/test/org/apache/lucene/search/TestFuzzyQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestFuzzyQuery.java (revision 1150855) +++ lucene/src/test/org/apache/lucene/search/TestFuzzyQuery.java (working copy) @@ -84,7 +84,7 @@ assertEquals("3 documents should match", 3, hits.length); List order = Arrays.asList("bbbbb","abbbb","aabbb"); for (int i = 0; i < hits.length; i++) { - final String term = searcher.doc(hits[i].doc).get("field"); + final String term = searcher.doc2(hits[i].doc).get("field"); //System.out.println(hits[i].score); assertEquals(order.get(i), term); } @@ -96,7 +96,7 @@ assertEquals("only 2 documents should match", 2, hits.length); order = Arrays.asList("bbbbb","abbbb"); for (int i = 0; i < hits.length; i++) { - final String term = searcher.doc(hits[i].doc).get("field"); + final String term = 
searcher.doc2(hits[i].doc).get("field"); //System.out.println(hits[i].score); assertEquals(order.get(i), term); } @@ -113,43 +113,43 @@ query = new FuzzyQuery(new Term("field", "aaaaa"), FuzzyQuery.defaultMinSimilarity, 0); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(3, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaa")); + assertEquals(searcher.doc2(hits[0].doc).get("field"), ("aaaaa")); // default allows for up to two edits: - assertEquals(searcher.doc(hits[1].doc).get("field"), ("aaaab")); - assertEquals(searcher.doc(hits[2].doc).get("field"), ("aaabb")); + assertEquals(searcher.doc2(hits[1].doc).get("field"), ("aaaab")); + assertEquals(searcher.doc2(hits[2].doc).get("field"), ("aaabb")); // query similar to a word in the index: query = new FuzzyQuery(new Term("field", "aaaac"), FuzzyQuery.defaultMinSimilarity, 0); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(3, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaa")); - assertEquals(searcher.doc(hits[1].doc).get("field"), ("aaaab")); - assertEquals(searcher.doc(hits[2].doc).get("field"), ("aaabb")); + assertEquals(searcher.doc2(hits[0].doc).get("field"), ("aaaaa")); + assertEquals(searcher.doc2(hits[1].doc).get("field"), ("aaaab")); + assertEquals(searcher.doc2(hits[2].doc).get("field"), ("aaabb")); // now with prefix query = new FuzzyQuery(new Term("field", "aaaac"), FuzzyQuery.defaultMinSimilarity, 1); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(3, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaa")); - assertEquals(searcher.doc(hits[1].doc).get("field"), ("aaaab")); - assertEquals(searcher.doc(hits[2].doc).get("field"), ("aaabb")); + assertEquals(searcher.doc2(hits[0].doc).get("field"), ("aaaaa")); + assertEquals(searcher.doc2(hits[1].doc).get("field"), ("aaaab")); + assertEquals(searcher.doc2(hits[2].doc).get("field"), ("aaabb")); query = new FuzzyQuery(new 
Term("field", "aaaac"), FuzzyQuery.defaultMinSimilarity, 2); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(3, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaa")); - assertEquals(searcher.doc(hits[1].doc).get("field"), ("aaaab")); - assertEquals(searcher.doc(hits[2].doc).get("field"), ("aaabb")); + assertEquals(searcher.doc2(hits[0].doc).get("field"), ("aaaaa")); + assertEquals(searcher.doc2(hits[1].doc).get("field"), ("aaaab")); + assertEquals(searcher.doc2(hits[2].doc).get("field"), ("aaabb")); query = new FuzzyQuery(new Term("field", "aaaac"), FuzzyQuery.defaultMinSimilarity, 3); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(3, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaa")); - assertEquals(searcher.doc(hits[1].doc).get("field"), ("aaaab")); - assertEquals(searcher.doc(hits[2].doc).get("field"), ("aaabb")); + assertEquals(searcher.doc2(hits[0].doc).get("field"), ("aaaaa")); + assertEquals(searcher.doc2(hits[1].doc).get("field"), ("aaaab")); + assertEquals(searcher.doc2(hits[2].doc).get("field"), ("aaabb")); query = new FuzzyQuery(new Term("field", "aaaac"), FuzzyQuery.defaultMinSimilarity, 4); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(2, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaa")); - assertEquals(searcher.doc(hits[1].doc).get("field"), ("aaaab")); + assertEquals(searcher.doc2(hits[0].doc).get("field"), ("aaaaa")); + assertEquals(searcher.doc2(hits[1].doc).get("field"), ("aaaab")); query = new FuzzyQuery(new Term("field", "aaaac"), FuzzyQuery.defaultMinSimilarity, 5); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(0, hits.length); @@ -158,25 +158,25 @@ query = new FuzzyQuery(new Term("field", "ddddX"), FuzzyQuery.defaultMinSimilarity, 0); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("ddddd")); + 
assertEquals(searcher.doc2(hits[0].doc).get("field"), ("ddddd")); // now with prefix query = new FuzzyQuery(new Term("field", "ddddX"), FuzzyQuery.defaultMinSimilarity, 1); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("ddddd")); + assertEquals(searcher.doc2(hits[0].doc).get("field"), ("ddddd")); query = new FuzzyQuery(new Term("field", "ddddX"), FuzzyQuery.defaultMinSimilarity, 2); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("ddddd")); + assertEquals(searcher.doc2(hits[0].doc).get("field"), ("ddddd")); query = new FuzzyQuery(new Term("field", "ddddX"), FuzzyQuery.defaultMinSimilarity, 3); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("ddddd")); + assertEquals(searcher.doc2(hits[0].doc).get("field"), ("ddddd")); query = new FuzzyQuery(new Term("field", "ddddX"), FuzzyQuery.defaultMinSimilarity, 4); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("ddddd")); + assertEquals(searcher.doc2(hits[0].doc).get("field"), ("ddddd")); query = new FuzzyQuery(new Term("field", "ddddX"), FuzzyQuery.defaultMinSimilarity, 5); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(0, hits.length); @@ -212,17 +212,17 @@ query = new FuzzyQuery(new Term("field", "aaaaccc"), 0.5f, 0); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaaaa")); + assertEquals(searcher.doc2(hits[0].doc).get("field"), ("aaaaaaa")); // now with prefix query = new FuzzyQuery(new Term("field", "aaaaccc"), 0.5f, 1); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - 
assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaaaa")); + assertEquals(searcher.doc2(hits[0].doc).get("field"), ("aaaaaaa")); query = new FuzzyQuery(new Term("field", "aaaaccc"), 0.5f, 4); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaaaa")); + assertEquals(searcher.doc2(hits[0].doc).get("field"), ("aaaaaaa")); query = new FuzzyQuery(new Term("field", "aaaaccc"), 0.5f, 5); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(0, hits.length); @@ -377,9 +377,9 @@ ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(3, hits.length); // normally, 'Lucenne' would be the first result as IDF will skew the score. - assertEquals("Lucene", reader.document(hits[0].doc).get("field")); - assertEquals("Lucene", reader.document(hits[1].doc).get("field")); - assertEquals("Lucenne", reader.document(hits[2].doc).get("field")); + assertEquals("Lucene", reader.document2(hits[0].doc).get("field")); + assertEquals("Lucene", reader.document2(hits[1].doc).get("field")); + assertEquals("Lucenne", reader.document2(hits[2].doc).get("field")); searcher.close(); reader.close(); directory.close(); @@ -417,7 +417,7 @@ IndexSearcher searcher = newSearcher(r); ScoreDoc[] hits = searcher.search(q, 10).scoreDocs; assertEquals(1, hits.length); - assertEquals("Giga byte", searcher.doc(hits[0].doc).get("field")); + assertEquals("Giga byte", searcher.doc2(hits[0].doc).get("field")); searcher.close(); r.close(); index.close(); @@ -443,28 +443,28 @@ FuzzyQuery q = (FuzzyQuery) qp.parse("fouba~2"); ScoreDoc[] hits = searcher.search(q, 10).scoreDocs; assertEquals(1, hits.length); - assertEquals("foobar", searcher.doc(hits[0].doc).get("field")); + assertEquals("foobar", searcher.doc2(hits[0].doc).get("field")); q = (FuzzyQuery) qp.parse("foubara~2"); hits = searcher.search(q, 10).scoreDocs; assertEquals(1, hits.length); - assertEquals("foobar", 
searcher.doc(hits[0].doc).get("field")); + assertEquals("foobar", searcher.doc2(hits[0].doc).get("field")); q = (FuzzyQuery) qp.parse("t~3"); hits = searcher.search(q, 10).scoreDocs; assertEquals(1, hits.length); - assertEquals("test", searcher.doc(hits[0].doc).get("field")); + assertEquals("test", searcher.doc2(hits[0].doc).get("field")); q = new FuzzyQuery(new Term("field", "a"), 4f, 0, 50); hits = searcher.search(q, 10).scoreDocs; assertEquals(1, hits.length); - assertEquals("test", searcher.doc(hits[0].doc).get("field")); + assertEquals("test", searcher.doc2(hits[0].doc).get("field")); q = new FuzzyQuery(new Term("field", "a"), 6f, 0, 50); hits = searcher.search(q, 10).scoreDocs; assertEquals(2, hits.length); - assertEquals("test", searcher.doc(hits[0].doc).get("field")); - assertEquals("foobar", searcher.doc(hits[1].doc).get("field")); + assertEquals("test", searcher.doc2(hits[0].doc).get("field")); + assertEquals("foobar", searcher.doc2(hits[1].doc).get("field")); searcher.close(); reader.close(); Index: lucene/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java (revision 1150855) +++ lucene/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java (working copy) @@ -55,9 +55,9 @@ hits = is.search(new MatchAllDocsQuery(), null, 1000).scoreDocs; assertEquals(3, hits.length); - assertEquals("one", is.doc(hits[0].doc).get("key")); - assertEquals("two", is.doc(hits[1].doc).get("key")); - assertEquals("three four", is.doc(hits[2].doc).get("key")); + assertEquals("one", is.doc2(hits[0].doc).get("key")); + assertEquals("two", is.doc2(hits[1].doc).get("key")); + assertEquals("three four", is.doc2(hits[2].doc).get("key")); // assert with norms scoring turned on @@ -65,9 +65,9 @@ hits = is.search(normsQuery, null, 1000).scoreDocs; assertEquals(3, hits.length); - assertEquals("three four", 
is.doc(hits[0].doc).get("key")); - assertEquals("two", is.doc(hits[1].doc).get("key")); - assertEquals("one", is.doc(hits[2].doc).get("key")); + assertEquals("three four", is.doc2(hits[0].doc).get("key")); + assertEquals("two", is.doc2(hits[1].doc).get("key")); + assertEquals("one", is.doc2(hits[2].doc).get("key")); // change norm & retest is.getIndexReader().setNorm(0, "key", is.getSimilarityProvider().get("key").encodeNormValue(400f)); @@ -75,9 +75,9 @@ hits = is.search(normsQuery, null, 1000).scoreDocs; assertEquals(3, hits.length); - assertEquals("one", is.doc(hits[0].doc).get("key")); - assertEquals("three four", is.doc(hits[1].doc).get("key")); - assertEquals("two", is.doc(hits[2].doc).get("key")); + assertEquals("one", is.doc2(hits[0].doc).get("key")); + assertEquals("three four", is.doc2(hits[1].doc).get("key")); + assertEquals("two", is.doc2(hits[2].doc).get("key")); // some artificial queries to trigger the use of skipTo(): Index: lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java (revision 1150855) +++ lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java (working copy) @@ -502,9 +502,9 @@ if (topDocs.totalHits==0) continue; ScoreDoc[] sd = topDocs.scoreDocs; assertNotNull(sd); - int last=Integer.parseInt(searcher.doc(sd[0].doc).get(field)); + int last=Integer.parseInt(searcher.doc2(sd[0].doc).get(field)); for (int j=1; jact ); last=act; } Index: lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java (revision 1150855) +++ lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java (working copy) @@ -534,9 +534,9 @@ if (topDocs.totalHits==0) continue; ScoreDoc[] sd = topDocs.scoreDocs; assertNotNull(sd); - long 
last=Long.parseLong(searcher.doc(sd[0].doc).get(field)); + long last=Long.parseLong(searcher.doc2(sd[0].doc).get(field)); for (int j=1; jact ); last=act; } Index: lucene/src/test/org/apache/lucene/search/TestSort.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestSort.java (revision 1150855) +++ lucene/src/test/org/apache/lucene/search/TestSort.java (working copy) @@ -161,6 +161,7 @@ setMergePolicy(newLogMergePolicy(97)) ); FieldType customType = new FieldType(); + customType.setStored(true); for (int i=0; i 0); Index: lucene/src/test/org/apache/lucene/search/function/TestFieldScoreQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/function/TestFieldScoreQuery.java (revision 1150855) +++ lucene/src/test/org/apache/lucene/search/function/TestFieldScoreQuery.java (working copy) @@ -86,7 +86,7 @@ assertEquals("All docs should be matched!",N_DOCS,h.length); String prevID = "ID"+(N_DOCS+1); // greater than all ids of docs in this test for (int i=0; i 7.0 assertEquals("score of " + id + " shuould be " + expectedScore + " != " + score, expectedScore, score, TEST_SCORE_TOLERANCE_DELTA); } Index: lucene/src/test/org/apache/lucene/search/function/TestOrdValues.java =================================================================== --- lucene/src/test/org/apache/lucene/search/function/TestOrdValues.java (revision 1150855) +++ lucene/src/test/org/apache/lucene/search/function/TestOrdValues.java (working copy) @@ -78,7 +78,7 @@ : "IC"; // smaller than all ids of docs in this test ("ID0001", etc.) for (int i = 0; i < h.length; i++) { - String resID = s.doc(h[i].doc).get(ID_FIELD); + String resID = s.doc2(h[i].doc).get(ID_FIELD); log(i + ". 
score=" + h[i].score + " - " + resID); log(s.explain(q, h[i].doc)); if (inOrder) { @@ -123,7 +123,7 @@ ScoreDoc sd[] = td.scoreDocs; for (int i = 0; i < sd.length; i++) { float score = sd[i].score; - String id = s.getIndexReader().document(sd[i].doc).get(ID_FIELD); + String id = s.getIndexReader().document2(sd[i].doc).get(ID_FIELD); log("-------- " + i + ". Explain doc " + id); log(s.explain(q, sd[i].doc)); float expectedScore = N_DOCS - i; Index: lucene/src/test/org/apache/lucene/store/TestMultiMMap.java =================================================================== --- lucene/src/test/org/apache/lucene/store/TestMultiMMap.java (revision 1150855) +++ lucene/src/test/org/apache/lucene/store/TestMultiMMap.java (working copy) @@ -80,7 +80,7 @@ int numAsserts = atLeast(100); for (int i = 0; i < numAsserts; i++) { int docID = random.nextInt(numDocs); - assertEquals("" + docID, reader.document(docID).get("docid")); + assertEquals("" + docID, reader.document2(docID).get("docid")); } reader.close(); dir.close(); Index: modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DocMaker.java =================================================================== --- modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DocMaker.java (revision 1150855) +++ modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DocMaker.java (working copy) @@ -211,7 +211,7 @@ // Set ID_FIELD FieldType ft = new FieldType(valType); - ft.setIndexed(false); + ft.setIndexed(true); Field idField = ds.getField(ID_FIELD, ft); int id; Index: modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/LineDocSourceTest.java =================================================================== --- modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/LineDocSourceTest.java (revision 1150855) +++ modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/LineDocSourceTest.java (working copy) @@ -148,7 +148,7 @@ if (storedField==null) { 
storedField = DocMaker.BODY_FIELD; // added to all docs and satisfies field-name == value } - assertEquals("Wrong field value", storedField, searcher.doc(0).get(storedField)); + assertEquals("Wrong field value", storedField, searcher.doc2(0).get(storedField)); searcher.close(); } Index: modules/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java =================================================================== --- modules/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java (revision 1150855) +++ modules/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java (working copy) @@ -387,7 +387,7 @@ SuggestWord sugWord = new SuggestWord(); for (int i = 0; i < stop; i++) { - sugWord.string = indexSearcher.doc(hits[i].doc).get(F_WORD); // get orig word + sugWord.string = indexSearcher.doc2(hits[i].doc).get(F_WORD); // get orig word // don't suggest a word for itself, that would be silly if (sugWord.string.equals(word)) { Index: solr/src/java/org/apache/solr/core/QuerySenderListener.java =================================================================== --- solr/src/java/org/apache/solr/core/QuerySenderListener.java (revision 1150855) +++ solr/src/java/org/apache/solr/core/QuerySenderListener.java (working copy) @@ -65,7 +65,7 @@ if (o instanceof DocList) { DocList docs = (DocList)o; for (DocIterator iter = docs.iterator(); iter.hasNext();) { - newSearcher.doc(iter.nextDoc()); + newSearcher.doc2(iter.nextDoc()); } } } Index: solr/src/java/org/apache/solr/handler/MoreLikeThisHandler.java =================================================================== --- solr/src/java/org/apache/solr/handler/MoreLikeThisHandler.java (revision 1150855) +++ solr/src/java/org/apache/solr/handler/MoreLikeThisHandler.java (working copy) @@ -29,7 +29,7 @@ import java.util.Map; import java.util.regex.Pattern; -import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; import org.apache.lucene.index.IndexReader; import 
org.apache.lucene.index.Term; import org.apache.lucene.queryParser.ParseException; @@ -342,7 +342,7 @@ public DocListAndSet getMoreLikeThis( int id, int start, int rows, List filters, List terms, int flags ) throws IOException { - Document doc = reader.document(id); + Document doc = reader.document2(id); rawMLTQuery = mlt.like(id); boostedMLTQuery = getBoostedQuery( rawMLTQuery ); if( terms != null ) { @@ -353,7 +353,7 @@ realMLTQuery = new BooleanQuery(); realMLTQuery.add(boostedMLTQuery, BooleanClause.Occur.MUST); realMLTQuery.add( - new TermQuery(new Term(uniqueKeyField.getName(), uniqueKeyField.getType().storedToIndexed(doc.getFieldable(uniqueKeyField.getName())))), + new TermQuery(new Term(uniqueKeyField.getName(), uniqueKeyField.getType().storedToIndexed(doc.getField(uniqueKeyField.getName())))), BooleanClause.Occur.MUST_NOT); DocListAndSet results = new DocListAndSet(); @@ -391,7 +391,7 @@ int id = iterator.nextDoc(); DocListAndSet sim = getMoreLikeThis( id, 0, rows, null, null, flags ); - String name = schema.printableUniqueKey( reader.document( id ) ); + String name = schema.printableUniqueKey( reader.document2( id ) ); mlt.add(name, sim.docList); } Index: solr/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java =================================================================== --- solr/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java (revision 1150855) +++ solr/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java (working copy) @@ -33,9 +33,10 @@ import org.slf4j.LoggerFactory; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Fieldable; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; import org.apache.lucene.index.Fields; import org.apache.lucene.index.FieldsEnum; @@ -121,7 
+122,7 @@ if( docId != null ) { Document doc = null; try { - doc = reader.document( docId ); + doc = reader.document2( docId ); } catch( Exception ex ) {} if( doc == null ) { @@ -164,19 +165,19 @@ /** * @return a string representing a Fieldable's flags. */ - private static String getFieldFlags( Fieldable f ) + private static String getFieldFlags( IndexableField f ) { StringBuilder flags = new StringBuilder(); - flags.append( (f != null && f.isIndexed()) ? FieldFlag.INDEXED.getAbbreviation() : '-' ); - flags.append( (f != null && f.isTokenized()) ? FieldFlag.TOKENIZED.getAbbreviation() : '-' ); - flags.append( (f != null && f.isStored()) ? FieldFlag.STORED.getAbbreviation() : '-' ); + flags.append( (f != null && f.indexed()) ? FieldFlag.INDEXED.getAbbreviation() : '-' ); + flags.append( (f != null && f.tokenized()) ? FieldFlag.TOKENIZED.getAbbreviation() : '-' ); + flags.append( (f != null && f.stored()) ? FieldFlag.STORED.getAbbreviation() : '-' ); flags.append( (false) ? FieldFlag.MULTI_VALUED.getAbbreviation() : '-' ); // SchemaField Specific - flags.append( (f != null && f.isTermVectorStored()) ? FieldFlag.TERM_VECTOR_STORED.getAbbreviation() : '-' ); - flags.append( (f != null && f.isStoreOffsetWithTermVector()) ? FieldFlag.TERM_VECTOR_OFFSET.getAbbreviation() : '-' ); - flags.append( (f != null && f.isStorePositionWithTermVector()) ? FieldFlag.TERM_VECTOR_POSITION.getAbbreviation() : '-' ); - flags.append( (f != null && f.getOmitNorms()) ? FieldFlag.OMIT_NORMS.getAbbreviation() : '-' ); - flags.append( (f != null && f.isLazy()) ? FieldFlag.LAZY.getAbbreviation() : '-' ); - flags.append( (f != null && f.isBinary()) ? FieldFlag.BINARY.getAbbreviation() : '-' ); + flags.append( (f != null && f.storeTermVectors()) ? FieldFlag.TERM_VECTOR_STORED.getAbbreviation() : '-' ); + flags.append( (f != null && f.storeTermVectorOffsets()) ? FieldFlag.TERM_VECTOR_OFFSET.getAbbreviation() : '-' ); + flags.append( (f != null && f.storeTermVectorPositions()) ? 
FieldFlag.TERM_VECTOR_POSITION.getAbbreviation() : '-' ); + flags.append( (f != null && f.omitNorms()) ? FieldFlag.OMIT_NORMS.getAbbreviation() : '-' ); + flags.append( (f != null && ((Field) f).lazy()) ? FieldFlag.LAZY.getAbbreviation() : '-' ); + flags.append( (f != null && f.binaryValue(null)!=null) ? FieldFlag.BINARY.getAbbreviation() : '-' ); flags.append( (false) ? FieldFlag.SORT_MISSING_FIRST.getAbbreviation() : '-' ); // SchemaField Specific flags.append( (false) ? FieldFlag.SORT_MISSING_LAST.getAbbreviation() : '-' ); // SchemaField Specific return flags.toString(); @@ -236,7 +237,7 @@ final CharsRef spare = new CharsRef(); SimpleOrderedMap finfo = new SimpleOrderedMap(); for( Object o : doc.getFields() ) { - Fieldable fieldable = (Fieldable)o; + Field fieldable = (Field)o; SimpleOrderedMap f = new SimpleOrderedMap(); SchemaField sfield = schema.getFieldOrNull( fieldable.name() ); @@ -257,11 +258,11 @@ if (bytes != null) { f.add( "binary", Base64.byteArrayToBase64(bytes.bytes, bytes.offset, bytes.length)); } - f.add( "boost", fieldable.getBoost() ); + f.add( "boost", fieldable.boost() ); f.add( "docFreq", t.text()==null ? 
0 : reader.docFreq( t ) ); // this can be 0 for non-indexed fields // If we have a term vector, return that - if( fieldable.isTermVectorStored() ) { + if( fieldable.storeTermVectors() ) { try { TermFreqVector v = reader.getTermFreqVector( docId, fieldable.name() ); if( v != null ) { @@ -320,8 +321,8 @@ if( top.totalHits > 0 ) { // Find a document with this field try { - Document doc = searcher.doc( top.scoreDocs[0].doc ); - Fieldable fld = doc.getFieldable( fieldName ); + Document doc = searcher.doc2( top.scoreDocs[0].doc ); + IndexableField fld = doc.getField( fieldName ); if( fld != null ) { f.add( "index", getFieldFlags( fld ) ); } Index: solr/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java =================================================================== --- solr/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java (revision 1150855) +++ solr/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java (working copy) @@ -80,7 +80,7 @@ int id = iterator.nextDoc(); int rows = p.getInt( MoreLikeThisParams.DOC_COUNT, 5 ); DocListAndSet sim = mltHelper.getMoreLikeThis( id, 0, rows, null, null, flags ); - String name = schema.printableUniqueKey( searcher.doc( id ) ); + String name = schema.printableUniqueKey( searcher.doc2( id ) ); mlt.add(name, sim.docList); if( dbg != null ){ @@ -92,7 +92,7 @@ DocIterator mltIte = sim.docList.iterator(); while( mltIte.hasNext() ){ int mltid = mltIte.nextDoc(); - String key = schema.printableUniqueKey( searcher.doc( mltid ) ); + String key = schema.printableUniqueKey( searcher.doc2( mltid ) ); explains.add( key, searcher.explain( mltHelper.getRealMLTQuery(), mltid ) ); } docDbg.add( "explain", explains );