Index: lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java =================================================================== --- lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (revision 1150855) +++ lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (working copy) @@ -185,7 +185,7 @@ Highlighter highlighter = new Highlighter(scorer); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); highlighter.setTextFragmenter(new SimpleFragmenter(40)); @@ -234,7 +234,7 @@ Highlighter highlighter = new Highlighter(this, scorer); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); highlighter.setTextFragmenter(new SimpleFragmenter(40)); @@ -256,7 +256,7 @@ highlighter = new Highlighter(this, scorer); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); highlighter.setTextFragmenter(new SimpleFragmenter(40)); @@ -278,7 +278,7 @@ highlighter = new Highlighter(this, scorer); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); highlighter.setTextFragmenter(new SimpleFragmenter(40)); @@ -303,7 +303,7 @@ Highlighter 
highlighter = new Highlighter(this, scorer); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); highlighter.setTextFragmenter(new SimpleFragmenter(40)); @@ -327,7 +327,7 @@ Highlighter highlighter = new Highlighter(this, scorer); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); highlighter.setTextFragmenter(new SimpleFragmenter(40)); @@ -352,7 +352,7 @@ Highlighter highlighter = new Highlighter(this, scorer); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(NUMERIC_FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(NUMERIC_FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); highlighter.setTextFragmenter(new SimpleFragmenter(40)); @@ -375,7 +375,7 @@ highlighter.setTextFragmenter(new SimpleFragmenter(40)); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); String result = highlighter.getBestFragments(tokenStream, text, maxNumFragmentsRequired, @@ -393,7 +393,7 @@ int maxNumFragmentsRequired = 2; for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); QueryScorer scorer = new QueryScorer(query, FIELD_NAME); 
Highlighter highlighter = new Highlighter(this, scorer); @@ -418,7 +418,7 @@ Highlighter highlighter = new Highlighter(this, scorer); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer, 5)); @@ -437,7 +437,7 @@ highlighter = new Highlighter(this, scorer); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer, 20)); @@ -459,7 +459,7 @@ Highlighter highlighter = new Highlighter(this,scorer); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME,new StringReader(text)); highlighter.setTextFragmenter(new SimpleFragmenter(40)); @@ -530,7 +530,7 @@ highlighter.setTextFragmenter(new SimpleFragmenter(40)); int maxNumFragmentsRequired = 2; for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); String result = highlighter.getBestFragments(tokenStream, text, maxNumFragmentsRequired, @@ -688,7 +688,7 @@ hits = searcher.search(query, null, 1000); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME); + String text = 
searcher.doc2(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME); int maxNumFragmentsRequired = 2; String fragmentSeparator = "..."; QueryScorer scorer; @@ -716,7 +716,7 @@ numHighlights = 0; for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME); int maxNumFragmentsRequired = 2; String fragmentSeparator = "..."; QueryScorer scorer; @@ -744,7 +744,7 @@ numHighlights = 0; for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME); int maxNumFragmentsRequired = 2; String fragmentSeparator = "..."; QueryScorer scorer; @@ -908,7 +908,7 @@ doSearching("Kennedy"); numHighlights = 0; for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream, @@ -922,7 +922,7 @@ numHighlights = 0; for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream, HighlighterTest.this); @@ -933,7 +933,7 @@ numHighlights = 0; for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); Highlighter highlighter = getHighlighter(query, 
FIELD_NAME, tokenStream, @@ -1041,7 +1041,7 @@ // new Highlighter(HighlighterTest.this, new QueryTermScorer(query)); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream, HighlighterTest.this); @@ -1064,7 +1064,7 @@ doSearching("Kennedy"); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream, @@ -1216,7 +1216,7 @@ int maxNumFragmentsRequired = 3; for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream, HighlighterTest.this, false); @@ -1596,7 +1596,7 @@ * QueryFragmentScorer(query)); * * for (int i = 0; i < hits.totalHits; i++) { String text = - * searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream + * searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream * tokenStream=bigramAnalyzer.tokenStream(FIELD_NAME,new StringReader(text)); * String highlightedText = highlighter.getBestFragment(tokenStream,text); * System.out.println(highlightedText); } } @@ -1631,7 +1631,7 @@ public void assertExpectedHighlightCount(final int maxNumFragmentsRequired, final int expectedHighlights) throws Exception { for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); + 
String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); QueryScorer scorer = new QueryScorer(query, FIELD_NAME); Highlighter highlighter = new Highlighter(this, scorer); @@ -1864,7 +1864,7 @@ throws Exception { for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME); + String text = searcher.doc2(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME); int maxNumFragmentsRequired = 2; String fragmentSeparator = "..."; Scorer scorer = null; Index: lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedDocument.java =================================================================== --- lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedDocument.java (revision 1150855) +++ lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedDocument.java (working copy) @@ -64,14 +64,9 @@ this.vectorSpace = vectorSpace; } - public Document getDocument2() { + public Document getDocument() { return document; } - - //nocommit this code is temporary!! 
- public org.apache.lucene.document.Document getDocument() { - return null; - } @Override public String toString() { Index: lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java =================================================================== --- lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java (revision 1150855) +++ lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java (working copy) @@ -189,15 +189,15 @@ } else { InstantiatedDocument document = new InstantiatedDocument(); // copy stored fields from source reader - org.apache.lucene.document.Document sourceDocument = sourceIndexReader.document(i); + Document sourceDocument = sourceIndexReader.document2(i); for (IndexableField field : sourceDocument) { if (fields == null || fields.contains(field.name())) { - document.getDocument2().add(field); + document.getDocument().add(field); } } document.setDocumentNumber(i); documentsByNumber[i] = document; - for (IndexableField field : document.getDocument2()) { + for (IndexableField field : document.getDocument()) { if (fields == null || fields.contains(field.name())) { if (field.storeTermVectors()) { if (document.getVectorSpace() == null) { @@ -290,7 +290,7 @@ if (document == null) { continue; // deleted } - for (IndexableField field : document.getDocument2()) { + for (IndexableField field : document.getDocument()) { if (field.storeTermVectors() && field.storeTermVectorOffsets()) { TermPositionVector termPositionVector = (TermPositionVector) sourceIndexReader.getTermFreqVector(document.getDocumentNumber(), field.name()); if (termPositionVector != null) { Index: lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java =================================================================== --- lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java 
(revision 1150855) +++ lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java (working copy) @@ -29,7 +29,7 @@ import java.util.Set; import java.util.Comparator; -import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; import org.apache.lucene.index.*; import org.apache.lucene.index.IndexReader.ReaderContext; import org.apache.lucene.store.Directory; @@ -272,14 +272,9 @@ */ @Override - public Document document(int n) throws IOException { + public Document document2(int n) throws IOException { return getIndex().getDocumentsByNumber()[n].getDocument(); } - - @Override - public org.apache.lucene.document2.Document document2(int n) throws IOException { - return getIndex().getDocumentsByNumber()[n].getDocument2(); - } @Override public void document(int docID, StoredFieldVisitor visitor) throws IOException { Index: lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java =================================================================== --- lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java (revision 1150855) +++ lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java (working copy) @@ -471,7 +471,7 @@ // normalize settings per field name in document Map fieldSettingsByFieldName = new HashMap(); - for (IndexableField field : document.getDocument2()) { + for (IndexableField field : document.getDocument()) { FieldSetting fieldSetting = fieldSettingsByFieldName.get(field.name()); if (fieldSetting == null) { fieldSetting = new FieldSetting(); @@ -515,7 +515,7 @@ Map> tokensByField = new LinkedHashMap>(20); // tokenize indexed fields. 
- for (Iterator it = document.getDocument2().iterator(); it.hasNext();) { + for (Iterator it = document.getDocument().iterator(); it.hasNext();) { IndexableField field = it.next(); Index: lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java =================================================================== --- lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java (revision 1150855) +++ lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java (working copy) @@ -34,7 +34,7 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute; -import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; import org.apache.lucene.index.DocsAndPositionsEnum; import org.apache.lucene.index.DocsEnum; import org.apache.lucene.index.FieldInvertState; Index: lucene/contrib/misc/src/java/org/apache/lucene/document/FieldSelectorVisitor.java =================================================================== --- lucene/contrib/misc/src/java/org/apache/lucene/document/FieldSelectorVisitor.java (revision 1150855) +++ lucene/contrib/misc/src/java/org/apache/lucene/document/FieldSelectorVisitor.java (working copy) @@ -213,7 +213,7 @@ return null; } - public NumericField.DataType getDataType() { + public org.apache.lucene.document2.NumericField.DataType getDataType() { return null; } Index: lucene/contrib/misc/src/java/org/apache/lucene/document2/FieldSelectorVisitor.java =================================================================== --- lucene/contrib/misc/src/java/org/apache/lucene/document2/FieldSelectorVisitor.java (revision 1150855) +++ lucene/contrib/misc/src/java/org/apache/lucene/document2/FieldSelectorVisitor.java (working copy) @@ -20,7 +20,7 @@ import java.io.Reader; import org.apache.lucene.analysis.TokenStream; 
-import org.apache.lucene.document.NumericField.DataType; +import org.apache.lucene.document2.NumericField.DataType; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.FieldReaderException; import org.apache.lucene.index.IndexReader; Index: lucene/contrib/misc/src/test/org/apache/lucene/index/TestContribFieldsReader.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/index/TestContribFieldsReader.java (revision 1150855) +++ lucene/contrib/misc/src/test/org/apache/lucene/index/TestContribFieldsReader.java (working copy) @@ -22,14 +22,13 @@ import java.util.*; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.FieldSelector; -import org.apache.lucene.document.FieldSelectorResult; -import org.apache.lucene.document.FieldSelectorVisitor; -import org.apache.lucene.document.Fieldable; -import org.apache.lucene.document.LoadFirstFieldSelector; -import org.apache.lucene.document.SetBasedFieldSelector; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldSelector; +import org.apache.lucene.document2.FieldSelectorResult; +import org.apache.lucene.document2.FieldSelectorVisitor; +import org.apache.lucene.document2.LoadFirstFieldSelector; +import org.apache.lucene.document2.SetBasedFieldSelector; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; @@ -87,28 +86,28 @@ SetBasedFieldSelector fieldSelector = new SetBasedFieldSelector(loadFieldNames, lazyFieldNames); Document doc = getDocument(reader, 0, fieldSelector); assertTrue("doc is null and it shouldn't be", doc != null); - Fieldable field = doc.getFieldable(DocHelper.LAZY_FIELD_KEY); + IndexableField field = 
doc.getField(DocHelper.LAZY_FIELD_KEY); assertTrue("field is null and it shouldn't be", field != null); - assertTrue("field is not lazy and it should be", field.isLazy()); + assertTrue("field is not lazy and it should be", ((Field) field).lazy()); String value = field.stringValue(); assertTrue("value is null and it shouldn't be", value != null); assertTrue(value + " is not equal to " + DocHelper.LAZY_FIELD_TEXT, value.equals(DocHelper.LAZY_FIELD_TEXT) == true); assertTrue("calling stringValue() twice should give same reference", field.stringValue() == field.stringValue()); - field = doc.getFieldable(DocHelper.TEXT_FIELD_1_KEY); + field = doc.getField(DocHelper.TEXT_FIELD_1_KEY); assertTrue("field is null and it shouldn't be", field != null); - assertTrue("Field is lazy and it should not be", field.isLazy() == false); - field = doc.getFieldable(DocHelper.TEXT_FIELD_UTF1_KEY); + assertTrue("Field is lazy and it should not be", ((Field) field).lazy() == false); + field = doc.getField(DocHelper.TEXT_FIELD_UTF1_KEY); assertTrue("field is null and it shouldn't be", field != null); - assertTrue("Field is lazy and it should not be", field.isLazy() == false); + assertTrue("Field is lazy and it should not be", ((Field) field).lazy() == false); assertTrue(field.stringValue() + " is not equal to " + DocHelper.FIELD_UTF1_TEXT, field.stringValue().equals(DocHelper.FIELD_UTF1_TEXT) == true); - field = doc.getFieldable(DocHelper.TEXT_FIELD_UTF2_KEY); + field = doc.getField(DocHelper.TEXT_FIELD_UTF2_KEY); assertTrue("field is null and it shouldn't be", field != null); - assertTrue("Field is lazy and it should not be", field.isLazy() == true); + assertTrue("Field is lazy and it should not be", ((Field) field).lazy() == true); assertTrue(field.stringValue() + " is not equal to " + DocHelper.FIELD_UTF2_TEXT, field.stringValue().equals(DocHelper.FIELD_UTF2_TEXT) == true); - field = doc.getFieldable(DocHelper.LAZY_FIELD_BINARY_KEY); + field = 
doc.getField(DocHelper.LAZY_FIELD_BINARY_KEY); assertTrue("field is null and it shouldn't be", field != null); assertTrue("stringValue isn't null for lazy binary field", field.stringValue() == null); @@ -152,32 +151,32 @@ Document doc = getDocument(reader, 0, fieldSelector); assertTrue("doc is null and it shouldn't be", doc != null); - Fieldable field = doc.getFieldable(DocHelper.LAZY_FIELD_KEY); + IndexableField field = doc.getField(DocHelper.LAZY_FIELD_KEY); assertTrue("field is null and it shouldn't be", field != null); - assertTrue("field is not lazy and it should be", field.isLazy()); + assertTrue("field is not lazy and it should be", ((Field) field).lazy()); String value = field.stringValue(); assertTrue("value is null and it shouldn't be", value != null); assertTrue(value + " is not equal to " + DocHelper.LAZY_FIELD_TEXT, value.equals(DocHelper.LAZY_FIELD_TEXT) == true); assertTrue("calling stringValue() twice should give different references", field.stringValue() != field.stringValue()); - field = doc.getFieldable(DocHelper.TEXT_FIELD_1_KEY); + field = doc.getField(DocHelper.TEXT_FIELD_1_KEY); assertTrue("field is null and it shouldn't be", field != null); - assertTrue("Field is lazy and it should not be", field.isLazy() == false); + assertTrue("Field is lazy and it should not be", ((Field) field).lazy() == false); assertTrue("calling stringValue() twice should give same reference", field.stringValue() == field.stringValue()); - field = doc.getFieldable(DocHelper.TEXT_FIELD_UTF1_KEY); + field = doc.getField(DocHelper.TEXT_FIELD_UTF1_KEY); assertTrue("field is null and it shouldn't be", field != null); - assertTrue("Field is lazy and it should not be", field.isLazy() == false); + assertTrue("Field is lazy and it should not be", ((Field) field).lazy() == false); assertTrue(field.stringValue() + " is not equal to " + DocHelper.FIELD_UTF1_TEXT, field.stringValue().equals(DocHelper.FIELD_UTF1_TEXT) == true); assertTrue("calling stringValue() twice should give 
same reference", field.stringValue() == field.stringValue()); - field = doc.getFieldable(DocHelper.TEXT_FIELD_UTF2_KEY); + field = doc.getField(DocHelper.TEXT_FIELD_UTF2_KEY); assertTrue("field is null and it shouldn't be", field != null); - assertTrue("Field is lazy and it should not be", field.isLazy() == true); + assertTrue("Field is lazy and it should not be", ((Field) field).lazy() == true); assertTrue(field.stringValue() + " is not equal to " + DocHelper.FIELD_UTF2_TEXT, field.stringValue().equals(DocHelper.FIELD_UTF2_TEXT) == true); assertTrue("calling stringValue() twice should give different references", field.stringValue() != field.stringValue()); - field = doc.getFieldable(DocHelper.LAZY_FIELD_BINARY_KEY); + field = doc.getField(DocHelper.LAZY_FIELD_BINARY_KEY); assertTrue("field is null and it shouldn't be", field != null); assertTrue("stringValue isn't null for lazy binary field", field.stringValue() == null); assertTrue("calling binaryValue() twice should give different references", field.binaryValue(null).bytes != field.binaryValue(null).bytes); @@ -200,9 +199,9 @@ Document doc = getDocument(reader, 0, fieldSelector); assertTrue("doc is null and it shouldn't be", doc != null); int count = 0; - List l = doc.getFields(); - for (final Fieldable fieldable : l ) { - Field field = (Field) fieldable; + List l = doc.getFields(); + for (final IndexableField IndexableField : l ) { + Field field = (Field) IndexableField; assertTrue("field is null and it shouldn't be", field != null); String sv = field.stringValue(); @@ -244,11 +243,11 @@ IndexReader reader = IndexReader.open(tmpDir); Document doc; - doc = reader.document(0);//Load all of them + doc = reader.document2(0);//Load all of them assertTrue("doc is null and it shouldn't be", doc != null); - Fieldable field = doc.getFieldable(DocHelper.LARGE_LAZY_FIELD_KEY); + IndexableField field = doc.getField(DocHelper.LARGE_LAZY_FIELD_KEY); assertTrue("field is null and it shouldn't be", field != null); - 
assertTrue("field is lazy", field.isLazy() == false); + assertTrue("field is lazy", ((Field) field).lazy() == false); String value; long start; long finish; @@ -265,8 +264,8 @@ System.gc(); reader = IndexReader.open(tmpDir); doc = getDocument(reader, 0, fieldSelector); - field = doc.getFieldable(DocHelper.LARGE_LAZY_FIELD_KEY); - assertTrue("field is not lazy", field.isLazy() == true); + field = doc.getField(DocHelper.LARGE_LAZY_FIELD_KEY); + assertTrue("field is not lazy", ((Field) field).lazy() == true); start = System.currentTimeMillis(); //On my machine this took around 50 - 70ms value = field.stringValue(); @@ -298,12 +297,12 @@ return FieldSelectorResult.NO_LOAD; } }); - Fieldable f1 = doc.getFieldable(DocHelper.TEXT_FIELD_1_KEY); - Fieldable f3 = doc.getFieldable(DocHelper.TEXT_FIELD_3_KEY); - Fieldable fb = doc.getFieldable(DocHelper.LAZY_FIELD_BINARY_KEY); - assertTrue(f1.isBinary()); - assertTrue(!f3.isBinary()); - assertTrue(fb.isBinary()); + IndexableField f1 = doc.getField(DocHelper.TEXT_FIELD_1_KEY); + IndexableField f3 = doc.getField(DocHelper.TEXT_FIELD_3_KEY); + IndexableField fb = doc.getField(DocHelper.LAZY_FIELD_BINARY_KEY); + assertTrue(f1.binaryValue(null)!=null); + assertTrue(f3.binaryValue(null)==null); + assertTrue(fb.binaryValue(null)!=null); assertSizeEquals(2*DocHelper.FIELD_1_TEXT.length(), f1.binaryValue(null).bytes); assertEquals(DocHelper.FIELD_3_TEXT, f3.stringValue()); assertSizeEquals(DocHelper.LAZY_FIELD_BINARY_BYTES.length, fb.binaryValue(null).bytes); Index: lucene/contrib/misc/src/test/org/apache/lucene/index/TestContribIndexReader.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/index/TestContribIndexReader.java (revision 1150855) +++ lucene/contrib/misc/src/test/org/apache/lucene/index/TestContribIndexReader.java (working copy) @@ -23,10 +23,10 @@ import java.util.Set; import org.apache.lucene.analysis.MockAnalyzer; -import 
org.apache.lucene.document.Fieldable; -import org.apache.lucene.document.FieldSelector; -import org.apache.lucene.document.FieldSelectorVisitor; -import org.apache.lucene.document.SetBasedFieldSelector; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldSelector; +import org.apache.lucene.document2.FieldSelectorVisitor; +import org.apache.lucene.document2.SetBasedFieldSelector; import org.apache.lucene.document2.BinaryField; import org.apache.lucene.document2.Document; import org.apache.lucene.document2.FieldType; @@ -37,7 +37,7 @@ import org.apache.lucene.util.LuceneTestCase; public class TestContribIndexReader extends LuceneTestCase { - private org.apache.lucene.document.Document getDocument(IndexReader ir, int docID, FieldSelector selector) throws IOException { + private Document getDocument(IndexReader ir, int docID, FieldSelector selector) throws IOException { final FieldSelectorVisitor visitor = new FieldSelectorVisitor(selector); ir.document(docID, visitor); return visitor.getDocument(); @@ -137,11 +137,11 @@ writer.addDocument(doc); writer.close(); IndexReader reader = IndexReader.open(dir, false); - org.apache.lucene.document.Document doc2 = reader.document(reader.maxDoc() - 1); - org.apache.lucene.document.Field[] fields = doc2.getFields("bin1"); + Document doc2 = reader.document2(reader.maxDoc() - 1); + IndexableField[] fields = doc2.getFields("bin1"); assertNotNull(fields); assertEquals(1, fields.length); - org.apache.lucene.document.Field b1 = fields[0]; + Field b1 = (Field) fields[0]; assertTrue(b1.isBinary()); BytesRef bytesRef = b1.binaryValue(null); assertEquals(bin.length, bytesRef.length); @@ -152,11 +152,11 @@ lazyFields.add("bin1"); FieldSelector sel = new SetBasedFieldSelector(new HashSet(), lazyFields); doc2 = getDocument(reader, reader.maxDoc() - 1, sel); - Fieldable[] fieldables = doc2.getFieldables("bin1"); + IndexableField[] fieldables = doc2.getFields("bin1"); assertNotNull(fieldables); assertEquals(1, 
fieldables.length); - Fieldable fb1 = fieldables[0]; - assertTrue(fb1.isBinary()); + IndexableField fb1 = fieldables[0]; + assertTrue(fb1.binaryValue(null)!=null); bytesRef = fb1.binaryValue(null); assertEquals(bin.length, bytesRef.bytes.length); assertEquals(bin.length, bytesRef.length); @@ -171,11 +171,11 @@ writer.optimize(); writer.close(); reader = IndexReader.open(dir, false); - doc2 = reader.document(reader.maxDoc() - 1); + doc2 = reader.document2(reader.maxDoc() - 1); fields = doc2.getFields("bin1"); assertNotNull(fields); assertEquals(1, fields.length); - b1 = fields[0]; + b1 = (Field) fields[0]; assertTrue(b1.isBinary()); bytesRef = b1.binaryValue(null); assertEquals(bin.length, bytesRef.length); Index: lucene/contrib/misc/src/test/org/apache/lucene/index/TestContribParallelReader.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/index/TestContribParallelReader.java (revision 1150855) +++ lucene/contrib/misc/src/test/org/apache/lucene/index/TestContribParallelReader.java (working copy) @@ -25,9 +25,9 @@ import org.apache.lucene.document2.Document; import org.apache.lucene.document2.FieldType; import org.apache.lucene.document2.TextField; -import org.apache.lucene.document.FieldSelector; -import org.apache.lucene.document.FieldSelectorVisitor; -import org.apache.lucene.document.MapFieldSelector; +import org.apache.lucene.document2.FieldSelector; +import org.apache.lucene.document2.FieldSelectorVisitor; +import org.apache.lucene.document2.MapFieldSelector; import org.apache.lucene.search.*; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; @@ -90,7 +90,7 @@ return newSearcher(pr); } - private org.apache.lucene.document.Document getDocument(IndexReader ir, int docID, FieldSelector selector) throws IOException { + private Document getDocument(IndexReader ir, int docID, FieldSelector selector) throws IOException { final FieldSelectorVisitor visitor = 
new FieldSelectorVisitor(selector); ir.document(docID, visitor); return visitor.getDocument(); @@ -103,9 +103,9 @@ pr.add(IndexReader.open(dir1, false)); pr.add(IndexReader.open(dir2, false)); - org.apache.lucene.document.Document doc11 = getDocument(pr, 0, new MapFieldSelector("f1")); - org.apache.lucene.document.Document doc24 = getDocument(pr, 1, new MapFieldSelector(Arrays.asList("f4"))); - org.apache.lucene.document.Document doc223 = getDocument(pr, 1, new MapFieldSelector("f2", "f3")); + Document doc11 = getDocument(pr, 0, new MapFieldSelector("f1")); + Document doc24 = getDocument(pr, 1, new MapFieldSelector(Arrays.asList("f4"))); + Document doc223 = getDocument(pr, 1, new MapFieldSelector("f2", "f3")); assertEquals(1, doc11.getFields().size()); assertEquals(1, doc24.getFields().size()); Index: lucene/contrib/misc/src/test/org/apache/lucene/index/TestLazyBug.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/index/TestLazyBug.java (revision 1150855) +++ lucene/contrib/misc/src/test/org/apache/lucene/index/TestLazyBug.java (working copy) @@ -22,10 +22,9 @@ import java.util.Set; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Fieldable; -import org.apache.lucene.document.FieldSelector; -import org.apache.lucene.document.FieldSelectorResult; -import org.apache.lucene.document.FieldSelectorVisitor; +import org.apache.lucene.document2.FieldSelector; +import org.apache.lucene.document2.FieldSelectorResult; +import org.apache.lucene.document2.FieldSelectorVisitor; import org.apache.lucene.document2.Document; import org.apache.lucene.document2.TextField; import org.apache.lucene.store.Directory; @@ -108,12 +107,12 @@ for (int i = 0; i < docs.length; i++) { final FieldSelectorVisitor visitor = new FieldSelectorVisitor(SELECTOR); reader.document(docs[i], visitor); - org.apache.lucene.document.Document d = visitor.getDocument(); + Document d = 
visitor.getDocument(); d.get(MAGIC_FIELD); - List fields = d.getFields(); - for (Iterator fi = fields.iterator(); fi.hasNext(); ) { - Fieldable f=null; + List fields = d.getFields(); + for (Iterator fi = fields.iterator(); fi.hasNext(); ) { + IndexableField f=null; try { f = fi.next(); String fname = f.name(); Index: lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java (revision 1150855) +++ lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java (working copy) @@ -77,7 +77,7 @@ IndexReader ir; ir = IndexReader.open(dirs[0], true); assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); // rounding error - org.apache.lucene.document.Document doc = ir.document(0); + Document doc = ir.document2(0); assertEquals("0", doc.get("id")); TermsEnum te = MultiFields.getTerms(ir, "id").iterator(); assertEquals(TermsEnum.SeekStatus.NOT_FOUND, te.seek(new BytesRef("1"))); @@ -85,7 +85,7 @@ ir.close(); ir = IndexReader.open(dirs[1], true); assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); - doc = ir.document(0); + doc = ir.document2(0); assertEquals("1", doc.get("id")); te = MultiFields.getTerms(ir, "id").iterator(); assertEquals(TermsEnum.SeekStatus.NOT_FOUND, te.seek(new BytesRef("0"))); @@ -94,7 +94,7 @@ ir.close(); ir = IndexReader.open(dirs[2], true); assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); - doc = ir.document(0); + doc = ir.document2(0); assertEquals("2", doc.get("id")); te = MultiFields.getTerms(ir, "id").iterator(); @@ -122,19 +122,19 @@ IndexReader ir; ir = IndexReader.open(dirs[0], true); assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); - org.apache.lucene.document.Document doc = ir.document(0); + Document doc = ir.document2(0); assertEquals("0", doc.get("id")); int start = ir.numDocs(); ir.close(); ir = IndexReader.open(dirs[1], true); assertTrue(ir.numDocs() - 
NUM_DOCS / 3 <= 1); - doc = ir.document(0); + doc = ir.document2(0); assertEquals(start + "", doc.get("id")); start += ir.numDocs(); ir.close(); ir = IndexReader.open(dirs[2], true); assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); - doc = ir.document(0); + doc = ir.document2(0); assertEquals(start + "", doc.get("id")); // make sure the deleted doc is not here TermsEnum te = MultiFields.getTerms(ir, "id").iterator(); Index: lucene/contrib/misc/src/test/org/apache/lucene/search/TestThreadSafe.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/search/TestThreadSafe.java (revision 1150855) +++ lucene/contrib/misc/src/test/org/apache/lucene/search/TestThreadSafe.java (working copy) @@ -22,10 +22,8 @@ import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.IndexWriterConfig.OpenMode; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Fieldable; -import org.apache.lucene.document.FieldSelector; -import org.apache.lucene.document.FieldSelectorVisitor; import org.apache.lucene.document2.*; import java.util.Random; @@ -71,7 +69,7 @@ } - private org.apache.lucene.document.Document getDocument(IndexReader ir, int docID, FieldSelector selector) throws IOException { + private Document getDocument(IndexReader ir, int docID, FieldSelector selector) throws IOException { final FieldSelectorVisitor visitor = new FieldSelectorVisitor(selector); ir.document(docID, visitor); return visitor.getDocument(); @@ -79,21 +77,20 @@ void loadDoc(IndexReader ir) throws IOException { // beware of deleted docs in the future - org.apache.lucene.document.Document doc = getDocument(ir, rand.nextInt(ir.maxDoc()), - new org.apache.lucene.document.FieldSelector() { - public org.apache.lucene.document.FieldSelectorResult accept(String fieldName) { + Document doc = getDocument(ir, 
rand.nextInt(ir.maxDoc()), + new FieldSelector() { + public FieldSelectorResult accept(String fieldName) { switch(rand.nextInt(2)) { - case 0: return org.apache.lucene.document.FieldSelectorResult.LAZY_LOAD; - case 1: return org.apache.lucene.document.FieldSelectorResult.LOAD; + case 0: return FieldSelectorResult.LAZY_LOAD; + case 1: return FieldSelectorResult.LOAD; // TODO: add other options - default: return org.apache.lucene.document.FieldSelectorResult.LOAD; + default: return FieldSelectorResult.LOAD; } } } ); - List fields = doc.getFields(); - for (final Fieldable f : fields ) { + for (final IndexableField f : doc ) { validateField(f); } @@ -102,7 +99,7 @@ } - void validateField(Fieldable f) { + void validateField(IndexableField f) { String val = f.stringValue(); if (!val.startsWith("^") || !val.endsWith("$")) { throw new RuntimeException("Invalid field:" + f.toString() + " val=" +val); Index: lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java =================================================================== --- lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java (revision 1150855) +++ lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java (working copy) @@ -33,8 +33,9 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermFreqVector; import org.apache.lucene.search.BooleanClause; @@ -800,7 +801,7 @@ o.println(); ScoreDoc[] scoreDocs = hits.scoreDocs; for (int i = 0; i < Math.min(25, len); i++) { - Document d = searcher.doc(scoreDocs[i].doc); + Document d = searcher.doc2(scoreDocs[i].doc); String summary = d.get( 
"summary"); o.println("score : " + scoreDocs[i].score); o.println("url : " + d.get("url")); @@ -824,12 +825,12 @@ // field does not store term vector info if (vector == null) { - Document d=ir.document(docNum); - String text[]=d.getValues(fieldName); + Document d=ir.document2(docNum); + IndexableField text[]=d.getFields(fieldName); if(text!=null) { for (int j = 0; j < text.length; j++) { - addTermFrequencies(new StringReader(text[j]), termFreqMap, fieldName); + addTermFrequencies(new StringReader(text[j].stringValue()), termFreqMap, fieldName); } } } Index: lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java =================================================================== --- lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java (revision 1150855) +++ lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java (working copy) @@ -131,13 +131,13 @@ TopDocs hits = searcher.search(query, chain, 1000); numHits = hits.totalHits; assertEquals(MAX / 2, numHits); - assertEquals("bob", searcher.doc(hits.scoreDocs[0].doc).get("owner")); + assertEquals("bob", searcher.doc2(hits.scoreDocs[0].doc).get("owner")); chain = getChainedFilter(new Filter[] {bobFilter}, new int[] {ChainedFilter.ANDNOT}); hits = searcher.search(query, chain, 1000); numHits = hits.totalHits; assertEquals(MAX / 2, numHits); - assertEquals("sue", searcher.doc(hits.scoreDocs[0].doc).get("owner")); + assertEquals("sue", searcher.doc2(hits.scoreDocs[0].doc).get("owner")); } public void testOR() throws Exception { @@ -154,7 +154,7 @@ TopDocs hits = searcher.search(query, chain, 1000); assertEquals("AND matches just bob", MAX / 2, hits.totalHits); - assertEquals("bob", searcher.doc(hits.scoreDocs[0].doc).get("owner")); + assertEquals("bob", searcher.doc2(hits.scoreDocs[0].doc).get("owner")); } public void testXOR() throws Exception { @@ -163,7 +163,7 @@ TopDocs hits = searcher.search(query, chain, 1000); assertEquals("XOR matches sue", 
MAX / 2, hits.totalHits); - assertEquals("sue", searcher.doc(hits.scoreDocs[0].doc).get("owner")); + assertEquals("sue", searcher.doc2(hits.scoreDocs[0].doc).get("owner")); } public void testANDNOT() throws Exception { @@ -174,7 +174,7 @@ TopDocs hits = searcher.search(query, chain, 1000); assertEquals("ANDNOT matches just bob", MAX / 2, hits.totalHits); - assertEquals("bob", searcher.doc(hits.scoreDocs[0].doc).get("owner")); + assertEquals("bob", searcher.doc2(hits.scoreDocs[0].doc).get("owner")); chain = getChainedFilter( new Filter[]{bobFilter, bobFilter}, @@ -183,7 +183,7 @@ hits = searcher.search(query, chain, 1000); assertEquals("ANDNOT bob ANDNOT bob matches all sues", MAX / 2, hits.totalHits); - assertEquals("sue", searcher.doc(hits.scoreDocs[0].doc).get("owner")); + assertEquals("sue", searcher.doc2(hits.scoreDocs[0].doc).get("owner")); } /* Index: lucene/contrib/queries/src/test/org/apache/lucene/search/TestSlowCollationMethods.java =================================================================== --- lucene/contrib/queries/src/test/org/apache/lucene/search/TestSlowCollationMethods.java (revision 1150855) +++ lucene/contrib/queries/src/test/org/apache/lucene/search/TestSlowCollationMethods.java (working copy) @@ -92,7 +92,7 @@ TopFieldDocs docs = searcher.search(new MatchAllDocsQuery(), null, numDocs, new Sort(sf)); String prev = ""; for (ScoreDoc doc : docs.scoreDocs) { - String value = reader.document(doc.doc).get("field"); + String value = reader.document2(doc.doc).get("field"); assertTrue(collator.compare(value, prev) >= 0); prev = value; } @@ -102,7 +102,7 @@ // positive test TopDocs docs = searcher.search(query, numDocs); for (ScoreDoc doc : docs.scoreDocs) { - String value = reader.document(doc.doc).get("field"); + String value = reader.document2(doc.doc).get("field"); assertTrue(collator.compare(value, startPoint) >= 0); assertTrue(collator.compare(value, endPoint) <= 0); } @@ -113,7 +113,7 @@ bq.add(query, Occur.MUST_NOT); docs = 
searcher.search(bq, numDocs); for (ScoreDoc doc : docs.scoreDocs) { - String value = reader.document(doc.doc).get("field"); + String value = reader.document2(doc.doc).get("field"); assertTrue(collator.compare(value, startPoint) < 0 || collator.compare(value, endPoint) > 0); } } Index: lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/StandardQueryParser.java =================================================================== --- lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/StandardQueryParser.java (revision 1150855) +++ lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/StandardQueryParser.java (working copy) @@ -22,7 +22,7 @@ import java.util.TooManyListenersException; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.document.DateTools; +import org.apache.lucene.document2.DateTools; import org.apache.lucene.queryParser.core.QueryNodeException; import org.apache.lucene.queryParser.core.QueryParserHelper; import org.apache.lucene.queryParser.core.config.QueryConfigHandler; Index: lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/DateResolutionAttribute.java =================================================================== --- lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/DateResolutionAttribute.java (revision 1150855) +++ lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/DateResolutionAttribute.java (working copy) @@ -17,8 +17,8 @@ * limitations under the License. 
*/ -import org.apache.lucene.document.DateTools; -import org.apache.lucene.document.DateTools.Resolution; +import org.apache.lucene.document2.DateTools; +import org.apache.lucene.document2.DateTools.Resolution; import org.apache.lucene.queryParser.core.config.QueryConfigHandler; import org.apache.lucene.queryParser.standard.nodes.RangeQueryNode; import org.apache.lucene.queryParser.standard.processors.ParametricRangeQueryNodeProcessor; Index: lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/DateResolutionAttributeImpl.java =================================================================== --- lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/DateResolutionAttributeImpl.java (revision 1150855) +++ lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/DateResolutionAttributeImpl.java (working copy) @@ -17,8 +17,8 @@ * limitations under the License. */ -import org.apache.lucene.document.DateTools; -import org.apache.lucene.document.DateTools.Resolution; +import org.apache.lucene.document2.DateTools; +import org.apache.lucene.document2.DateTools.Resolution; import org.apache.lucene.queryParser.core.config.QueryConfigHandler; import org.apache.lucene.queryParser.standard.processors.ParametricRangeQueryNodeProcessor; import org.apache.lucene.util.AttributeImpl; Index: lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionFCListener.java =================================================================== --- lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionFCListener.java (revision 1150855) +++ lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionFCListener.java (working copy) @@ -17,7 +17,7 @@ * limitations under the License. 
*/ -import org.apache.lucene.document.DateTools; +import org.apache.lucene.document2.DateTools; import org.apache.lucene.queryParser.core.config.FieldConfig; import org.apache.lucene.queryParser.core.config.FieldConfigListener; import org.apache.lucene.queryParser.core.config.QueryConfigHandler; Index: lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionMapAttribute.java =================================================================== --- lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionMapAttribute.java (revision 1150855) +++ lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionMapAttribute.java (working copy) @@ -19,7 +19,7 @@ import java.util.Map; -import org.apache.lucene.document.DateTools; +import org.apache.lucene.document2.DateTools; import org.apache.lucene.util.Attribute; /** Index: lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionMapAttributeImpl.java =================================================================== --- lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionMapAttributeImpl.java (revision 1150855) +++ lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionMapAttributeImpl.java (working copy) @@ -20,8 +20,8 @@ import java.util.HashMap; import java.util.Map; -import org.apache.lucene.document.DateTools; -import org.apache.lucene.document.DateTools.Resolution; +import org.apache.lucene.document2.DateTools; +import org.apache.lucene.document2.DateTools.Resolution; import org.apache.lucene.util.AttributeImpl; /** Index: lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/ParametricRangeQueryNodeProcessor.java =================================================================== --- 
lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/ParametricRangeQueryNodeProcessor.java (revision 1150855) +++ lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/ParametricRangeQueryNodeProcessor.java (working copy) @@ -23,8 +23,8 @@ import java.util.List; import java.util.Locale; -import org.apache.lucene.document.DateTools; -import org.apache.lucene.document.DateTools.Resolution; +import org.apache.lucene.document2.DateTools; +import org.apache.lucene.document2.DateTools.Resolution; import org.apache.lucene.queryParser.core.QueryNodeException; import org.apache.lucene.queryParser.core.config.FieldConfig; import org.apache.lucene.queryParser.core.config.QueryConfigHandler; Index: lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java =================================================================== --- lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java (revision 1150855) +++ lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java (working copy) @@ -34,7 +34,7 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.document.DateTools; +import org.apache.lucene.document2.DateTools; import org.apache.lucene.queryParser.TestQueryParser; import org.apache.lucene.queryParser.core.QueryNodeException; import org.apache.lucene.queryParser.core.QueryNodeParseException; Index: lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java =================================================================== --- lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java (revision 1150855) +++ 
lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java (working copy) @@ -39,7 +39,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -import org.apache.lucene.document.DateTools; +import org.apache.lucene.document2.DateTools; import org.apache.lucene.document2.Document; import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexWriter; Index: lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java =================================================================== --- lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java (revision 1150855) +++ lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java (working copy) @@ -30,8 +30,9 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.search.BooleanClause; @@ -147,11 +148,11 @@ @Override public void collect(int doc) throws IOException { - Document d = reader.document(doc); - String[] values = d.getValues( Syns2Index.F_SYN); + Document d = reader.document2(doc); + IndexableField[] values = d.getFields( Syns2Index.F_SYN); for ( int j = 0; j < values.length; j++) { - String syn = values[ j]; + String syn = values[ j].stringValue(); if ( already.add( syn)) // avoid dups of top level words and synonyms { TermQuery tq = new TermQuery( new Term( field, syn)); Index: 
lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynLookup.java =================================================================== --- lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynLookup.java (revision 1150855) +++ lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynLookup.java (working copy) @@ -29,8 +29,9 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.search.BooleanClause; @@ -90,12 +91,12 @@ ScoreDoc[] hits = searcher.search(query, countingCollector.numHits).scoreDocs; for (int i = 0; i < hits.length; i++) { - Document doc = searcher.doc(hits[i].doc); + Document doc = searcher.doc2(hits[i].doc); - String[] values = doc.getValues(Syns2Index.F_SYN); + IndexableField[] values = doc.getFields(Syns2Index.F_SYN); for (int j = 0; j < values.length; j++) { - System.out.println(values[j]); + System.out.println(values[j].stringValue()); } } @@ -154,11 +155,11 @@ @Override public void collect(int doc) throws IOException { - Document d = reader.document(doc); - String[] values = d.getValues( Syns2Index.F_SYN); + Document d = reader.document2(doc); + IndexableField[] values = d.getFields( Syns2Index.F_SYN); for ( int j = 0; j < values.length; j++) { - String syn = values[ j]; + String syn = values[ j].stringValue(); if ( already.add( syn)) { TermQuery tq = new TermQuery( new Term( field, syn)); Index: lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/Syns2Index.java =================================================================== --- lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/Syns2Index.java 
(revision 1150855) +++ lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/Syns2Index.java (working copy) @@ -32,8 +32,10 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.StringField; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.TieredMergePolicy; @@ -260,7 +262,7 @@ int n = index(word2Nums, num2Words, g, doc); if (n > 0) { - doc.add( new Field( F_WORD, g, Field.Store.YES, Field.Index.NOT_ANALYZED)); + doc.add( new Field( F_WORD, StringField.TYPE_STORED, g)); if ((++row % mod) == 0) { o.println("\trow=" + row + "/" + word2Nums.size() + " doc= " + doc); @@ -305,7 +307,9 @@ continue; } num++; - doc.add( new Field( F_SYN, cur, Field.Store.YES, Field.Index.NO)); + FieldType ft = new FieldType(); + ft.setStored(true); + doc.add( new Field( F_SYN, ft, cur)); } return num; } Index: lucene/contrib/xml-query-parser/src/demo/java/org/apache/lucene/xmlparser/webdemo/FormBasedXmlQueryDemo.java =================================================================== --- lucene/contrib/xml-query-parser/src/demo/java/org/apache/lucene/xmlparser/webdemo/FormBasedXmlQueryDemo.java (revision 1150855) +++ lucene/contrib/xml-query-parser/src/demo/java/org/apache/lucene/xmlparser/webdemo/FormBasedXmlQueryDemo.java (working copy) @@ -34,8 +34,8 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; import org.apache.lucene.index.CorruptIndexException; import 
org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; Index: lucene/src/java/org/apache/lucene/analysis/NumericTokenStream.java =================================================================== --- lucene/src/java/org/apache/lucene/analysis/NumericTokenStream.java (revision 1150855) +++ lucene/src/java/org/apache/lucene/analysis/NumericTokenStream.java (working copy) @@ -22,7 +22,7 @@ import org.apache.lucene.util.AttributeReflector; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; -import org.apache.lucene.document.NumericField; // for javadocs +import org.apache.lucene.document2.NumericField; // for javadocs import org.apache.lucene.search.NumericRangeQuery; // for javadocs import org.apache.lucene.search.NumericRangeFilter; // for javadocs import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; Index: lucene/src/java/org/apache/lucene/analysis/TokenStream.java =================================================================== --- lucene/src/java/org/apache/lucene/analysis/TokenStream.java (revision 1150855) +++ lucene/src/java/org/apache/lucene/analysis/TokenStream.java (working copy) @@ -21,8 +21,8 @@ import java.io.Closeable; import java.lang.reflect.Modifier; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.util.Attribute; import org.apache.lucene.util.AttributeImpl; Index: lucene/src/java/org/apache/lucene/document/Document.java =================================================================== --- lucene/src/java/org/apache/lucene/document/Document.java (revision 1150855) +++ lucene/src/java/org/apache/lucene/document/Document.java (working copy) @@ -102,7 +102,7 @@ return field instanceof NumericField; } - public NumericField.DataType numericDataType() { + public 
org.apache.lucene.document2.NumericField.DataType numericDataType() { return field.getDataType(); } Index: lucene/src/java/org/apache/lucene/document/Field.java =================================================================== --- lucene/src/java/org/apache/lucene/document/Field.java (revision 1150855) +++ lucene/src/java/org/apache/lucene/document/Field.java (working copy) @@ -20,6 +20,7 @@ import java.io.Reader; import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.document2.NumericField.DataType; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.util.StringHelper; @@ -279,7 +280,7 @@ return null; } - public NumericField.DataType getDataType() { + public DataType getDataType() { return null; } Index: lucene/src/java/org/apache/lucene/document/Fieldable.java =================================================================== --- lucene/src/java/org/apache/lucene/document/Fieldable.java (revision 1150855) +++ lucene/src/java/org/apache/lucene/document/Fieldable.java (working copy) @@ -19,6 +19,7 @@ import java.io.Reader; import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.document2.NumericField.DataType; import org.apache.lucene.index.FieldInvertState; // for javadocs import org.apache.lucene.search.PhraseQuery; // for javadocs import org.apache.lucene.search.spans.SpanQuery; // for javadocs @@ -171,7 +172,7 @@ // nocommit api break abstract BytesRef binaryValue(BytesRef reuse); - abstract NumericField.DataType getDataType(); + abstract DataType getDataType(); abstract Number getNumericValue(); Index: lucene/src/java/org/apache/lucene/document/NumericField.java =================================================================== --- lucene/src/java/org/apache/lucene/document/NumericField.java (revision 1150855) +++ lucene/src/java/org/apache/lucene/document/NumericField.java (working copy) @@ -21,6 +21,7 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.NumericTokenStream; 
+import org.apache.lucene.document2.NumericField.DataType; import org.apache.lucene.util.NumericUtils; import org.apache.lucene.search.NumericRangeQuery; // javadocs import org.apache.lucene.search.NumericRangeFilter; // javadocs @@ -134,10 +135,8 @@ /** Data type of the value in {@link NumericField}. * @since 3.2 */ - public static enum DataType { INT, LONG, FLOAT, DOUBLE } - private transient NumericTokenStream numericTS; - private DataType type; + private org.apache.lucene.document2.NumericField.DataType type; private final int precisionStep; /** @@ -251,7 +250,7 @@ /** Returns the data type of the current value, {@code null} if not yet set. * @since 3.2 */ - public DataType getDataType() { + public org.apache.lucene.document2.NumericField.DataType getDataType() { return type; } Index: lucene/src/java/org/apache/lucene/document2/CompressionTools.java =================================================================== --- lucene/src/java/org/apache/lucene/document2/CompressionTools.java (revision 0) +++ lucene/src/java/org/apache/lucene/document2/CompressionTools.java (revision 0) @@ -0,0 +1,127 @@ +package org.apache.lucene.document2; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import java.util.zip.Deflater; +import java.util.zip.Inflater; +import java.util.zip.DataFormatException; +import java.io.ByteArrayOutputStream; + +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.CharsRef; +import org.apache.lucene.util.UnicodeUtil; + +/** Simple utility class providing static methods to + * compress and decompress binary data for stored fields. + * This class uses java.util.zip.Deflater and Inflater + * classes to compress and decompress. + */ + +public class CompressionTools { + + // Export only static methods + private CompressionTools() {} + + /** Compresses the specified byte range using the + * specified compressionLevel (constants are defined in + * java.util.zip.Deflater). */ + public static byte[] compress(byte[] value, int offset, int length, int compressionLevel) { + + /* Create an expandable byte array to hold the compressed data. + * You cannot use an array that's the same size as the original because + * there is no guarantee that the compressed data will be smaller than + * the uncompressed data.
*/ + ByteArrayOutputStream bos = new ByteArrayOutputStream(length); + + Deflater compressor = new Deflater(); + + try { + compressor.setLevel(compressionLevel); + compressor.setInput(value, offset, length); + compressor.finish(); + + // Compress the data + final byte[] buf = new byte[1024]; + while (!compressor.finished()) { + int count = compressor.deflate(buf); + bos.write(buf, 0, count); + } + } finally { + compressor.end(); + } + + return bos.toByteArray(); + } + + /** Compresses the specified byte range, with default BEST_COMPRESSION level */ + public static byte[] compress(byte[] value, int offset, int length) { + return compress(value, offset, length, Deflater.BEST_COMPRESSION); + } + + /** Compresses all bytes in the array, with default BEST_COMPRESSION level */ + public static byte[] compress(byte[] value) { + return compress(value, 0, value.length, Deflater.BEST_COMPRESSION); + } + + /** Compresses the String value, with default BEST_COMPRESSION level */ + public static byte[] compressString(String value) { + return compressString(value, Deflater.BEST_COMPRESSION); + } + + /** Compresses the String value using the specified + * compressionLevel (constants are defined in + * java.util.zip.Deflater). 
*/ + public static byte[] compressString(String value, int compressionLevel) { + BytesRef result = new BytesRef(); + UnicodeUtil.UTF16toUTF8(value, 0, value.length(), result); + return compress(result.bytes, 0, result.length, compressionLevel); + } + + /** Decompress the byte array previously returned by + * compress */ + public static byte[] decompress(byte[] value) throws DataFormatException { + // Create an expandable byte array to hold the decompressed data + ByteArrayOutputStream bos = new ByteArrayOutputStream(value.length); + + Inflater decompressor = new Inflater(); + + try { + decompressor.setInput(value); + + // Decompress the data + final byte[] buf = new byte[1024]; + while (!decompressor.finished()) { + int count = decompressor.inflate(buf); + bos.write(buf, 0, count); + } + } finally { + decompressor.end(); + } + + return bos.toByteArray(); + } + + /** Decompress the byte array previously returned by + * compressString back into a String */ + public static String decompressString(byte[] value) throws DataFormatException { + final byte[] bytes = decompress(value); + CharsRef result = new CharsRef(bytes.length); + UnicodeUtil.UTF8toUTF16(bytes, 0, bytes.length, result); + return new String(result.chars, 0, result.length); + } +} Index: lucene/src/java/org/apache/lucene/document2/Field.java =================================================================== --- lucene/src/java/org/apache/lucene/document2/Field.java (revision 1150855) +++ lucene/src/java/org/apache/lucene/document2/Field.java (working copy) @@ -20,7 +20,7 @@ import java.io.Reader; import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.document.NumericField; +import org.apache.lucene.document2.NumericField; import org.apache.lucene.index.IndexableField; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.StringHelper; @@ -94,10 +94,20 @@ } public Field(String name, boolean internName, FieldType type, String value) { - if (name == null) + if (name == 
null) { throw new IllegalArgumentException("name cannot be null"); - if (value == null) + } + if (value == null) { throw new IllegalArgumentException("value cannot be null"); + } + if (!type.stored() && !type.indexed()) { + throw new IllegalArgumentException("it doesn't make sense to have a field that " + + "is neither indexed nor stored"); + } + if (!type.indexed() && !type.tokenized() && (type.storeTermVectors())) { + throw new IllegalArgumentException("cannot store term vector information " + + "for a field that is not indexed"); + } this.type = type; this.name = name; Index: lucene/src/java/org/apache/lucene/document2/NumericField.java =================================================================== --- lucene/src/java/org/apache/lucene/document2/NumericField.java (revision 1150855) +++ lucene/src/java/org/apache/lucene/document2/NumericField.java (working copy) @@ -21,7 +21,7 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.NumericTokenStream; -import org.apache.lucene.document.NumericField.DataType; +import org.apache.lucene.document2.NumericField.DataType; import org.apache.lucene.util.NumericUtils; import org.apache.lucene.search.NumericRangeQuery; // javadocs import org.apache.lucene.search.NumericRangeFilter; // javadocs @@ -134,17 +134,10 @@ */ public final class NumericField extends Field { - /** - * Data type of the value in {@link NumericField}. - * + /** Data type of the value in {@link NumericField}. 
* @since 3.2 */ - - /* - public static enum DataType { - INT, LONG, FLOAT, DOUBLE - } - */ + public static enum DataType { INT, LONG, FLOAT, DOUBLE } public static final FieldType TYPE_UNSTORED = new FieldType(); public static final FieldType TYPE_STORED = new FieldType(); Index: lucene/src/java/org/apache/lucene/index/CheckIndex.java =================================================================== --- lucene/src/java/org/apache/lucene/index/CheckIndex.java (revision 1150855) +++ lucene/src/java/org/apache/lucene/index/CheckIndex.java (working copy) @@ -23,8 +23,7 @@ import org.apache.lucene.store.FSDirectory; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IndexInput; -import org.apache.lucene.document.AbstractField; // for javadocs -import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; import org.apache.lucene.index.codecs.CodecProvider; import org.apache.lucene.index.codecs.DefaultSegmentInfosWriter; import org.apache.lucene.util.Bits; @@ -898,7 +897,7 @@ for (int j = 0; j < info.docCount; ++j) { if (delDocs == null || !delDocs.get(j)) { status.docCount++; - Document doc = reader.document(j); + Document doc = reader.document2(j); status.totFields += doc.getFields().size(); } } Index: lucene/src/java/org/apache/lucene/index/IndexReader.java =================================================================== --- lucene/src/java/org/apache/lucene/index/IndexReader.java (revision 1150855) +++ lucene/src/java/org/apache/lucene/index/IndexReader.java (working copy) @@ -26,7 +26,7 @@ import java.util.Map; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; import org.apache.lucene.index.codecs.Codec; import org.apache.lucene.index.codecs.CodecProvider; import org.apache.lucene.search.FieldCache; // javadocs @@ -974,12 +974,14 @@ * @throws CorruptIndexException if the index is corrupt * @throws IOException if there is a 
low-level IO error */ + /* public Document document(int docID) throws CorruptIndexException, IOException { ensureOpen(); final DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(); document(docID, visitor); return visitor.getDocument(); } + */ public org.apache.lucene.document2.Document document2(int docID) throws CorruptIndexException, IOException { ensureOpen(); Index: lucene/src/java/org/apache/lucene/index/IndexableField.java =================================================================== --- lucene/src/java/org/apache/lucene/index/IndexableField.java (revision 1150855) +++ lucene/src/java/org/apache/lucene/index/IndexableField.java (working copy) @@ -20,7 +20,8 @@ import java.io.Reader; import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.document.NumericField; +import org.apache.lucene.document2.NumericField; +import org.apache.lucene.document2.NumericField.DataType; import org.apache.lucene.util.BytesRef; // nocommit jdocs @@ -66,7 +67,7 @@ // Numeric field: public boolean numeric(); - public NumericField.DataType numericDataType(); + public DataType numericDataType(); public Number numericValue(); // If this returns true then we index this field: Index: lucene/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java =================================================================== --- lucene/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java (revision 1150855) +++ lucene/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java (working copy) @@ -23,11 +23,9 @@ import java.util.Map; import java.util.Map.Entry; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.Fieldable; -import org.apache.lucene.document.Field.Index; -import org.apache.lucene.document.Field.Store; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; 
import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.Directory; import org.apache.lucene.store.LockObtainFailedException; @@ -70,13 +68,13 @@ int numDocs = r.numDocs(); // index is allowed to have exactly one document or 0. if (numDocs == 1) { - Document doc = r.document(r.maxDoc() - 1); - Field sid = doc.getField(SNAPSHOTS_ID); + Document doc = r.document2(r.maxDoc() - 1); + Field sid = (Field) doc.getField(SNAPSHOTS_ID); if (sid == null) { throw new IllegalStateException("directory is not a valid snapshots store!"); } doc.removeField(SNAPSHOTS_ID); - for (Fieldable f : doc.getFields()) { + for (IndexableField f : doc) { snapshots.put(f.name(), f.stringValue()); } } else if (numDocs != 0) { @@ -189,12 +187,14 @@ private void persistSnapshotInfos(String id, String segment) throws IOException { writer.deleteAll(); Document d = new Document(); - d.add(new Field(SNAPSHOTS_ID, "", Store.YES, Index.NO)); + FieldType ft = new FieldType(); + ft.setStored(true); + d.add(new Field(SNAPSHOTS_ID, ft, "")); for (Entry e : super.getSnapshots().entrySet()) { - d.add(new Field(e.getKey(), e.getValue(), Store.YES, Index.NO)); + d.add(new Field(e.getKey(), ft, e.getValue())); } if (id != null) { - d.add(new Field(id, segment, Store.YES, Index.NO)); + d.add(new Field(id, ft, segment)); } writer.addDocument(d); writer.commit(); Index: lucene/src/java/org/apache/lucene/index/SegmentMerger.java =================================================================== --- lucene/src/java/org/apache/lucene/index/SegmentMerger.java (revision 1150855) +++ lucene/src/java/org/apache/lucene/index/SegmentMerger.java (working copy) @@ -23,7 +23,7 @@ import java.util.Collection; import java.util.List; -import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; import org.apache.lucene.index.IndexReader.FieldOption; import org.apache.lucene.index.MergePolicy.MergeAbortedException; import org.apache.lucene.index.codecs.Codec; @@ -316,7 
+316,7 @@ // on the fly? // NOTE: it's very important to first assign to doc then pass it to // termVectorsWriter.addAllDocVectors; see LUCENE-1282 - Document doc = reader.document(j); + Document doc = reader.document2(j); fieldsWriter.addDocument(doc, fieldInfos); docCount++; checkAbort.work(300); @@ -343,7 +343,7 @@ for (; docCount < maxDoc; docCount++) { // NOTE: it's very important to first assign to doc then pass it to // termVectorsWriter.addAllDocVectors; see LUCENE-1282 - Document doc = reader.document(docCount); + Document doc = reader.document2(docCount); fieldsWriter.addDocument(doc, fieldInfos); checkAbort.work(300); } Index: lucene/src/java/org/apache/lucene/index/StoredFieldVisitor.java =================================================================== --- lucene/src/java/org/apache/lucene/index/StoredFieldVisitor.java (revision 1150855) +++ lucene/src/java/org/apache/lucene/index/StoredFieldVisitor.java (working copy) @@ -19,7 +19,7 @@ import java.io.IOException; -import org.apache.lucene.document.Document; // javadocs +import org.apache.lucene.document2.Document; // javadocs import org.apache.lucene.store.IndexInput; /** Index: lucene/src/java/org/apache/lucene/queryParser/QueryParser.java =================================================================== --- lucene/src/java/org/apache/lucene/queryParser/QueryParser.java (revision 1150855) +++ lucene/src/java/org/apache/lucene/queryParser/QueryParser.java (working copy) @@ -7,7 +7,7 @@ import java.util.Locale; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.document.DateTools; +import org.apache.lucene.document2.DateTools; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermRangeQuery; Index: lucene/src/java/org/apache/lucene/queryParser/QueryParser.jj =================================================================== --- lucene/src/java/org/apache/lucene/queryParser/QueryParser.jj (revision 1150855) +++ 
lucene/src/java/org/apache/lucene/queryParser/QueryParser.jj (working copy) @@ -31,7 +31,7 @@ import java.util.Locale; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.document.DateTools; +import org.apache.lucene.document2.DateTools; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermRangeQuery; Index: lucene/src/java/org/apache/lucene/queryParser/QueryParserBase.java =================================================================== --- lucene/src/java/org/apache/lucene/queryParser/QueryParserBase.java (revision 1150855) +++ lucene/src/java/org/apache/lucene/queryParser/QueryParserBase.java (working copy) @@ -28,7 +28,7 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute; -import org.apache.lucene.document.DateTools; +import org.apache.lucene.document2.DateTools; import org.apache.lucene.index.Term; import org.apache.lucene.queryParser.QueryParser.Operator; import org.apache.lucene.search.*; Index: lucene/src/java/org/apache/lucene/queryParser/QueryParserTokenManager.java =================================================================== --- lucene/src/java/org/apache/lucene/queryParser/QueryParserTokenManager.java (revision 1150855) +++ lucene/src/java/org/apache/lucene/queryParser/QueryParserTokenManager.java (working copy) @@ -5,7 +5,7 @@ import java.util.List; import java.util.Locale; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.document.DateTools; +import org.apache.lucene.document2.DateTools; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermRangeQuery; Index: lucene/src/java/org/apache/lucene/search/FieldCache.java =================================================================== --- 
lucene/src/java/org/apache/lucene/search/FieldCache.java (revision 1150855) +++ lucene/src/java/org/apache/lucene/search/FieldCache.java (working copy) @@ -24,7 +24,7 @@ import org.apache.lucene.util.NumericUtils; import org.apache.lucene.util.RamUsageEstimator; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.document.NumericField; // for javadocs +import org.apache.lucene.document2.NumericField; // for javadocs import org.apache.lucene.analysis.NumericTokenStream; // for javadocs import org.apache.lucene.util.packed.PackedInts; Index: lucene/src/java/org/apache/lucene/search/FieldCacheRangeFilter.java =================================================================== --- lucene/src/java/org/apache/lucene/search/FieldCacheRangeFilter.java (revision 1150855) +++ lucene/src/java/org/apache/lucene/search/FieldCacheRangeFilter.java (working copy) @@ -24,7 +24,7 @@ import org.apache.lucene.util.NumericUtils; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.document.NumericField; // for javadocs +import org.apache.lucene.document2.NumericField; // for javadocs /** * A range filter built on top of a cached single term field (in {@link FieldCache}). 
Index: lucene/src/java/org/apache/lucene/search/IndexSearcher.java =================================================================== --- lucene/src/java/org/apache/lucene/search/IndexSearcher.java (revision 1150855) +++ lucene/src/java/org/apache/lucene/search/IndexSearcher.java (working copy) @@ -29,7 +29,7 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; -import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.IndexReader.ReaderContext; @@ -239,9 +239,11 @@ } /* Sugar for .getIndexReader().document(docID) */ + /* public Document doc(int docID) throws CorruptIndexException, IOException { return reader.document(docID); } + */ public org.apache.lucene.document2.Document doc2(int docID) throws CorruptIndexException, IOException { return reader.document2(docID); Index: lucene/src/java/org/apache/lucene/util/NumericUtils.java =================================================================== --- lucene/src/java/org/apache/lucene/util/NumericUtils.java (revision 1150855) +++ lucene/src/java/org/apache/lucene/util/NumericUtils.java (working copy) @@ -18,7 +18,7 @@ */ import org.apache.lucene.analysis.NumericTokenStream; -import org.apache.lucene.document.NumericField; +import org.apache.lucene.document2.NumericField; import org.apache.lucene.search.NumericRangeFilter; import org.apache.lucene.search.NumericRangeQuery; // for javadocs Index: lucene/src/test-framework/org/apache/lucene/search/QueryUtils.java =================================================================== --- lucene/src/test-framework/org/apache/lucene/search/QueryUtils.java (revision 1150855) +++ lucene/src/test-framework/org/apache/lucene/search/QueryUtils.java (working copy) @@ -23,7 +23,7 @@ import junit.framework.Assert; import org.apache.lucene.analysis.MockAnalyzer; 
-import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.IndexWriter; Index: lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java =================================================================== --- lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java (revision 1150855) +++ lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java (working copy) @@ -1073,7 +1073,7 @@ newType.setStored(true); // randomly store it } - if (!newType.storeTermVectors()) { + if (newType.indexed() && !newType.storeTermVectors()) { newType.setStoreTermVectors(random.nextBoolean()); } Index: lucene/src/test/org/apache/lucene/document/TestBinaryDocument.java =================================================================== --- lucene/src/test/org/apache/lucene/document/TestBinaryDocument.java (revision 1150855) +++ lucene/src/test/org/apache/lucene/document/TestBinaryDocument.java (working copy) @@ -1,107 +0,0 @@ -package org.apache.lucene.document; - -import org.apache.lucene.util.LuceneTestCase; - -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.RandomIndexWriter; -import org.apache.lucene.store.Directory; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Tests {@link Document} class. - */ -public class TestBinaryDocument extends LuceneTestCase { - - String binaryValStored = "this text will be stored as a byte array in the index"; - String binaryValCompressed = "this text will be also stored and compressed as a byte array in the index"; - - public void testBinaryFieldInIndex() - throws Exception - { - Fieldable binaryFldStored = new Field("binaryStored", binaryValStored.getBytes()); - Fieldable stringFldStored = new Field("stringStored", binaryValStored, Field.Store.YES, Field.Index.NO, Field.TermVector.NO); - - Document doc = new Document(); - - doc.add(binaryFldStored); - - doc.add(stringFldStored); - - /** test for field count */ - assertEquals(2, doc.fields.size()); - - /** add the doc to a ram index */ - Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); - writer.addDocument(doc); - - /** open a reader and fetch the document */ - IndexReader reader = writer.getReader(); - Document docFromReader = reader.document(0); - assertTrue(docFromReader != null); - - /** fetch the binary stored field and compare it's content with the original one */ - String binaryFldStoredTest = new String(docFromReader.getBinaryValue("binaryStored")); - assertTrue(binaryFldStoredTest.equals(binaryValStored)); - - /** fetch the string field and compare it's content with the original one */ - String stringFldStoredTest = docFromReader.get("stringStored"); - assertTrue(stringFldStoredTest.equals(binaryValStored)); - - writer.close(); - reader.close(); - - reader = 
IndexReader.open(dir, false); - /** delete the document from index */ - reader.deleteDocument(0); - assertEquals(0, reader.numDocs()); - - reader.close(); - dir.close(); - } - - public void testCompressionTools() throws Exception { - Fieldable binaryFldCompressed = new Field("binaryCompressed", CompressionTools.compress(binaryValCompressed.getBytes())); - Fieldable stringFldCompressed = new Field("stringCompressed", CompressionTools.compressString(binaryValCompressed)); - - Document doc = new Document(); - - doc.add(binaryFldCompressed); - doc.add(stringFldCompressed); - - /** add the doc to a ram index */ - Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); - writer.addDocument(doc); - - /** open a reader and fetch the document */ - IndexReader reader = writer.getReader(); - Document docFromReader = reader.document(0); - assertTrue(docFromReader != null); - - /** fetch the binary compressed field and compare it's content with the original one */ - String binaryFldCompressedTest = new String(CompressionTools.decompress(docFromReader.getBinaryValue("binaryCompressed"))); - assertTrue(binaryFldCompressedTest.equals(binaryValCompressed)); - assertTrue(CompressionTools.decompressString(docFromReader.getBinaryValue("stringCompressed")).equals(binaryValCompressed)); - - writer.close(); - reader.close(); - dir.close(); - } -} Index: lucene/src/test/org/apache/lucene/document/TestDateTools.java =================================================================== --- lucene/src/test/org/apache/lucene/document/TestDateTools.java (revision 1150855) +++ lucene/src/test/org/apache/lucene/document/TestDateTools.java (working copy) @@ -1,199 +0,0 @@ -package org.apache.lucene.document; - -import java.text.ParseException; -import java.text.SimpleDateFormat; -import java.util.Calendar; -import java.util.Date; -import java.util.GregorianCalendar; -import java.util.TimeZone; -import java.util.Locale; - -import 
org.apache.lucene.util.LuceneTestCase; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -public class TestDateTools extends LuceneTestCase { - - public void testStringToDate() throws ParseException { - - Date d = null; - d = DateTools.stringToDate("2004"); - assertEquals("2004-01-01 00:00:00:000", isoFormat(d)); - d = DateTools.stringToDate("20040705"); - assertEquals("2004-07-05 00:00:00:000", isoFormat(d)); - d = DateTools.stringToDate("200407050910"); - assertEquals("2004-07-05 09:10:00:000", isoFormat(d)); - d = DateTools.stringToDate("20040705091055990"); - assertEquals("2004-07-05 09:10:55:990", isoFormat(d)); - - try { - d = DateTools.stringToDate("97"); // no date - fail(); - } catch(ParseException e) { /* expected exception */ } - try { - d = DateTools.stringToDate("200401011235009999"); // no date - fail(); - } catch(ParseException e) { /* expected exception */ } - try { - d = DateTools.stringToDate("aaaa"); // no date - fail(); - } catch(ParseException e) { /* expected exception */ } - - } - - public void testStringtoTime() throws ParseException { - long time = DateTools.stringToTime("197001010000"); - Calendar cal = new GregorianCalendar(); - cal.clear(); - cal.set(1970, 0, 1, // year=1970, 
month=january, day=1 - 0, 0, 0); // hour, minute, second - cal.set(Calendar.MILLISECOND, 0); - cal.setTimeZone(TimeZone.getTimeZone("GMT")); - assertEquals(cal.getTime().getTime(), time); - cal.set(1980, 1, 2, // year=1980, month=february, day=2 - 11, 5, 0); // hour, minute, second - cal.set(Calendar.MILLISECOND, 0); - time = DateTools.stringToTime("198002021105"); - assertEquals(cal.getTime().getTime(), time); - } - - public void testDateAndTimetoString() throws ParseException { - Calendar cal = new GregorianCalendar(); - cal.clear(); - cal.setTimeZone(TimeZone.getTimeZone("GMT")); - cal.set(2004, 1, 3, // year=2004, month=february(!), day=3 - 22, 8, 56); // hour, minute, second - cal.set(Calendar.MILLISECOND, 333); - - String dateString; - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.YEAR); - assertEquals("2004", dateString); - assertEquals("2004-01-01 00:00:00:000", isoFormat(DateTools.stringToDate(dateString))); - - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.MONTH); - assertEquals("200402", dateString); - assertEquals("2004-02-01 00:00:00:000", isoFormat(DateTools.stringToDate(dateString))); - - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.DAY); - assertEquals("20040203", dateString); - assertEquals("2004-02-03 00:00:00:000", isoFormat(DateTools.stringToDate(dateString))); - - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.HOUR); - assertEquals("2004020322", dateString); - assertEquals("2004-02-03 22:00:00:000", isoFormat(DateTools.stringToDate(dateString))); - - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.MINUTE); - assertEquals("200402032208", dateString); - assertEquals("2004-02-03 22:08:00:000", isoFormat(DateTools.stringToDate(dateString))); - - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.SECOND); - assertEquals("20040203220856", dateString); - assertEquals("2004-02-03 22:08:56:000", 
isoFormat(DateTools.stringToDate(dateString))); - - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.MILLISECOND); - assertEquals("20040203220856333", dateString); - assertEquals("2004-02-03 22:08:56:333", isoFormat(DateTools.stringToDate(dateString))); - - // date before 1970: - cal.set(1961, 2, 5, // year=1961, month=march(!), day=5 - 23, 9, 51); // hour, minute, second - cal.set(Calendar.MILLISECOND, 444); - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.MILLISECOND); - assertEquals("19610305230951444", dateString); - assertEquals("1961-03-05 23:09:51:444", isoFormat(DateTools.stringToDate(dateString))); - - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.HOUR); - assertEquals("1961030523", dateString); - assertEquals("1961-03-05 23:00:00:000", isoFormat(DateTools.stringToDate(dateString))); - - // timeToString: - cal.set(1970, 0, 1, // year=1970, month=january, day=1 - 0, 0, 0); // hour, minute, second - cal.set(Calendar.MILLISECOND, 0); - dateString = DateTools.timeToString(cal.getTime().getTime(), - DateTools.Resolution.MILLISECOND); - assertEquals("19700101000000000", dateString); - - cal.set(1970, 0, 1, // year=1970, month=january, day=1 - 1, 2, 3); // hour, minute, second - cal.set(Calendar.MILLISECOND, 0); - dateString = DateTools.timeToString(cal.getTime().getTime(), - DateTools.Resolution.MILLISECOND); - assertEquals("19700101010203000", dateString); - } - - public void testRound() { - Calendar cal = new GregorianCalendar(); - cal.clear(); - cal.setTimeZone(TimeZone.getTimeZone("GMT")); - cal.set(2004, 1, 3, // year=2004, month=february(!), day=3 - 22, 8, 56); // hour, minute, second - cal.set(Calendar.MILLISECOND, 333); - Date date = cal.getTime(); - assertEquals("2004-02-03 22:08:56:333", isoFormat(date)); - - Date dateYear = DateTools.round(date, DateTools.Resolution.YEAR); - assertEquals("2004-01-01 00:00:00:000", isoFormat(dateYear)); - - Date dateMonth = DateTools.round(date, 
DateTools.Resolution.MONTH); - assertEquals("2004-02-01 00:00:00:000", isoFormat(dateMonth)); - - Date dateDay = DateTools.round(date, DateTools.Resolution.DAY); - assertEquals("2004-02-03 00:00:00:000", isoFormat(dateDay)); - - Date dateHour = DateTools.round(date, DateTools.Resolution.HOUR); - assertEquals("2004-02-03 22:00:00:000", isoFormat(dateHour)); - - Date dateMinute = DateTools.round(date, DateTools.Resolution.MINUTE); - assertEquals("2004-02-03 22:08:00:000", isoFormat(dateMinute)); - - Date dateSecond = DateTools.round(date, DateTools.Resolution.SECOND); - assertEquals("2004-02-03 22:08:56:000", isoFormat(dateSecond)); - - Date dateMillisecond = DateTools.round(date, DateTools.Resolution.MILLISECOND); - assertEquals("2004-02-03 22:08:56:333", isoFormat(dateMillisecond)); - - // long parameter: - long dateYearLong = DateTools.round(date.getTime(), DateTools.Resolution.YEAR); - assertEquals("2004-01-01 00:00:00:000", isoFormat(new Date(dateYearLong))); - - long dateMillisecondLong = DateTools.round(date.getTime(), DateTools.Resolution.MILLISECOND); - assertEquals("2004-02-03 22:08:56:333", isoFormat(new Date(dateMillisecondLong))); - } - - private String isoFormat(Date date) { - SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss:SSS", Locale.US); - sdf.setTimeZone(TimeZone.getTimeZone("GMT")); - return sdf.format(date); - } - - public void testDateToolsUTC() throws Exception { - // Sun, 30 Oct 2005 00:00:00 +0000 -- the last second of 2005's DST in Europe/London - long time = 1130630400; - try { - TimeZone.setDefault(TimeZone.getTimeZone(/* "GMT" */ "Europe/London")); - String d1 = DateTools.dateToString(new Date(time*1000), DateTools.Resolution.MINUTE); - String d2 = DateTools.dateToString(new Date((time+3600)*1000), DateTools.Resolution.MINUTE); - assertFalse("different times", d1.equals(d2)); - assertEquals("midnight", DateTools.stringToTime(d1), time*1000); - assertEquals("later", DateTools.stringToTime(d2), (time+3600)*1000); - } 
finally { - TimeZone.setDefault(null); - } - } - -} \ No newline at end of file Index: lucene/src/test/org/apache/lucene/document/TestDocument.java =================================================================== --- lucene/src/test/org/apache/lucene/document/TestDocument.java (revision 1150855) +++ lucene/src/test/org/apache/lucene/document/TestDocument.java (working copy) @@ -1,281 +0,0 @@ -package org.apache.lucene.document; - -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.RandomIndexWriter; -import org.apache.lucene.index.Term; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.ScoreDoc; -import org.apache.lucene.search.TermQuery; -import org.apache.lucene.store.Directory; -import org.apache.lucene.util.LuceneTestCase; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Tests {@link Document} class. 
- */ -public class TestDocument extends LuceneTestCase { - - String binaryVal = "this text will be stored as a byte array in the index"; - String binaryVal2 = "this text will be also stored as a byte array in the index"; - - public void testBinaryField() throws Exception { - Document doc = new Document(); - Fieldable stringFld = new Field("string", binaryVal, Field.Store.YES, - Field.Index.NO); - Fieldable binaryFld = new Field("binary", binaryVal.getBytes()); - Fieldable binaryFld2 = new Field("binary", binaryVal2.getBytes()); - - doc.add(stringFld); - doc.add(binaryFld); - - assertEquals(2, doc.fields.size()); - - assertTrue(binaryFld.isBinary()); - assertTrue(binaryFld.isStored()); - assertFalse(binaryFld.isIndexed()); - assertFalse(binaryFld.isTokenized()); - - String binaryTest = new String(doc.getBinaryValue("binary")); - assertTrue(binaryTest.equals(binaryVal)); - - String stringTest = doc.get("string"); - assertTrue(binaryTest.equals(stringTest)); - - doc.add(binaryFld2); - - assertEquals(3, doc.fields.size()); - - byte[][] binaryTests = doc.getBinaryValues("binary"); - - assertEquals(2, binaryTests.length); - - binaryTest = new String(binaryTests[0]); - String binaryTest2 = new String(binaryTests[1]); - - assertFalse(binaryTest.equals(binaryTest2)); - - assertTrue(binaryTest.equals(binaryVal)); - assertTrue(binaryTest2.equals(binaryVal2)); - - doc.removeField("string"); - assertEquals(2, doc.fields.size()); - - doc.removeFields("binary"); - assertEquals(0, doc.fields.size()); - } - - /** - * Tests {@link Document#removeField(String)} method for a brand new Document - * that has not been indexed yet. 
- * - * @throws Exception on error - */ - public void testRemoveForNewDocument() throws Exception { - Document doc = makeDocumentWithFields(); - assertEquals(8, doc.fields.size()); - doc.removeFields("keyword"); - assertEquals(6, doc.fields.size()); - doc.removeFields("doesnotexists"); // removing non-existing fields is - // siltenlty ignored - doc.removeFields("keyword"); // removing a field more than once - assertEquals(6, doc.fields.size()); - doc.removeField("text"); - assertEquals(5, doc.fields.size()); - doc.removeField("text"); - assertEquals(4, doc.fields.size()); - doc.removeField("text"); - assertEquals(4, doc.fields.size()); - doc.removeField("doesnotexists"); // removing non-existing fields is - // siltenlty ignored - assertEquals(4, doc.fields.size()); - doc.removeFields("unindexed"); - assertEquals(2, doc.fields.size()); - doc.removeFields("unstored"); - assertEquals(0, doc.fields.size()); - doc.removeFields("doesnotexists"); // removing non-existing fields is - // siltenlty ignored - assertEquals(0, doc.fields.size()); - } - - public void testConstructorExceptions() { - new Field("name", "value", Field.Store.YES, Field.Index.NO); // okay - new Field("name", "value", Field.Store.NO, Field.Index.NOT_ANALYZED); // okay - try { - new Field("name", "value", Field.Store.NO, Field.Index.NO); - fail(); - } catch (IllegalArgumentException e) { - // expected exception - } - new Field("name", "value", Field.Store.YES, Field.Index.NO, - Field.TermVector.NO); // okay - try { - new Field("name", "value", Field.Store.YES, Field.Index.NO, - Field.TermVector.YES); - fail(); - } catch (IllegalArgumentException e) { - // expected exception - } - } - - /** - * Tests {@link Document#getValues(String)} method for a brand new Document - * that has not been indexed yet. 
- * - * @throws Exception on error - */ - public void testGetValuesForNewDocument() throws Exception { - doAssert(makeDocumentWithFields(), false); - } - - /** - * Tests {@link Document#getValues(String)} method for a Document retrieved - * from an index. - * - * @throws Exception on error - */ - public void testGetValuesForIndexedDocument() throws Exception { - Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); - writer.addDocument(makeDocumentWithFields()); - IndexReader reader = writer.getReader(); - - IndexSearcher searcher = newSearcher(reader); - - // search for something that does exists - Query query = new TermQuery(new Term("keyword", "test1")); - - // ensure that queries return expected results without DateFilter first - ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; - assertEquals(1, hits.length); - - doAssert(searcher.doc(hits[0].doc), true); - writer.close(); - searcher.close(); - reader.close(); - dir.close(); - } - - private Document makeDocumentWithFields() { - Document doc = new Document(); - doc.add(new Field("keyword", "test1", Field.Store.YES, - Field.Index.NOT_ANALYZED)); - doc.add(new Field("keyword", "test2", Field.Store.YES, - Field.Index.NOT_ANALYZED)); - doc.add(new Field("text", "test1", Field.Store.YES, Field.Index.ANALYZED)); - doc.add(new Field("text", "test2", Field.Store.YES, Field.Index.ANALYZED)); - doc.add(new Field("unindexed", "test1", Field.Store.YES, Field.Index.NO)); - doc.add(new Field("unindexed", "test2", Field.Store.YES, Field.Index.NO)); - doc - .add(new Field("unstored", "test1", Field.Store.NO, - Field.Index.ANALYZED)); - doc - .add(new Field("unstored", "test2", Field.Store.NO, - Field.Index.ANALYZED)); - return doc; - } - - private void doAssert(Document doc, boolean fromIndex) { - String[] keywordFieldValues = doc.getValues("keyword"); - String[] textFieldValues = doc.getValues("text"); - String[] unindexedFieldValues = doc.getValues("unindexed"); - 
String[] unstoredFieldValues = doc.getValues("unstored"); - - assertTrue(keywordFieldValues.length == 2); - assertTrue(textFieldValues.length == 2); - assertTrue(unindexedFieldValues.length == 2); - // this test cannot work for documents retrieved from the index - // since unstored fields will obviously not be returned - if (!fromIndex) { - assertTrue(unstoredFieldValues.length == 2); - } - - assertTrue(keywordFieldValues[0].equals("test1")); - assertTrue(keywordFieldValues[1].equals("test2")); - assertTrue(textFieldValues[0].equals("test1")); - assertTrue(textFieldValues[1].equals("test2")); - assertTrue(unindexedFieldValues[0].equals("test1")); - assertTrue(unindexedFieldValues[1].equals("test2")); - // this test cannot work for documents retrieved from the index - // since unstored fields will obviously not be returned - if (!fromIndex) { - assertTrue(unstoredFieldValues[0].equals("test1")); - assertTrue(unstoredFieldValues[1].equals("test2")); - } - } - - public void testFieldSetValue() throws Exception { - - Field field = new Field("id", "id1", Field.Store.YES, - Field.Index.NOT_ANALYZED); - Document doc = new Document(); - doc.add(field); - doc.add(new Field("keyword", "test", Field.Store.YES, - Field.Index.NOT_ANALYZED)); - - Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); - writer.addDocument(doc); - field.setValue("id2"); - writer.addDocument(doc); - field.setValue("id3"); - writer.addDocument(doc); - - IndexReader reader = writer.getReader(); - IndexSearcher searcher = newSearcher(reader); - - Query query = new TermQuery(new Term("keyword", "test")); - - // ensure that queries return expected results without DateFilter first - ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; - assertEquals(3, hits.length); - int result = 0; - for (int i = 0; i < 3; i++) { - Document doc2 = searcher.doc(hits[i].doc); - Field f = doc2.getField("id"); - if (f.stringValue().equals("id1")) result |= 1; - else if 
(f.stringValue().equals("id2")) result |= 2; - else if (f.stringValue().equals("id3")) result |= 4; - else fail("unexpected id field"); - } - writer.close(); - searcher.close(); - reader.close(); - dir.close(); - assertEquals("did not see all IDs", 7, result); - } - - public void testFieldSetValueChangeBinary() { - Field field1 = new Field("field1", new byte[0]); - Field field2 = new Field("field2", "", Field.Store.YES, - Field.Index.ANALYZED); - try { - field1.setValue("abc"); - fail("did not hit expected exception"); - } catch (IllegalArgumentException iae) { - // expected - } - try { - field2.setValue(new byte[0]); - fail("did not hit expected exception"); - } catch (IllegalArgumentException iae) { - // expected - } - } -} Index: lucene/src/test/org/apache/lucene/document2/TestBinaryDocument.java =================================================================== --- lucene/src/test/org/apache/lucene/document2/TestBinaryDocument.java (revision 0) +++ lucene/src/test/org/apache/lucene/document2/TestBinaryDocument.java (working copy) @@ -1,8 +1,9 @@ -package org.apache.lucene.document; +package org.apache.lucene.document2; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.store.Directory; @@ -34,8 +35,10 @@ public void testBinaryFieldInIndex() throws Exception { - Fieldable binaryFldStored = new Field("binaryStored", binaryValStored.getBytes()); - Fieldable stringFldStored = new Field("stringStored", binaryValStored, Field.Store.YES, Field.Index.NO, Field.TermVector.NO); + FieldType ft = new FieldType(); + ft.setStored(true); + IndexableField binaryFldStored = new BinaryField("binaryStored", binaryValStored.getBytes()); + IndexableField stringFldStored = new Field("stringStored", ft, binaryValStored); Document doc = new Document(); @@ -53,7 +56,7 @@ /** open a reader and fetch the document */ IndexReader 
reader = writer.getReader(); - Document docFromReader = reader.document(0); + Document docFromReader = reader.document2(0); assertTrue(docFromReader != null); /** fetch the binary stored field and compare it's content with the original one */ @@ -77,8 +80,8 @@ } public void testCompressionTools() throws Exception { - Fieldable binaryFldCompressed = new Field("binaryCompressed", CompressionTools.compress(binaryValCompressed.getBytes())); - Fieldable stringFldCompressed = new Field("stringCompressed", CompressionTools.compressString(binaryValCompressed)); + IndexableField binaryFldCompressed = new BinaryField("binaryCompressed", CompressionTools.compress(binaryValCompressed.getBytes())); + IndexableField stringFldCompressed = new BinaryField("stringCompressed", CompressionTools.compressString(binaryValCompressed)); Document doc = new Document(); @@ -92,7 +95,7 @@ /** open a reader and fetch the document */ IndexReader reader = writer.getReader(); - Document docFromReader = reader.document(0); + Document docFromReader = reader.document2(0); assertTrue(docFromReader != null); /** fetch the binary compressed field and compare it's content with the original one */ Index: lucene/src/test/org/apache/lucene/document2/TestDateTools.java =================================================================== --- lucene/src/test/org/apache/lucene/document2/TestDateTools.java (revision 0) +++ lucene/src/test/org/apache/lucene/document2/TestDateTools.java (working copy) @@ -1,4 +1,4 @@ -package org.apache.lucene.document; +package org.apache.lucene.document2; import java.text.ParseException; import java.text.SimpleDateFormat; Index: lucene/src/test/org/apache/lucene/document2/TestDocument.java =================================================================== --- lucene/src/test/org/apache/lucene/document2/TestDocument.java (revision 0) +++ lucene/src/test/org/apache/lucene/document2/TestDocument.java (working copy) @@ -1,6 +1,7 @@ -package org.apache.lucene.document; +package 
org.apache.lucene.document2; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; @@ -37,20 +38,22 @@ public void testBinaryField() throws Exception { Document doc = new Document(); - Fieldable stringFld = new Field("string", binaryVal, Field.Store.YES, - Field.Index.NO); - Fieldable binaryFld = new Field("binary", binaryVal.getBytes()); - Fieldable binaryFld2 = new Field("binary", binaryVal2.getBytes()); + FieldType ft = new FieldType(); + ft.setStored(true); + IndexableField stringFld = new Field("string", ft, binaryVal); + IndexableField binaryFld = new BinaryField("binary", binaryVal.getBytes()); + IndexableField binaryFld2 = new BinaryField("binary", binaryVal2.getBytes()); + doc.add(stringFld); doc.add(binaryFld); assertEquals(2, doc.fields.size()); - assertTrue(binaryFld.isBinary()); - assertTrue(binaryFld.isStored()); - assertFalse(binaryFld.isIndexed()); - assertFalse(binaryFld.isTokenized()); + assertTrue(binaryFld.binaryValue(null) != null); + assertTrue(binaryFld.stored()); + assertFalse(binaryFld.indexed()); + assertFalse(binaryFld.tokenized()); String binaryTest = new String(doc.getBinaryValue("binary")); assertTrue(binaryTest.equals(binaryVal)); @@ -115,19 +118,22 @@ } public void testConstructorExceptions() { - new Field("name", "value", Field.Store.YES, Field.Index.NO); // okay - new Field("name", "value", Field.Store.NO, Field.Index.NOT_ANALYZED); // okay + FieldType ft = new FieldType(); + ft.setStored(true); + new Field("name", ft, "value"); // okay + new StringField("name", "value"); // okay try { - new Field("name", "value", Field.Store.NO, Field.Index.NO); + new Field("name", new FieldType(), "value"); fail(); } catch (IllegalArgumentException e) { // expected exception } - new Field("name", "value", Field.Store.YES, Field.Index.NO, - Field.TermVector.NO); // okay + new 
Field("name", ft, "value"); // okay try { - new Field("name", "value", Field.Store.YES, Field.Index.NO, - Field.TermVector.YES); + FieldType ft2 = new FieldType(); + ft2.setStored(true); + ft2.setStoreTermVectors(true); + new Field("name", ft2, "value"); fail(); } catch (IllegalArgumentException e) { // expected exception @@ -165,7 +171,7 @@ ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - doAssert(searcher.doc(hits[0].doc), true); + doAssert(searcher.doc2(hits[0].doc), true); writer.close(); searcher.close(); reader.close(); @@ -174,28 +180,26 @@ private Document makeDocumentWithFields() { Document doc = new Document(); - doc.add(new Field("keyword", "test1", Field.Store.YES, - Field.Index.NOT_ANALYZED)); - doc.add(new Field("keyword", "test2", Field.Store.YES, - Field.Index.NOT_ANALYZED)); - doc.add(new Field("text", "test1", Field.Store.YES, Field.Index.ANALYZED)); - doc.add(new Field("text", "test2", Field.Store.YES, Field.Index.ANALYZED)); - doc.add(new Field("unindexed", "test1", Field.Store.YES, Field.Index.NO)); - doc.add(new Field("unindexed", "test2", Field.Store.YES, Field.Index.NO)); + FieldType stored = new FieldType(); + stored.setStored(true); + doc.add(new Field("keyword", StringField.TYPE_STORED, "test1")); + doc.add(new Field("keyword", StringField.TYPE_STORED, "test2")); + doc.add(new Field("text", TextField.TYPE_STORED, "test1")); + doc.add(new Field("text", TextField.TYPE_STORED, "test2")); + doc.add(new Field("unindexed", stored, "test1")); + doc.add(new Field("unindexed", stored, "test2")); doc - .add(new Field("unstored", "test1", Field.Store.NO, - Field.Index.ANALYZED)); + .add(new TextField("unstored", "test1")); doc - .add(new Field("unstored", "test2", Field.Store.NO, - Field.Index.ANALYZED)); + .add(new TextField("unstored", "test2")); return doc; } private void doAssert(Document doc, boolean fromIndex) { - String[] keywordFieldValues = doc.getValues("keyword"); - String[] textFieldValues = 
doc.getValues("text"); - String[] unindexedFieldValues = doc.getValues("unindexed"); - String[] unstoredFieldValues = doc.getValues("unstored"); + IndexableField[] keywordFieldValues = doc.getFields("keyword"); + IndexableField[] textFieldValues = doc.getFields("text"); + IndexableField[] unindexedFieldValues = doc.getFields("unindexed"); + IndexableField[] unstoredFieldValues = doc.getFields("unstored"); assertTrue(keywordFieldValues.length == 2); assertTrue(textFieldValues.length == 2); @@ -206,28 +210,26 @@ assertTrue(unstoredFieldValues.length == 2); } - assertTrue(keywordFieldValues[0].equals("test1")); - assertTrue(keywordFieldValues[1].equals("test2")); - assertTrue(textFieldValues[0].equals("test1")); - assertTrue(textFieldValues[1].equals("test2")); - assertTrue(unindexedFieldValues[0].equals("test1")); - assertTrue(unindexedFieldValues[1].equals("test2")); + assertTrue(keywordFieldValues[0].stringValue().equals("test1")); + assertTrue(keywordFieldValues[1].stringValue().equals("test2")); + assertTrue(textFieldValues[0].stringValue().equals("test1")); + assertTrue(textFieldValues[1].stringValue().equals("test2")); + assertTrue(unindexedFieldValues[0].stringValue().equals("test1")); + assertTrue(unindexedFieldValues[1].stringValue().equals("test2")); // this test cannot work for documents retrieved from the index // since unstored fields will obviously not be returned if (!fromIndex) { - assertTrue(unstoredFieldValues[0].equals("test1")); - assertTrue(unstoredFieldValues[1].equals("test2")); + assertTrue(unstoredFieldValues[0].stringValue().equals("test1")); + assertTrue(unstoredFieldValues[1].stringValue().equals("test2")); } } public void testFieldSetValue() throws Exception { - Field field = new Field("id", "id1", Field.Store.YES, - Field.Index.NOT_ANALYZED); + Field field = new Field("id", StringField.TYPE_STORED, "id1"); Document doc = new Document(); doc.add(field); - doc.add(new Field("keyword", "test", Field.Store.YES, - Field.Index.NOT_ANALYZED)); 
+ doc.add(new Field("keyword", StringField.TYPE_STORED, "test")); Directory dir = newDirectory(); RandomIndexWriter writer = new RandomIndexWriter(random, dir); @@ -247,8 +249,8 @@ assertEquals(3, hits.length); int result = 0; for (int i = 0; i < 3; i++) { - Document doc2 = searcher.doc(hits[i].doc); - Field f = doc2.getField("id"); + Document doc2 = searcher.doc2(hits[i].doc); + Field f = (Field) doc2.getField("id"); if (f.stringValue().equals("id1")) result |= 1; else if (f.stringValue().equals("id2")) result |= 2; else if (f.stringValue().equals("id3")) result |= 4; @@ -262,9 +264,8 @@ } public void testFieldSetValueChangeBinary() { - Field field1 = new Field("field1", new byte[0]); - Field field2 = new Field("field2", "", Field.Store.YES, - Field.Index.ANALYZED); + Field field1 = new BinaryField("field1", new byte[0]); + Field field2 = new Field("field2", TextField.TYPE_STORED, ""); try { field1.setValue("abc"); fail("did not hit expected exception"); Index: lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (revision 1150855) +++ lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (working copy) @@ -267,7 +267,7 @@ final int hitCount = hits.length; assertEquals("wrong number of hits", expectedCount, hitCount); for(int i=0;i 0) { - searcher.doc(hits[0].doc); + searcher.doc2(hits[0].doc); } searcher.close(); if (refreshed != r) { @@ -1105,7 +1105,7 @@ assertTrue(r1 != r3); r1.close(); try { - r1.document(2); + r1.document2(2); fail("did not hit exception"); } catch (AlreadyClosedException ace) { // expected Index: lucene/src/test/org/apache/lucene/index/TestIndexWriter.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestIndexWriter.java (revision 1150855) +++ 
lucene/src/test/org/apache/lucene/index/TestIndexWriter.java (working copy) @@ -1289,13 +1289,13 @@ assertEquals(17, b.length, 17); assertEquals(87, b[0]); - assertTrue(ir.document(0).getFieldable("binary").isBinary()); - assertTrue(ir.document(1).getFieldable("binary").isBinary()); - assertTrue(ir.document(2).getFieldable("binary").isBinary()); + assertTrue(ir.document2(0).getField("binary").binaryValue(null)!=null); + assertTrue(ir.document2(1).getField("binary").binaryValue(null)!=null); + assertTrue(ir.document2(2).getField("binary").binaryValue(null)!=null); - assertEquals("value", ir.document(0).get("string")); - assertEquals("value", ir.document(1).get("string")); - assertEquals("value", ir.document(2).get("string")); + assertEquals("value", ir.document2(0).get("string")); + assertEquals("value", ir.document2(1).get("string")); + assertEquals("value", ir.document2(2).get("string")); // test that the terms were indexed. Index: lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java (revision 1150855) +++ lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java (working copy) @@ -60,21 +60,20 @@ /* private field types */ /* private field types */ - private static final FieldType custom = new FieldType(TextField.TYPE_UNSTORED); private static final FieldType custom1 = new FieldType(TextField.TYPE_UNSTORED); - private static final FieldType custom2 = new FieldType(StringField.TYPE_UNSTORED); + private static final FieldType custom2 = new FieldType(); private static final FieldType custom3 = new FieldType(); private static final FieldType custom4 = new FieldType(StringField.TYPE_UNSTORED); private static final FieldType custom5 = new FieldType(TextField.TYPE_UNSTORED); static { - custom.setStored(true); custom1.setStoreTermVectors(true); custom1.setStoreTermVectorPositions(true); 
custom1.setStoreTermVectorOffsets(true); - + custom2.setStored(true); + custom2.setIndexed(true); custom3.setStored(true); @@ -134,7 +133,7 @@ final Document doc = new Document(); - doc.add(newField("content1", "aaa bbb ccc ddd", DocCopyIterator.custom)); + doc.add(newField("content1", "aaa bbb ccc ddd", TextField.TYPE_STORED)); doc.add(newField("content6", "aaa bbb ccc ddd", DocCopyIterator.custom1)); doc.add(newField("content2", "aaa bbb ccc ddd", DocCopyIterator.custom2)); doc.add(newField("content3", "aaa bbb ccc ddd", DocCopyIterator.custom3)); @@ -370,7 +369,7 @@ MockIndexWriter2 w = new MockIndexWriter2(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))); w.setInfoStream(VERBOSE ? System.out : null); Document doc = new Document(); - doc.add(newField("field", "a field", DocCopyIterator.custom)); + doc.add(newField("field", "a field", TextField.TYPE_STORED)); w.addDocument(doc); w.doFail = true; try { @@ -389,7 +388,7 @@ MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2)); w.setInfoStream(VERBOSE ? 
System.out : null); Document doc = new Document(); - doc.add(newField("field", "a field", DocCopyIterator.custom)); + doc.add(newField("field", "a field", TextField.TYPE_STORED)); w.addDocument(doc); Analyzer analyzer = new Analyzer() { @@ -402,7 +401,7 @@ }; Document crashDoc = new Document(); - crashDoc.add(newField("crash", "do it on token 4", DocCopyIterator.custom)); + crashDoc.add(newField("crash", "do it on token 4", TextField.TYPE_STORED)); try { w.addDocument(crashDoc, analyzer); fail("did not hit expected exception"); @@ -443,7 +442,7 @@ MockIndexWriter3 w = new MockIndexWriter3(dir, conf); w.doFail = true; Document doc = new Document(); - doc.add(newField("field", "a field", DocCopyIterator.custom)); + doc.add(newField("field", "a field", TextField.TYPE_STORED)); for(int i=0;i<10;i++) try { w.addDocument(doc); @@ -652,7 +651,7 @@ if (delDocs.get(j)) numDel++; else { - reader.document(j); + reader.document2(j); reader.getTermFreqVectors(j); } } @@ -676,7 +675,7 @@ int numDel = 0; assertNull(MultiFields.getDeletedDocs(reader)); for(int j=0;j? 
" + score1, score > score1); @@ -394,10 +394,10 @@ float score2 = h[2].score; float score3 = h[3].score; - String doc0 = s.doc(h[0].doc).get("id"); - String doc1 = s.doc(h[1].doc).get("id"); - String doc2 = s.doc(h[2].doc).get("id"); - String doc3 = s.doc(h[3].doc).get("id"); + String doc0 = s.doc2(h[0].doc).get("id"); + String doc1 = s.doc2(h[1].doc).get("id"); + String doc2 = s.doc2(h[2].doc).get("id"); + String doc3 = s.doc2(h[3].doc).get("id"); assertTrue("doc0 should be d2 or d4: " + doc0, doc0.equals("d2") || doc0.equals("d4")); @@ -448,10 +448,10 @@ float score2 = h[2].score; float score3 = h[3].score; - String doc0 = s.doc(h[0].doc).get("id"); - String doc1 = s.doc(h[1].doc).get("id"); - String doc2 = s.doc(h[2].doc).get("id"); - String doc3 = s.doc(h[3].doc).get("id"); + String doc0 = s.doc2(h[0].doc).get("id"); + String doc1 = s.doc2(h[1].doc).get("id"); + String doc2 = s.doc2(h[2].doc).get("id"); + String doc3 = s.doc2(h[3].doc).get("id"); assertEquals("doc0 should be d4: ", "d4", doc0); assertEquals("doc1 should be d3: ", "d3", doc1); Index: lucene/src/test/org/apache/lucene/search/TestFuzzyQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestFuzzyQuery.java (revision 1150855) +++ lucene/src/test/org/apache/lucene/search/TestFuzzyQuery.java (working copy) @@ -84,7 +84,7 @@ assertEquals("3 documents should match", 3, hits.length); List order = Arrays.asList("bbbbb","abbbb","aabbb"); for (int i = 0; i < hits.length; i++) { - final String term = searcher.doc(hits[i].doc).get("field"); + final String term = searcher.doc2(hits[i].doc).get("field"); //System.out.println(hits[i].score); assertEquals(order.get(i), term); } @@ -96,7 +96,7 @@ assertEquals("only 2 documents should match", 2, hits.length); order = Arrays.asList("bbbbb","abbbb"); for (int i = 0; i < hits.length; i++) { - final String term = searcher.doc(hits[i].doc).get("field"); + final String term = 
searcher.doc2(hits[i].doc).get("field"); //System.out.println(hits[i].score); assertEquals(order.get(i), term); } @@ -113,43 +113,43 @@ query = new FuzzyQuery(new Term("field", "aaaaa"), FuzzyQuery.defaultMinSimilarity, 0); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(3, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaa")); + assertEquals(searcher.doc2(hits[0].doc).get("field"), ("aaaaa")); // default allows for up to two edits: - assertEquals(searcher.doc(hits[1].doc).get("field"), ("aaaab")); - assertEquals(searcher.doc(hits[2].doc).get("field"), ("aaabb")); + assertEquals(searcher.doc2(hits[1].doc).get("field"), ("aaaab")); + assertEquals(searcher.doc2(hits[2].doc).get("field"), ("aaabb")); // query similar to a word in the index: query = new FuzzyQuery(new Term("field", "aaaac"), FuzzyQuery.defaultMinSimilarity, 0); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(3, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaa")); - assertEquals(searcher.doc(hits[1].doc).get("field"), ("aaaab")); - assertEquals(searcher.doc(hits[2].doc).get("field"), ("aaabb")); + assertEquals(searcher.doc2(hits[0].doc).get("field"), ("aaaaa")); + assertEquals(searcher.doc2(hits[1].doc).get("field"), ("aaaab")); + assertEquals(searcher.doc2(hits[2].doc).get("field"), ("aaabb")); // now with prefix query = new FuzzyQuery(new Term("field", "aaaac"), FuzzyQuery.defaultMinSimilarity, 1); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(3, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaa")); - assertEquals(searcher.doc(hits[1].doc).get("field"), ("aaaab")); - assertEquals(searcher.doc(hits[2].doc).get("field"), ("aaabb")); + assertEquals(searcher.doc2(hits[0].doc).get("field"), ("aaaaa")); + assertEquals(searcher.doc2(hits[1].doc).get("field"), ("aaaab")); + assertEquals(searcher.doc2(hits[2].doc).get("field"), ("aaabb")); query = new FuzzyQuery(new 
Term("field", "aaaac"), FuzzyQuery.defaultMinSimilarity, 2); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(3, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaa")); - assertEquals(searcher.doc(hits[1].doc).get("field"), ("aaaab")); - assertEquals(searcher.doc(hits[2].doc).get("field"), ("aaabb")); + assertEquals(searcher.doc2(hits[0].doc).get("field"), ("aaaaa")); + assertEquals(searcher.doc2(hits[1].doc).get("field"), ("aaaab")); + assertEquals(searcher.doc2(hits[2].doc).get("field"), ("aaabb")); query = new FuzzyQuery(new Term("field", "aaaac"), FuzzyQuery.defaultMinSimilarity, 3); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(3, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaa")); - assertEquals(searcher.doc(hits[1].doc).get("field"), ("aaaab")); - assertEquals(searcher.doc(hits[2].doc).get("field"), ("aaabb")); + assertEquals(searcher.doc2(hits[0].doc).get("field"), ("aaaaa")); + assertEquals(searcher.doc2(hits[1].doc).get("field"), ("aaaab")); + assertEquals(searcher.doc2(hits[2].doc).get("field"), ("aaabb")); query = new FuzzyQuery(new Term("field", "aaaac"), FuzzyQuery.defaultMinSimilarity, 4); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(2, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaa")); - assertEquals(searcher.doc(hits[1].doc).get("field"), ("aaaab")); + assertEquals(searcher.doc2(hits[0].doc).get("field"), ("aaaaa")); + assertEquals(searcher.doc2(hits[1].doc).get("field"), ("aaaab")); query = new FuzzyQuery(new Term("field", "aaaac"), FuzzyQuery.defaultMinSimilarity, 5); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(0, hits.length); @@ -158,25 +158,25 @@ query = new FuzzyQuery(new Term("field", "ddddX"), FuzzyQuery.defaultMinSimilarity, 0); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("ddddd")); + 
assertEquals(searcher.doc2(hits[0].doc).get("field"), ("ddddd")); // now with prefix query = new FuzzyQuery(new Term("field", "ddddX"), FuzzyQuery.defaultMinSimilarity, 1); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("ddddd")); + assertEquals(searcher.doc2(hits[0].doc).get("field"), ("ddddd")); query = new FuzzyQuery(new Term("field", "ddddX"), FuzzyQuery.defaultMinSimilarity, 2); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("ddddd")); + assertEquals(searcher.doc2(hits[0].doc).get("field"), ("ddddd")); query = new FuzzyQuery(new Term("field", "ddddX"), FuzzyQuery.defaultMinSimilarity, 3); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("ddddd")); + assertEquals(searcher.doc2(hits[0].doc).get("field"), ("ddddd")); query = new FuzzyQuery(new Term("field", "ddddX"), FuzzyQuery.defaultMinSimilarity, 4); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("ddddd")); + assertEquals(searcher.doc2(hits[0].doc).get("field"), ("ddddd")); query = new FuzzyQuery(new Term("field", "ddddX"), FuzzyQuery.defaultMinSimilarity, 5); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(0, hits.length); @@ -212,17 +212,17 @@ query = new FuzzyQuery(new Term("field", "aaaaccc"), 0.5f, 0); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaaaa")); + assertEquals(searcher.doc2(hits[0].doc).get("field"), ("aaaaaaa")); // now with prefix query = new FuzzyQuery(new Term("field", "aaaaccc"), 0.5f, 1); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - 
assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaaaa")); + assertEquals(searcher.doc2(hits[0].doc).get("field"), ("aaaaaaa")); query = new FuzzyQuery(new Term("field", "aaaaccc"), 0.5f, 4); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaaaa")); + assertEquals(searcher.doc2(hits[0].doc).get("field"), ("aaaaaaa")); query = new FuzzyQuery(new Term("field", "aaaaccc"), 0.5f, 5); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(0, hits.length); @@ -377,9 +377,9 @@ ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(3, hits.length); // normally, 'Lucenne' would be the first result as IDF will skew the score. - assertEquals("Lucene", reader.document(hits[0].doc).get("field")); - assertEquals("Lucene", reader.document(hits[1].doc).get("field")); - assertEquals("Lucenne", reader.document(hits[2].doc).get("field")); + assertEquals("Lucene", reader.document2(hits[0].doc).get("field")); + assertEquals("Lucene", reader.document2(hits[1].doc).get("field")); + assertEquals("Lucenne", reader.document2(hits[2].doc).get("field")); searcher.close(); reader.close(); directory.close(); @@ -417,7 +417,7 @@ IndexSearcher searcher = newSearcher(r); ScoreDoc[] hits = searcher.search(q, 10).scoreDocs; assertEquals(1, hits.length); - assertEquals("Giga byte", searcher.doc(hits[0].doc).get("field")); + assertEquals("Giga byte", searcher.doc2(hits[0].doc).get("field")); searcher.close(); r.close(); index.close(); @@ -443,28 +443,28 @@ FuzzyQuery q = (FuzzyQuery) qp.parse("fouba~2"); ScoreDoc[] hits = searcher.search(q, 10).scoreDocs; assertEquals(1, hits.length); - assertEquals("foobar", searcher.doc(hits[0].doc).get("field")); + assertEquals("foobar", searcher.doc2(hits[0].doc).get("field")); q = (FuzzyQuery) qp.parse("foubara~2"); hits = searcher.search(q, 10).scoreDocs; assertEquals(1, hits.length); - assertEquals("foobar", 
searcher.doc(hits[0].doc).get("field")); + assertEquals("foobar", searcher.doc2(hits[0].doc).get("field")); q = (FuzzyQuery) qp.parse("t~3"); hits = searcher.search(q, 10).scoreDocs; assertEquals(1, hits.length); - assertEquals("test", searcher.doc(hits[0].doc).get("field")); + assertEquals("test", searcher.doc2(hits[0].doc).get("field")); q = new FuzzyQuery(new Term("field", "a"), 4f, 0, 50); hits = searcher.search(q, 10).scoreDocs; assertEquals(1, hits.length); - assertEquals("test", searcher.doc(hits[0].doc).get("field")); + assertEquals("test", searcher.doc2(hits[0].doc).get("field")); q = new FuzzyQuery(new Term("field", "a"), 6f, 0, 50); hits = searcher.search(q, 10).scoreDocs; assertEquals(2, hits.length); - assertEquals("test", searcher.doc(hits[0].doc).get("field")); - assertEquals("foobar", searcher.doc(hits[1].doc).get("field")); + assertEquals("test", searcher.doc2(hits[0].doc).get("field")); + assertEquals("foobar", searcher.doc2(hits[1].doc).get("field")); searcher.close(); reader.close(); Index: lucene/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java (revision 1150855) +++ lucene/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java (working copy) @@ -55,9 +55,9 @@ hits = is.search(new MatchAllDocsQuery(), null, 1000).scoreDocs; assertEquals(3, hits.length); - assertEquals("one", is.doc(hits[0].doc).get("key")); - assertEquals("two", is.doc(hits[1].doc).get("key")); - assertEquals("three four", is.doc(hits[2].doc).get("key")); + assertEquals("one", is.doc2(hits[0].doc).get("key")); + assertEquals("two", is.doc2(hits[1].doc).get("key")); + assertEquals("three four", is.doc2(hits[2].doc).get("key")); // assert with norms scoring turned on @@ -65,9 +65,9 @@ hits = is.search(normsQuery, null, 1000).scoreDocs; assertEquals(3, hits.length); - assertEquals("three four", 
is.doc(hits[0].doc).get("key")); - assertEquals("two", is.doc(hits[1].doc).get("key")); - assertEquals("one", is.doc(hits[2].doc).get("key")); + assertEquals("three four", is.doc2(hits[0].doc).get("key")); + assertEquals("two", is.doc2(hits[1].doc).get("key")); + assertEquals("one", is.doc2(hits[2].doc).get("key")); // change norm & retest is.getIndexReader().setNorm(0, "key", is.getSimilarityProvider().get("key").encodeNormValue(400f)); @@ -75,9 +75,9 @@ hits = is.search(normsQuery, null, 1000).scoreDocs; assertEquals(3, hits.length); - assertEquals("one", is.doc(hits[0].doc).get("key")); - assertEquals("three four", is.doc(hits[1].doc).get("key")); - assertEquals("two", is.doc(hits[2].doc).get("key")); + assertEquals("one", is.doc2(hits[0].doc).get("key")); + assertEquals("three four", is.doc2(hits[1].doc).get("key")); + assertEquals("two", is.doc2(hits[2].doc).get("key")); // some artificial queries to trigger the use of skipTo(): Index: lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java (revision 1150855) +++ lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java (working copy) @@ -502,9 +502,9 @@ if (topDocs.totalHits==0) continue; ScoreDoc[] sd = topDocs.scoreDocs; assertNotNull(sd); - int last=Integer.parseInt(searcher.doc(sd[0].doc).get(field)); + int last=Integer.parseInt(searcher.doc2(sd[0].doc).get(field)); for (int j=1; jact ); last=act; } Index: lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java (revision 1150855) +++ lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java (working copy) @@ -534,9 +534,9 @@ if (topDocs.totalHits==0) continue; ScoreDoc[] sd = topDocs.scoreDocs; assertNotNull(sd); - long 
last=Long.parseLong(searcher.doc(sd[0].doc).get(field)); + long last=Long.parseLong(searcher.doc2(sd[0].doc).get(field)); for (int j=1; jact ); last=act; } Index: lucene/src/test/org/apache/lucene/search/TestSort.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestSort.java (revision 1150855) +++ lucene/src/test/org/apache/lucene/search/TestSort.java (working copy) @@ -161,6 +161,7 @@ setMergePolicy(newLogMergePolicy(97)) ); FieldType customType = new FieldType(); + customType.setStored(true); for (int i=0; i 0); Index: lucene/src/test/org/apache/lucene/search/function/TestFieldScoreQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/function/TestFieldScoreQuery.java (revision 1150855) +++ lucene/src/test/org/apache/lucene/search/function/TestFieldScoreQuery.java (working copy) @@ -86,7 +86,7 @@ assertEquals("All docs should be matched!",N_DOCS,h.length); String prevID = "ID"+(N_DOCS+1); // greater than all ids of docs in this test for (int i=0; i 7.0 assertEquals("score of " + id + " shuould be " + expectedScore + " != " + score, expectedScore, score, TEST_SCORE_TOLERANCE_DELTA); } Index: lucene/src/test/org/apache/lucene/search/function/TestOrdValues.java =================================================================== --- lucene/src/test/org/apache/lucene/search/function/TestOrdValues.java (revision 1150855) +++ lucene/src/test/org/apache/lucene/search/function/TestOrdValues.java (working copy) @@ -78,7 +78,7 @@ : "IC"; // smaller than all ids of docs in this test ("ID0001", etc.) for (int i = 0; i < h.length; i++) { - String resID = s.doc(h[i].doc).get(ID_FIELD); + String resID = s.doc2(h[i].doc).get(ID_FIELD); log(i + ". 
score=" + h[i].score + " - " + resID); log(s.explain(q, h[i].doc)); if (inOrder) { @@ -123,7 +123,7 @@ ScoreDoc sd[] = td.scoreDocs; for (int i = 0; i < sd.length; i++) { float score = sd[i].score; - String id = s.getIndexReader().document(sd[i].doc).get(ID_FIELD); + String id = s.getIndexReader().document2(sd[i].doc).get(ID_FIELD); log("-------- " + i + ". Explain doc " + id); log(s.explain(q, sd[i].doc)); float expectedScore = N_DOCS - i; Index: lucene/src/test/org/apache/lucene/search/payloads/PayloadHelper.java =================================================================== --- lucene/src/test/org/apache/lucene/search/payloads/PayloadHelper.java (revision 1150855) +++ lucene/src/test/org/apache/lucene/search/payloads/PayloadHelper.java (working copy) @@ -22,8 +22,9 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Payload; import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.TextField; import org.apache.lucene.util.English; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.index.IndexReader; @@ -122,9 +123,9 @@ // writer.infoStream = System.out; for (int i = 0; i < numDocs; i++) { Document doc = new Document(); - doc.add(new Field(FIELD, English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED)); - doc.add(new Field(MULTI_FIELD, English.intToEnglish(i) + " " + English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED)); - doc.add(new Field(NO_PAYLOAD_FIELD, English.intToEnglish(i), Field.Store.YES, Field.Index.ANALYZED)); + doc.add(new Field(FIELD, TextField.TYPE_STORED, English.intToEnglish(i))); + doc.add(new Field(MULTI_FIELD, TextField.TYPE_STORED, English.intToEnglish(i) + " " + English.intToEnglish(i))); + doc.add(new Field(NO_PAYLOAD_FIELD, TextField.TYPE_STORED, 
English.intToEnglish(i))); writer.addDocument(doc); } reader = IndexReader.open(writer, true); Index: lucene/src/test/org/apache/lucene/store/TestMultiMMap.java =================================================================== --- lucene/src/test/org/apache/lucene/store/TestMultiMMap.java (revision 1150855) +++ lucene/src/test/org/apache/lucene/store/TestMultiMMap.java (working copy) @@ -80,7 +80,7 @@ int numAsserts = atLeast(100); for (int i = 0; i < numAsserts; i++) { int docID = random.nextInt(numDocs); - assertEquals("" + docID, reader.document(docID).get("docid")); + assertEquals("" + docID, reader.document2(docID).get("docid")); } reader.close(); dir.close(); Index: modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DocData.java =================================================================== --- modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DocData.java (revision 1150855) +++ modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DocData.java (working copy) @@ -20,7 +20,7 @@ import java.util.Date; import java.util.Properties; -import org.apache.lucene.document.DateTools; +import org.apache.lucene.document2.DateTools; /** Output of parsing (e.g. HTML parsing) of an input document. 
*/ public class DocData { Index: modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DocMaker.java =================================================================== --- modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DocMaker.java (revision 1150855) +++ modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DocMaker.java (working copy) @@ -211,7 +211,7 @@ // Set ID_FIELD FieldType ft = new FieldType(valType); - ft.setIndexed(false); + ft.setIndexed(true); Field idField = ds.getField(ID_FIELD, ft); int id; Index: modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/LineDocSourceTest.java =================================================================== --- modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/LineDocSourceTest.java (revision 1150855) +++ modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/LineDocSourceTest.java (working copy) @@ -148,7 +148,7 @@ if (storedField==null) { storedField = DocMaker.BODY_FIELD; // added to all docs and satisfies field-name == value } - assertEquals("Wrong field value", storedField, searcher.doc(0).get(storedField)); + assertEquals("Wrong field value", storedField, searcher.doc2(0).get(storedField)); searcher.close(); } Index: modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/TrecContentSourceTest.java =================================================================== --- modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/TrecContentSourceTest.java (revision 1150855) +++ modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/TrecContentSourceTest.java (working copy) @@ -29,7 +29,7 @@ import org.apache.lucene.benchmark.byTask.feeds.TrecDocParser.ParsePathType; import org.apache.lucene.benchmark.byTask.utils.Config; -import org.apache.lucene.document.DateTools; +import org.apache.lucene.document2.DateTools; import org.apache.lucene.util.LuceneTestCase; import 
org.apache.lucene.util._TestUtil; Index: modules/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java =================================================================== --- modules/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java (revision 1150855) +++ modules/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java (working copy) @@ -24,8 +24,9 @@ import java.util.List; import org.apache.lucene.analysis.core.WhitespaceAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.StringField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; @@ -387,7 +388,7 @@ SuggestWord sugWord = new SuggestWord(); for (int i = 0; i < stop; i++) { - sugWord.string = indexSearcher.doc(hits[i].doc).get(F_WORD); // get orig word + sugWord.string = indexSearcher.doc2(hits[i].doc).get(F_WORD); // get orig word // don't suggest a word for itself, that would be silly if (sugWord.string.equals(word)) { @@ -606,9 +607,7 @@ Document doc = new Document(); // the word field is never queried on... its indexed so it can be quickly // checked for rebuild (and stored for retrieval). Doesn't need norms or TF/pos - Field f = new Field(F_WORD, text, Field.Store.YES, Field.Index.NOT_ANALYZED); - f.setOmitTermFreqAndPositions(true); - f.setOmitNorms(true); + Field f = new Field(F_WORD, StringField.TYPE_STORED, text); doc.add(f); // orig term addGram(text, doc, ng1, ng2); return doc; @@ -621,21 +620,17 @@ String end = null; for (int i = 0; i < len - ng + 1; i++) { String gram = text.substring(i, i + ng); - doc.add(new Field(key, gram, Field.Store.NO, Field.Index.NOT_ANALYZED)); + doc.add(new StringField(key, gram)); if (i == 0) { // only one term possible in the startXXField, TF/pos and norms aren't needed. 
- Field startField = new Field("start" + ng, gram, Field.Store.NO, Field.Index.NOT_ANALYZED); - startField.setOmitTermFreqAndPositions(true); - startField.setOmitNorms(true); + Field startField = new StringField("start" + ng, gram); doc.add(startField); } end = gram; } if (end != null) { // may not be present if len==ng1 // only one term possible in the endXXField, TF/pos and norms aren't needed. - Field endField = new Field("end" + ng, end, Field.Store.NO, Field.Index.NOT_ANALYZED); - endField.setOmitTermFreqAndPositions(true); - endField.setOmitNorms(true); + Field endField = new StringField("end" + ng, end); doc.add(endField); } } Index: solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java =================================================================== --- solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java (revision 1150855) +++ solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java (working copy) @@ -27,7 +27,6 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute; import org.apache.lucene.collation.ICUCollationKeyAnalyzer; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.Query; import org.apache.lucene.search.SortField; Index: solr/src/java/org/apache/solr/core/QuerySenderListener.java =================================================================== --- solr/src/java/org/apache/solr/core/QuerySenderListener.java (revision 1150855) +++ solr/src/java/org/apache/solr/core/QuerySenderListener.java (working copy) @@ -65,7 +65,7 @@ if (o instanceof DocList) { DocList docs = (DocList)o; for (DocIterator iter = docs.iterator(); iter.hasNext();) { - newSearcher.doc(iter.nextDoc()); + newSearcher.doc2(iter.nextDoc()); } } } Index: solr/src/java/org/apache/solr/handler/MoreLikeThisHandler.java 
=================================================================== --- solr/src/java/org/apache/solr/handler/MoreLikeThisHandler.java (revision 1150855) +++ solr/src/java/org/apache/solr/handler/MoreLikeThisHandler.java (working copy) @@ -29,7 +29,7 @@ import java.util.Map; import java.util.regex.Pattern; -import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; import org.apache.lucene.queryParser.ParseException; @@ -342,7 +342,7 @@ public DocListAndSet getMoreLikeThis( int id, int start, int rows, List filters, List terms, int flags ) throws IOException { - Document doc = reader.document(id); + Document doc = reader.document2(id); rawMLTQuery = mlt.like(id); boostedMLTQuery = getBoostedQuery( rawMLTQuery ); if( terms != null ) { @@ -353,7 +353,7 @@ realMLTQuery = new BooleanQuery(); realMLTQuery.add(boostedMLTQuery, BooleanClause.Occur.MUST); realMLTQuery.add( - new TermQuery(new Term(uniqueKeyField.getName(), uniqueKeyField.getType().storedToIndexed(doc.getFieldable(uniqueKeyField.getName())))), + new TermQuery(new Term(uniqueKeyField.getName(), uniqueKeyField.getType().storedToIndexed(doc.getField(uniqueKeyField.getName())))), BooleanClause.Occur.MUST_NOT); DocListAndSet results = new DocListAndSet(); @@ -391,7 +391,7 @@ int id = iterator.nextDoc(); DocListAndSet sim = getMoreLikeThis( id, 0, rows, null, null, flags ); - String name = schema.printableUniqueKey( reader.document( id ) ); + String name = schema.printableUniqueKey( reader.document2( id ) ); mlt.add(name, sim.docList); } Index: solr/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java =================================================================== --- solr/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java (revision 1150855) +++ solr/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java (working copy) @@ -33,9 +33,10 @@ import org.slf4j.LoggerFactory; 
import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Fieldable; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; import org.apache.lucene.index.Fields; import org.apache.lucene.index.FieldsEnum; @@ -121,7 +122,7 @@ if( docId != null ) { Document doc = null; try { - doc = reader.document( docId ); + doc = reader.document2( docId ); } catch( Exception ex ) {} if( doc == null ) { @@ -164,19 +165,19 @@ /** * @return a string representing a Fieldable's flags. */ - private static String getFieldFlags( Fieldable f ) + private static String getFieldFlags( IndexableField f ) { StringBuilder flags = new StringBuilder(); - flags.append( (f != null && f.isIndexed()) ? FieldFlag.INDEXED.getAbbreviation() : '-' ); - flags.append( (f != null && f.isTokenized()) ? FieldFlag.TOKENIZED.getAbbreviation() : '-' ); - flags.append( (f != null && f.isStored()) ? FieldFlag.STORED.getAbbreviation() : '-' ); + flags.append( (f != null && f.indexed()) ? FieldFlag.INDEXED.getAbbreviation() : '-' ); + flags.append( (f != null && f.tokenized()) ? FieldFlag.TOKENIZED.getAbbreviation() : '-' ); + flags.append( (f != null && f.stored()) ? FieldFlag.STORED.getAbbreviation() : '-' ); flags.append( (false) ? FieldFlag.MULTI_VALUED.getAbbreviation() : '-' ); // SchemaField Specific - flags.append( (f != null && f.isTermVectorStored()) ? FieldFlag.TERM_VECTOR_STORED.getAbbreviation() : '-' ); - flags.append( (f != null && f.isStoreOffsetWithTermVector()) ? FieldFlag.TERM_VECTOR_OFFSET.getAbbreviation() : '-' ); - flags.append( (f != null && f.isStorePositionWithTermVector()) ? FieldFlag.TERM_VECTOR_POSITION.getAbbreviation() : '-' ); - flags.append( (f != null && f.getOmitNorms()) ? 
FieldFlag.OMIT_NORMS.getAbbreviation() : '-' ); - flags.append( (f != null && f.isLazy()) ? FieldFlag.LAZY.getAbbreviation() : '-' ); - flags.append( (f != null && f.isBinary()) ? FieldFlag.BINARY.getAbbreviation() : '-' ); + flags.append( (f != null && f.storeTermVectors()) ? FieldFlag.TERM_VECTOR_STORED.getAbbreviation() : '-' ); + flags.append( (f != null && f.storeTermVectorOffsets()) ? FieldFlag.TERM_VECTOR_OFFSET.getAbbreviation() : '-' ); + flags.append( (f != null && f.storeTermVectorPositions()) ? FieldFlag.TERM_VECTOR_POSITION.getAbbreviation() : '-' ); + flags.append( (f != null && f.omitNorms()) ? FieldFlag.OMIT_NORMS.getAbbreviation() : '-' ); + flags.append( (f != null && ((Field) f).lazy()) ? FieldFlag.LAZY.getAbbreviation() : '-' ); + flags.append( (f != null && f.binaryValue(null)!=null) ? FieldFlag.BINARY.getAbbreviation() : '-' ); flags.append( (false) ? FieldFlag.SORT_MISSING_FIRST.getAbbreviation() : '-' ); // SchemaField Specific flags.append( (false) ? FieldFlag.SORT_MISSING_LAST.getAbbreviation() : '-' ); // SchemaField Specific return flags.toString(); @@ -236,7 +237,7 @@ final CharsRef spare = new CharsRef(); SimpleOrderedMap finfo = new SimpleOrderedMap(); for( Object o : doc.getFields() ) { - Fieldable fieldable = (Fieldable)o; + Field fieldable = (Field)o; SimpleOrderedMap f = new SimpleOrderedMap(); SchemaField sfield = schema.getFieldOrNull( fieldable.name() ); @@ -257,11 +258,11 @@ if (bytes != null) { f.add( "binary", Base64.byteArrayToBase64(bytes.bytes, bytes.offset, bytes.length)); } - f.add( "boost", fieldable.getBoost() ); + f.add( "boost", fieldable.boost() ); f.add( "docFreq", t.text()==null ? 
0 : reader.docFreq( t ) ); // this can be 0 for non-indexed fields // If we have a term vector, return that - if( fieldable.isTermVectorStored() ) { + if( fieldable.storeTermVectors() ) { try { TermFreqVector v = reader.getTermFreqVector( docId, fieldable.name() ); if( v != null ) { @@ -320,8 +321,8 @@ if( top.totalHits > 0 ) { // Find a document with this field try { - Document doc = searcher.doc( top.scoreDocs[0].doc ); - Fieldable fld = doc.getFieldable( fieldName ); + Document doc = searcher.doc2( top.scoreDocs[0].doc ); + IndexableField fld = doc.getField( fieldName ); if( fld != null ) { f.add( "index", getFieldFlags( fld ) ); } Index: solr/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java =================================================================== --- solr/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java (revision 1150855) +++ solr/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java (working copy) @@ -80,7 +80,7 @@ int id = iterator.nextDoc(); int rows = p.getInt( MoreLikeThisParams.DOC_COUNT, 5 ); DocListAndSet sim = mltHelper.getMoreLikeThis( id, 0, rows, null, null, flags ); - String name = schema.printableUniqueKey( searcher.doc( id ) ); + String name = schema.printableUniqueKey( searcher.doc2( id ) ); mlt.add(name, sim.docList); if( dbg != null ){ @@ -92,7 +92,7 @@ DocIterator mltIte = sim.docList.iterator(); while( mltIte.hasNext() ){ int mltid = mltIte.nextDoc(); - String key = schema.printableUniqueKey( searcher.doc( mltid ) ); + String key = schema.printableUniqueKey( searcher.doc2( mltid ) ); explains.add( key, searcher.explain( mltHelper.getRealMLTQuery(), mltid ) ); } docDbg.add( "explain", explains ); Index: solr/src/java/org/apache/solr/handler/component/QueryComponent.java =================================================================== --- solr/src/java/org/apache/solr/handler/component/QueryComponent.java (revision 1150855) +++ 
solr/src/java/org/apache/solr/handler/component/QueryComponent.java (working copy) @@ -17,7 +17,7 @@ package org.apache.solr.handler.component; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Field; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.IndexReader.ReaderContext; import org.apache.lucene.index.Term; @@ -465,7 +465,9 @@ Sort sort = searcher.weightSort(rb.getSortSpec().getSort()); SortField[] sortFields = sort==null ? new SortField[]{SortField.FIELD_SCORE} : sort.getSort(); NamedList sortVals = new NamedList(); // order is important for the sort fields - Field field = new Field("dummy", "", Field.Store.YES, Field.Index.NO); // a dummy Field + org.apache.lucene.document2.FieldType docft = new org.apache.lucene.document2.FieldType(); + docft.setStored(true); + Field field = new Field("dummy", docft, ""); // a dummy Field ReaderContext topReaderContext = searcher.getTopReaderContext(); AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext); AtomicReaderContext currentLeaf = null; Index: solr/src/java/org/apache/solr/response/CSVResponseWriter.java =================================================================== --- solr/src/java/org/apache/solr/response/CSVResponseWriter.java (revision 1150855) +++ solr/src/java/org/apache/solr/response/CSVResponseWriter.java (working copy) @@ -19,8 +19,8 @@ import org.apache.commons.csv.CSVPrinter; import org.apache.commons.csv.CSVStrategy; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Fieldable; +import org.apache.lucene.document2.Document; +import org.apache.lucene.index.IndexableField; import org.apache.solr.common.SolrDocument; import org.apache.solr.common.SolrDocumentList; import org.apache.solr.common.SolrException; @@ -146,7 +146,7 @@ CSVSharedBufPrinter mvPrinter; // printer used to encode multiple values in a single CSV value // used to collect values - List values = new ArrayList(1); // low 
starting amount in case there are many fields + List values = new ArrayList(1); // low starting amount in case there are many fields int tmp; } Index: solr/src/java/org/apache/solr/response/JSONResponseWriter.java =================================================================== --- solr/src/java/org/apache/solr/response/JSONResponseWriter.java (revision 1150855) +++ solr/src/java/org/apache/solr/response/JSONResponseWriter.java (working copy) @@ -26,7 +26,7 @@ import java.util.Map; import java.util.Set; -import org.apache.lucene.document.Fieldable; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.util.StringHelper; import org.apache.solr.common.SolrDocument; import org.apache.solr.common.params.CommonParams; @@ -303,10 +303,10 @@ protected static class MultiValueField { final SchemaField sfield; - final ArrayList fields; - MultiValueField(SchemaField sfield, Fieldable firstVal) { + final ArrayList fields; + MultiValueField(SchemaField sfield, IndexableField firstVal) { this.sfield = sfield; - this.fields = new ArrayList(4); + this.fields = new ArrayList(4); this.fields.add(firstVal); } } Index: solr/src/java/org/apache/solr/schema/BCDIntField.java =================================================================== --- solr/src/java/org/apache/solr/schema/BCDIntField.java (revision 1150855) +++ solr/src/java/org/apache/solr/schema/BCDIntField.java (working copy) @@ -20,7 +20,6 @@ import org.apache.lucene.search.SortField; import org.apache.solr.search.QParser; import org.apache.solr.search.function.ValueSource; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.IndexableField; import org.apache.solr.util.BCDUtils; import org.apache.solr.response.TextResponseWriter; @@ -56,20 +55,11 @@ return indexedToReadable(f.stringValue()); } - @Override - public String toExternal(Fieldable f) { - return indexedToReadable(f.stringValue()); - } - // Note, this can't return type 'Integer' because BCDStrField and BCDLong extend it 
@Override public Object toObject(IndexableField f) { return Integer.valueOf( toExternal(f) ); } - @Override - public Object toObject(Fieldable f) { - return Integer.valueOf( toExternal(f) ); - } @Override public String indexedToReadable(String indexedForm) { Index: solr/src/java/org/apache/solr/schema/BCDLongField.java =================================================================== --- solr/src/java/org/apache/solr/schema/BCDLongField.java (revision 1150855) +++ solr/src/java/org/apache/solr/schema/BCDLongField.java (working copy) @@ -17,7 +17,6 @@ package org.apache.solr.schema; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.IndexableField; /** * @@ -27,8 +26,4 @@ public Long toObject(IndexableField f) { return Long.valueOf( toExternal(f) ); } - @Override - public Long toObject(Fieldable f) { - return Long.valueOf( toExternal(f) ); - } } Index: solr/src/java/org/apache/solr/schema/BCDStrField.java =================================================================== --- solr/src/java/org/apache/solr/schema/BCDStrField.java (revision 1150855) +++ solr/src/java/org/apache/solr/schema/BCDStrField.java (working copy) @@ -17,7 +17,6 @@ package org.apache.solr.schema; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.IndexableField; /** * @@ -31,8 +30,4 @@ public String toObject(IndexableField f) { return toExternal(f); } - @Override - public String toObject(Fieldable f) { - return toExternal(f); - } } Index: solr/src/java/org/apache/solr/schema/BinaryField.java =================================================================== --- solr/src/java/org/apache/solr/schema/BinaryField.java (revision 1150855) +++ solr/src/java/org/apache/solr/schema/BinaryField.java (working copy) @@ -20,7 +20,6 @@ import java.io.IOException; import java.nio.ByteBuffer; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.document2.Field; import org.apache.lucene.index.IndexableField; import 
org.apache.lucene.search.SortField; @@ -50,22 +49,12 @@ public String toExternal(IndexableField f) { return toBase64String(toObject(f)); } - @Override - public String toExternal(Fieldable f) { - return toBase64String(toObject(f)); - } @Override public ByteBuffer toObject(IndexableField f) { BytesRef bytes = f.binaryValue(null); return ByteBuffer.wrap(bytes.bytes, bytes.offset, bytes.length); } - - @Override - public ByteBuffer toObject(Fieldable f) { - BytesRef bytes = f.binaryValue(null); - return ByteBuffer.wrap(bytes.bytes, bytes.offset, bytes.length); - } @Override public IndexableField createField(SchemaField field, Object val, float boost) { Index: solr/src/java/org/apache/solr/schema/BoolField.java =================================================================== --- solr/src/java/org/apache/solr/schema/BoolField.java (revision 1150855) +++ solr/src/java/org/apache/solr/schema/BoolField.java (working copy) @@ -31,7 +31,6 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.document.Fieldable; import org.apache.solr.response.TextResponseWriter; import org.apache.solr.analysis.SolrAnalyzer; @@ -118,19 +117,11 @@ public String toExternal(IndexableField f) { return indexedToReadable(f.stringValue()); } - @Override - public String toExternal(Fieldable f) { - return indexedToReadable(f.stringValue()); - } @Override public Boolean toObject(IndexableField f) { return Boolean.valueOf( toExternal(f) ); } - @Override - public Boolean toObject(Fieldable f) { - return Boolean.valueOf( toExternal(f) ); - } @Override public Object toObject(SchemaField sf, BytesRef term) { Index: solr/src/java/org/apache/solr/schema/ByteField.java =================================================================== --- solr/src/java/org/apache/solr/schema/ByteField.java (revision 1150855) +++ solr/src/java/org/apache/solr/schema/ByteField.java (working copy) 
@@ -16,7 +16,6 @@ * limitations under the License. */ -import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.SortField; import org.apache.lucene.search.cache.ByteValuesCreator; @@ -78,7 +77,7 @@ } @Override - public Byte toObject(Fieldable f) { + public Byte toObject(IndexableField f) { return Byte.valueOf(toExternal(f)); } } Index: solr/src/java/org/apache/solr/schema/CollationField.java =================================================================== --- solr/src/java/org/apache/solr/schema/CollationField.java (revision 1150855) +++ solr/src/java/org/apache/solr/schema/CollationField.java (working copy) @@ -31,7 +31,6 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute; import org.apache.lucene.collation.CollationKeyAnalyzer; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.Query; import org.apache.lucene.search.SortField; Index: solr/src/java/org/apache/solr/schema/DateField.java =================================================================== --- solr/src/java/org/apache/solr/schema/DateField.java (revision 1150855) +++ solr/src/java/org/apache/solr/schema/DateField.java (working copy) @@ -17,7 +17,6 @@ package org.apache.solr.schema; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.Query; @@ -211,25 +210,12 @@ public String toExternal(IndexableField f) { return indexedToReadable(f.stringValue()); } - @Override - public String toExternal(Fieldable f) { - return indexedToReadable(f.stringValue()); - } public Date toObject(String indexedForm) throws java.text.ParseException { return parseDate(indexedToReadable(indexedForm)); } @Override - public Date toObject(Fieldable f) { - try { - return parseDate( 
toExternal(f) ); - } - catch( ParseException ex ) { - throw new RuntimeException( ex ); - } - } - @Override public Date toObject(IndexableField f) { try { return parseDate( toExternal(f) ); Index: solr/src/java/org/apache/solr/schema/DoubleField.java =================================================================== --- solr/src/java/org/apache/solr/schema/DoubleField.java (revision 1150855) +++ solr/src/java/org/apache/solr/schema/DoubleField.java (working copy) @@ -17,7 +17,6 @@ package org.apache.solr.schema; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.SortField; import org.apache.lucene.search.cache.CachedArrayCreator; @@ -82,8 +81,4 @@ public Double toObject(IndexableField f) { return Double.valueOf(toExternal(f)); } - @Override - public Double toObject(Fieldable f) { - return Double.valueOf(toExternal(f)); - } } Index: solr/src/java/org/apache/solr/schema/ExternalFileField.java =================================================================== --- solr/src/java/org/apache/solr/schema/ExternalFileField.java (revision 1150855) +++ solr/src/java/org/apache/solr/schema/ExternalFileField.java (working copy) @@ -17,7 +17,6 @@ package org.apache.solr.schema; import org.apache.lucene.search.SortField; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.IndexableField; import org.apache.solr.search.function.ValueSource; import org.apache.solr.search.function.FileFloatSource; Index: solr/src/java/org/apache/solr/schema/FieldType.java =================================================================== --- solr/src/java/org/apache/solr/schema/FieldType.java (revision 1150855) +++ solr/src/java/org/apache/solr/schema/FieldType.java (working copy) @@ -21,7 +21,6 @@ import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; -import 
org.apache.lucene.document.Fieldable; import org.apache.lucene.document2.Field; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; @@ -263,7 +262,7 @@ return createField(field.getName(), val, newType, boost); } - protected Fieldable createField(String name, String val, org.apache.lucene.document.Field.Store storage, org.apache.lucene.document.Field.Index index, + /*protected Fieldable createField(String name, String val, org.apache.lucene.document.Field.Store storage, org.apache.lucene.document.Field.Index index, org.apache.lucene.document.Field.TermVector vec, boolean omitNorms, boolean omitTFPos, float boost){ org.apache.lucene.document.Field f = new org.apache.lucene.document.Field(name, val, @@ -295,8 +294,8 @@ getFieldIndex(field, val), getFieldTermVec(field, val), field.omitNorms(), field.omitTf(), boost); } + */ - /** * Create the field from native Lucene parts. Mostly intended for use by FieldTypes outputing multiple * Fields per SchemaField @@ -326,30 +325,6 @@ IndexableField f = createField( field, value, boost); return f==null ? new IndexableField[]{} : new IndexableField[]{f}; } - - /* Helpers for field construction */ - protected org.apache.lucene.document.Field.TermVector getFieldTermVec(SchemaField field, - String internalVal) { - org.apache.lucene.document.Field.TermVector ftv = org.apache.lucene.document.Field.TermVector.NO; - if (field.storeTermPositions() && field.storeTermOffsets()) - ftv = org.apache.lucene.document.Field.TermVector.WITH_POSITIONS_OFFSETS; - else if (field.storeTermPositions()) - ftv = org.apache.lucene.document.Field.TermVector.WITH_POSITIONS; - else if (field.storeTermOffsets()) - ftv = org.apache.lucene.document.Field.TermVector.WITH_OFFSETS; - else if (field.storeTermVector()) - ftv = org.apache.lucene.document.Field.TermVector.YES; - return ftv; - } - protected org.apache.lucene.document.Field.Store getFieldStore(SchemaField field, - String internalVal) { - return field.stored() ? 
org.apache.lucene.document.Field.Store.YES : org.apache.lucene.document.Field.Store.NO; - } - protected org.apache.lucene.document.Field.Index getFieldIndex(SchemaField field, - String internalVal) { - return field.indexed() ? (isTokenized() ? org.apache.lucene.document.Field.Index.ANALYZED : - org.apache.lucene.document.Field.Index.NOT_ANALYZED) : org.apache.lucene.document.Field.Index.NO; - } /** * Convert an external value (from XML update command or from query string) @@ -367,24 +342,18 @@ * value * @see #toInternal */ - public String toExternal(Fieldable f) { + public String toExternal(IndexableField f) { // currently used in writing XML of the search result (but perhaps // a more efficient toXML(IndexableField f, Writer w) should be used // in the future. return f.stringValue(); } - public String toExternal(IndexableField f) { - return f.stringValue(); - } /** * Convert the stored-field format to an external object. * @see #toInternal * @since solr 1.3 */ - public Object toObject(Fieldable f) { - return toExternal(f); // by default use the string - } public Object toObject(IndexableField f) { return toExternal(f); // by default use the string } @@ -408,25 +377,17 @@ } /** Given the stored field, return the human readable representation */ - public String storedToReadable(Fieldable f) { - return toExternal(f); - } - /** Given the stored field, return the human readable representation */ public String storedToReadable(IndexableField f) { return toExternal(f); } /** Given the stored field, return the indexed form */ - public String storedToIndexed(Fieldable f) { + public String storedToIndexed(IndexableField f) { // right now, the transformation of single valued fields like SortableInt // is done when the Field is created, not at analysis time... this means // that the indexed form is the same as the stored field form. 
return f.stringValue(); } - /** Given the stored field, return the indexed form */ - public String storedToIndexed(IndexableField f) { - return f.stringValue(); - } /** Given the readable value, return the term value that will match it. */ public String readableToIndexed(String val) { Index: solr/src/java/org/apache/solr/schema/FloatField.java =================================================================== --- solr/src/java/org/apache/solr/schema/FloatField.java (revision 1150855) +++ solr/src/java/org/apache/solr/schema/FloatField.java (working copy) @@ -23,7 +23,6 @@ import org.apache.solr.search.QParser; import org.apache.solr.search.function.ValueSource; import org.apache.solr.search.function.FloatFieldSource; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.IndexableField; import org.apache.solr.response.TextResponseWriter; @@ -79,8 +78,4 @@ public Float toObject(IndexableField f) { return Float.valueOf( toExternal(f) ); } - @Override - public Float toObject(Fieldable f) { - return Float.valueOf( toExternal(f) ); - } } Index: solr/src/java/org/apache/solr/schema/GeoHashField.java =================================================================== --- solr/src/java/org/apache/solr/schema/GeoHashField.java (revision 1150855) +++ solr/src/java/org/apache/solr/schema/GeoHashField.java (working copy) @@ -17,7 +17,6 @@ package org.apache.solr.schema; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.Query; import org.apache.lucene.search.SortField; Index: solr/src/java/org/apache/solr/schema/IndexSchema.java =================================================================== --- solr/src/java/org/apache/solr/schema/IndexSchema.java (revision 1150855) +++ solr/src/java/org/apache/solr/schema/IndexSchema.java (working copy) @@ -19,7 +19,6 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; -import 
org.apache.lucene.document.Fieldable; import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.DefaultSimilarity; import org.apache.lucene.search.IndexSearcher; @@ -264,23 +263,16 @@ public IndexableField getUniqueKeyField(org.apache.lucene.document2.Document doc) { return doc.getField(uniqueKeyFieldName); // this should return null if name is null } - public Fieldable getUniqueKeyField(org.apache.lucene.document.Document doc) { - return doc.getField(uniqueKeyFieldName); // this should return null if name is null - } /** * The printable value of the Unique Key field for * the specified Document * @return null if this schema has no unique key field */ - public String printableUniqueKey(org.apache.lucene.document.Document doc) { - Fieldable f = doc.getFieldable(uniqueKeyFieldName); - return f==null ? null : uniqueKeyFieldType.toExternal(f); - } public String printableUniqueKey(org.apache.lucene.document2.Document doc) { IndexableField f = doc.getField(uniqueKeyFieldName); return f==null ? 
null : uniqueKeyFieldType.toExternal(f); - } + } private SchemaField getIndexedField(String fname) { SchemaField f = getFields().get(fname); Index: solr/src/java/org/apache/solr/schema/IntField.java =================================================================== --- solr/src/java/org/apache/solr/schema/IntField.java (revision 1150855) +++ solr/src/java/org/apache/solr/schema/IntField.java (working copy) @@ -23,7 +23,6 @@ import org.apache.solr.search.QParser; import org.apache.solr.search.function.ValueSource; import org.apache.solr.search.function.IntFieldSource; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.IndexableField; import org.apache.solr.response.TextResponseWriter; @@ -79,8 +78,4 @@ public Integer toObject(IndexableField f) { return Integer.valueOf( toExternal(f) ); } - @Override - public Integer toObject(Fieldable f) { - return Integer.valueOf( toExternal(f) ); - } } Index: solr/src/java/org/apache/solr/schema/LatLonType.java =================================================================== --- solr/src/java/org/apache/solr/schema/LatLonType.java (revision 1150855) +++ solr/src/java/org/apache/solr/schema/LatLonType.java (working copy) @@ -16,7 +16,6 @@ * limitations under the License. 
*/ -import org.apache.lucene.document.Fieldable; import org.apache.lucene.document2.FieldType; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.IndexReader; Index: solr/src/java/org/apache/solr/schema/LongField.java =================================================================== --- solr/src/java/org/apache/solr/schema/LongField.java (revision 1150855) +++ solr/src/java/org/apache/solr/schema/LongField.java (working copy) @@ -17,7 +17,6 @@ package org.apache.solr.schema; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.SortField; import org.apache.lucene.search.cache.CachedArrayCreator; @@ -81,8 +80,4 @@ public Long toObject(IndexableField f) { return Long.valueOf( toExternal(f) ); } - @Override - public Long toObject(Fieldable f) { - return Long.valueOf( toExternal(f) ); - } } Index: solr/src/java/org/apache/solr/schema/PointType.java =================================================================== --- solr/src/java/org/apache/solr/schema/PointType.java (revision 1150855) +++ solr/src/java/org/apache/solr/schema/PointType.java (working copy) @@ -17,8 +17,6 @@ package org.apache.solr.schema; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; Index: solr/src/java/org/apache/solr/schema/RandomSortField.java =================================================================== --- solr/src/java/org/apache/solr/schema/RandomSortField.java (revision 1150855) +++ solr/src/java/org/apache/solr/schema/RandomSortField.java (working copy) @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.Map; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.IndexReader; import 
org.apache.lucene.index.IndexReader.AtomicReaderContext; Index: solr/src/java/org/apache/solr/schema/SchemaField.java =================================================================== --- solr/src/java/org/apache/solr/schema/SchemaField.java (revision 1150855) +++ solr/src/java/org/apache/solr/schema/SchemaField.java (working copy) @@ -19,7 +19,6 @@ import org.apache.solr.common.SolrException; import org.apache.solr.common.SolrException.ErrorCode; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.SortField; import org.apache.solr.search.QParser; Index: solr/src/java/org/apache/solr/schema/ShortField.java =================================================================== --- solr/src/java/org/apache/solr/schema/ShortField.java (revision 1150855) +++ solr/src/java/org/apache/solr/schema/ShortField.java (working copy) @@ -16,7 +16,6 @@ * limitations under the License. */ -import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.SortField; import org.apache.lucene.search.cache.CachedArrayCreator; @@ -84,9 +83,5 @@ public Short toObject(IndexableField f) { return Short.valueOf(toExternal(f)); } - @Override - public Short toObject(Fieldable f) { - return Short.valueOf(toExternal(f)); - } } Index: solr/src/java/org/apache/solr/schema/SortableDoubleField.java =================================================================== --- solr/src/java/org/apache/solr/schema/SortableDoubleField.java (revision 1150855) +++ solr/src/java/org/apache/solr/schema/SortableDoubleField.java (working copy) @@ -27,7 +27,6 @@ import org.apache.solr.search.function.FieldCacheSource; import org.apache.solr.search.function.DocValues; import org.apache.solr.search.function.StringIndexDocValues; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.IndexableField; 
import org.apache.solr.util.NumberUtils; @@ -66,10 +65,6 @@ public String toExternal(IndexableField f) { return indexedToReadable(f.stringValue()); } - @Override - public String toExternal(Fieldable f) { - return indexedToReadable(f.stringValue()); - } @Override public Double toObject(IndexableField f) { Index: solr/src/java/org/apache/solr/schema/SortableFloatField.java =================================================================== --- solr/src/java/org/apache/solr/schema/SortableFloatField.java (revision 1150855) +++ solr/src/java/org/apache/solr/schema/SortableFloatField.java (working copy) @@ -27,7 +27,6 @@ import org.apache.solr.search.function.FieldCacheSource; import org.apache.solr.search.function.DocValues; import org.apache.solr.search.function.StringIndexDocValues; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.IndexableField; import org.apache.solr.util.NumberUtils; @@ -66,19 +65,11 @@ public String toExternal(IndexableField f) { return indexedToReadable(f.stringValue()); } - @Override - public String toExternal(Fieldable f) { - return indexedToReadable(f.stringValue()); - } @Override public Float toObject(IndexableField f) { return NumberUtils.SortableStr2float(f.stringValue()); } - @Override - public Float toObject(Fieldable f) { - return NumberUtils.SortableStr2float(f.stringValue()); - } @Override public String indexedToReadable(String indexedForm) { Index: solr/src/java/org/apache/solr/schema/SortableIntField.java =================================================================== --- solr/src/java/org/apache/solr/schema/SortableIntField.java (revision 1150855) +++ solr/src/java/org/apache/solr/schema/SortableIntField.java (working copy) @@ -27,7 +27,6 @@ import org.apache.solr.search.function.FieldCacheSource; import org.apache.solr.search.function.DocValues; import org.apache.solr.search.function.StringIndexDocValues; -import 
org.apache.lucene.document.Fieldable; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.IndexableField; import org.apache.solr.util.NumberUtils; @@ -69,10 +68,6 @@ public String toExternal(IndexableField f) { return indexedToReadable(f.stringValue()); } - @Override - public String toExternal(Fieldable f) { - return indexedToReadable(f.stringValue()); - } @Override public String indexedToReadable(String indexedForm) { @@ -90,10 +85,6 @@ public Integer toObject(IndexableField f) { return NumberUtils.SortableStr2int(f.stringValue(), 0, 3); } - @Override - public Integer toObject(Fieldable f) { - return NumberUtils.SortableStr2int(f.stringValue(), 0, 3); - } @Override public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException { Index: solr/src/java/org/apache/solr/schema/SortableLongField.java =================================================================== --- solr/src/java/org/apache/solr/schema/SortableLongField.java (revision 1150855) +++ solr/src/java/org/apache/solr/schema/SortableLongField.java (working copy) @@ -27,7 +27,6 @@ import org.apache.solr.search.function.FieldCacheSource; import org.apache.solr.search.function.DocValues; import org.apache.solr.search.function.StringIndexDocValues; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.IndexableField; import org.apache.solr.util.NumberUtils; @@ -83,17 +82,8 @@ public Long toObject(IndexableField f) { return NumberUtils.SortableStr2long(f.stringValue(),0,5); } - @Override - public String toExternal(Fieldable f) { - return indexedToReadable(f.stringValue()); - } @Override - public Long toObject(Fieldable f) { - return NumberUtils.SortableStr2long(f.stringValue(),0,5); - } - - @Override public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException { String sval = f.stringValue(); writer.writeLong(name, 
NumberUtils.SortableStr2long(sval,0,sval.length())); Index: solr/src/java/org/apache/solr/schema/StrField.java =================================================================== --- solr/src/java/org/apache/solr/schema/StrField.java (revision 1150855) +++ solr/src/java/org/apache/solr/schema/StrField.java (working copy) @@ -18,7 +18,6 @@ package org.apache.solr.schema; import org.apache.lucene.search.SortField; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.IndexableField; import org.apache.lucene.util.BytesRef; import org.apache.solr.response.TextResponseWriter; Index: solr/src/java/org/apache/solr/schema/TextField.java =================================================================== --- solr/src/java/org/apache/solr/schema/TextField.java (revision 1150855) +++ solr/src/java/org/apache/solr/schema/TextField.java (working copy) @@ -31,7 +31,6 @@ import org.apache.lucene.analysis.CachingTokenFilter; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.util.BytesRef; import org.apache.solr.response.TextResponseWriter; import org.apache.solr.search.QParser; Index: solr/src/java/org/apache/solr/schema/TrieDateField.java =================================================================== --- solr/src/java/org/apache/solr/schema/TrieDateField.java (revision 1150855) +++ solr/src/java/org/apache/solr/schema/TrieDateField.java (working copy) @@ -20,7 +20,6 @@ import org.apache.solr.search.function.ValueSource; import org.apache.solr.search.QParser; import org.apache.solr.response.TextResponseWriter; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.SortField; import org.apache.lucene.search.Query; @@ -49,10 +48,6 @@ public Date toObject(IndexableField f) { return (Date) wrappedField.toObject(f); } - @Override - public Date toObject(Fieldable f) { - return 
(Date) wrappedField.toObject(f); - } @Override public Object toObject(SchemaField sf, BytesRef term) { @@ -96,10 +91,6 @@ public String storedToReadable(IndexableField f) { return wrappedField.storedToReadable(f); } - @Override - public String storedToReadable(Fieldable f) { - return wrappedField.storedToReadable(f); - } @Override public String readableToIndexed(String val) { @@ -115,10 +106,6 @@ public String toExternal(IndexableField f) { return wrappedField.toExternal(f); } - @Override - public String toExternal(Fieldable f) { - return wrappedField.toExternal(f); - } @Override public String indexedToReadable(String _indexedForm) { @@ -134,10 +121,6 @@ public String storedToIndexed(IndexableField f) { return wrappedField.storedToIndexed(f); } - @Override - public String storedToIndexed(Fieldable f) { - return wrappedField.storedToIndexed(f); - } @Override public IndexableField createField(SchemaField field, Object value, float boost) { Index: solr/src/java/org/apache/solr/schema/TrieField.java =================================================================== --- solr/src/java/org/apache/solr/schema/TrieField.java (revision 1150855) +++ solr/src/java/org/apache/solr/schema/TrieField.java (working copy) @@ -16,8 +16,7 @@ */ package org.apache.solr.schema; -import org.apache.lucene.document.Fieldable; -import org.apache.lucene.document.NumericField; +import org.apache.lucene.document2.NumericField; import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.*; import org.apache.lucene.search.cache.CachedArrayCreator; @@ -103,34 +102,8 @@ @Override public Object toObject(IndexableField f) { - if (f instanceof org.apache.lucene.document2.NumericField) { - final Number val = ((org.apache.lucene.document2.NumericField) f).numericValue(); - if (val==null) return badFieldString(f); - return (type == TrieTypes.DATE) ? 
new Date(val.longValue()) : val; - } else { - // the following code is "deprecated" and only to support pre-3.2 indexes using the old BinaryField encoding: - final BytesRef bytes = f.binaryValue(null); - if (bytes==null) return badFieldString(f); - switch (type) { - case INTEGER: - return toInt(bytes.bytes); - case FLOAT: - return Float.intBitsToFloat(toInt(bytes.bytes)); - case LONG: - return toLong(bytes.bytes); - case DOUBLE: - return Double.longBitsToDouble(toLong(bytes.bytes)); - case DATE: - return new Date(toLong(bytes.bytes)); - default: - throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown type for trie field: " + f.name()); - } - } - } - @Override - public Object toObject(Fieldable f) { if (f instanceof NumericField) { - final Number val = ((NumericField) f).getNumericValue(); + final Number val = ((NumericField) f).numericValue(); if (val==null) return badFieldString(f); return (type == TrieTypes.DATE) ? new Date(val.longValue()) : val; } else { @@ -318,10 +291,6 @@ public String storedToReadable(IndexableField f) { return toExternal(f); } - @Override - public String storedToReadable(Fieldable f) { - return toExternal(f); - } @Override public String readableToIndexed(String val) { @@ -364,10 +333,6 @@ String s = f.stringValue(); return "ERROR:SCHEMA-INDEX-MISMATCH,stringValue="+s; } - static String badFieldString(Fieldable f) { - String s = f.stringValue(); - return "ERROR:SCHEMA-INDEX-MISMATCH,stringValue="+s; - } @Override public String toExternal(IndexableField f) { @@ -375,12 +340,6 @@ ? dateField.toExternal((Date) toObject(f)) : toObject(f).toString(); } - @Override - public String toExternal(Fieldable f) { - return (type == TrieTypes.DATE) - ? 
dateField.toExternal((Date) toObject(f)) - : toObject(f).toString(); - } @Override public String indexedToReadable(String _indexedForm) { @@ -506,67 +465,6 @@ } return bytes.utf8ToString(); } - @Override - public String storedToIndexed(Fieldable f) { - final BytesRef bytes = new BytesRef(NumericUtils.BUF_SIZE_LONG); - if (f instanceof NumericField) { - final Number val = ((NumericField) f).getNumericValue(); - if (val==null) - throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Invalid field contents: "+f.name()); - switch (type) { - case INTEGER: - NumericUtils.intToPrefixCoded(val.intValue(), 0, bytes); - break; - case FLOAT: - NumericUtils.intToPrefixCoded(NumericUtils.floatToSortableInt(val.floatValue()), 0, bytes); - break; - case LONG: //fallthrough! - case DATE: - NumericUtils.longToPrefixCoded(val.longValue(), 0, bytes); - break; - case DOUBLE: - NumericUtils.longToPrefixCoded(NumericUtils.doubleToSortableLong(val.doubleValue()), 0, bytes); - break; - default: - throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown type for trie field: " + f.name()); - } - } else { - // the following code is "deprecated" and only to support pre-3.2 indexes using the old BinaryField encoding: - final BytesRef bytesRef = f.binaryValue(null); - if (bytesRef==null) - throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Invalid field contents: "+f.name()); - switch (type) { - case INTEGER: - NumericUtils.intToPrefixCoded(toInt(bytesRef.bytes), 0, bytes); - break; - case FLOAT: { - // WARNING: Code Duplication! Keep in sync with o.a.l.util.NumericUtils! - // copied from NumericUtils to not convert to/from float two times - // code in next 2 lines is identical to: int v = NumericUtils.floatToSortableInt(Float.intBitsToFloat(toInt(arr))); - int v = toInt(bytesRef.bytes); - if (v<0) v ^= 0x7fffffff; - NumericUtils.intToPrefixCoded(v, 0, bytes); - break; - } - case LONG: //fallthrough! 
- case DATE: - NumericUtils.longToPrefixCoded(toLong(bytesRef.bytes), 0, bytes); - break; - case DOUBLE: { - // WARNING: Code Duplication! Keep in sync with o.a.l.util.NumericUtils! - // copied from NumericUtils to not convert to/from double two times - // code in next 2 lines is identical to: long v = NumericUtils.doubleToSortableLong(Double.longBitsToDouble(toLong(arr))); - long v = toLong(bytesRef.bytes); - if (v<0) v ^= 0x7fffffffffffffffL; - NumericUtils.longToPrefixCoded(v, 0, bytes); - break; - } - default: - throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown type for trie field: " + f.name()); - } - } - return bytes.utf8ToString(); - } @Override public IndexableField createField(SchemaField field, Object value, float boost) { Index: solr/src/java/org/apache/solr/schema/UUIDField.java =================================================================== --- solr/src/java/org/apache/solr/schema/UUIDField.java (revision 1150855) +++ solr/src/java/org/apache/solr/schema/UUIDField.java (working copy) @@ -22,7 +22,6 @@ import java.util.Map; import java.util.UUID; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.SortField; import org.apache.solr.common.SolrException; @@ -92,8 +91,4 @@ public UUID toObject(IndexableField f) { return UUID.fromString(f.stringValue()); } - @Override - public UUID toObject(Fieldable f) { - return UUID.fromString(f.stringValue()); - } } Index: solr/src/java/org/apache/solr/spelling/FileBasedSpellChecker.java =================================================================== --- solr/src/java/org/apache/solr/spelling/FileBasedSpellChecker.java (revision 1150855) +++ solr/src/java/org/apache/solr/spelling/FileBasedSpellChecker.java (working copy) @@ -24,8 +24,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import 
org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.TextField; import org.apache.lucene.search.spell.HighFrequencyDictionary; import org.apache.lucene.search.spell.PlainTextDictionary; import org.apache.lucene.store.RAMDirectory; @@ -100,7 +101,7 @@ for (String s : lines) { Document d = new Document(); - d.add(new Field(WORD_FIELD_NAME, s, Field.Store.NO, Field.Index.ANALYZED)); + d.add(new TextField(WORD_FIELD_NAME, s)); writer.addDocument(d); } writer.optimize(); Index: solr/src/java/org/apache/solr/update/DocumentBuilder.java =================================================================== --- solr/src/java/org/apache/solr/update/DocumentBuilder.java (revision 1150855) +++ solr/src/java/org/apache/solr/update/DocumentBuilder.java (working copy) @@ -21,7 +21,6 @@ import java.util.HashMap; import java.util.List; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.document2.Document; import org.apache.lucene.index.IndexableField; import org.apache.solr.common.SolrDocument; Index: solr/src/test/org/apache/solr/BasicFunctionalityTest.java =================================================================== --- solr/src/test/org/apache/solr/BasicFunctionalityTest.java (revision 1150855) +++ solr/src/test/org/apache/solr/BasicFunctionalityTest.java (working copy) @@ -26,7 +26,7 @@ import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; -import org.apache.lucene.document.FieldSelectorVisitor; +import org.apache.lucene.document2.FieldSelectorVisitor; import org.apache.lucene.document2.Document; import org.apache.lucene.document2.Field; import org.apache.lucene.index.IndexableField; Index: solr/src/webapp/src/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java =================================================================== --- solr/src/webapp/src/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java (revision 1150855) +++ 
solr/src/webapp/src/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java (working copy) @@ -23,7 +23,7 @@ import java.io.InputStream; import java.util.Set; -import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; import org.apache.solr.client.solrj.SolrRequest; import org.apache.solr.client.solrj.SolrServer; import org.apache.solr.client.solrj.SolrServerException;