Index: lucene/contrib/demo/src/java/org/apache/lucene/demo/IndexFiles.java =================================================================== --- lucene/contrib/demo/src/java/org/apache/lucene/demo/IndexFiles.java (revision 1153521) +++ lucene/contrib/demo/src/java/org/apache/lucene/demo/IndexFiles.java (working copy) @@ -19,11 +19,11 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.NumericField; -import org.apache.lucene.document2.StringField; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.NumericField; +import org.apache.lucene.document.StringField; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.index.IndexWriterConfig; Index: lucene/contrib/demo/src/java/org/apache/lucene/demo/SearchFiles.java =================================================================== --- lucene/contrib/demo/src/java/org/apache/lucene/demo/SearchFiles.java (revision 1153521) +++ lucene/contrib/demo/src/java/org/apache/lucene/demo/SearchFiles.java (working copy) @@ -26,7 +26,7 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; -import org.apache.lucene.document2.Document; +import org.apache.lucene.document.Document; import org.apache.lucene.queryParser.QueryParser; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; @@ -175,7 +175,7 @@ continue; } - Document doc = searcher.doc2(hits[i].doc); + Document doc = searcher.doc(hits[i].doc); String path = doc.get("path"); if (path != null) { System.out.println((i+1) + ". 
" + path); Index: lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java =================================================================== --- lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java (revision 1153521) +++ lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java (working copy) @@ -31,7 +31,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -import org.apache.lucene.document2.Document; +import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.TermFreqVector; import org.apache.lucene.index.TermPositionVector; @@ -270,7 +270,7 @@ // convenience method public static TokenStream getTokenStream(IndexReader reader, int docId, String field, Analyzer analyzer) throws IOException { - Document doc = reader.document2(docId); + Document doc = reader.document(docId); return getTokenStream(doc, field, analyzer); } Index: lucene/contrib/highlighter/src/java/org/apache/lucene/search/vectorhighlight/BaseFragmentsBuilder.java =================================================================== --- lucene/contrib/highlighter/src/java/org/apache/lucene/search/vectorhighlight/BaseFragmentsBuilder.java (revision 1153521) +++ lucene/contrib/highlighter/src/java/org/apache/lucene/search/vectorhighlight/BaseFragmentsBuilder.java (working copy) @@ -21,9 +21,9 @@ import java.util.ArrayList; import java.util.List; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.StoredFieldVisitor; Index: lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java =================================================================== --- lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java (revision 1153521) +++ lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterPhraseTest.java (working copy) @@ -26,10 +26,10 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; Index: lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java =================================================================== --- lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (revision 1153521) +++ 
lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (working copy) @@ -42,11 +42,11 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.NumericField; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.NumericField; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; @@ -112,7 +112,7 @@ for (int i = 0; i < hits.scoreDocs.length; i++) { - Document doc = searcher.doc2(hits.scoreDocs[i].doc); + Document doc = searcher.doc(hits.scoreDocs[i].doc); String storedField = doc.get(FIELD_NAME); TokenStream stream = TokenSources.getAnyTokenStream(searcher @@ -185,7 +185,7 @@ Highlighter highlighter = new Highlighter(scorer); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); highlighter.setTextFragmenter(new SimpleFragmenter(40)); @@ -234,7 +234,7 @@ Highlighter highlighter = new Highlighter(this, scorer); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); highlighter.setTextFragmenter(new SimpleFragmenter(40)); @@ -256,7 +256,7 @@ highlighter = new Highlighter(this, scorer); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); highlighter.setTextFragmenter(new SimpleFragmenter(40)); @@ -278,7 +278,7 @@ highlighter = new Highlighter(this, scorer); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); highlighter.setTextFragmenter(new SimpleFragmenter(40)); @@ -303,7 +303,7 @@ Highlighter highlighter = new Highlighter(this, scorer); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); highlighter.setTextFragmenter(new SimpleFragmenter(40)); @@ -327,7 +327,7 @@ Highlighter highlighter = new Highlighter(this, scorer); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); 
highlighter.setTextFragmenter(new SimpleFragmenter(40)); @@ -352,7 +352,7 @@ Highlighter highlighter = new Highlighter(this, scorer); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc2(hits.scoreDocs[i].doc).get(NUMERIC_FIELD_NAME); + String text = searcher.doc(hits.scoreDocs[i].doc).get(NUMERIC_FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); highlighter.setTextFragmenter(new SimpleFragmenter(40)); @@ -375,7 +375,7 @@ highlighter.setTextFragmenter(new SimpleFragmenter(40)); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); String result = highlighter.getBestFragments(tokenStream, text, maxNumFragmentsRequired, @@ -393,7 +393,7 @@ int maxNumFragmentsRequired = 2; for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); QueryScorer scorer = new QueryScorer(query, FIELD_NAME); Highlighter highlighter = new Highlighter(this, scorer); @@ -418,7 +418,7 @@ Highlighter highlighter = new Highlighter(this, scorer); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer, 5)); @@ -437,7 +437,7 @@ highlighter = new Highlighter(this, scorer); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); highlighter.setTextFragmenter(new SimpleSpanFragmenter(scorer, 20)); @@ -459,7 +459,7 @@ Highlighter highlighter = new Highlighter(this,scorer); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME,new StringReader(text)); highlighter.setTextFragmenter(new SimpleFragmenter(40)); @@ -530,7 +530,7 @@ highlighter.setTextFragmenter(new SimpleFragmenter(40)); int maxNumFragmentsRequired = 2; for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); String result = highlighter.getBestFragments(tokenStream, text, maxNumFragmentsRequired, @@ -688,7 +688,7 @@ hits = searcher.search(query, null, 1000); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc2(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME); + String text = searcher.doc(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME); int maxNumFragmentsRequired = 2; String fragmentSeparator = "..."; QueryScorer scorer; @@ -716,7 +716,7 @@ numHighlights = 0; for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc2(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME); + String 
text = searcher.doc(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME); int maxNumFragmentsRequired = 2; String fragmentSeparator = "..."; QueryScorer scorer; @@ -744,7 +744,7 @@ numHighlights = 0; for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc2(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME); + String text = searcher.doc(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME); int maxNumFragmentsRequired = 2; String fragmentSeparator = "..."; QueryScorer scorer; @@ -908,7 +908,7 @@ doSearching("Kennedy"); numHighlights = 0; for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream, @@ -922,7 +922,7 @@ numHighlights = 0; for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream, HighlighterTest.this); @@ -933,7 +933,7 @@ numHighlights = 0; for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream, @@ -1041,7 +1041,7 @@ // new Highlighter(HighlighterTest.this, new QueryTermScorer(query)); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream, HighlighterTest.this); @@ -1064,7 +1064,7 @@ doSearching("Kennedy"); for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream, @@ -1216,7 +1216,7 @@ int maxNumFragmentsRequired = 3; for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream, HighlighterTest.this, false); @@ -1568,7 +1568,7 @@ TopDocs hits = searcher.search(query, null, 10); for( int i = 0; i < hits.totalHits; i++ ){ - Document doc = searcher.doc2( hits.scoreDocs[i].doc ); + Document doc = searcher.doc( hits.scoreDocs[i].doc ); String result = h.getBestFragment( a, "t_text1", doc.get( "t_text1" )); if (VERBOSE) System.out.println("result:" + result); assertEquals("more random words for second field", result); @@ -1631,7 +1631,7 @@ public void assertExpectedHighlightCount(final int maxNumFragmentsRequired, final int expectedHighlights) throws Exception { for (int i = 0; i < hits.totalHits; i++) { - 
String text = searcher.doc2(hits.scoreDocs[i].doc).get(FIELD_NAME); + String text = searcher.doc(hits.scoreDocs[i].doc).get(FIELD_NAME); TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text)); QueryScorer scorer = new QueryScorer(query, FIELD_NAME); Highlighter highlighter = new Highlighter(this, scorer); @@ -1864,7 +1864,7 @@ throws Exception { for (int i = 0; i < hits.totalHits; i++) { - String text = searcher.doc2(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME); + String text = searcher.doc(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME); int maxNumFragmentsRequired = 2; String fragmentSeparator = "..."; Scorer scorer = null; Index: lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java =================================================================== --- lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java (revision 1153521) +++ lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/TokenSourcesTest.java (working copy) @@ -26,10 +26,10 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; Index: lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java =================================================================== --- lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java (revision 1153521) +++ lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/AbstractTestCase.java (working copy) @@ -28,10 +28,10 @@ import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig.OpenMode; Index: lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java =================================================================== --- lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java (revision 1153521) +++ lucene/contrib/highlighter/src/test/org/apache/lucene/search/vectorhighlight/SimpleFragmentsBuilderTest.java (working copy) @@ -17,10 +17,10 @@ * limitations under the License. 
*/ -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; Index: lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedDocument.java =================================================================== --- lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedDocument.java (revision 1153521) +++ lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedDocument.java (working copy) @@ -16,7 +16,7 @@ * limitations under the License. */ -import org.apache.lucene.document2.Document; +import org.apache.lucene.document.Document; import java.util.List; import java.util.Map; Index: lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java =================================================================== --- lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java (revision 1153521) +++ lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java (working copy) @@ -26,7 +26,7 @@ import java.util.Set; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.document2.Document; +import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.MultiNorms; @@ -189,7 +189,7 @@ } else { InstantiatedDocument document = new InstantiatedDocument(); // copy stored fields from source reader - Document sourceDocument = sourceIndexReader.document2(i); + Document sourceDocument = sourceIndexReader.document(i); for (IndexableField field : sourceDocument) { if (fields == null || fields.contains(field.name())) { document.getDocument().add(field); Index: lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java =================================================================== --- lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java (revision 1153521) +++ lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java (working copy) @@ -29,7 +29,7 @@ import java.util.Set; import java.util.Comparator; -import org.apache.lucene.document2.Document; +import org.apache.lucene.document.Document; import org.apache.lucene.index.*; import org.apache.lucene.index.IndexReader.ReaderContext; import org.apache.lucene.store.Directory; @@ -272,7 +272,7 @@ */ @Override - public Document document2(int n) throws IOException { + public Document document(int n) throws IOException { return getIndex().getDocumentsByNumber()[n].getDocument(); } Index: lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java =================================================================== --- lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java (revision 1153521) +++ lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexWriter.java 
(working copy) @@ -36,7 +36,7 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.Token; import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.document2.Document; +import org.apache.lucene.document.Document; import org.apache.lucene.index.FieldInvertState; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; Index: lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java =================================================================== --- lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java (revision 1153521) +++ lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java (working copy) @@ -27,10 +27,10 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.MultiNorms; Index: lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestRealTime.java =================================================================== --- lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestRealTime.java (revision 1153521) +++ lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestRealTime.java (working copy) @@ -18,9 +18,9 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.Scorer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.StringField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.StringField; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.Term; import org.apache.lucene.util.LuceneTestCase; Index: lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestUnoptimizedReaderOnConstructor.java =================================================================== --- lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestUnoptimizedReaderOnConstructor.java (revision 1153521) +++ lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestUnoptimizedReaderOnConstructor.java (working copy) @@ -24,8 +24,8 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.TextField; /** * @since 2009-mar-30 13:15:49 Index: lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java =================================================================== --- lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java (revision 1153521) +++ 
lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java (working copy) @@ -34,7 +34,7 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute; -import org.apache.lucene.document2.Document; +import org.apache.lucene.document.Document; import org.apache.lucene.index.DocsAndPositionsEnum; import org.apache.lucene.index.DocsEnum; import org.apache.lucene.index.FieldInvertState; Index: lucene/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java =================================================================== --- lucene/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java (revision 1153521) +++ lucene/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java (working copy) @@ -29,9 +29,9 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenFilter; import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.queryParser.QueryParser; Index: lucene/contrib/misc/src/java/org/apache/lucene/document/FieldSelector.java =================================================================== --- lucene/contrib/misc/src/java/org/apache/lucene/document/FieldSelector.java (revision 1153521) +++ lucene/contrib/misc/src/java/org/apache/lucene/document/FieldSelector.java (working copy) @@ -1,5 +1,8 @@ -package org.apache.lucene.document2; +package org.apache.lucene.document; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; + /** * Copyright 2004 The Apache Software Foundation * Index: lucene/contrib/misc/src/java/org/apache/lucene/document/FieldSelectorResult.java =================================================================== --- lucene/contrib/misc/src/java/org/apache/lucene/document/FieldSelectorResult.java (revision 1153521) +++ lucene/contrib/misc/src/java/org/apache/lucene/document/FieldSelectorResult.java (working copy) @@ -1,4 +1,4 @@ -package org.apache.lucene.document2; +package org.apache.lucene.document; /** * Copyright 2004 The Apache Software Foundation Index: lucene/contrib/misc/src/java/org/apache/lucene/document/FieldSelectorVisitor.java =================================================================== --- lucene/contrib/misc/src/java/org/apache/lucene/document/FieldSelectorVisitor.java (revision 1153521) +++ lucene/contrib/misc/src/java/org/apache/lucene/document/FieldSelectorVisitor.java (working copy) @@ -1,4 +1,4 @@ -package org.apache.lucene.document2; +package org.apache.lucene.document; /** * Copyright 2004 The Apache Software Foundation @@ -20,7 +20,13 @@ import java.io.Reader; import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.document2.NumericField.DataType; +import org.apache.lucene.document.BinaryField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.NumericField; +import org.apache.lucene.document.TextField; +import 
org.apache.lucene.document.NumericField.DataType; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.FieldReaderException; import org.apache.lucene.index.IndexReader; Index: lucene/contrib/misc/src/java/org/apache/lucene/document/LoadFirstFieldSelector.java =================================================================== --- lucene/contrib/misc/src/java/org/apache/lucene/document/LoadFirstFieldSelector.java (revision 1153521) +++ lucene/contrib/misc/src/java/org/apache/lucene/document/LoadFirstFieldSelector.java (working copy) @@ -1,4 +1,4 @@ -package org.apache.lucene.document2; +package org.apache.lucene.document; /** * Copyright 2004 The Apache Software Foundation * Index: lucene/contrib/misc/src/java/org/apache/lucene/document/MapFieldSelector.java =================================================================== --- lucene/contrib/misc/src/java/org/apache/lucene/document/MapFieldSelector.java (revision 1153521) +++ lucene/contrib/misc/src/java/org/apache/lucene/document/MapFieldSelector.java (working copy) @@ -1,4 +1,4 @@ -package org.apache.lucene.document2; +package org.apache.lucene.document; /** * Licensed to the Apache Software Foundation (ASF) under one or more Index: lucene/contrib/misc/src/java/org/apache/lucene/document/SetBasedFieldSelector.java =================================================================== --- lucene/contrib/misc/src/java/org/apache/lucene/document/SetBasedFieldSelector.java (revision 1153521) +++ lucene/contrib/misc/src/java/org/apache/lucene/document/SetBasedFieldSelector.java (working copy) @@ -1,4 +1,4 @@ -package org.apache.lucene.document2; +package org.apache.lucene.document; /** * Copyright 2004 The Apache Software Foundation @@ -18,6 +18,8 @@ import java.util.Set; +import org.apache.lucene.document.Field; + /** * Declare what fields to load normally and what fields to load lazily * Index: lucene/contrib/misc/src/java/org/apache/lucene/document2/FieldSelector.java =================================================================== --- lucene/contrib/misc/src/java/org/apache/lucene/document2/FieldSelector.java (revision 1153521) +++ lucene/contrib/misc/src/java/org/apache/lucene/document2/FieldSelector.java (working copy) @@ -1,33 +0,0 @@ -package org.apache.lucene.document2; - -/** - * Copyright 2004 The Apache Software Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Similar to a {@link java.io.FileFilter}, the FieldSelector allows one to make decisions about - * what Fields get loaded on a {@link Document} by {@link FieldSelectorVisitor} - * - **/ -public interface FieldSelector { - - /** - * - * @param fieldName the field to accept or reject - * @return an instance of {@link FieldSelectorResult} - * if the {@link Field} named fieldName should be loaded. 
- */ - FieldSelectorResult accept(String fieldName); -} Index: lucene/contrib/misc/src/java/org/apache/lucene/document2/FieldSelectorResult.java =================================================================== --- lucene/contrib/misc/src/java/org/apache/lucene/document2/FieldSelectorResult.java (revision 1153521) +++ lucene/contrib/misc/src/java/org/apache/lucene/document2/FieldSelectorResult.java (working copy) @@ -1,76 +0,0 @@ -package org.apache.lucene.document2; - -/** - * Copyright 2004 The Apache Software Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Provides information about what should be done with this Field - * - **/ -public enum FieldSelectorResult { - - /** - * Load this {@link Field} every time the {@link Document} is loaded, reading in the data as it is encountered. - * {@link Document#getField(String)} and {@link Document#getFieldable(String)} should not return null. - *
<p/>
- * {@link Document#add(Fieldable)} should be called by the Reader. - */ - LOAD, - - /** - * Lazily load this {@link Field}. This means the {@link Field} is valid, but it may not actually contain its data until - * invoked. {@link Document#getField(String)} SHOULD NOT BE USED. {@link Document#getFieldable(String)} is safe to use and should - * return a valid instance of a {@link Fieldable}. - *
<p/>
- * {@link Document#add(Fieldable)} should be called by the Reader. - */ - LAZY_LOAD, - - /** - * Do not load the {@link Field}. {@link Document#getField(String)} and {@link Document#getFieldable(String)} should return null. - * {@link Document#add(Fieldable)} is not called. - *
<p/>
- * {@link Document#add(Fieldable)} should not be called by the Reader. - */ - NO_LOAD, - - /** - * Load this field as in the {@link #LOAD} case, but immediately return from {@link Field} loading for the {@link Document}. Thus, the - * Document may not have its complete set of Fields. {@link Document#getField(String)} and {@link Document#getFieldable(String)} should - * both be valid for this {@link Field} - *
<p/>
- * {@link Document#add(Fieldable)} should be called by the Reader. - */ - LOAD_AND_BREAK, - - /** Expert: Load the size of this {@link Field} rather than its value. - * Size is measured as number of bytes required to store the field == bytes for a binary or any compressed value, and 2*chars for a String value. - * The size is stored as a binary value, represented as an int in a byte[], with the higher order byte first in [0] - */ - SIZE, - - /** Expert: Like {@link #SIZE} but immediately break from the field loading loop, i.e., stop loading further fields, after the size is loaded */ - SIZE_AND_BREAK, - - /** - * Lazily load this {@link Field}, but do not cache the result. This means the {@link Field} is valid, but it may not actually contain its data until - * invoked. {@link Document#getField(String)} SHOULD NOT BE USED. {@link Document#getFieldable(String)} is safe to use and should - * return a valid instance of a {@link Fieldable}. - *
<p/>
- * {@link Document#add(Fieldable)} should be called by the Reader. - */ - LATENT -} Index: lucene/contrib/misc/src/java/org/apache/lucene/document2/FieldSelectorVisitor.java =================================================================== --- lucene/contrib/misc/src/java/org/apache/lucene/document2/FieldSelectorVisitor.java (revision 1153521) +++ lucene/contrib/misc/src/java/org/apache/lucene/document2/FieldSelectorVisitor.java (working copy) @@ -1,329 +0,0 @@ -package org.apache.lucene.document2; - -/** - * Copyright 2004 The Apache Software Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import java.io.IOException; -import java.io.Reader; - -import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.document2.NumericField.DataType; -import org.apache.lucene.index.FieldInfo; -import org.apache.lucene.index.FieldReaderException; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexableField; -import org.apache.lucene.index.StoredFieldVisitor; -import org.apache.lucene.store.IndexInput; -import org.apache.lucene.util.BytesRef; - -/** Create this, passing a legacy {@link FieldSelector} to it, then - * pass this class to {@link IndexReader#document(int, - * StoredFieldVisitor)}, then call {@link #getDocument} to - * retrieve the loaded document. - - *

NOTE: If you use Lazy fields, you should not - * access the returned document after the reader has been - * closed! - */ - -public class FieldSelectorVisitor extends StoredFieldVisitor { - - private final FieldSelector selector; - private final Document doc; - - public FieldSelectorVisitor(FieldSelector selector) { - this.selector = selector; - doc = new Document(); - } - - public Document getDocument() { - return doc; - } - - @Override - public boolean binaryField(FieldInfo fieldInfo, IndexInput in, int numBytes) throws IOException { - final FieldSelectorResult accept = selector.accept(fieldInfo.name); - switch (accept) { - case LOAD: - case LOAD_AND_BREAK: - final byte[] b = new byte[numBytes]; - in.readBytes(b, 0, b.length); - doc.add(new BinaryField(fieldInfo.name, b)); - return accept != FieldSelectorResult.LOAD; - case LAZY_LOAD: - case LATENT: - addFieldLazy(in, fieldInfo, true, accept == FieldSelectorResult.LAZY_LOAD, numBytes); - return false; - case SIZE: - case SIZE_AND_BREAK: - in.seek(in.getFilePointer() + numBytes); - addFieldSize(fieldInfo, numBytes); - return accept != FieldSelectorResult.SIZE; - default: - // skip - in.seek(in.getFilePointer() + numBytes); - return false; - } - } - - @Override - public boolean stringField(FieldInfo fieldInfo, IndexInput in, int numUTF8Bytes) throws IOException { - final FieldSelectorResult accept = selector.accept(fieldInfo.name); - switch (accept) { - case LOAD: - case LOAD_AND_BREAK: - final byte[] b = new byte[numUTF8Bytes]; - in.readBytes(b, 0, b.length); - FieldType ft = new FieldType(TextField.TYPE_STORED); - ft.setStoreTermVectors(fieldInfo.storeTermVector); - ft.setStoreTermVectorOffsets(fieldInfo.storeOffsetWithTermVector); - ft.setStoreTermVectorPositions(fieldInfo.storePositionWithTermVector); - doc.add(new Field(fieldInfo.name, ft, new String(b, "UTF-8"))); - return accept != FieldSelectorResult.LOAD; - case LAZY_LOAD: - case LATENT: - addFieldLazy(in, fieldInfo, false, accept == FieldSelectorResult.LAZY_LOAD, numUTF8Bytes); - return false; - case SIZE: - case SIZE_AND_BREAK: - in.seek(in.getFilePointer() + numUTF8Bytes); - addFieldSize(fieldInfo, 2*numUTF8Bytes); - return accept != FieldSelectorResult.SIZE; - default: - // skip - in.seek(in.getFilePointer() + numUTF8Bytes); - return false; - } - } - - @Override - public boolean intField(FieldInfo fieldInfo, int value) throws IOException { - FieldType ft = new FieldType(NumericField.TYPE_STORED); - ft.setIndexed(fieldInfo.isIndexed); - ft.setOmitNorms(fieldInfo.omitNorms); - ft.setOmitTermFreqAndPositions(fieldInfo.omitTermFreqAndPositions); - return addNumericField(fieldInfo, new NumericField(fieldInfo.name, ft).setIntValue(value)); - } - - @Override - public boolean longField(FieldInfo fieldInfo, long value) throws IOException { - FieldType ft = new FieldType(NumericField.TYPE_STORED); - ft.setIndexed(fieldInfo.isIndexed); - ft.setOmitNorms(fieldInfo.omitNorms); - ft.setOmitTermFreqAndPositions(fieldInfo.omitTermFreqAndPositions); - return addNumericField(fieldInfo, new NumericField(fieldInfo.name, ft).setLongValue(value)); - } - - @Override - public boolean floatField(FieldInfo fieldInfo, float value) throws IOException { - FieldType ft = new FieldType(NumericField.TYPE_STORED); - ft.setIndexed(fieldInfo.isIndexed); - ft.setOmitNorms(fieldInfo.omitNorms); - ft.setOmitTermFreqAndPositions(fieldInfo.omitTermFreqAndPositions); - return addNumericField(fieldInfo, new NumericField(fieldInfo.name, ft).setFloatValue(value)); - } - - @Override - public boolean doubleField(FieldInfo 
fieldInfo, double value) throws IOException { - FieldType ft = new FieldType(NumericField.TYPE_STORED); - ft.setIndexed(fieldInfo.isIndexed); - ft.setOmitNorms(fieldInfo.omitNorms); - ft.setOmitTermFreqAndPositions(fieldInfo.omitTermFreqAndPositions); - return addNumericField(fieldInfo, new NumericField(fieldInfo.name, ft).setDoubleValue(value)); - } - - private boolean addNumericField(FieldInfo fieldInfo, NumericField f) { - doc.add(f); - final FieldSelectorResult accept = selector.accept(fieldInfo.name); - switch (accept) { - case LOAD: - return false; - case LOAD_AND_BREAK: - return true; - case LAZY_LOAD: - case LATENT: - return false; - case SIZE: - return false; - case SIZE_AND_BREAK: - return true; - default: - return false; - } - } - - private void addFieldLazy(IndexInput in, FieldInfo fi, boolean binary, boolean cacheResult, int numBytes) throws IOException { - final IndexableField f; - final long pointer = in.getFilePointer(); - // Need to move the pointer ahead by toRead positions - in.seek(pointer+numBytes); - FieldType ft = new FieldType(); - ft.setStored(true); - ft.setOmitNorms(fi.omitNorms); - ft.setOmitTermFreqAndPositions(fi.omitTermFreqAndPositions); - ft.setLazy(true); - - if (binary) { - f = new LazyField(in, fi.name, ft, numBytes, pointer, binary, cacheResult); - } else { - ft.setStoreTermVectors(fi.storeTermVector); - ft.setStoreTermVectorOffsets(fi.storeOffsetWithTermVector); - ft.setStoreTermVectorPositions(fi.storePositionWithTermVector); - f = new LazyField(in, fi.name, ft, numBytes, pointer, binary, cacheResult); - } - - doc.add(f); - } - - // Add the size of field as a byte[] containing the 4 bytes of the integer byte size (high order byte first; char = 2 bytes) - // Read just the size -- caller must skip the field content to continue reading fields - // Return the size in bytes or chars, depending on field type - private void addFieldSize(FieldInfo fi, int numBytes) throws IOException { - byte[] sizebytes = new byte[4]; - sizebytes[0] = (byte) (numBytes>>>24); - sizebytes[1] = (byte) (numBytes>>>16); - sizebytes[2] = (byte) (numBytes>>> 8); - sizebytes[3] = (byte) numBytes ; - doc.add(new BinaryField(fi.name, sizebytes)); - } - - /** - * A Lazy field implementation that defers loading of fields until asked for, instead of when the Document is - * loaded. - */ - private static class LazyField extends Field { - private int toRead; - private long pointer; - private final boolean cacheResult; - private final IndexInput in; - - public LazyField(IndexInput in, String name, FieldType ft, int toRead, long pointer, boolean isBinary, boolean cacheResult) { - super(name, ft); - this.in = in; - this.toRead = toRead; - this.pointer = pointer; - this.isBinary = isBinary; - this.cacheResult = cacheResult; - if (isBinary) - binaryLength = toRead; - } - - @Override - public Number numericValue() { - return null; - } - - @Override - public DataType numericDataType() { - return null; - } - - private IndexInput localFieldsStream; - - private IndexInput getFieldStream() { - if (localFieldsStream == null) { - localFieldsStream = (IndexInput) in.clone(); - } - return localFieldsStream; - } - - /** The value of the field as a Reader, or null. If null, the String value, - * binary value, or TokenStream value is used. Exactly one of stringValue(), - * readerValue(), getBinaryValue(), and tokenStreamValue() must be set. */ - public Reader readerValue() { - return null; - } - - /** The value of the field as a TokenStream, or null. 
If null, the Reader value, - * String value, or binary value is used. Exactly one of stringValue(), - * readerValue(), getBinaryValue(), and tokenStreamValue() must be set. */ - public TokenStream tokenStreamValue() { - return null; - } - - /** The value of the field as a String, or null. If null, the Reader value, - * binary value, or TokenStream value is used. Exactly one of stringValue(), - * readerValue(), getBinaryValue(), and tokenStreamValue() must be set. */ - synchronized public String stringValue() { - if (isBinary) - return null; - else { - if (fieldsData == null) { - String result = null; - IndexInput localFieldsStream = getFieldStream(); - try { - localFieldsStream.seek(pointer); - byte[] bytes = new byte[toRead]; - localFieldsStream.readBytes(bytes, 0, toRead); - result = new String(bytes, "UTF-8"); - } catch (IOException e) { - throw new FieldReaderException(e); - } - if (cacheResult == true){ - fieldsData = result; - } - return result; - } else { - return (String) fieldsData; - } - } - } - - synchronized private byte[] getBinaryValue(byte[] result) { - if (isBinary) { - if (fieldsData == null) { - // Allocate new buffer if result is null or too small - final byte[] b; - if (result == null || result.length < toRead) - b = new byte[toRead]; - else - b = result; - - IndexInput localFieldsStream = getFieldStream(); - - // Throw this IOException since IndexReader.document does so anyway, so probably not that big of a change for people - // since they are already handling this exception when getting the document - try { - localFieldsStream.seek(pointer); - localFieldsStream.readBytes(b, 0, toRead); - } catch (IOException e) { - throw new FieldReaderException(e); - } - - binaryOffset = 0; - binaryLength = toRead; - if (cacheResult == true){ - fieldsData = b; - } - return b; - } else { - return (byte[]) fieldsData; - } - } else - return null; - } - - @Override - public BytesRef binaryValue(BytesRef reuse) { - final byte[] bytes = getBinaryValue(reuse != null ? reuse.bytes : null); - if (bytes != null) { - return new BytesRef(bytes, 0, bytes.length); - } else { - return null; - } - } - } -} \ No newline at end of file Index: lucene/contrib/misc/src/java/org/apache/lucene/document2/LoadFirstFieldSelector.java =================================================================== --- lucene/contrib/misc/src/java/org/apache/lucene/document2/LoadFirstFieldSelector.java (revision 1153521) +++ lucene/contrib/misc/src/java/org/apache/lucene/document2/LoadFirstFieldSelector.java (working copy) @@ -1,29 +0,0 @@ -package org.apache.lucene.document2; -/** - * Copyright 2004 The Apache Software Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -/** - * Load the First field and break. - *
<p/>
- * See {@link FieldSelectorResult#LOAD_AND_BREAK} - */ -public class LoadFirstFieldSelector implements FieldSelector { - - public FieldSelectorResult accept(String fieldName) { - return FieldSelectorResult.LOAD_AND_BREAK; - } -} \ No newline at end of file Index: lucene/contrib/misc/src/java/org/apache/lucene/document2/MapFieldSelector.java =================================================================== --- lucene/contrib/misc/src/java/org/apache/lucene/document2/MapFieldSelector.java (revision 1153521) +++ lucene/contrib/misc/src/java/org/apache/lucene/document2/MapFieldSelector.java (working copy) @@ -1,67 +0,0 @@ -package org.apache.lucene.document2; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -/** - * A {@link FieldSelector} based on a Map of field names to {@link FieldSelectorResult}s - * - */ -public class MapFieldSelector implements FieldSelector { - - Map fieldSelections; - - /** Create a a MapFieldSelector - * @param fieldSelections maps from field names (String) to {@link FieldSelectorResult}s - */ - public MapFieldSelector(Map fieldSelections) { - this.fieldSelections = fieldSelections; - } - - /** Create a a MapFieldSelector - * @param fields fields to LOAD. List of Strings. All other fields are NO_LOAD. - */ - public MapFieldSelector(List fields) { - fieldSelections = new HashMap(fields.size()*5/3); - for (final String field : fields) - fieldSelections.put(field, FieldSelectorResult.LOAD); - } - - /** Create a a MapFieldSelector - * @param fields fields to LOAD. All other fields are NO_LOAD. - */ - public MapFieldSelector(String... fields) { - this(Arrays.asList(fields)); - } - - - - /** Load field according to its associated value in fieldSelections - * @param field a field name - * @return the fieldSelections value that field maps to or NO_LOAD if none. - */ - public FieldSelectorResult accept(String field) { - FieldSelectorResult selection = fieldSelections.get(field); - return selection!=null ? selection : FieldSelectorResult.NO_LOAD; - } - -} Index: lucene/contrib/misc/src/java/org/apache/lucene/document2/SetBasedFieldSelector.java =================================================================== --- lucene/contrib/misc/src/java/org/apache/lucene/document2/SetBasedFieldSelector.java (revision 1153521) +++ lucene/contrib/misc/src/java/org/apache/lucene/document2/SetBasedFieldSelector.java (working copy) @@ -1,60 +0,0 @@ -package org.apache.lucene.document2; - -/** - * Copyright 2004 The Apache Software Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import java.util.Set; - -/** - * Declare what fields to load normally and what fields to load lazily - * - **/ - -public class SetBasedFieldSelector implements FieldSelector { - - private Set fieldsToLoad; - private Set lazyFieldsToLoad; - - /** - * Pass in the Set of {@link Field} names to load and the Set of {@link Field} names to load lazily. If both are null, the - * Document will not have any {@link Field} on it. - * @param fieldsToLoad A Set of {@link String} field names to load. May be empty, but not null - * @param lazyFieldsToLoad A Set of {@link String} field names to load lazily. May be empty, but not null - */ - public SetBasedFieldSelector(Set fieldsToLoad, Set lazyFieldsToLoad) { - this.fieldsToLoad = fieldsToLoad; - this.lazyFieldsToLoad = lazyFieldsToLoad; - } - - /** - * Indicate whether to load the field with the given name or not. If the {@link Field#name()} is not in either of the - * initializing Sets, then {@link org.apache.lucene.document.FieldSelectorResult#NO_LOAD} is returned. If a Field name - * is in both fieldsToLoad and lazyFieldsToLoad, lazy has precedence. - * - * @param fieldName The {@link Field} name to check - * @return The {@link FieldSelectorResult} - */ - public FieldSelectorResult accept(String fieldName) { - FieldSelectorResult result = FieldSelectorResult.NO_LOAD; - if (fieldsToLoad.contains(fieldName) == true){ - result = FieldSelectorResult.LOAD; - } - if (lazyFieldsToLoad.contains(fieldName) == true){ - result = FieldSelectorResult.LAZY_LOAD; - } - return result; - } -} \ No newline at end of file Index: lucene/contrib/misc/src/test/org/apache/lucene/index/TestContribFieldsReader.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/index/TestContribFieldsReader.java (revision 1153521) +++ lucene/contrib/misc/src/test/org/apache/lucene/index/TestContribFieldsReader.java (working copy) @@ -22,13 +22,13 @@ import java.util.*; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldSelector; -import org.apache.lucene.document2.FieldSelectorResult; -import org.apache.lucene.document2.FieldSelectorVisitor; -import org.apache.lucene.document2.LoadFirstFieldSelector; -import org.apache.lucene.document2.SetBasedFieldSelector; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldSelector; +import org.apache.lucene.document.FieldSelectorResult; +import org.apache.lucene.document.FieldSelectorVisitor; +import org.apache.lucene.document.LoadFirstFieldSelector; +import org.apache.lucene.document.SetBasedFieldSelector; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; @@ -40,7 +40,7 @@ public class TestContribFieldsReader extends LuceneTestCase { private static Directory dir; - private static org.apache.lucene.document2.Document testDoc = new 
org.apache.lucene.document2.Document(); + private static org.apache.lucene.document.Document testDoc = new org.apache.lucene.document.Document(); private static FieldInfos fieldInfos = null; @BeforeClass @@ -243,7 +243,7 @@ IndexReader reader = IndexReader.open(tmpDir); Document doc; - doc = reader.document2(0);//Load all of them + doc = reader.document(0);//Load all of them assertTrue("doc is null and it shouldn't be", doc != null); IndexableField field = doc.getField(DocHelper.LARGE_LAZY_FIELD_KEY); assertTrue("field is null and it shouldn't be", field != null); Index: lucene/contrib/misc/src/test/org/apache/lucene/index/TestContribIndexReader.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/index/TestContribIndexReader.java (revision 1153521) +++ lucene/contrib/misc/src/test/org/apache/lucene/index/TestContribIndexReader.java (working copy) @@ -23,14 +23,14 @@ import java.util.Set; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldSelector; -import org.apache.lucene.document2.FieldSelectorVisitor; -import org.apache.lucene.document2.SetBasedFieldSelector; -import org.apache.lucene.document2.BinaryField; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.BinaryField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldSelector; +import org.apache.lucene.document.FieldSelectorVisitor; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.SetBasedFieldSelector; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; @@ -137,7 +137,7 @@ writer.addDocument(doc); writer.close(); IndexReader reader = IndexReader.open(dir, false); - Document doc2 = reader.document2(reader.maxDoc() - 1); + Document doc2 = reader.document(reader.maxDoc() - 1); IndexableField[] fields = doc2.getFields("bin1"); assertNotNull(fields); assertEquals(1, fields.length); @@ -171,7 +171,7 @@ writer.optimize(); writer.close(); reader = IndexReader.open(dir, false); - doc2 = reader.document2(reader.maxDoc() - 1); + doc2 = reader.document(reader.maxDoc() - 1); fields = doc2.getFields("bin1"); assertNotNull(fields); assertEquals(1, fields.length); Index: lucene/contrib/misc/src/test/org/apache/lucene/index/TestContribParallelReader.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/index/TestContribParallelReader.java (revision 1153521) +++ lucene/contrib/misc/src/test/org/apache/lucene/index/TestContribParallelReader.java (working copy) @@ -22,12 +22,12 @@ import java.util.Random; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; -import org.apache.lucene.document2.FieldSelector; -import org.apache.lucene.document2.FieldSelectorVisitor; -import org.apache.lucene.document2.MapFieldSelector; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.FieldSelector; +import org.apache.lucene.document.FieldSelectorVisitor; +import org.apache.lucene.document.FieldType; +import 
org.apache.lucene.document.MapFieldSelector; +import org.apache.lucene.document.TextField; import org.apache.lucene.search.*; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; Index: lucene/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java (revision 1153521) +++ lucene/contrib/misc/src/test/org/apache/lucene/index/TestFieldNormModifier.java (working copy) @@ -21,10 +21,10 @@ import java.util.Arrays; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.search.Collector; import org.apache.lucene.search.DefaultSimilarity; Index: lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java (revision 1153521) +++ lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java (working copy) @@ -19,7 +19,7 @@ import java.io.File; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; +import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; Index: lucene/contrib/misc/src/test/org/apache/lucene/index/TestLazyBug.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/index/TestLazyBug.java (revision 1153521) +++ lucene/contrib/misc/src/test/org/apache/lucene/index/TestLazyBug.java (working copy) @@ -22,11 +22,11 @@ import java.util.Set; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.FieldSelector; -import org.apache.lucene.document2.FieldSelectorResult; -import org.apache.lucene.document2.FieldSelectorVisitor; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.FieldSelector; +import org.apache.lucene.document.FieldSelectorResult; +import org.apache.lucene.document.FieldSelectorVisitor; +import org.apache.lucene.document.TextField; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; import org.junit.AfterClass; Index: lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java (revision 1153521) +++ lucene/contrib/misc/src/test/org/apache/lucene/index/TestMultiPassIndexSplitter.java (working copy) @@ -17,10 +17,10 @@ */ import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import 
org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.BytesRef; @@ -77,7 +77,7 @@ IndexReader ir; ir = IndexReader.open(dirs[0], true); assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); // rounding error - Document doc = ir.document2(0); + Document doc = ir.document(0); assertEquals("0", doc.get("id")); TermsEnum te = MultiFields.getTerms(ir, "id").iterator(); assertEquals(TermsEnum.SeekStatus.NOT_FOUND, te.seek(new BytesRef("1"))); @@ -85,7 +85,7 @@ ir.close(); ir = IndexReader.open(dirs[1], true); assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); - doc = ir.document2(0); + doc = ir.document(0); assertEquals("1", doc.get("id")); te = MultiFields.getTerms(ir, "id").iterator(); assertEquals(TermsEnum.SeekStatus.NOT_FOUND, te.seek(new BytesRef("0"))); @@ -94,7 +94,7 @@ ir.close(); ir = IndexReader.open(dirs[2], true); assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); - doc = ir.document2(0); + doc = ir.document(0); assertEquals("2", doc.get("id")); te = MultiFields.getTerms(ir, "id").iterator(); @@ -122,19 +122,19 @@ IndexReader ir; ir = IndexReader.open(dirs[0], true); assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); - Document doc = ir.document2(0); + Document doc = ir.document(0); assertEquals("0", doc.get("id")); int start = ir.numDocs(); ir.close(); ir = IndexReader.open(dirs[1], true); assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); - doc = ir.document2(0); + doc = ir.document(0); assertEquals(start + "", doc.get("id")); start += ir.numDocs(); ir.close(); ir = IndexReader.open(dirs[2], true); assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); - doc = ir.document2(0); + doc = ir.document(0); assertEquals(start + "", doc.get("id")); // make sure the deleted doc is not here TermsEnum te = MultiFields.getTerms(ir, "id").iterator(); Index: lucene/contrib/misc/src/test/org/apache/lucene/index/TestTermVectorAccessor.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/index/TestTermVectorAccessor.java (revision 1153521) +++ lucene/contrib/misc/src/test/org/apache/lucene/index/TestTermVectorAccessor.java (working copy) @@ -1,10 +1,10 @@ package org.apache.lucene.index; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; Index: lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java (revision 1153521) +++ lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java (working copy) @@ -21,9 +21,9 @@ import java.util.Random; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import 
org.apache.lucene.document.Document; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.DocsEnum; import org.apache.lucene.index.Fields; import org.apache.lucene.index.IndexReader; @@ -152,7 +152,7 @@ writer.close(); IndexReader reader = IndexReader.open(dir, null, true, 1, new AppendingCodecProvider()); assertEquals(2, reader.numDocs()); - Document doc2 = reader.document2(0); + Document doc2 = reader.document(0); assertEquals(text, doc2.get("f")); Fields fields = MultiFields.getFields(reader); Terms terms = fields.terms("f"); Index: lucene/contrib/misc/src/test/org/apache/lucene/misc/TestHighFreqTerms.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/misc/TestHighFreqTerms.java (revision 1153521) +++ lucene/contrib/misc/src/test/org/apache/lucene/misc/TestHighFreqTerms.java (working copy) @@ -19,10 +19,10 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.store.Directory; Index: lucene/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java (revision 1153521) +++ lucene/contrib/misc/src/test/org/apache/lucene/misc/TestLengthNormModifier.java (working copy) @@ -20,10 +20,10 @@ import java.io.IOException; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.FieldInvertState; import org.apache.lucene.index.FieldNormModifier; import org.apache.lucene.index.IndexReader; Index: lucene/contrib/misc/src/test/org/apache/lucene/search/TestThreadSafe.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/search/TestThreadSafe.java (revision 1153521) +++ lucene/contrib/misc/src/test/org/apache/lucene/search/TestThreadSafe.java (working copy) @@ -24,7 +24,7 @@ import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.index.IndexableField; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.*; +import org.apache.lucene.document.*; import java.util.Random; import java.util.List; Index: lucene/contrib/misc/src/test/org/apache/lucene/store/TestNRTCachingDirectory.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/store/TestNRTCachingDirectory.java (revision 1153521) +++ lucene/contrib/misc/src/test/org/apache/lucene/store/TestNRTCachingDirectory.java (working copy) @@ -23,7 +23,7 @@ import 
org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; +import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; Index: lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java =================================================================== --- lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java (revision 1153521) +++ lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java (working copy) @@ -33,7 +33,7 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.document2.Document; +import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; @@ -801,7 +801,7 @@ o.println(); ScoreDoc[] scoreDocs = hits.scoreDocs; for (int i = 0; i < Math.min(25, len); i++) { - Document d = searcher.doc2(scoreDocs[i].doc); + Document d = searcher.doc(scoreDocs[i].doc); String summary = d.get( "summary"); o.println("score : " + scoreDocs[i].score); o.println("url : " + d.get("url")); @@ -825,7 +825,7 @@ // field does not store term vector info if (vector == null) { - Document d=ir.document2(docNum); + Document d=ir.document(docNum); IndexableField text[]=d.getFields(fieldName); if(text!=null) { Index: lucene/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java =================================================================== --- lucene/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java (revision 1153521) +++ lucene/contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java (working copy) @@ -21,9 +21,9 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.RandomIndexWriter; Index: lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java =================================================================== --- lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java (revision 1153521) +++ lucene/contrib/queries/src/test/org/apache/lucene/search/ChainedFilterTest.java (working copy) @@ -20,9 +20,9 @@ import java.util.Calendar; import java.util.GregorianCalendar; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; @@ -131,13 +131,13 @@ TopDocs hits = searcher.search(query, chain, 1000); numHits = hits.totalHits; assertEquals(MAX / 2, numHits); - assertEquals("bob", 
searcher.doc2(hits.scoreDocs[0].doc).get("owner")); + assertEquals("bob", searcher.doc(hits.scoreDocs[0].doc).get("owner")); chain = getChainedFilter(new Filter[] {bobFilter}, new int[] {ChainedFilter.ANDNOT}); hits = searcher.search(query, chain, 1000); numHits = hits.totalHits; assertEquals(MAX / 2, numHits); - assertEquals("sue", searcher.doc2(hits.scoreDocs[0].doc).get("owner")); + assertEquals("sue", searcher.doc(hits.scoreDocs[0].doc).get("owner")); } public void testOR() throws Exception { @@ -154,7 +154,7 @@ TopDocs hits = searcher.search(query, chain, 1000); assertEquals("AND matches just bob", MAX / 2, hits.totalHits); - assertEquals("bob", searcher.doc2(hits.scoreDocs[0].doc).get("owner")); + assertEquals("bob", searcher.doc(hits.scoreDocs[0].doc).get("owner")); } public void testXOR() throws Exception { @@ -163,7 +163,7 @@ TopDocs hits = searcher.search(query, chain, 1000); assertEquals("XOR matches sue", MAX / 2, hits.totalHits); - assertEquals("sue", searcher.doc2(hits.scoreDocs[0].doc).get("owner")); + assertEquals("sue", searcher.doc(hits.scoreDocs[0].doc).get("owner")); } public void testANDNOT() throws Exception { @@ -174,7 +174,7 @@ TopDocs hits = searcher.search(query, chain, 1000); assertEquals("ANDNOT matches just bob", MAX / 2, hits.totalHits); - assertEquals("bob", searcher.doc2(hits.scoreDocs[0].doc).get("owner")); + assertEquals("bob", searcher.doc(hits.scoreDocs[0].doc).get("owner")); chain = getChainedFilter( new Filter[]{bobFilter, bobFilter}, @@ -183,7 +183,7 @@ hits = searcher.search(query, chain, 1000); assertEquals("ANDNOT bob ANDNOT bob matches all sues", MAX / 2, hits.totalHits); - assertEquals("sue", searcher.doc2(hits.scoreDocs[0].doc).get("owner")); + assertEquals("sue", searcher.doc(hits.scoreDocs[0].doc).get("owner")); } /* Index: lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java =================================================================== --- lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java (revision 1153521) +++ lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java (working copy) @@ -21,9 +21,9 @@ import java.util.HashSet; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.DocsEnum; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.MultiFields; @@ -93,7 +93,7 @@ ScoreDoc[] hits = searcher.search(tq,df, 1000).scoreDocs; for(int i=0;i0); for(int i=0;i0); for(int i=0;i0); for(int i=0;i0)); - Document doc=searcher.doc2(sd[0].doc); + Document doc=searcher.doc(sd[0].doc); assertEquals("Should match most similar not most rare variant", "2",doc.get("id")); } //Test multiple input words are having variants produced @@ -104,7 +104,7 @@ TopDocs topDocs = searcher.search(flt, 1); ScoreDoc[] sd = topDocs.scoreDocs; assertTrue("score docs must match 1 doc", (sd!=null)&&(sd.length>0)); - Document doc=searcher.doc2(sd[0].doc); + Document doc=searcher.doc(sd[0].doc); assertEquals("Should match most similar when using 2 words", "2",doc.get("id")); } //Test bug found when first query word does not match anything @@ -119,7 +119,7 @@ TopDocs topDocs = searcher.search(flt, 1); ScoreDoc[] sd = topDocs.scoreDocs; assertTrue("score docs must match 1 
doc", (sd!=null)&&(sd.length>0)); - Document doc=searcher.doc2(sd[0].doc); + Document doc=searcher.doc(sd[0].doc); assertEquals("Should match most similar when using 2 words", "2",doc.get("id")); } Index: lucene/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java =================================================================== --- lucene/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java (revision 1153521) +++ lucene/contrib/queries/src/test/org/apache/lucene/search/TermsFilterTest.java (working copy) @@ -18,9 +18,10 @@ */ import java.util.HashSet; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.RandomIndexWriter; Index: lucene/contrib/queries/src/test/org/apache/lucene/search/TestSlowCollationMethods.java =================================================================== --- lucene/contrib/queries/src/test/org/apache/lucene/search/TestSlowCollationMethods.java (revision 1153521) +++ lucene/contrib/queries/src/test/org/apache/lucene/search/TestSlowCollationMethods.java (working copy) @@ -4,10 +4,10 @@ import java.text.Collator; import java.util.Locale; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.search.BooleanClause.Occur; @@ -92,7 +92,7 @@ TopFieldDocs docs = searcher.search(new MatchAllDocsQuery(), null, numDocs, new Sort(sf)); String prev = ""; for (ScoreDoc doc : docs.scoreDocs) { - String value = reader.document2(doc.doc).get("field"); + String value = reader.document(doc.doc).get("field"); assertTrue(collator.compare(value, prev) >= 0); prev = value; } @@ -102,7 +102,7 @@ // positive test TopDocs docs = searcher.search(query, numDocs); for (ScoreDoc doc : docs.scoreDocs) { - String value = reader.document2(doc.doc).get("field"); + String value = reader.document(doc.doc).get("field"); assertTrue(collator.compare(value, startPoint) >= 0); assertTrue(collator.compare(value, endPoint) <= 0); } @@ -113,7 +113,7 @@ bq.add(query, Occur.MUST_NOT); docs = searcher.search(bq, numDocs); for (ScoreDoc doc : docs.scoreDocs) { - String value = reader.document2(doc.doc).get("field"); + String value = reader.document(doc.doc).get("field"); assertTrue(collator.compare(value, startPoint) < 0 || collator.compare(value, endPoint) > 0); } } Index: lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestRegexQuery.java =================================================================== --- lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestRegexQuery.java (revision 1153521) +++ lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestRegexQuery.java (working copy) @@ -24,9 +24,9 @@ import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; -import 
org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.TextField; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.index.TermsEnum; Index: lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java =================================================================== --- lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java (revision 1153521) +++ lucene/contrib/queries/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java (working copy) @@ -20,10 +20,10 @@ import java.io.IOException; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.Term; Index: lucene/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java =================================================================== --- lucene/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java (revision 1153521) +++ lucene/contrib/queries/src/test/org/apache/lucene/search/similar/TestMoreLikeThis.java (working copy) @@ -25,10 +25,10 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.search.BooleanClause; Index: lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/config/package.html =================================================================== --- lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/config/package.html (revision 1153521) +++ lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/core/config/package.html (working copy) @@ -46,5 +46,4 @@ reference implementation.

- \ No newline at end of file Index: lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/StandardQueryParser.java =================================================================== --- lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/StandardQueryParser.java (revision 1153521) +++ lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/StandardQueryParser.java (working copy) @@ -22,7 +22,7 @@ import java.util.TooManyListenersException; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.document2.DateTools; +import org.apache.lucene.document.DateTools; import org.apache.lucene.queryParser.core.QueryNodeException; import org.apache.lucene.queryParser.core.QueryParserHelper; import org.apache.lucene.queryParser.core.config.QueryConfigHandler; Index: lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/DateResolutionAttribute.java =================================================================== --- lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/DateResolutionAttribute.java (revision 1153521) +++ lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/DateResolutionAttribute.java (working copy) @@ -17,8 +17,8 @@ * limitations under the License. */ -import org.apache.lucene.document2.DateTools; -import org.apache.lucene.document2.DateTools.Resolution; +import org.apache.lucene.document.DateTools; +import org.apache.lucene.document.DateTools.Resolution; import org.apache.lucene.queryParser.core.config.QueryConfigHandler; import org.apache.lucene.queryParser.standard.nodes.RangeQueryNode; import org.apache.lucene.queryParser.standard.processors.ParametricRangeQueryNodeProcessor; Index: lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/DateResolutionAttributeImpl.java =================================================================== --- lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/DateResolutionAttributeImpl.java (revision 1153521) +++ lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/DateResolutionAttributeImpl.java (working copy) @@ -17,8 +17,8 @@ * limitations under the License. */ -import org.apache.lucene.document2.DateTools; -import org.apache.lucene.document2.DateTools.Resolution; +import org.apache.lucene.document.DateTools; +import org.apache.lucene.document.DateTools.Resolution; import org.apache.lucene.queryParser.core.config.QueryConfigHandler; import org.apache.lucene.queryParser.standard.processors.ParametricRangeQueryNodeProcessor; import org.apache.lucene.util.AttributeImpl; Index: lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionFCListener.java =================================================================== --- lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionFCListener.java (revision 1153521) +++ lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionFCListener.java (working copy) @@ -17,7 +17,7 @@ * limitations under the License. 
*/ -import org.apache.lucene.document2.DateTools; +import org.apache.lucene.document.DateTools; import org.apache.lucene.queryParser.core.config.FieldConfig; import org.apache.lucene.queryParser.core.config.FieldConfigListener; import org.apache.lucene.queryParser.core.config.QueryConfigHandler; Index: lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionMapAttribute.java =================================================================== --- lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionMapAttribute.java (revision 1153521) +++ lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionMapAttribute.java (working copy) @@ -19,7 +19,7 @@ import java.util.Map; -import org.apache.lucene.document2.DateTools; +import org.apache.lucene.document.DateTools; import org.apache.lucene.util.Attribute; /** Index: lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionMapAttributeImpl.java =================================================================== --- lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionMapAttributeImpl.java (revision 1153521) +++ lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/config/FieldDateResolutionMapAttributeImpl.java (working copy) @@ -20,8 +20,8 @@ import java.util.HashMap; import java.util.Map; -import org.apache.lucene.document2.DateTools; -import org.apache.lucene.document2.DateTools.Resolution; +import org.apache.lucene.document.DateTools; +import org.apache.lucene.document.DateTools.Resolution; import org.apache.lucene.util.AttributeImpl; /** Index: lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/ParametricRangeQueryNodeProcessor.java =================================================================== --- lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/ParametricRangeQueryNodeProcessor.java (revision 1153521) +++ lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/ParametricRangeQueryNodeProcessor.java (working copy) @@ -23,8 +23,8 @@ import java.util.List; import java.util.Locale; -import org.apache.lucene.document2.DateTools; -import org.apache.lucene.document2.DateTools.Resolution; +import org.apache.lucene.document.DateTools; +import org.apache.lucene.document.DateTools.Resolution; import org.apache.lucene.queryParser.core.QueryNodeException; import org.apache.lucene.queryParser.core.config.FieldConfig; import org.apache.lucene.queryParser.core.config.QueryConfigHandler; Index: lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java =================================================================== --- lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java (revision 1153521) +++ lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java (working copy) @@ -21,9 +21,9 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import 
org.apache.lucene.index.IndexWriter; import org.apache.lucene.queryParser.QueryParser; import org.apache.lucene.search.IndexSearcher; @@ -98,7 +98,7 @@ TopDocs td = searcher.search(q, 10); ScoreDoc[] sd = td.scoreDocs; for (int i = 0; i < sd.length; i++) { - Document doc = searcher.doc2(sd[i].doc); + Document doc = searcher.doc(sd[i].doc); String id = doc.get("id"); assertTrue(qString + "matched doc#" + id + " not expected", expecteds .contains(id)); Index: lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java =================================================================== --- lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java (revision 1153521) +++ lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/precedence/TestPrecedenceQueryParser.java (working copy) @@ -34,7 +34,7 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.document2.DateTools; +import org.apache.lucene.document.DateTools; import org.apache.lucene.queryParser.TestQueryParser; import org.apache.lucene.queryParser.core.QueryNodeException; import org.apache.lucene.queryParser.core.QueryNodeParseException; Index: lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java =================================================================== --- lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java (revision 1153521) +++ lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java (working copy) @@ -24,8 +24,8 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.queryParser.core.QueryNodeException; import org.apache.lucene.queryParser.standard.config.DefaultOperatorAttribute.Operator; Index: lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java =================================================================== --- lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java (revision 1153521) +++ lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java (working copy) @@ -39,9 +39,9 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -import org.apache.lucene.document2.DateTools; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.DateTools; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; Index: lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/surround/query/SingleFieldTestDb.java =================================================================== --- 
lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/surround/query/SingleFieldTestDb.java (revision 1153521) +++ lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/surround/query/SingleFieldTestDb.java (working copy) @@ -24,9 +24,9 @@ import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.Version; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; Index: lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java =================================================================== --- lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java (revision 1153521) +++ lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java (working copy) @@ -22,11 +22,11 @@ import java.util.Map; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.NumericField; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.NumericField; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; @@ -284,7 +284,7 @@ assertEquals(2, results); double lastDistance = 0; for(int i =0 ; i < results; i++){ - Document d = searcher.doc2(scoreDocs[i].doc); + Document d = searcher.doc(scoreDocs[i].doc); String name = d.get("name"); double rsLat = Double.parseDouble(d.get(latField)); @@ -380,7 +380,7 @@ assertEquals(18, results); double lastDistance = 0; for(int i =0 ; i < results; i++){ - Document d = searcher.doc2(scoreDocs[i].doc); + Document d = searcher.doc(scoreDocs[i].doc); String name = d.get("name"); double rsLat = Double.parseDouble(d.get(latField)); double rsLng = Double.parseDouble(d.get(lngField)); @@ -475,7 +475,7 @@ assertEquals(expected[x], results); double lastDistance = 0; for(int i =0 ; i < results; i++){ - Document d = searcher.doc2(scoreDocs[i].doc); + Document d = searcher.doc(scoreDocs[i].doc); String name = d.get("name"); double rsLat = Double.parseDouble(d.get(latField)); @@ -570,7 +570,7 @@ assertEquals(expected[x], results); for(int i =0 ; i < results; i++){ - Document d = searcher.doc2(scoreDocs[i].doc); + Document d = searcher.doc(scoreDocs[i].doc); String name = d.get("name"); double rsLat = Double.parseDouble(d.get(latField)); Index: lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestDistance.java =================================================================== --- lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestDistance.java (revision 1153521) +++ lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestDistance.java (working copy) @@ -19,11 +19,11 @@ import java.io.IOException; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import 
org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.NumericField; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.NumericField; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.Term; Index: lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java =================================================================== --- lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java (revision 1153521) +++ lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynExpand.java (working copy) @@ -30,7 +30,7 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.document2.Document; +import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; @@ -148,7 +148,7 @@ @Override public void collect(int doc) throws IOException { - Document d = reader.document2(doc); + Document d = reader.document(doc); IndexableField[] values = d.getFields( Syns2Index.F_SYN); for ( int j = 0; j < values.length; j++) { Index: lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynLookup.java =================================================================== --- lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynLookup.java (revision 1153521) +++ lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/SynLookup.java (working copy) @@ -29,7 +29,7 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.document2.Document; +import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; @@ -91,7 +91,7 @@ ScoreDoc[] hits = searcher.search(query, countingCollector.numHits).scoreDocs; for (int i = 0; i < hits.length; i++) { - Document doc = searcher.doc2(hits[i].doc); + Document doc = searcher.doc(hits[i].doc); IndexableField[] values = doc.getFields(Syns2Index.F_SYN); @@ -155,7 +155,7 @@ @Override public void collect(int doc) throws IOException { - Document d = reader.document2(doc); + Document d = reader.document(doc); IndexableField[] values = d.getFields( Syns2Index.F_SYN); for ( int j = 0; j < values.length; j++) { Index: lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/Syns2Index.java =================================================================== --- lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/Syns2Index.java (revision 1153521) +++ lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/Syns2Index.java (working copy) @@ -32,10 +32,10 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.StringField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import 
org.apache.lucene.document.FieldType; +import org.apache.lucene.document.StringField; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.TieredMergePolicy; Index: lucene/contrib/xml-query-parser/src/demo/java/org/apache/lucene/xmlparser/webdemo/FormBasedXmlQueryDemo.java =================================================================== --- lucene/contrib/xml-query-parser/src/demo/java/org/apache/lucene/xmlparser/webdemo/FormBasedXmlQueryDemo.java (revision 1153521) +++ lucene/contrib/xml-query-parser/src/demo/java/org/apache/lucene/xmlparser/webdemo/FormBasedXmlQueryDemo.java (working copy) @@ -34,8 +34,8 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; Index: lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java =================================================================== --- lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java (revision 1153521) +++ lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java (working copy) @@ -9,10 +9,10 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenFilter; import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.NumericField; -import org.apache.lucene.document2.TextField; -import org.apache.lucene.document2.Document; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.NumericField; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.search.IndexSearcher; @@ -233,7 +233,7 @@ ScoreDoc[] scoreDocs = hits.scoreDocs; for(int i=0;iThe boost is multiplied by {@link org.apache.lucene.document.Document#getBoost()} of the document - * containing this field. If a document has multiple fields with the same - * name, all such values are multiplied together. This product is then - * used to compute the norm factor for the field. By - * default, in the {@link - * org.apache.lucene.search.Similarity#computeNorm(FieldInvertState)} method, the boost value is multiplied - * by the length normalization factor and then - * rounded by {@link org.apache.lucene.search.Similarity#encodeNormValue(float)} before it is stored in the - * index. One should attempt to ensure that this product does not overflow - * the range of that encoding. - * - * @see org.apache.lucene.document.Document#setBoost(float) - * @see org.apache.lucene.search.Similarity#computeNorm(FieldInvertState) - * @see org.apache.lucene.search.Similarity#encodeNormValue(float) - */ - public void setBoost(float boost) { - this.boost = boost; - } - - /** Returns the boost factor for hits for this field. - * - *

<p>The default value is 1.0. - * - *
<p>
Note: this value is not stored directly with the document in the index. - * Documents returned from {@link org.apache.lucene.index.IndexReader#document(int)} and - * {@link org.apache.lucene.search.IndexSearcher#doc(int)} may thus not have the same value present as when - * this field was indexed. - * - * @see #setBoost(float) - */ - public float getBoost() { - return boost; - } - - /** Returns the name of the field as an interned string. - * For example "date", "title", "body", ... - */ - public String name() { return name; } - - protected void setStoreTermVector(Field.TermVector termVector) { - this.storeTermVector = termVector.isStored(); - this.storePositionWithTermVector = termVector.withPositions(); - this.storeOffsetWithTermVector = termVector.withOffsets(); - } - - /** True iff the value of the field is to be stored in the index for return - with search hits. It is an error for this to be true if a field is - Reader-valued. */ - public final boolean isStored() { return isStored; } - - /** True iff the value of the field is to be indexed, so that it may be - searched on. */ - public final boolean isIndexed() { return isIndexed; } - - /** True iff the value of the field should be tokenized as text prior to - indexing. Un-tokenized fields are indexed as a single word and may not be - Reader-valued. */ - public final boolean isTokenized() { return isTokenized; } - - /** True iff the term or terms used to index this field are stored as a term - * vector, available from {@link org.apache.lucene.index.IndexReader#getTermFreqVector(int,String)}. - * These methods do not provide access to the original content of the field, - * only to terms used to index it. If the original content must be - * preserved, use the stored attribute instead. - * - * @see org.apache.lucene.index.IndexReader#getTermFreqVector(int, String) - */ - public final boolean isTermVectorStored() { return storeTermVector; } - - /** - * True iff terms are stored as term vector together with their offsets - * (start and end position in source text). - */ - public boolean isStoreOffsetWithTermVector(){ - return storeOffsetWithTermVector; - } - - /** - * True iff terms are stored as term vector together with their token positions. 
- */ - public boolean isStorePositionWithTermVector(){ - return storePositionWithTermVector; - } - - /** True iff the value of the filed is stored as binary */ - public final boolean isBinary() { - return isBinary; - } - - - private byte[] getBinaryValue() { - return getBinaryValue(null); - } - - private byte[] getBinaryValue(byte[] result /* unused */){ - if (isBinary || fieldsData instanceof byte[]) - return (byte[]) fieldsData; - else - return null; - } - - public boolean isNumeric() { - return false; - } - - public BytesRef binaryValue(BytesRef reuse) { - final byte[] bytes = getBinaryValue(); - if (bytes != null) { - if (reuse == null) { - return new BytesRef(bytes, - getBinaryOffset(), - getBinaryLength()); - } else { - reuse.bytes = bytes; - reuse.offset = getBinaryOffset(); - reuse.length = getBinaryLength(); - return reuse; - } - } else { - return null; - } - } - - /** - * Returns length of byte[] segment that is used as value, if Field is not binary - * returned value is undefined - * @return length of byte[] segment that represents this Field value - */ - private int getBinaryLength() { - if (isBinary) { - return binaryLength; - } else if (fieldsData instanceof byte[]) - return ((byte[]) fieldsData).length; - else - return 0; - } - - /** - * Returns offset into byte[] segment that is used as value, if Field is not binary - * returned value is undefined - * @return index of the first character in byte[] segment that represents this Field value - */ - public int getBinaryOffset() { - return binaryOffset; - } - - /** True if norms are omitted for this indexed field */ - public boolean getOmitNorms() { return omitNorms; } - - /** @see #setOmitTermFreqAndPositions */ - public boolean getOmitTermFreqAndPositions() { return omitTermFreqAndPositions; } - - /** Expert: - * - * If set, omit normalization factors associated with this indexed field. - * This effectively disables indexing boosts and length normalization for this field. - */ - public void setOmitNorms(boolean omitNorms) { this.omitNorms=omitNorms; } - - /** Expert: - * - * If set, omit term freq, positions and payloads from - * postings for this field. - * - *
<p>
NOTE: While this option reduces storage space - * required in the index, it also means any query - * requiring positional information, such as {@link - * PhraseQuery} or {@link SpanQuery} subclasses will - * silently fail to find results. - */ - public void setOmitTermFreqAndPositions(boolean omitTermFreqAndPositions) { this.omitTermFreqAndPositions=omitTermFreqAndPositions; } - - public boolean isLazy() { - return lazy; - } - - /** Prints a Field for human consumption. */ - @Override - public final String toString() { - StringBuilder result = new StringBuilder(); - if (isStored) { - result.append("stored"); - } - if (isIndexed) { - if (result.length() > 0) - result.append(","); - result.append("indexed"); - } - if (isTokenized) { - if (result.length() > 0) - result.append(","); - result.append("tokenized"); - } - if (storeTermVector) { - if (result.length() > 0) - result.append(","); - result.append("termVector"); - } - if (storeOffsetWithTermVector) { - if (result.length() > 0) - result.append(","); - result.append("termVectorOffsets"); - } - if (storePositionWithTermVector) { - if (result.length() > 0) - result.append(","); - result.append("termVectorPosition"); - } - if (isBinary) { - if (result.length() > 0) - result.append(","); - result.append("binary"); - } - if (omitNorms) { - result.append(",omitNorms"); - } - if (omitTermFreqAndPositions) { - result.append(",omitTermFreqAndPositions"); - } - if (lazy){ - result.append(",lazy"); - } - result.append('<'); - result.append(name); - result.append(':'); - - if (fieldsData != null && lazy == false) { - result.append(fieldsData); - } - - result.append('>'); - return result.toString(); - } -} Index: lucene/src/java/org/apache/lucene/document/BinaryField.java =================================================================== --- lucene/src/java/org/apache/lucene/document/BinaryField.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/document/BinaryField.java (working copy) @@ -1,4 +1,4 @@ -package org.apache.lucene.document2; +package org.apache.lucene.document; /** * Licensed to the Apache Software Foundation (ASF) under one or more Index: lucene/src/java/org/apache/lucene/document/CompressionTools.java =================================================================== --- lucene/src/java/org/apache/lucene/document/CompressionTools.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/document/CompressionTools.java (working copy) @@ -1,127 +0,0 @@ -package org.apache.lucene.document; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -import java.util.zip.Deflater; -import java.util.zip.Inflater; -import java.util.zip.DataFormatException; -import java.io.ByteArrayOutputStream; - -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.CharsRef; -import org.apache.lucene.util.UnicodeUtil; - -/** Simple utility class providing static methods to - * compress and decompress binary data for stored fields. - * This class uses java.util.zip.Deflater and Inflater - * classes to compress and decompress. - */ - -public class CompressionTools { - - // Export only static methods - private CompressionTools() {} - - /** Compresses the specified byte range using the - * specified compressionLevel (constants are defined in - * java.util.zip.Deflater). */ - public static byte[] compress(byte[] value, int offset, int length, int compressionLevel) { - - /* Create an expandable byte array to hold the compressed data. - * You cannot use an array that's the same size as the orginal because - * there is no guarantee that the compressed data will be smaller than - * the uncompressed data. */ - ByteArrayOutputStream bos = new ByteArrayOutputStream(length); - - Deflater compressor = new Deflater(); - - try { - compressor.setLevel(compressionLevel); - compressor.setInput(value, offset, length); - compressor.finish(); - - // Compress the data - final byte[] buf = new byte[1024]; - while (!compressor.finished()) { - int count = compressor.deflate(buf); - bos.write(buf, 0, count); - } - } finally { - compressor.end(); - } - - return bos.toByteArray(); - } - - /** Compresses the specified byte range, with default BEST_COMPRESSION level */ - public static byte[] compress(byte[] value, int offset, int length) { - return compress(value, offset, length, Deflater.BEST_COMPRESSION); - } - - /** Compresses all bytes in the array, with default BEST_COMPRESSION level */ - public static byte[] compress(byte[] value) { - return compress(value, 0, value.length, Deflater.BEST_COMPRESSION); - } - - /** Compresses the String value, with default BEST_COMPRESSION level */ - public static byte[] compressString(String value) { - return compressString(value, Deflater.BEST_COMPRESSION); - } - - /** Compresses the String value using the specified - * compressionLevel (constants are defined in - * java.util.zip.Deflater). 
*/ - public static byte[] compressString(String value, int compressionLevel) { - BytesRef result = new BytesRef(); - UnicodeUtil.UTF16toUTF8(value, 0, value.length(), result); - return compress(result.bytes, 0, result.length, compressionLevel); - } - - /** Decompress the byte array previously returned by - * compress */ - public static byte[] decompress(byte[] value) throws DataFormatException { - // Create an expandable byte array to hold the decompressed data - ByteArrayOutputStream bos = new ByteArrayOutputStream(value.length); - - Inflater decompressor = new Inflater(); - - try { - decompressor.setInput(value); - - // Decompress the data - final byte[] buf = new byte[1024]; - while (!decompressor.finished()) { - int count = decompressor.inflate(buf); - bos.write(buf, 0, count); - } - } finally { - decompressor.end(); - } - - return bos.toByteArray(); - } - - /** Decompress the byte array previously returned by - * compressString back into a String */ - public static String decompressString(byte[] value) throws DataFormatException { - final byte[] bytes = decompress(value); - CharsRef result = new CharsRef(bytes.length); - UnicodeUtil.UTF8toUTF16(bytes, 0, bytes.length, result); - return new String(result.chars, 0, result.length); - } -} Index: lucene/src/java/org/apache/lucene/document/CompressionTools.java =================================================================== --- lucene/src/java/org/apache/lucene/document/CompressionTools.java (revision 0) +++ lucene/src/java/org/apache/lucene/document/CompressionTools.java (revision 1153521) @@ -0,0 +1,127 @@ +package org.apache.lucene.document; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.util.zip.Deflater; +import java.util.zip.Inflater; +import java.util.zip.DataFormatException; +import java.io.ByteArrayOutputStream; + +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.CharsRef; +import org.apache.lucene.util.UnicodeUtil; + +/** Simple utility class providing static methods to + * compress and decompress binary data for stored fields. + * This class uses java.util.zip.Deflater and Inflater + * classes to compress and decompress. + */ + +public class CompressionTools { + + // Export only static methods + private CompressionTools() {} + + /** Compresses the specified byte range using the + * specified compressionLevel (constants are defined in + * java.util.zip.Deflater). */ + public static byte[] compress(byte[] value, int offset, int length, int compressionLevel) { + + /* Create an expandable byte array to hold the compressed data. + * You cannot use an array that's the same size as the orginal because + * there is no guarantee that the compressed data will be smaller than + * the uncompressed data. 
*/ + ByteArrayOutputStream bos = new ByteArrayOutputStream(length); + + Deflater compressor = new Deflater(); + + try { + compressor.setLevel(compressionLevel); + compressor.setInput(value, offset, length); + compressor.finish(); + + // Compress the data + final byte[] buf = new byte[1024]; + while (!compressor.finished()) { + int count = compressor.deflate(buf); + bos.write(buf, 0, count); + } + } finally { + compressor.end(); + } + + return bos.toByteArray(); + } + + /** Compresses the specified byte range, with default BEST_COMPRESSION level */ + public static byte[] compress(byte[] value, int offset, int length) { + return compress(value, offset, length, Deflater.BEST_COMPRESSION); + } + + /** Compresses all bytes in the array, with default BEST_COMPRESSION level */ + public static byte[] compress(byte[] value) { + return compress(value, 0, value.length, Deflater.BEST_COMPRESSION); + } + + /** Compresses the String value, with default BEST_COMPRESSION level */ + public static byte[] compressString(String value) { + return compressString(value, Deflater.BEST_COMPRESSION); + } + + /** Compresses the String value using the specified + * compressionLevel (constants are defined in + * java.util.zip.Deflater). */ + public static byte[] compressString(String value, int compressionLevel) { + BytesRef result = new BytesRef(); + UnicodeUtil.UTF16toUTF8(value, 0, value.length(), result); + return compress(result.bytes, 0, result.length, compressionLevel); + } + + /** Decompress the byte array previously returned by + * compress */ + public static byte[] decompress(byte[] value) throws DataFormatException { + // Create an expandable byte array to hold the decompressed data + ByteArrayOutputStream bos = new ByteArrayOutputStream(value.length); + + Inflater decompressor = new Inflater(); + + try { + decompressor.setInput(value); + + // Decompress the data + final byte[] buf = new byte[1024]; + while (!decompressor.finished()) { + int count = decompressor.inflate(buf); + bos.write(buf, 0, count); + } + } finally { + decompressor.end(); + } + + return bos.toByteArray(); + } + + /** Decompress the byte array previously returned by + * compressString back into a String */ + public static String decompressString(byte[] value) throws DataFormatException { + final byte[] bytes = decompress(value); + CharsRef result = new CharsRef(bytes.length); + UnicodeUtil.UTF8toUTF16(bytes, 0, bytes.length, result); + return new String(result.chars, 0, result.length); + } +} Index: lucene/src/java/org/apache/lucene/document/DateTools.java =================================================================== --- lucene/src/java/org/apache/lucene/document/DateTools.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/document/DateTools.java (working copy) @@ -1,4 +1,4 @@ -package org.apache.lucene.document2; +package org.apache.lucene.document; /** * Licensed to the Apache Software Foundation (ASF) under one or more Index: lucene/src/java/org/apache/lucene/document/Document.java =================================================================== --- lucene/src/java/org/apache/lucene/document/Document.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/document/Document.java (working copy) @@ -1,4 +1,4 @@ -package org.apache.lucene.document2; +package org.apache.lucene.document; /** * Licensed to the Apache Software Foundation (ASF) under one or more Index: lucene/src/java/org/apache/lucene/document/Field.java =================================================================== --- 
lucene/src/java/org/apache/lucene/document/Field.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/document/Field.java (working copy) @@ -1,4 +1,4 @@ -package org.apache.lucene.document2; +package org.apache.lucene.document; /** * Licensed to the Apache Software Foundation (ASF) under one or more @@ -20,7 +20,7 @@ import java.io.Reader; import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.document2.NumericField; +import org.apache.lucene.document.NumericField; import org.apache.lucene.index.IndexableField; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.StringHelper; Index: lucene/src/java/org/apache/lucene/document/FieldType.java =================================================================== --- lucene/src/java/org/apache/lucene/document/FieldType.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/document/FieldType.java (working copy) @@ -1,4 +1,4 @@ -package org.apache.lucene.document2; +package org.apache.lucene.document; /* * Licensed to the Apache Software Foundation (ASF) under one or more Index: lucene/src/java/org/apache/lucene/document/Fieldable.java =================================================================== --- lucene/src/java/org/apache/lucene/document/Fieldable.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/document/Fieldable.java (working copy) @@ -1,213 +0,0 @@ -package org.apache.lucene.document; - -/** - * Copyright 2004 The Apache Software Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import java.io.Reader; - -import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.document2.NumericField.DataType; -import org.apache.lucene.index.FieldInvertState; // for javadocs -import org.apache.lucene.search.PhraseQuery; // for javadocs -import org.apache.lucene.search.spans.SpanQuery; // for javadocs -import org.apache.lucene.util.BytesRef; // for javadocs - -/** - * Synonymous with {@link Field}. - * - *

WARNING: This interface may change within minor versions, despite Lucene's backward compatibility requirements. - * This means new methods may be added from version to version. This change only affects the Fieldable API; other backwards - * compatibility promises remain intact. For example, Lucene can still - * read and write indices created within the same major version. - *

- * - **/ -public interface Fieldable { - - /** Sets the boost factor for hits on this field. This value will be - * multiplied into the score of all hits on this field of this - * document. - * - *

The boost is multiplied by {@link org.apache.lucene.document.Document#getBoost()} of the document - * containing this field. If a document has multiple fields with the same - * name, all such values are multiplied together. This product is then - * used to compute the norm factor for the field. By - * default, in the {@link - * org.apache.lucene.search.Similarity#computeNorm(FieldInvertState)} method, the boost value is multiplied - * by the length normalization factor - * and then rounded by {@link org.apache.lucene.search.Similarity#encodeNormValue(float)} before it is stored in the - * index. One should attempt to ensure that this product does not overflow - * the range of that encoding. - * - * @see org.apache.lucene.document.Document#setBoost(float) - * @see org.apache.lucene.search.Similarity#computeNorm(FieldInvertState) - * @see org.apache.lucene.search.Similarity#encodeNormValue(float) - */ - void setBoost(float boost); - - /** Returns the boost factor for hits for this field. - * - *

The default value is 1.0. - * - *

Note: this value is not stored directly with the document in the index. - * Documents returned from {@link org.apache.lucene.index.IndexReader#document(int)} and - * {@link org.apache.lucene.search.IndexSearcher#doc(int)} may thus not have the same value present as when - * this field was indexed. - * - * @see #setBoost(float) - */ - float getBoost(); - - /** Returns the name of the field as an interned string. - * For example "date", "title", "body", ... - */ - String name(); - - /** The value of the field as a String, or null. - *

- * For indexing, if isStored()==true, the stringValue() will be used as the stored field value - * unless isBinary()==true, in which case binaryValue() will be used. - * - * If isIndexed()==true and isTokenized()==false, this String value will be indexed as a single token. - * If isIndexed()==true and isTokenized()==true, then tokenStreamValue() will be used to generate indexed tokens if not null, - * else readerValue() will be used to generate indexed tokens if not null, else stringValue() will be used to generate tokens. - */ - public String stringValue(); - - /** The value of the field as a Reader, which can be used at index time to generate indexed tokens. - * @see #stringValue() - */ - public Reader readerValue(); - - /** The TokenStream for this field to be used when indexing, or null. - * @see #stringValue() - */ - public TokenStream tokenStreamValue(); - - /** True if the value of the field is to be stored in the index for return - with search hits. */ - boolean isStored(); - - /** True if the value of the field is to be indexed, so that it may be - searched on. */ - boolean isIndexed(); - - /** True if the value of the field should be tokenized as text prior to - indexing. Un-tokenized fields are indexed as a single word and may not be - Reader-valued. */ - boolean isTokenized(); - - /** True if the term or terms used to index this field are stored as a term - * vector, available from {@link org.apache.lucene.index.IndexReader#getTermFreqVector(int,String)}. - * These methods do not provide access to the original content of the field, - * only to terms used to index it. If the original content must be - * preserved, use the stored attribute instead. - * - * @see org.apache.lucene.index.IndexReader#getTermFreqVector(int, String) - */ - boolean isTermVectorStored(); - - /** - * True if terms are stored as term vector together with their offsets - * (start and end positon in source text). - */ - boolean isStoreOffsetWithTermVector(); - - /** - * True if terms are stored as term vector together with their token positions. - */ - boolean isStorePositionWithTermVector(); - - /** True if the value of the field is stored as binary */ - boolean isBinary(); - - /** True if norms are omitted for this indexed field */ - boolean getOmitNorms(); - - /** Expert: - * - * If set, omit normalization factors associated with this indexed field. - * This effectively disables indexing boosts and length normalization for this field. - */ - void setOmitNorms(boolean omitNorms); - - /** - * Indicates whether a Field is Lazy or not. The semantics of Lazy loading are such that if a Field is lazily loaded, retrieving - * it's values via {@link #stringValue()} or {@link #binaryValue(BytesRef)} is only valid as long as the {@link org.apache.lucene.index.IndexReader} that - * retrieved the {@link Document} is still open. 
- * - * @return true if this field can be loaded lazily - */ - boolean isLazy(); - - /** - * Returns offset into byte[] segment that is used as value, if Field is not binary - * returned value is undefined - * @return index of the first character in byte[] segment that represents this Field value - */ - //abstract int getBinaryOffset(); - - /** - * Returns length of byte[] segment that is used as value, if Field is not binary - * returned value is undefined - * @return length of byte[] segment that represents this Field value - */ - //abstract int getBinaryLength(); - - //abstract byte[] getBinaryValue(); - - // nocommit api break - abstract BytesRef binaryValue(BytesRef reuse); - - abstract DataType getDataType(); - - abstract Number getNumericValue(); - - /** - * Return the raw byte[] for the binary field. Note that - * you must also call {@link #binaryValue} - * to know which range of bytes in this - * returned array belong to the field.

- * About reuse: if you pass in the result byte[] and it is - * used, likely the underlying implementation will hold - * onto this byte[] and return it in future calls to - * {@link #binaryValue(BytesRef)}. - * So if you subsequently re-use the same byte[] elsewhere - * it will alter this Fieldable's value. - * @param result User defined buffer that will be used if - * possible. If this is null or not large enough, a new - * buffer is allocated - * @return reference to the Field value as byte[]. - */ - // nocommit -- remove this too; add resuse param to binaryValue - //abstract byte[] getBinaryValue(byte[] result); - - /** @see #setOmitTermFreqAndPositions */ - boolean getOmitTermFreqAndPositions(); - - /** Expert: - * - * If set, omit term freq, positions and payloads from - * postings for this field. - * - *

NOTE: While this option reduces storage space - * required in the index, it also means any query - * requiring positional information, such as {@link - * PhraseQuery} or {@link SpanQuery} subclasses will - * fail with an exception. - */ - void setOmitTermFreqAndPositions(boolean omitTermFreqAndPositions); -} Index: lucene/src/java/org/apache/lucene/document/NumericField.java =================================================================== --- lucene/src/java/org/apache/lucene/document/NumericField.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/document/NumericField.java (working copy) @@ -1,4 +1,4 @@ -package org.apache.lucene.document2; +package org.apache.lucene.document; /** * Licensed to the Apache Software Foundation (ASF) under one or more @@ -21,7 +21,7 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.NumericTokenStream; -import org.apache.lucene.document2.NumericField.DataType; +import org.apache.lucene.document.NumericField.DataType; import org.apache.lucene.util.NumericUtils; import org.apache.lucene.search.NumericRangeQuery; // javadocs import org.apache.lucene.search.NumericRangeFilter; // javadocs Index: lucene/src/java/org/apache/lucene/document/StringField.java =================================================================== --- lucene/src/java/org/apache/lucene/document/StringField.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/document/StringField.java (working copy) @@ -1,4 +1,4 @@ -package org.apache.lucene.document2; +package org.apache.lucene.document; /** * Licensed to the Apache Software Foundation (ASF) under one or more Index: lucene/src/java/org/apache/lucene/document/TextField.java =================================================================== --- lucene/src/java/org/apache/lucene/document/TextField.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/document/TextField.java (working copy) @@ -1,4 +1,4 @@ -package org.apache.lucene.document2; +package org.apache.lucene.document; /** * Licensed to the Apache Software Foundation (ASF) under one or more Index: lucene/src/java/org/apache/lucene/document/package.html =================================================================== --- lucene/src/java/org/apache/lucene/document/package.html (revision 1153521) +++ lucene/src/java/org/apache/lucene/document/package.html (working copy) @@ -1,56 +0,0 @@ - - - - - - - -

The logical representation of a {@link org.apache.lucene.document.Document} for indexing and searching.

-

The document package provides the user level logical representation of content to be indexed and searched. The -package also provides utilities for working with {@link org.apache.lucene.document.Document}s and {@link org.apache.lucene.document.Fieldable}s.

-

Document and Fieldable

-

A {@link org.apache.lucene.document.Document} is a collection of {@link org.apache.lucene.document.Fieldable}s. A - {@link org.apache.lucene.document.Fieldable} is a logical representation of a user's content that needs to be indexed or stored. - {@link org.apache.lucene.document.Fieldable}s have a number of properties that tell Lucene how to treat the content (like indexed, tokenized, - stored, etc.) See the {@link org.apache.lucene.document.Field} implementation of {@link org.apache.lucene.document.Fieldable} - for specifics on these properties. -

-

Note: it is common to refer to {@link org.apache.lucene.document.Document}s as having {@link org.apache.lucene.document.Field}s, even though technically they have -{@link org.apache.lucene.document.Fieldable}s.
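For illustration, a minimal sketch of building such a Document with the field API introduced elsewhere in this patch (FieldType, Field and NumericField from org.apache.lucene.document); the class name DocumentSketch and the field names are hypothetical:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.FieldType;
    import org.apache.lucene.document.NumericField;

    public class DocumentSketch {
      public static void main(String[] args) {
        // A field type for full-text content: indexed, tokenized and stored.
        FieldType bodyType = new FieldType();
        bodyType.setIndexed(true);
        bodyType.setTokenized(true);
        bodyType.setStored(true);
        bodyType.freeze();                      // no further changes allowed

        Document doc = new Document();
        doc.add(new Field("title", bodyType, "Lucene in Action"));  // free text
        doc.add(new NumericField("year").setIntValue(2011));        // numeric value
        System.out.println(doc);                                    // Document<...>
      }
    }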

-

Working with Documents

-

First and foremost, a {@link org.apache.lucene.document.Document} is something created by the user application. It is your job - to create Documents based on the content of the files you are working with in your application (Word, txt, PDF, Excel or any other format). - How this is done is completely up to you. That being said, there are many tools available in other projects that can make - the process of taking a file and converting it into a Lucene {@link org.apache.lucene.document.Document} easier. To see an example of this, - take a look at the Lucene demo and the associated source code - for extracting content from HTML.
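A rough sketch of that conversion step, using only constructors that appear later in this patch (Field with a String or Reader value, plus a hand-built FieldType); the helper class and field names are hypothetical:

    import java.io.File;
    import java.io.FileReader;
    import java.io.IOException;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.FieldType;

    public class FileToDocument {
      public static Document toDocument(File file) throws IOException {
        FieldType pathType = new FieldType();
        pathType.setStored(true);          // keep the path for display in results
        pathType.setIndexed(true);         // searchable as a single token
        pathType.freeze();

        FieldType contentsType = new FieldType();
        contentsType.setIndexed(true);     // full-text searchable
        contentsType.setTokenized(true);   // analyzed, not stored
        contentsType.freeze();

        Document doc = new Document();
        doc.add(new Field("path", pathType, file.getPath()));
        doc.add(new Field("contents", contentsType, new FileReader(file)));
        return doc;
      }
    }

The StringField and TextField helpers imported by the demo code cover similar common combinations without hand-building a FieldType.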

-

The {@link org.apache.lucene.document.DateTools} is a utility class to make dates and times searchable -(remember, Lucene only searches text). {@link org.apache.lucene.document.NumericField} is a special helper class -to simplify indexing of numeric values (and also dates) for fast range queries with {@link org.apache.lucene.search.NumericRangeQuery} -(using a special sortable string representation of numeric values).
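A minimal sketch of the numeric-date approach mentioned above, assuming a field named "modified"; quantizing to seconds is optional and just keeps the indexed values coarser:

    import java.util.Date;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.NumericField;

    class DateFieldSketch {
      static Document withTimestamp(Date lastModified) {
        Document doc = new Document();
        // Index the date as a long so NumericRangeQuery can filter on it;
        // dividing by 1000 drops millisecond precision before indexing.
        doc.add(new NumericField("modified").setLongValue(lastModified.getTime() / 1000L));
        return doc;
      }
    }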

-

The {@link org.apache.lucene.index.StoredFieldVisitor} class provides a mechanism to customize how the stored fields values are processed. -If no StoredFieldVisitor is used, all Fields are loaded into a returned Document. As an example of the StoredFieldVisitor usage, consider - the common use case of -displaying search results on a web page and then having users click through to see the full document. In this scenario, it is often - the case that there are many small fields and one or two large fields (containing the contents of the original file). Before StoredFieldVisitor, -the full Document had to be loaded, including the large fields, in order to display the results. Now, using the StoredFieldVisitor, one -can choose which fields should be loaded and how their values should be recorded. If you simply want to load only certain fields, consider using -{@link org.apache.lucene.index.DocumentStoredFieldVisitor}. - - Index: lucene/src/java/org/apache/lucene/document2/BinaryField.java =================================================================== --- lucene/src/java/org/apache/lucene/document2/BinaryField.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/document2/BinaryField.java (working copy) @@ -1,46 +0,0 @@ -package org.apache.lucene.document2; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -public final class BinaryField extends Field { - - public static final FieldType TYPE_STORED = new FieldType(); - static { - TYPE_STORED.setStored(true); - TYPE_STORED.freeze(); - } - - public BinaryField(String name, byte[] value) { - super(name, BinaryField.TYPE_STORED, value); - this.isBinary = true; - } - - public BinaryField(String name, byte[] value, int offset, int length) { - super(name, BinaryField.TYPE_STORED, value, offset, length); - this.isBinary = true; - } - - public BinaryField(String name, FieldType custom, byte[] value) { - super(name, custom, value); - this.isBinary = true; - } - - public boolean isNumeric() { - return false; - } -} Index: lucene/src/java/org/apache/lucene/document2/DateTools.java =================================================================== --- lucene/src/java/org/apache/lucene/document2/DateTools.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/document2/DateTools.java (working copy) @@ -1,210 +0,0 @@ -package org.apache.lucene.document2; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import org.apache.lucene.search.NumericRangeQuery; // for javadocs -import org.apache.lucene.util.NumericUtils; // for javadocs - -import java.text.ParseException; -import java.text.SimpleDateFormat; -import java.util.Calendar; -import java.util.Date; -import java.util.Locale; -import java.util.TimeZone; - -/** - * Provides support for converting dates to strings and vice-versa. - * The strings are structured so that lexicographic sorting orders - * them by date, which makes them suitable for use as field values - * and search terms. - * - *

This class also helps you to limit the resolution of your dates. Do not - * save dates with a finer resolution than you really need, as then - * RangeQuery and PrefixQuery will require more memory and become slower. - * - *
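A small round-trip sketch of resolution handling, using the methods defined in this class (shown against the surviving org.apache.lucene.document package name); the printed values depend on the current date:

    import java.text.ParseException;
    import java.util.Date;
    import org.apache.lucene.document.DateTools;

    public class DateResolutionSketch {
      public static void main(String[] args) throws ParseException {
        Date now = new Date();
        // Day resolution yields a string of the form yyyyMMdd, suitable as a field value or term.
        String indexed = DateTools.dateToString(now, DateTools.Resolution.DAY);
        // Converting back gives the same instant rounded down to midnight GMT.
        Date roundedBack = DateTools.stringToDate(indexed);
        System.out.println(indexed + " -> " + roundedBack);
      }
    }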

- * Another approach is {@link NumericUtils}, which provides - * a sortable binary representation (prefix encoded) of numeric values, which - * date/time are. - * For indexing a {@link Date} or {@link Calendar}, just get the unix timestamp as - * long using {@link Date#getTime} or {@link Calendar#getTimeInMillis} and - * index this as a numeric value with {@link NumericField} - * and use {@link NumericRangeQuery} to query it. - */ -public class DateTools { - - final static TimeZone GMT = TimeZone.getTimeZone("GMT"); - - private static final ThreadLocal TL_CAL = new ThreadLocal() { - @Override - protected Calendar initialValue() { - return Calendar.getInstance(GMT, Locale.US); - } - }; - - //indexed by format length - private static final ThreadLocal TL_FORMATS = new ThreadLocal() { - @Override - protected SimpleDateFormat[] initialValue() { - SimpleDateFormat[] arr = new SimpleDateFormat[Resolution.MILLISECOND.formatLen+1]; - for (Resolution resolution : Resolution.values()) { - arr[resolution.formatLen] = (SimpleDateFormat)resolution.format.clone(); - } - return arr; - } - }; - - // cannot create, the class has static methods only - private DateTools() {} - - /** - * Converts a Date to a string suitable for indexing. - * - * @param date the date to be converted - * @param resolution the desired resolution, see - * {@link #round(Date, DateTools.Resolution)} - * @return a string in format yyyyMMddHHmmssSSS or shorter, - * depending on resolution; using GMT as timezone - */ - public static String dateToString(Date date, Resolution resolution) { - return timeToString(date.getTime(), resolution); - } - - /** - * Converts a millisecond time to a string suitable for indexing. - * - * @param time the date expressed as milliseconds since January 1, 1970, 00:00:00 GMT - * @param resolution the desired resolution, see - * {@link #round(long, DateTools.Resolution)} - * @return a string in format yyyyMMddHHmmssSSS or shorter, - * depending on resolution; using GMT as timezone - */ - public static String timeToString(long time, Resolution resolution) { - final Date date = new Date(round(time, resolution)); - return TL_FORMATS.get()[resolution.formatLen].format(date); - } - - /** - * Converts a string produced by timeToString or - * dateToString back to a time, represented as the - * number of milliseconds since January 1, 1970, 00:00:00 GMT. - * - * @param dateString the date string to be converted - * @return the number of milliseconds since January 1, 1970, 00:00:00 GMT - * @throws ParseException if dateString is not in the - * expected format - */ - public static long stringToTime(String dateString) throws ParseException { - return stringToDate(dateString).getTime(); - } - - /** - * Converts a string produced by timeToString or - * dateToString back to a time, represented as a - * Date object. - * - * @param dateString the date string to be converted - * @return the parsed time as a Date object - * @throws ParseException if dateString is not in the - * expected format - */ - public static Date stringToDate(String dateString) throws ParseException { - try { - return TL_FORMATS.get()[dateString.length()].parse(dateString); - } catch (Exception e) { - throw new ParseException("Input is not a valid date string: " + dateString, 0); - } - } - - /** - * Limit a date's resolution. For example, the date 2004-09-21 13:50:11 - * will be changed to 2004-09-01 00:00:00 when using - * Resolution.MONTH. 
- * - * @param resolution The desired resolution of the date to be returned - * @return the date with all values more precise than resolution - * set to 0 or 1 - */ - public static Date round(Date date, Resolution resolution) { - return new Date(round(date.getTime(), resolution)); - } - - /** - * Limit a date's resolution. For example, the date 1095767411000 - * (which represents 2004-09-21 13:50:11) will be changed to - * 1093989600000 (2004-09-01 00:00:00) when using - * Resolution.MONTH. - * - * @param resolution The desired resolution of the date to be returned - * @return the date with all values more precise than resolution - * set to 0 or 1, expressed as milliseconds since January 1, 1970, 00:00:00 GMT - */ - @SuppressWarnings("fallthrough") - public static long round(long time, Resolution resolution) { - final Calendar calInstance = TL_CAL.get(); - calInstance.setTimeInMillis(time); - - switch (resolution) { - //NOTE: switch statement fall-through is deliberate - case YEAR: - calInstance.set(Calendar.MONTH, 0); - case MONTH: - calInstance.set(Calendar.DAY_OF_MONTH, 1); - case DAY: - calInstance.set(Calendar.HOUR_OF_DAY, 0); - case HOUR: - calInstance.set(Calendar.MINUTE, 0); - case MINUTE: - calInstance.set(Calendar.SECOND, 0); - case SECOND: - calInstance.set(Calendar.MILLISECOND, 0); - case MILLISECOND: - // don't cut off anything - break; - default: - throw new IllegalArgumentException("unknown resolution " + resolution); - } - return calInstance.getTimeInMillis(); - } - - /** Specifies the time granularity. */ - public static enum Resolution { - - YEAR(4), MONTH(6), DAY(8), HOUR(10), MINUTE(12), SECOND(14), MILLISECOND(17); - - final int formatLen; - final SimpleDateFormat format;//should be cloned before use, since it's not threadsafe - - Resolution(int formatLen) { - this.formatLen = formatLen; - // formatLen 10's place: 11111111 - // formatLen 1's place: 12345678901234567 - this.format = new SimpleDateFormat("yyyyMMddHHmmssSSS".substring(0,formatLen),Locale.US); - this.format.setTimeZone(GMT); - } - - /** this method returns the name of the resolution - * in lowercase (for backwards compatibility) */ - @Override - public String toString() { - return super.toString().toLowerCase(Locale.ENGLISH); - } - - } - -} Index: lucene/src/java/org/apache/lucene/document2/Document.java =================================================================== --- lucene/src/java/org/apache/lucene/document2/Document.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/document2/Document.java (working copy) @@ -1,216 +0,0 @@ -package org.apache.lucene.document2; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -import java.util.*; - -import org.apache.lucene.index.IndexReader; // for javadoc -import org.apache.lucene.index.IndexableField; -import org.apache.lucene.search.IndexSearcher; // for javadoc -import org.apache.lucene.search.ScoreDoc; // for javadoc - -/** Documents are the unit of indexing and search. - * - * A Document is a set of fields. Each field has a name and a textual value. - * A field may be {@link IndexableField#stored() stored} with the document, in which - * case it is returned with search hits on the document. Thus each document - * should typically contain one or more stored fields which uniquely identify - * it. - * - *

Note that fields which are not {@link IndexableField#stored() stored} are - * not available in documents retrieved from the index, e.g. with {@link - * ScoreDoc#doc} or {@link IndexReader#document(int)}. - */ - -public final class Document implements Iterable { - - List fields = new ArrayList(); - - /** Constructs a new document with no fields. */ - public Document() {} - - // @Override not until Java 1.6 - public Iterator iterator() { - - return new Iterator() { - private int fieldUpto = 0; - - public boolean hasNext() { - return fieldUpto < fields.size(); - } - - public void remove() { - throw new UnsupportedOperationException(); - } - - public IndexableField next() { - return fields.get(fieldUpto++); - } - }; - } - - /** - *

Adds a field to a document. Several fields may be added with - * the same name. In this case, if the fields are indexed, their text is - * treated as though appended for the purposes of search.

- *

Note that the add method, like the removeField(s) methods, only makes sense - * prior to adding a document to an index. These methods cannot - * be used to change the content of an existing index! In order to achieve this, - * a document has to be deleted from an index and a new, changed version of that - * document has to be added.
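The usual way to apply that delete-then-re-add cycle is IndexWriter.updateDocument, which performs both steps against a key term; a minimal sketch, assuming documents carry a unique "id" field:

    import java.io.IOException;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.Term;

    class ReindexSketch {
      // Deletes every document whose "id" field contains the given value,
      // then adds the rebuilt version in the same call.
      static void replace(IndexWriter writer, String id, Document newVersion) throws IOException {
        writer.updateDocument(new Term("id", id), newVersion);
      }
    }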

- */ - public final void add(IndexableField field) { - fields.add(field); - } - - /** - *

Removes field with the specified name from the document. - * If multiple fields exist with this name, this method removes the first field that has been added. - * If there is no field with the specified name, the document remains unchanged.

- *

Note that the removeField(s) methods, like the add method, only make sense - * prior to adding a document to an index. These methods cannot - * be used to change the content of an existing index! In order to achieve this, - * a document has to be deleted from an index and a new, changed version of that - * document has to be added.

- */ - public final void removeField(String name) { - Iterator it = fields.iterator(); - while (it.hasNext()) { - IndexableField field = it.next(); - if (field.name().equals(name)) { - it.remove(); - return; - } - } - } - - /** - *

Removes all fields with the given name from the document. - * If there is no field with the specified name, the document remains unchanged.

- *

Note that the removeField(s) methods, like the add method, only make sense - * prior to adding a document to an index. These methods cannot - * be used to change the content of an existing index! In order to achieve this, - * a document has to be deleted from an index and a new, changed version of that - * document has to be added.

- */ - public final void removeFields(String name) { - Iterator it = fields.iterator(); - while (it.hasNext()) { - IndexableField field = it.next(); - if (field.name().equals(name)) { - it.remove(); - } - } - } - - private final static byte[][] NO_BYTES = new byte[0][]; - - /** - * Returns an array of byte arrays for of the fields that have the name specified - * as the method parameter. This method returns an empty - * array when there are no matching fields. It never - * returns null. - * - * @param name the name of the field - * @return a byte[][] of binary field values - */ - public final byte[][] getBinaryValues(String name) { - List result = new ArrayList(); - for (IndexableField field : fields) { - if (field.name().equals(name) && ((Field) field).isBinary()) - result.add(field.binaryValue(null).bytes); - } - - if (result.size() == 0) - return NO_BYTES; - - return result.toArray(new byte[result.size()][]); - } - - /** - * Returns an array of bytes for the first (or only) field that has the name - * specified as the method parameter. This method will return null - * if no binary fields with the specified name are available. - * There may be non-binary fields with the same name. - * - * @param name the name of the field. - * @return a byte[] containing the binary field value or null - */ - public final byte[] getBinaryValue(String name) { - for (IndexableField field : fields) { - if (field.name().equals(name) && ((Field) field).isBinary()) - return field.binaryValue(null).bytes; - } - return null; - } - - public final IndexableField getField(String name) { - for (IndexableField field : fields) { - if (field.name().equals(name)) - return field; - } - return null; - } - - private final static IndexableField[] NO_FIELDS = new IndexableField[0]; - - public IndexableField[] getFields(String name) { - List result = new ArrayList(); - for (IndexableField field : fields) { - if (field.name().equals(name)) { - result.add(field); - } - } - - if (result.size() == 0) - return NO_FIELDS; - - return result.toArray(new IndexableField[result.size()]); - } - - public Integer size() { - return fields.size(); - } - - public final List getFields() { - return fields; - } - - public final String get(String name) { - for (IndexableField field : fields) { - if (field.name().equals(name) && (field.binaryValue(null) == null)) - return field.stringValue(); - } - return null; - } - - /** Prints the fields of a document for human consumption. */ - @Override - public final String toString() { - StringBuilder buffer = new StringBuilder(); - buffer.append("Document<"); - for (int i = 0; i < fields.size(); i++) { - IndexableField field = fields.get(i); - buffer.append(field.toString()); - if (i != fields.size()-1) - buffer.append(" "); - } - buffer.append(">"); - return buffer.toString(); - } -} Index: lucene/src/java/org/apache/lucene/document2/Field.java =================================================================== --- lucene/src/java/org/apache/lucene/document2/Field.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/document2/Field.java (working copy) @@ -1,383 +0,0 @@ -package org.apache.lucene.document2; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import java.io.Reader; - -import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.document2.NumericField; -import org.apache.lucene.index.IndexableField; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.StringHelper; - -/** - * A field is a section of a Document. Each field has two parts, a name and a - * value. Values may be free text, provided as a String or as a Reader, or they - * may be atomic keywords, which are not further processed. Such keywords may be - * used to represent dates, urls, etc. Fields are optionally stored in the - * index, so that they may be returned with hits on the document. - */ - -public class Field implements IndexableField { - - protected FieldType type; - protected String name = "body"; - // the data object for all different kind of field values - protected Object fieldsData = null; - // pre-analyzed tokenStream for indexed fields - protected TokenStream tokenStream; - protected boolean isBinary = false; - // length/offset for all primitive types - protected int binaryLength; - protected int binaryOffset; - - protected float boost = 1.0f; - - public Field(String name, FieldType type) { - this.name = name; - this.type = type; - } - - public Field(String name, FieldType type, Reader reader) { - if (name == null) - throw new NullPointerException("name cannot be null"); - if (reader == null) - throw new NullPointerException("reader cannot be null"); - - this.name = StringHelper.intern(name); // field names are interned - this.fieldsData = reader; - this.type = type; - } - - public Field(String name, FieldType type, TokenStream tokenStream) { - if (name == null) - throw new NullPointerException("name cannot be null"); - if (tokenStream == null) - throw new NullPointerException("tokenStream cannot be null"); - - this.name = StringHelper.intern(name); // field names are interned - this.fieldsData = null; - this.tokenStream = tokenStream; - this.type = type; - } - - public Field(String name, FieldType type, byte[] value) { - this(name, type, value, 0, value.length); - } - - public Field(String name, FieldType type, byte[] value, int offset, int length) { - this.isBinary = true; - this.fieldsData = value; - this.type = type; - this.binaryOffset = offset; - this.binaryLength = length; - this.name = StringHelper.intern(name); - } - - public Field(String name, FieldType type, String value) { - this(name, true, type, value); - } - - public Field(String name, boolean internName, FieldType type, String value) { - if (name == null) { - throw new IllegalArgumentException("name cannot be null"); - } - if (value == null) { - throw new IllegalArgumentException("value cannot be null"); - } - if (!type.stored() && !type.indexed()) { - throw new IllegalArgumentException("it doesn't make sense to have a field that " - + "is neither indexed nor stored"); - } - if (!type.indexed() && !type.tokenized() && (type.storeTermVectors())) { - throw new IllegalArgumentException("cannot store term vector information " - + "for a field that is not indexed"); - } - - this.type = type; - this.name = name; - this.fieldsData = value; - - 
if (internName) // field names are optionally interned - name = StringHelper.intern(name); - } - - public boolean isNumeric() { - return false; - } - - /** - * The value of the field as a String, or null. If null, the Reader value or - * binary value is used. Exactly one of stringValue(), readerValue(), and - * getBinaryValue() must be set. - */ - public String stringValue() { - return fieldsData instanceof String ? (String) fieldsData : null; - } - - /** - * The value of the field as a Reader, or null. If null, the String value or - * binary value is used. Exactly one of stringValue(), readerValue(), and - * getBinaryValue() must be set. - */ - public Reader readerValue() { - return fieldsData instanceof Reader ? (Reader) fieldsData : null; - } - - /** - * The TokesStream for this field to be used when indexing, or null. If null, - * the Reader value or String value is analyzed to produce the indexed tokens. - */ - public TokenStream tokenStreamValue() { - return tokenStream; - } - - /** - *

- * Expert: change the value of this field. This can be used during indexing to - * re-use a single Field instance to improve indexing speed by avoiding GC - * cost of new'ing and reclaiming Field instances. Typically a single - * {@link Document} instance is re-used as well. This helps most on small - * documents. - *

- * - *

- * Each Field instance should only be used once within a single - * {@link Document} instance. See ImproveIndexingSpeed for details. - *
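A minimal sketch of that re-use pattern, assuming an already-configured IndexWriter and using setValue(String) as defined further down in this class; the helper class and field name are illustrative:

    import java.io.IOException;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.FieldType;
    import org.apache.lucene.index.IndexWriter;

    class ReuseSketch {
      static void indexTitles(IndexWriter writer, Iterable<String> titles) throws IOException {
        FieldType indexedText = new FieldType();
        indexedText.setIndexed(true);
        indexedText.setTokenized(true);
        indexedText.freeze();

        // Create the Field and Document once; only the value changes per document.
        Field title = new Field("title", indexedText, "placeholder");
        Document doc = new Document();
        doc.add(title);

        for (String t : titles) {
          title.setValue(t);        // re-use instead of allocating a new Field
          writer.addDocument(doc);
        }
      }
    }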

- */ - public void setValue(String value) { - if (isBinary) { - throw new IllegalArgumentException( - "cannot set a String value on a binary field"); - } - fieldsData = value; - } - - /** - * Expert: change the value of this field. See setValue(String). - */ - public void setValue(Reader value) { - if (isBinary) { - throw new IllegalArgumentException( - "cannot set a Reader value on a binary field"); - } - if (stored()) { - throw new IllegalArgumentException( - "cannot set a Reader value on a stored field"); - } - fieldsData = value; - } - - /** - * Expert: change the value of this field. See setValue(String). - */ - public void setValue(byte[] value) { - if (!isBinary) { - throw new IllegalArgumentException( - "cannot set a byte[] value on a non-binary field"); - } - fieldsData = value; - binaryLength = value.length; - binaryOffset = 0; - } - - /** - * Expert: change the value of this field. See setValue(String). - */ - public void setValue(byte[] value, int offset, int length) { - if (!isBinary) { - throw new IllegalArgumentException( - "cannot set a byte[] value on a non-binary field"); - } - fieldsData = value; - binaryLength = length; - binaryOffset = offset; - } - - /** - * Expert: sets the token stream to be used for indexing and causes - * isIndexed() and isTokenized() to return true. May be combined with stored - * values from stringValue() or getBinaryValue() - */ - public void setTokenStream(TokenStream tokenStream) { - if (!indexed() || !tokenized()) { - throw new IllegalArgumentException( - "cannot set token stream on non indexed and tokenized field"); - } - this.tokenStream = tokenStream; - } - - public String name() { - return name; - } - - public float boost() { - return boost; - } - - /** - * Sets the boost factor hits on this field. This value will be multiplied - * into the score of all hits on this this field of this document. - * - *

- * The boost is multiplied by - * {@link org.apache.lucene.document.Document#getBoost()} of the document - * containing this field. If a document has multiple fields with the same - * name, all such values are multiplied together. This product is then used to - * compute the norm factor for the field. By default, in the - * {@link org.apache.lucene.search.Similarity#computeNorm(FieldInvertState)} - * method, the boost value is multiplied by the length normalization factor - * and then rounded by - * {@link org.apache.lucene.search.Similarity#encodeNormValue(float)} before - * it is stored in the index. One should attempt to ensure that this product - * does not overflow the range of that encoding. - * - * @see org.apache.lucene.document.Document#setBoost(float) - * @see org.apache.lucene.search.Similarity#computeNorm(FieldInvertState) - * @see org.apache.lucene.search.Similarity#encodeNormValue(float) - */ - public void setBoost(float boost) { - this.boost = boost; - } - - public boolean numeric() { - return false; - } - - public Number numericValue() { - return null; - } - - public NumericField.DataType numericDataType() { - return null; - } - - private byte[] getBinaryValue(byte[] result /* unused */) { - if (isBinary || fieldsData instanceof byte[]) return (byte[]) fieldsData; - else return null; - } - - private byte[] getBinaryValue() { - return getBinaryValue(null); - } - - public BytesRef binaryValue(BytesRef reuse) { - final byte[] bytes = getBinaryValue(); - if (bytes != null) { - if (reuse == null) { - return new BytesRef(bytes, getBinaryOffset(), getBinaryLength()); - } else { - reuse.bytes = bytes; - reuse.offset = getBinaryOffset(); - reuse.length = getBinaryLength(); - return reuse; - } - } else { - return null; - } - } - - /** - * Returns length of byte[] segment that is used as value, if Field is not - * binary returned value is undefined - * - * @return length of byte[] segment that represents this Field value - */ - private int getBinaryLength() { - if (isBinary) { - return binaryLength; - } else if (fieldsData instanceof byte[]) return ((byte[]) fieldsData).length; - else return 0; - } - - /** - * Returns offset into byte[] segment that is used as value, if Field is not - * binary returned value is undefined - * - * @return index of the first character in byte[] segment that represents this - * Field value - */ - public int getBinaryOffset() { - return binaryOffset; - } - - public boolean isBinary() { - return isBinary; - } - - /** methods from inner FieldType */ - - public boolean stored() { - return type.stored(); - } - - public boolean indexed() { - return type.indexed(); - } - - public boolean tokenized() { - return type.tokenized(); - } - - public boolean omitNorms() { - return type.omitNorms(); - } - - public boolean omitTermFreqAndPositions() { - return type.omitTermFreqAndPositions(); - } - - public boolean storeTermVectors() { - return type.storeTermVectors(); - } - - public boolean storeTermVectorOffsets() { - return type.storeTermVectorOffsets(); - } - - public boolean storeTermVectorPositions() { - return type.storeTermVectorPositions(); - } - - public boolean lazy() { - return type.lazy(); - } - - /** Prints a Field for human consumption. 
*/ - @Override - public final String toString() { - StringBuilder result = new StringBuilder(); - result.append(type.toString()); - result.append('<'); - result.append(name); - result.append(':'); - - if (fieldsData != null && type.lazy() == false) { - result.append(fieldsData); - } - - result.append('>'); - return result.toString(); - } -} Index: lucene/src/java/org/apache/lucene/document2/FieldType.java =================================================================== --- lucene/src/java/org/apache/lucene/document2/FieldType.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/document2/FieldType.java (working copy) @@ -1,183 +0,0 @@ -package org.apache.lucene.document2; - -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -public class FieldType { - - private boolean indexed; - private boolean stored; - private boolean tokenized; - private boolean storeTermVectors; - private boolean storeTermVectorOffsets; - private boolean storeTermVectorPositions; - private boolean omitNorms; - private boolean omitTermFreqsAndPositions; - private boolean lazy; - private boolean frozen; - - public FieldType(FieldType ref) { - this.indexed = ref.indexed(); - this.stored = ref.stored(); - this.tokenized = ref.tokenized(); - this.storeTermVectors = ref.storeTermVectors(); - this.storeTermVectorOffsets = ref.storeTermVectorOffsets(); - this.storeTermVectorPositions = ref.storeTermVectorPositions(); - this.omitNorms = ref.omitNorms(); - this.omitTermFreqsAndPositions = ref.omitTermFreqAndPositions(); - this.lazy = ref.lazy(); - } - - public FieldType() { - } - - private void checkIfFrozen() { - if (frozen) { - throw new IllegalStateException(); - } - } - - public void freeze() { - this.frozen = true; - } - - public boolean indexed() { - return this.indexed; - } - - public void setIndexed(boolean value) { - checkIfFrozen(); - this.indexed = value; - } - - public boolean stored() { - return this.stored; - } - - public void setStored(boolean value) { - checkIfFrozen(); - this.stored = value; - } - - public boolean tokenized() { - return this.tokenized; - } - - public void setTokenized(boolean value) { - checkIfFrozen(); - this.tokenized = value; - } - - public boolean storeTermVectors() { - return this.storeTermVectors; - } - - public void setStoreTermVectors(boolean value) { - checkIfFrozen(); - this.storeTermVectors = value; - } - - public boolean storeTermVectorOffsets() { - return this.storeTermVectorOffsets; - } - - public void setStoreTermVectorOffsets(boolean value) { - checkIfFrozen(); - this.storeTermVectorOffsets = value; - } - - public boolean storeTermVectorPositions() { - return this.storeTermVectorPositions; - } - - public void setStoreTermVectorPositions(boolean value) { - checkIfFrozen(); - this.storeTermVectorPositions = value; - } - 
- public boolean omitNorms() { - return this.omitNorms; - } - - public void setOmitNorms(boolean value) { - checkIfFrozen(); - this.omitNorms = value; - } - - public boolean omitTermFreqAndPositions() { - return this.omitTermFreqsAndPositions; - } - - public void setOmitTermFreqAndPositions(boolean value) { - checkIfFrozen(); - this.omitTermFreqsAndPositions = value; - } - - public boolean lazy() { - return this.lazy; - } - - public void setLazy(boolean value) { - checkIfFrozen(); - this.lazy = value; - } - - /** Prints a Field for human consumption. */ - @Override - public final String toString() { - StringBuilder result = new StringBuilder(); - if (stored()) { - result.append("stored"); - } - if (indexed()) { - if (result.length() > 0) - result.append(","); - result.append("indexed"); - } - if (tokenized()) { - if (result.length() > 0) - result.append(","); - result.append("tokenized"); - } - if (storeTermVectors()) { - if (result.length() > 0) - result.append(","); - result.append("termVector"); - } - if (storeTermVectorOffsets()) { - if (result.length() > 0) - result.append(","); - result.append("termVectorOffsets"); - } - if (storeTermVectorPositions()) { - if (result.length() > 0) - result.append(","); - result.append("termVectorPosition"); - } - if (omitNorms()) { - result.append(",omitNorms"); - } - if (omitTermFreqAndPositions()) { - result.append(",omitTermFreqAndPositions"); - } - if (lazy()){ - result.append(",lazy"); - } - - return result.toString(); - } -} Index: lucene/src/java/org/apache/lucene/document2/NumericField.java =================================================================== --- lucene/src/java/org/apache/lucene/document2/NumericField.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/document2/NumericField.java (working copy) @@ -1,387 +0,0 @@ -package org.apache.lucene.document2; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import java.io.Reader; - -import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.NumericTokenStream; -import org.apache.lucene.document2.NumericField.DataType; -import org.apache.lucene.util.NumericUtils; -import org.apache.lucene.search.NumericRangeQuery; // javadocs -import org.apache.lucene.search.NumericRangeFilter; // javadocs -import org.apache.lucene.search.SortField; // javadocs -import org.apache.lucene.search.FieldCache; // javadocs - -/** - *

- * This class provides a {@link Field} that enables indexing of numeric values - * for efficient range filtering and sorting. Here's an example usage, adding an - * int value: - * - *

- * document.add(new NumericField(name).setIntValue(value));
- * 
- * - * For optimal performance, re-use the NumericField and - * {@link Document} instance for more than one document: - * - *
- *  NumericField field = new NumericField(name);
- *  Document document = new Document();
- *  document.add(field);
- * 
- *  for(all documents) {
- *    ...
- *    field.setIntValue(value)
- *    writer.addDocument(document);
- *    ...
- *  }
- * 
- * - *

- * The java native types int, long, float - * and double are directly supported. However, any value that can - * be converted into these native types can also be indexed. For example, - * date/time values represented by a {@link java.util.Date} can be translated - * into a long value using the {@link java.util.Date#getTime} method. If you - * don't need millisecond precision, you can quantize the value, either by - * dividing the result of {@link java.util.Date#getTime} or using the separate - * getters (for year, month, etc.) to construct an int or - * long value. - *

- * - *

- * To perform range querying or filtering against a NumericField, - * use {@link NumericRangeQuery} or {@link NumericRangeFilter}. To sort - * according to a NumericField, use the normal numeric sort types, - * eg {@link SortField#INT}. NumericField values can also be loaded - * directly from {@link FieldCache}. - *
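A minimal sketch of range filtering plus numeric sorting on a NumericField named "year" (an assumed field), using the NumericRangeQuery factory methods and the SortField constant referenced above:

    import java.io.IOException;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.NumericRangeQuery;
    import org.apache.lucene.search.Sort;
    import org.apache.lucene.search.SortField;
    import org.apache.lucene.search.TopDocs;

    class YearRangeSketch {
      static TopDocs recentFirst(IndexSearcher searcher) throws IOException {
        // Matches documents whose "year" NumericField lies in [2000, 2010], both ends inclusive.
        NumericRangeQuery<Integer> range = NumericRangeQuery.newIntRange("year", 2000, 2010, true, true);
        // Sort numerically on the same single-valued field, newest first.
        Sort byYear = new Sort(new SortField("year", SortField.INT, true));
        return searcher.search(range, 10, byYear);
      }
    }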

- * - *

- * By default, a NumericField's value is not stored but is indexed - * for range filtering and sorting. You can use the - * {@link #NumericField(String,FieldType)} constructor if you need to - * change these defaults, and alter the default field type (set it to stored). - *

- * - *

- * You may add the same field name as a NumericField to the same - * document more than once. Range querying and filtering will be the logical OR - * of all values; so a range query will hit all documents that have at least one - * value in the range. However sort behavior is not defined. If you need to - * sort, you should separately index a single-valued NumericField. - *

- * - *

- * A NumericField will consume somewhat more disk space in the - * index than an ordinary single-valued field. However, for a typical index that - * includes substantial textual content per document, this increase will likely - * be in the noise. - *

- * - *

- * Within Lucene, each numeric value is indexed as a trie structure, - * where each term is logically assigned to larger and larger pre-defined - * brackets (which are simply lower-precision representations of the value). The - * step size between each successive bracket is called the - * precisionStep, measured in bits. Smaller - * precisionStep values result in larger number of brackets, which - * consumes more disk space in the index but may result in faster range search - * performance. The default value, 4, was selected for a reasonable tradeoff of - * disk space consumption versus performance. You can use the expert constructor - * {@link #NumericField(String,int,FieldType)} if you'd like to change - * the value. Note that you must also specify a congruent value when creating - * {@link NumericRangeQuery} or {@link NumericRangeFilter}. For low cardinality - * fields larger precision steps are good. If the cardinality is < 100, it is - * fair to use {@link Integer#MAX_VALUE}, which produces one term per value. - * - *
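A short sketch of keeping the precisionStep congruent between indexing and querying; the step value 8 and the "price" field are arbitrary choices:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.NumericField;
    import org.apache.lucene.search.NumericRangeQuery;

    class PrecisionStepSketch {
      static final int STEP = 8;   // must be the same at index time and query time

      static Document priced(int price) {
        Document doc = new Document();
        doc.add(new NumericField("price", STEP).setIntValue(price));
        return doc;
      }

      static NumericRangeQuery<Integer> priceBetween(int min, int max) {
        // Build the query with the same precisionStep the field was indexed with.
        return NumericRangeQuery.newIntRange("price", STEP, min, max, true, true);
      }
    }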

- * For more information on the internals of numeric trie indexing, including the - * - * precisionStep configuration, see {@link NumericRangeQuery}. - * The format of indexed values is described in {@link NumericUtils}. - * - *

- * If you only need to sort by numeric value, and never run range - * querying/filtering, you can index using a precisionStep of - * {@link Integer#MAX_VALUE}. This will minimize disk space consumed. - *

- * - *

- * More advanced users can instead use {@link NumericTokenStream} directly, when - * indexing numbers. This class is a wrapper around this token stream type for - * easier, more intuitive usage. - *

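A rough sketch of the lower-level NumericTokenStream route mentioned above (hedged: only stream construction and value assignment are shown, mirroring what tokenStreamValue() below does internally; how the stream is attached to a field varies across these API revisions):

    // Requires: org.apache.lucene.analysis.NumericTokenStream.
    NumericTokenStream stream = new NumericTokenStream(precisionStep);
    stream.setLongValue(1234L); // the same stream instance can be reused per document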
- * - * @since 2.9 - */ -public final class NumericField extends Field { - - /** Data type of the value in {@link NumericField}. - * @since 3.2 - */ - public static enum DataType { INT, LONG, FLOAT, DOUBLE } - - public static final FieldType TYPE_UNSTORED = new FieldType(); - public static final FieldType TYPE_STORED = new FieldType(); - static { - TYPE_UNSTORED.setIndexed(true); - TYPE_UNSTORED.setTokenized(true); - TYPE_UNSTORED.setOmitNorms(true); - TYPE_UNSTORED.setOmitTermFreqAndPositions(true); - TYPE_UNSTORED.freeze(); - - TYPE_STORED.setIndexed(true); - TYPE_STORED.setStored(true); - TYPE_STORED.setTokenized(true); - TYPE_STORED.setOmitNorms(true); - TYPE_STORED.setOmitTermFreqAndPositions(true); - TYPE_STORED.freeze(); - } - - //public static enum DataType { INT, LONG, FLOAT, DOUBLE } - - private DataType dataType; - private transient NumericTokenStream numericTS; - private final int precisionStep; - - /** - * Creates a field for numeric values using the default - * precisionStep {@link NumericUtils#PRECISION_STEP_DEFAULT} (4). - * The instance is not yet initialized with a numeric value, before indexing a - * document containing this field, set a value using the various set - * ???Value() methods. This constructor creates an indexed, but not - * stored field. - * - * @param name - * the field name - */ - public NumericField(String name) { - this(name, NumericUtils.PRECISION_STEP_DEFAULT, NumericField.TYPE_UNSTORED); - } - - /** - * Creates a field for numeric values using the default - * precisionStep {@link NumericUtils#PRECISION_STEP_DEFAULT} (4). - * The instance is not yet initialized with a numeric value, before indexing a - * document containing this field, set a value using the various set - * ???Value() methods. - * - * @param name - * the field name - * @param type - * if the defualt field should be altered, e.g. stored, - * {@link Document#getField} then returns {@code NumericField} - * instances on search results, or indexed using - * {@link NumericTokenStream} - */ - public NumericField(String name, FieldType type) { - this(name, NumericUtils.PRECISION_STEP_DEFAULT, type); - } - - /** - * Creates a field for numeric values with the specified - * precisionStep. The instance is not yet initialized with a - * numeric value, before indexing a document containing this field, set a - * value using the various set???Value() methods. This constructor - * creates an indexed, but not stored field. - * - * @param name - * the field name - * @param precisionStep - * the used precision step - */ - public NumericField(String name, int precisionStep) { - this(name, precisionStep, NumericField.TYPE_UNSTORED); - } - - /** - * Creates a field for numeric values with the specified - * precisionStep. The instance is not yet initialized with a - * numeric value, before indexing a document containing this field, set a - * value using the various set???Value() methods. - * - * @param name - * the field name - * @param precisionStep - * the used precision step - * @param type - * if the defualt field should be altered, e.g. stored, - * {@link Document#getField} then returns {@code NumericField} - * instances on search results, or indexed using - * {@link NumericTokenStream} - */ - public NumericField(String name, int precisionStep, FieldType type) { - super(name, type); - this.precisionStep = precisionStep; - } - - /** Returns a {@link NumericTokenStream} for indexing the numeric value. 
*/ - public TokenStream tokenStreamValue() { - if (!indexed()) return null; - if (numericTS == null) { - // lazy init the TokenStream as it is heavy to instantiate - // (attributes,...), - // if not needed (stored field loading) - numericTS = new NumericTokenStream(precisionStep); - // initialize value in TokenStream - if (fieldsData != null) { - assert dataType != null; - final Number val = (Number) fieldsData; - switch (dataType) { - case INT: - numericTS.setIntValue(val.intValue()); - break; - case LONG: - numericTS.setLongValue(val.longValue()); - break; - case FLOAT: - numericTS.setFloatValue(val.floatValue()); - break; - case DOUBLE: - numericTS.setDoubleValue(val.doubleValue()); - break; - default: - assert false : "Should never get here"; - } - } - } - return numericTS; - } - - /** Returns always null for numeric fields */ - public Reader readerValue() { - return null; - } - - /** - * Returns the numeric value as a string. It is recommended to - * use {@link Document#getField} instead that returns {@code NumericField} - * instances. You can then use {@link #numericValue} to return the stored - * value. - */ - @Override - public String stringValue() { - return (fieldsData == null) ? null : fieldsData.toString(); - } - - /** - * Returns the current numeric value as a subclass of {@link Number}, - * null if not yet initialized. - */ - @Override - public Number numericValue() { - return (Number) fieldsData; - } - - /** Returns the precision step. */ - public int getPrecisionStep() { - return precisionStep; - } - - /** - * Returns the data type of the current value, {@code null} if not yet set. - * - * @since 3.2 - */ - @Override - public DataType numericDataType() { - return dataType; - } - - public DataType numericType() { - return dataType; - } - - @Override - public boolean numeric() { - return true; - } - - @Override - public boolean isNumeric() { - return true; - } - - /** - * Initializes the field with the supplied long value. - * - * @param value - * the numeric value - * @return this instance, because of this you can use it the following way: - * document.add(new NumericField(name, precisionStep).setLongValue(value)) - */ - public NumericField setLongValue(final long value) { - if (numericTS != null) numericTS.setLongValue(value); - fieldsData = Long.valueOf(value); - dataType = DataType.LONG; - return this; - } - - /** - * Initializes the field with the supplied int value. - * - * @param value - * the numeric value - * @return this instance, because of this you can use it the following way: - * document.add(new NumericField(name, precisionStep).setIntValue(value)) - */ - public NumericField setIntValue(final int value) { - if (numericTS != null) numericTS.setIntValue(value); - fieldsData = Integer.valueOf(value); - dataType = DataType.INT; - return this; - } - - /** - * Initializes the field with the supplied double value. - * - * @param value - * the numeric value - * @return this instance, because of this you can use it the following way: - * document.add(new NumericField(name, precisionStep).setDoubleValue(value)) - */ - public NumericField setDoubleValue(final double value) { - if (numericTS != null) numericTS.setDoubleValue(value); - fieldsData = Double.valueOf(value); - dataType = DataType.DOUBLE; - return this; - } - - /** - * Initializes the field with the supplied float value. 
- * - * @param value - * the numeric value - * @return this instance, because of this you can use it the following way: - * document.add(new NumericField(name, precisionStep).setFloatValue(value)) - */ - public NumericField setFloatValue(final float value) { - if (numericTS != null) numericTS.setFloatValue(value); - fieldsData = Float.valueOf(value); - dataType = DataType.FLOAT; - return this; - } - -} Index: lucene/src/java/org/apache/lucene/document2/StringField.java =================================================================== --- lucene/src/java/org/apache/lucene/document2/StringField.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/document2/StringField.java (working copy) @@ -1,53 +0,0 @@ -package org.apache.lucene.document2; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -public final class StringField extends Field { - - public static final FieldType TYPE_UNSTORED = new FieldType(); - public static final FieldType TYPE_STORED = new FieldType(); - static { - TYPE_UNSTORED.setIndexed(true); - TYPE_UNSTORED.setOmitNorms(true); - TYPE_UNSTORED.setOmitTermFreqAndPositions(true); - TYPE_UNSTORED.freeze(); - - TYPE_STORED.setIndexed(true); - TYPE_STORED.setStored(true); - TYPE_STORED.setOmitNorms(true); - TYPE_STORED.setOmitTermFreqAndPositions(true); - TYPE_STORED.freeze(); - } - - public StringField(String name, boolean internName, String value) { - super(name, StringField.TYPE_UNSTORED, value); - } - - public StringField(String name, String value) { - this(name, true, value); - } - - @Override - public String stringValue() { - return (fieldsData == null) ? null : fieldsData.toString(); - } - - public boolean isNumeric() { - return false; - } -} Index: lucene/src/java/org/apache/lucene/document2/TextField.java =================================================================== --- lucene/src/java/org/apache/lucene/document2/TextField.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/document2/TextField.java (working copy) @@ -1,54 +0,0 @@ -package org.apache.lucene.document2; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import java.io.Reader; - -import org.apache.lucene.analysis.TokenStream; - -public final class TextField extends Field { - - public static final FieldType TYPE_UNSTORED = new FieldType(); - public static final FieldType TYPE_STORED = new FieldType(); - static { - TYPE_UNSTORED.setIndexed(true); - TYPE_UNSTORED.setTokenized(true); - TYPE_UNSTORED.freeze(); - - TYPE_STORED.setIndexed(true); - TYPE_STORED.setStored(true); - TYPE_STORED.setTokenized(true); - TYPE_STORED.freeze(); - } - - public TextField(String name, Reader reader) { - super(name, TextField.TYPE_UNSTORED, reader); - } - - public TextField(String name, String value) { - super(name, TextField.TYPE_UNSTORED, value); - } - - public TextField(String name, TokenStream stream) { - super(name, TextField.TYPE_UNSTORED, stream); - } - - public boolean isNumeric() { - return false; - } -} Index: lucene/src/java/org/apache/lucene/index/CheckIndex.java =================================================================== --- lucene/src/java/org/apache/lucene/index/CheckIndex.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/index/CheckIndex.java (working copy) @@ -23,7 +23,7 @@ import org.apache.lucene.store.FSDirectory; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IndexInput; -import org.apache.lucene.document2.Document; +import org.apache.lucene.document.Document; import org.apache.lucene.index.codecs.CodecProvider; import org.apache.lucene.index.codecs.DefaultSegmentInfosWriter; import org.apache.lucene.util.Bits; @@ -897,7 +897,7 @@ for (int j = 0; j < info.docCount; ++j) { if (delDocs == null || !delDocs.get(j)) { status.docCount++; - Document doc = reader.document2(j); + Document doc = reader.document(j); status.totFields += doc.getFields().size(); } } Index: lucene/src/java/org/apache/lucene/index/Document2StoredFieldVisitor.java =================================================================== --- lucene/src/java/org/apache/lucene/index/Document2StoredFieldVisitor.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/index/Document2StoredFieldVisitor.java (working copy) @@ -1,141 +0,0 @@ -package org.apache.lucene.index; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -import java.io.IOException; -import java.util.Set; -import java.util.HashSet; - -import org.apache.lucene.document2.BinaryField; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.NumericField; -import org.apache.lucene.document2.TextField; -import org.apache.lucene.store.IndexInput; - -/** A {@link StoredFieldVisitor} that creates a {@link - * Document} containing all stored fields, or only specific - * requested fields provided to {@link #DocumentStoredFieldVisitor(Set)} - * This is used by {@link IndexReader#document(int)} to load a - * document. - * - * @lucene.experimental */ - -public class Document2StoredFieldVisitor extends StoredFieldVisitor { - private final Document doc = new Document(); - private final Set fieldsToAdd; - - /** Load only fields named in the provided Set<String>. */ - public Document2StoredFieldVisitor(Set fieldsToAdd) { - this.fieldsToAdd = fieldsToAdd; - } - - /** Load only fields named in the provided Set<String>. */ - public Document2StoredFieldVisitor(String... fields) { - fieldsToAdd = new HashSet(fields.length); - for(String field : fields) { - fieldsToAdd.add(field); - } - } - - /** Load all stored fields. */ - public Document2StoredFieldVisitor() { - this.fieldsToAdd = null; - } - - @Override - public boolean binaryField(FieldInfo fieldInfo, IndexInput in, int numBytes) throws IOException { - if (accept(fieldInfo)) { - final byte[] b = new byte[numBytes]; - in.readBytes(b, 0, b.length); - doc.add(new BinaryField(fieldInfo.name, b)); - } else { - in.seek(in.getFilePointer() + numBytes); - } - return false; - } - - @Override - public boolean stringField(FieldInfo fieldInfo, IndexInput in, int numUTF8Bytes) throws IOException { - if (accept(fieldInfo)) { - final byte[] b = new byte[numUTF8Bytes]; - in.readBytes(b, 0, b.length); - FieldType ft = new FieldType(TextField.TYPE_STORED); - ft.setStoreTermVectors(fieldInfo.storeTermVector); - ft.setStoreTermVectorPositions(fieldInfo.storePositionWithTermVector); - ft.setStoreTermVectorOffsets(fieldInfo.storeOffsetWithTermVector); - ft.setStoreTermVectors(fieldInfo.storeTermVector); - doc.add(new Field(fieldInfo.name, - false, - ft, - new String(b, "UTF-8"))); - } else { - in.seek(in.getFilePointer() + numUTF8Bytes); - } - return false; - } - - @Override - public boolean intField(FieldInfo fieldInfo, int value) { - if (accept(fieldInfo)) { - FieldType ft = new FieldType(NumericField.TYPE_STORED); - ft.setIndexed(fieldInfo.isIndexed); - doc.add(new NumericField(fieldInfo.name, ft).setIntValue(value)); - } - return false; - } - - @Override - public boolean longField(FieldInfo fieldInfo, long value) { - if (accept(fieldInfo)) { - FieldType ft = new FieldType(NumericField.TYPE_STORED); - ft.setIndexed(fieldInfo.isIndexed); - doc.add(new NumericField(fieldInfo.name, ft).setLongValue(value)); - } - return false; - } - - @Override - public boolean floatField(FieldInfo fieldInfo, float value) { - if (accept(fieldInfo)) { - FieldType ft = new FieldType(NumericField.TYPE_STORED); - ft.setIndexed(fieldInfo.isIndexed); - doc.add(new NumericField(fieldInfo.name, ft).setFloatValue(value)); - } - return false; - } - - @Override - public boolean doubleField(FieldInfo fieldInfo, double value) { - if (accept(fieldInfo)) { - FieldType ft = new FieldType(NumericField.TYPE_STORED); - ft.setIndexed(fieldInfo.isIndexed); - doc.add(new NumericField(fieldInfo.name, ft).setDoubleValue(value)); - } - return false; 
- } - - private boolean accept(FieldInfo fieldInfo) { - return fieldsToAdd == null || fieldsToAdd.contains(fieldInfo.name); - } - - public Document getDocument() { - return doc; - } -} \ No newline at end of file Index: lucene/src/java/org/apache/lucene/index/DocumentStoredFieldVisitor.java =================================================================== --- lucene/src/java/org/apache/lucene/index/DocumentStoredFieldVisitor.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/index/DocumentStoredFieldVisitor.java (working copy) @@ -21,12 +21,12 @@ import java.util.Set; import java.util.HashSet; -import org.apache.lucene.document2.BinaryField; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.NumericField; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.BinaryField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.NumericField; +import org.apache.lucene.document.TextField; import org.apache.lucene.store.IndexInput; /** A {@link StoredFieldVisitor} that creates a {@link @@ -37,17 +37,17 @@ * * @lucene.experimental */ -public class Document2StoredFieldVisitor extends StoredFieldVisitor { +public class DocumentStoredFieldVisitor extends StoredFieldVisitor { private final Document doc = new Document(); private final Set fieldsToAdd; /** Load only fields named in the provided Set<String>. */ - public Document2StoredFieldVisitor(Set fieldsToAdd) { + public DocumentStoredFieldVisitor(Set fieldsToAdd) { this.fieldsToAdd = fieldsToAdd; } /** Load only fields named in the provided Set<String>. */ - public Document2StoredFieldVisitor(String... fields) { + public DocumentStoredFieldVisitor(String... fields) { fieldsToAdd = new HashSet(fields.length); for(String field : fields) { fieldsToAdd.add(field); @@ -55,7 +55,7 @@ } /** Load all stored fields. 
*/ - public Document2StoredFieldVisitor() { + public DocumentStoredFieldVisitor() { this.fieldsToAdd = null; } Index: lucene/src/java/org/apache/lucene/index/IndexReader.java =================================================================== --- lucene/src/java/org/apache/lucene/index/IndexReader.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/index/IndexReader.java (working copy) @@ -26,7 +26,7 @@ import java.util.Map; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.lucene.document2.Document; +import org.apache.lucene.document.Document; import org.apache.lucene.index.codecs.Codec; import org.apache.lucene.index.codecs.CodecProvider; import org.apache.lucene.search.FieldCache; // javadocs @@ -974,22 +974,13 @@ * @throws CorruptIndexException if the index is corrupt * @throws IOException if there is a low-level IO error */ - /* - public Document document(int docID) throws CorruptIndexException, IOException { + public org.apache.lucene.document.Document document(int docID) throws CorruptIndexException, IOException { ensureOpen(); final DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(); document(docID, visitor); return visitor.getDocument(); } - */ - public org.apache.lucene.document2.Document document2(int docID) throws CorruptIndexException, IOException { - ensureOpen(); - final Document2StoredFieldVisitor visitor = new Document2StoredFieldVisitor(); - document(docID, visitor); - return visitor.getDocument(); - } - /** Returns true if any documents have been deleted */ public abstract boolean hasDeletions(); @@ -1011,7 +1002,7 @@ /** Expert: Resets the normalization factor for the named field of the named * document. The norm represents the product of the field's {@link - * org.apache.lucene.document.Fieldable#setBoost(float) boost} and its + * org.apache.lucene.document.Field#setBoost(float) boost} and its * length normalization}. Thus, to preserve the length normalization * values when resetting this, one should base the new value upon the old. 
* Index: lucene/src/java/org/apache/lucene/index/IndexableField.java =================================================================== --- lucene/src/java/org/apache/lucene/index/IndexableField.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/index/IndexableField.java (working copy) @@ -20,8 +20,8 @@ import java.io.Reader; import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.document2.NumericField; -import org.apache.lucene.document2.NumericField.DataType; +import org.apache.lucene.document.NumericField; +import org.apache.lucene.document.NumericField.DataType; import org.apache.lucene.util.BytesRef; // nocommit jdocs Index: lucene/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java =================================================================== --- lucene/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java (working copy) @@ -23,9 +23,9 @@ import java.util.Map; import java.util.Map.Entry; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.Directory; import org.apache.lucene.store.LockObtainFailedException; @@ -68,7 +68,7 @@ int numDocs = r.numDocs(); // index is allowed to have exactly one document or 0. if (numDocs == 1) { - Document doc = r.document2(r.maxDoc() - 1); + Document doc = r.document(r.maxDoc() - 1); Field sid = (Field) doc.getField(SNAPSHOTS_ID); if (sid == null) { throw new IllegalStateException("directory is not a valid snapshots store!"); Index: lucene/src/java/org/apache/lucene/index/SegmentMerger.java =================================================================== --- lucene/src/java/org/apache/lucene/index/SegmentMerger.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/index/SegmentMerger.java (working copy) @@ -23,7 +23,7 @@ import java.util.Collection; import java.util.List; -import org.apache.lucene.document2.Document; +import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexReader.FieldOption; import org.apache.lucene.index.MergePolicy.MergeAbortedException; import org.apache.lucene.index.codecs.Codec; @@ -316,7 +316,7 @@ // on the fly? 
// NOTE: it's very important to first assign to doc then pass it to // termVectorsWriter.addAllDocVectors; see LUCENE-1282 - Document doc = reader.document2(j); + Document doc = reader.document(j); fieldsWriter.addDocument(doc, fieldInfos); docCount++; checkAbort.work(300); @@ -343,7 +343,7 @@ for (; docCount < maxDoc; docCount++) { // NOTE: it's very important to first assign to doc then pass it to // termVectorsWriter.addAllDocVectors; see LUCENE-1282 - Document doc = reader.document2(docCount); + Document doc = reader.document(docCount); fieldsWriter.addDocument(doc, fieldInfos); checkAbort.work(300); } Index: lucene/src/java/org/apache/lucene/index/StoredFieldVisitor.java =================================================================== --- lucene/src/java/org/apache/lucene/index/StoredFieldVisitor.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/index/StoredFieldVisitor.java (working copy) @@ -19,7 +19,7 @@ import java.io.IOException; -import org.apache.lucene.document2.Document; // javadocs +import org.apache.lucene.document.Document; import org.apache.lucene.store.IndexInput; /** Index: lucene/src/java/org/apache/lucene/queryParser/QueryParser.java =================================================================== --- lucene/src/java/org/apache/lucene/queryParser/QueryParser.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/queryParser/QueryParser.java (working copy) @@ -7,7 +7,7 @@ import java.util.Locale; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.document2.DateTools; +import org.apache.lucene.document.DateTools; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermRangeQuery; Index: lucene/src/java/org/apache/lucene/queryParser/QueryParser.jj =================================================================== --- lucene/src/java/org/apache/lucene/queryParser/QueryParser.jj (revision 1153521) +++ lucene/src/java/org/apache/lucene/queryParser/QueryParser.jj (working copy) @@ -31,7 +31,7 @@ import java.util.Locale; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.document2.DateTools; +import org.apache.lucene.document.DateTools; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermRangeQuery; Index: lucene/src/java/org/apache/lucene/queryParser/QueryParserBase.java =================================================================== --- lucene/src/java/org/apache/lucene/queryParser/QueryParserBase.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/queryParser/QueryParserBase.java (working copy) @@ -28,7 +28,7 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute; -import org.apache.lucene.document2.DateTools; +import org.apache.lucene.document.DateTools; import org.apache.lucene.index.Term; import org.apache.lucene.queryParser.QueryParser.Operator; import org.apache.lucene.search.*; Index: lucene/src/java/org/apache/lucene/queryParser/QueryParserTokenManager.java =================================================================== --- lucene/src/java/org/apache/lucene/queryParser/QueryParserTokenManager.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/queryParser/QueryParserTokenManager.java (working copy) @@ -5,7 +5,7 @@ import java.util.List; import java.util.Locale; import 
org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.document2.DateTools; +import org.apache.lucene.document.DateTools; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermRangeQuery; Index: lucene/src/java/org/apache/lucene/search/FieldCache.java =================================================================== --- lucene/src/java/org/apache/lucene/search/FieldCache.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/search/FieldCache.java (working copy) @@ -24,7 +24,7 @@ import org.apache.lucene.util.NumericUtils; import org.apache.lucene.util.RamUsageEstimator; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.document2.NumericField; // for javadocs +import org.apache.lucene.document.NumericField; import org.apache.lucene.analysis.NumericTokenStream; // for javadocs import org.apache.lucene.util.packed.PackedInts; Index: lucene/src/java/org/apache/lucene/search/FieldCacheRangeFilter.java =================================================================== --- lucene/src/java/org/apache/lucene/search/FieldCacheRangeFilter.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/search/FieldCacheRangeFilter.java (working copy) @@ -24,7 +24,7 @@ import org.apache.lucene.util.NumericUtils; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.document2.NumericField; // for javadocs +import org.apache.lucene.document.NumericField; /** * A range filter built on top of a cached single term field (in {@link FieldCache}). Index: lucene/src/java/org/apache/lucene/search/IndexSearcher.java =================================================================== --- lucene/src/java/org/apache/lucene/search/IndexSearcher.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/search/IndexSearcher.java (working copy) @@ -29,7 +29,7 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; -import org.apache.lucene.document2.Document; +import org.apache.lucene.document.Document; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.IndexReader.ReaderContext; @@ -239,16 +239,10 @@ } /* Sugar for .getIndexReader().document(docID) */ - /* public Document doc(int docID) throws CorruptIndexException, IOException { return reader.document(docID); } - */ - public org.apache.lucene.document2.Document doc2(int docID) throws CorruptIndexException, IOException { - return reader.document2(docID); - } - /* Sugar for .getIndexReader().document(docID, fieldVisitor) */ public void doc(int docID, StoredFieldVisitor fieldVisitor) throws CorruptIndexException, IOException { reader.document(docID, fieldVisitor); Index: lucene/src/java/org/apache/lucene/search/NumericRangeFilter.java =================================================================== --- lucene/src/java/org/apache/lucene/search/NumericRangeFilter.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/search/NumericRangeFilter.java (working copy) @@ -18,7 +18,7 @@ */ import org.apache.lucene.analysis.NumericTokenStream; // for javadocs -import org.apache.lucene.document.NumericField; // for javadocs +import org.apache.lucene.document.NumericField; import org.apache.lucene.util.NumericUtils; // for javadocs /** Index: lucene/src/java/org/apache/lucene/search/NumericRangeQuery.java =================================================================== --- 
lucene/src/java/org/apache/lucene/search/NumericRangeQuery.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/search/NumericRangeQuery.java (working copy) @@ -22,7 +22,7 @@ import java.util.Comparator; import org.apache.lucene.analysis.NumericTokenStream; // for javadocs -import org.apache.lucene.document.NumericField; // for javadocs +import org.apache.lucene.document.NumericField; import org.apache.lucene.util.NumericUtils; import org.apache.lucene.util.ToStringUtils; import org.apache.lucene.index.Terms; Index: lucene/src/java/org/apache/lucene/util/NumericUtils.java =================================================================== --- lucene/src/java/org/apache/lucene/util/NumericUtils.java (revision 1153521) +++ lucene/src/java/org/apache/lucene/util/NumericUtils.java (working copy) @@ -18,7 +18,7 @@ */ import org.apache.lucene.analysis.NumericTokenStream; -import org.apache.lucene.document2.NumericField; +import org.apache.lucene.document.NumericField; import org.apache.lucene.search.NumericRangeFilter; import org.apache.lucene.search.NumericRangeQuery; // for javadocs Index: lucene/src/test-framework/org/apache/lucene/index/DocHelper.java =================================================================== --- lucene/src/test-framework/org/apache/lucene/index/DocHelper.java (revision 1153521) +++ lucene/src/test-framework/org/apache/lucene/index/DocHelper.java (working copy) @@ -26,11 +26,11 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.document2.BinaryField; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.BinaryField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.search.SimilarityProvider; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; Index: lucene/src/test-framework/org/apache/lucene/search/QueryUtils.java =================================================================== --- lucene/src/test-framework/org/apache/lucene/search/QueryUtils.java (revision 1153521) +++ lucene/src/test-framework/org/apache/lucene/search/QueryUtils.java (working copy) @@ -23,7 +23,7 @@ import junit.framework.Assert; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; +import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.IndexWriter; Index: lucene/src/test-framework/org/apache/lucene/util/LineFileDocs.java =================================================================== --- lucene/src/test-framework/org/apache/lucene/util/LineFileDocs.java (revision 1153521) +++ lucene/src/test-framework/org/apache/lucene/util/LineFileDocs.java (working copy) @@ -28,11 +28,11 @@ import java.util.zip.GZIPInputStream; import java.util.Random; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.StringField; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; 
+import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.StringField; +import org.apache.lucene.document.TextField; /** Minimal port of contrib/benchmark's LneDocSource + * DocMaker, so tests can enum docs from a line file created Index: lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java =================================================================== --- lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java (revision 1153521) +++ lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java (working copy) @@ -36,7 +36,7 @@ import java.util.regex.Pattern; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document.FieldType; import org.apache.lucene.index.*; import org.apache.lucene.index.codecs.Codec; import org.apache.lucene.index.codecs.CodecProvider; @@ -1058,14 +1058,14 @@ return dir; } - public static org.apache.lucene.document2.Field newField(String name, String value, FieldType type) { + public static org.apache.lucene.document.Field newField(String name, String value, FieldType type) { return newField(random, name, value, type); } - public static org.apache.lucene.document2.Field newField(Random random, String name, String value, FieldType type) { + public static org.apache.lucene.document.Field newField(Random random, String name, String value, FieldType type) { if (usually(random)) { // most of the time, don't modify the params - return new org.apache.lucene.document2.Field(name, type, value); + return new org.apache.lucene.document.Field(name, type, value); } FieldType newType = new FieldType(type); @@ -1095,7 +1095,7 @@ } */ - return new org.apache.lucene.document2.Field(name, newType, value); + return new org.apache.lucene.document.Field(name, newType, value); } /** return a random Locale from the available locales on the system */ Index: lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java =================================================================== --- lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java (revision 1153521) +++ lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java (working copy) @@ -34,7 +34,7 @@ import java.util.zip.ZipEntry; import java.util.zip.ZipFile; -import org.apache.lucene.document2.Document; +import org.apache.lucene.document.Document; import org.apache.lucene.index.CheckIndex; import org.apache.lucene.index.ConcurrentMergeScheduler; import org.apache.lucene.index.FieldInfos; Index: lucene/src/test/org/apache/lucene/TestDemo.java =================================================================== --- lucene/src/test/org/apache/lucene/TestDemo.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/TestDemo.java (working copy) @@ -21,9 +21,9 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.Term; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.queryParser.ParseException; @@ -72,7 +72,7 @@ assertEquals(1, hits.totalHits); // Iterate through the results: for (int i = 0; i < hits.scoreDocs.length; i++) { - Document hitDoc = isearcher.doc2(hits.scoreDocs[i].doc); + Document hitDoc = 
isearcher.doc(hits.scoreDocs[i].doc); assertEquals(text, hitDoc.get("fieldname")); } Index: lucene/src/test/org/apache/lucene/TestExternalCodecs.java =================================================================== --- lucene/src/test/org/apache/lucene/TestExternalCodecs.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/TestExternalCodecs.java (working copy) @@ -20,7 +20,7 @@ import org.apache.lucene.util.*; import org.apache.lucene.util.Bits; import org.apache.lucene.index.*; -import org.apache.lucene.document2.*; +import org.apache.lucene.document.*; import org.apache.lucene.search.*; import org.apache.lucene.analysis.*; import org.apache.lucene.index.codecs.*; Index: lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java =================================================================== --- lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java (working copy) @@ -31,9 +31,9 @@ import org.apache.lucene.index.MergeScheduler; import org.apache.lucene.index.MergePolicy.OneMerge; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.StringField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.StringField; /** * Holds tests cases to verify external APIs are accessible Index: lucene/src/test/org/apache/lucene/TestSearch.java =================================================================== --- lucene/src/test/org/apache/lucene/TestSearch.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/TestSearch.java (working copy) @@ -26,7 +26,7 @@ import junit.textui.TestRunner; import org.apache.lucene.store.*; -import org.apache.lucene.document2.*; +import org.apache.lucene.document.*; import org.apache.lucene.analysis.*; import org.apache.lucene.index.*; import org.apache.lucene.search.*; @@ -127,7 +127,7 @@ out.println(hits.length + " total results"); for (int i = 0 ; i < hits.length && i < 10; i++) { - Document d = searcher.doc2(hits[i].doc); + Document d = searcher.doc(hits[i].doc); out.println(i + " " + hits[i].score + " " + d.get("contents")); } } Index: lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java =================================================================== --- lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java (working copy) @@ -23,7 +23,7 @@ import java.util.Random; import org.apache.lucene.store.*; -import org.apache.lucene.document2.*; +import org.apache.lucene.document.*; import org.apache.lucene.analysis.*; import org.apache.lucene.index.*; import org.apache.lucene.search.*; @@ -142,7 +142,7 @@ out.println(hits.length + " total results\n"); for (int i = 0 ; i < hits.length; i++) { if ( i < 10 || (i > 94 && i < 105) ) { - Document d = searcher.doc2(hits[i].doc); + Document d = searcher.doc(hits[i].doc); out.println(i + " " + d.get(ID_FIELD)); } } @@ -152,7 +152,7 @@ assertEquals("total results", expectedCount, hits.length); for (int i = 0 ; i < hits.length; i++) { if (i < 10 || (i > 94 && i < 105) ) { - Document d = searcher.doc2(hits[i].doc); + Document d = searcher.doc(hits[i].doc); assertEquals("check " + i, String.valueOf(i), d.get(ID_FIELD)); } } Index: lucene/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java 
=================================================================== --- lucene/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/analysis/TestCachingTokenFilter.java (working copy) @@ -22,8 +22,8 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.MultiFields; import org.apache.lucene.index.DocsAndPositionsEnum; Index: lucene/src/test/org/apache/lucene/document/TestBinaryDocument.java =================================================================== --- lucene/src/test/org/apache/lucene/document/TestBinaryDocument.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/document/TestBinaryDocument.java (working copy) @@ -1,7 +1,12 @@ -package org.apache.lucene.document2; +package org.apache.lucene.document; import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.document.BinaryField; +import org.apache.lucene.document.CompressionTools; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.RandomIndexWriter; @@ -56,7 +61,7 @@ /** open a reader and fetch the document */ IndexReader reader = writer.getReader(); - Document docFromReader = reader.document2(0); + Document docFromReader = reader.document(0); assertTrue(docFromReader != null); /** fetch the binary stored field and compare it's content with the original one */ @@ -95,7 +100,7 @@ /** open a reader and fetch the document */ IndexReader reader = writer.getReader(); - Document docFromReader = reader.document2(0); + Document docFromReader = reader.document(0); assertTrue(docFromReader != null); /** fetch the binary compressed field and compare it's content with the original one */ Index: lucene/src/test/org/apache/lucene/document/TestDateTools.java =================================================================== --- lucene/src/test/org/apache/lucene/document/TestDateTools.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/document/TestDateTools.java (working copy) @@ -1,4 +1,4 @@ -package org.apache.lucene.document2; +package org.apache.lucene.document; import java.text.ParseException; import java.text.SimpleDateFormat; @@ -8,6 +8,7 @@ import java.util.TimeZone; import java.util.Locale; +import org.apache.lucene.document.DateTools; import org.apache.lucene.util.LuceneTestCase; /** Index: lucene/src/test/org/apache/lucene/document/TestDocument.java =================================================================== --- lucene/src/test/org/apache/lucene/document/TestDocument.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/document/TestDocument.java (working copy) @@ -1,5 +1,11 @@ -package org.apache.lucene.document2; +package org.apache.lucene.document; +import org.apache.lucene.document.BinaryField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.StringField; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import 
org.apache.lucene.index.IndexableField; import org.apache.lucene.index.RandomIndexWriter; @@ -171,7 +177,7 @@ ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - doAssert(searcher.doc2(hits[0].doc), true); + doAssert(searcher.doc(hits[0].doc), true); writer.close(); searcher.close(); reader.close(); @@ -249,7 +255,7 @@ assertEquals(3, hits.length); int result = 0; for (int i = 0; i < 3; i++) { - Document doc2 = searcher.doc2(hits[i].doc); + Document doc2 = searcher.doc(hits[i].doc); Field f = (Field) doc2.getField("id"); if (f.stringValue().equals("id1")) result |= 1; else if (f.stringValue().equals("id2")) result |= 2; Index: lucene/src/test/org/apache/lucene/document2/TestBinaryDocument.java =================================================================== --- lucene/src/test/org/apache/lucene/document2/TestBinaryDocument.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/document2/TestBinaryDocument.java (working copy) @@ -1,110 +0,0 @@ -package org.apache.lucene.document2; - -import org.apache.lucene.util.LuceneTestCase; - -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexableField; -import org.apache.lucene.index.RandomIndexWriter; -import org.apache.lucene.store.Directory; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Tests {@link Document} class. 
- */ -public class TestBinaryDocument extends LuceneTestCase { - - String binaryValStored = "this text will be stored as a byte array in the index"; - String binaryValCompressed = "this text will be also stored and compressed as a byte array in the index"; - - public void testBinaryFieldInIndex() - throws Exception - { - FieldType ft = new FieldType(); - ft.setStored(true); - IndexableField binaryFldStored = new BinaryField("binaryStored", binaryValStored.getBytes()); - IndexableField stringFldStored = new Field("stringStored", ft, binaryValStored); - - Document doc = new Document(); - - doc.add(binaryFldStored); - - doc.add(stringFldStored); - - /** test for field count */ - assertEquals(2, doc.fields.size()); - - /** add the doc to a ram index */ - Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); - writer.addDocument(doc); - - /** open a reader and fetch the document */ - IndexReader reader = writer.getReader(); - Document docFromReader = reader.document2(0); - assertTrue(docFromReader != null); - - /** fetch the binary stored field and compare it's content with the original one */ - String binaryFldStoredTest = new String(docFromReader.getBinaryValue("binaryStored")); - assertTrue(binaryFldStoredTest.equals(binaryValStored)); - - /** fetch the string field and compare it's content with the original one */ - String stringFldStoredTest = docFromReader.get("stringStored"); - assertTrue(stringFldStoredTest.equals(binaryValStored)); - - writer.close(); - reader.close(); - - reader = IndexReader.open(dir, false); - /** delete the document from index */ - reader.deleteDocument(0); - assertEquals(0, reader.numDocs()); - - reader.close(); - dir.close(); - } - - public void testCompressionTools() throws Exception { - IndexableField binaryFldCompressed = new BinaryField("binaryCompressed", CompressionTools.compress(binaryValCompressed.getBytes())); - IndexableField stringFldCompressed = new BinaryField("stringCompressed", CompressionTools.compressString(binaryValCompressed)); - - Document doc = new Document(); - - doc.add(binaryFldCompressed); - doc.add(stringFldCompressed); - - /** add the doc to a ram index */ - Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); - writer.addDocument(doc); - - /** open a reader and fetch the document */ - IndexReader reader = writer.getReader(); - Document docFromReader = reader.document2(0); - assertTrue(docFromReader != null); - - /** fetch the binary compressed field and compare it's content with the original one */ - String binaryFldCompressedTest = new String(CompressionTools.decompress(docFromReader.getBinaryValue("binaryCompressed"))); - assertTrue(binaryFldCompressedTest.equals(binaryValCompressed)); - assertTrue(CompressionTools.decompressString(docFromReader.getBinaryValue("stringCompressed")).equals(binaryValCompressed)); - - writer.close(); - reader.close(); - dir.close(); - } -} Index: lucene/src/test/org/apache/lucene/document2/TestDateTools.java =================================================================== --- lucene/src/test/org/apache/lucene/document2/TestDateTools.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/document2/TestDateTools.java (working copy) @@ -1,199 +0,0 @@ -package org.apache.lucene.document2; - -import java.text.ParseException; -import java.text.SimpleDateFormat; -import java.util.Calendar; -import java.util.Date; -import java.util.GregorianCalendar; -import java.util.TimeZone; -import java.util.Locale; - -import 
org.apache.lucene.util.LuceneTestCase; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -public class TestDateTools extends LuceneTestCase { - - public void testStringToDate() throws ParseException { - - Date d = null; - d = DateTools.stringToDate("2004"); - assertEquals("2004-01-01 00:00:00:000", isoFormat(d)); - d = DateTools.stringToDate("20040705"); - assertEquals("2004-07-05 00:00:00:000", isoFormat(d)); - d = DateTools.stringToDate("200407050910"); - assertEquals("2004-07-05 09:10:00:000", isoFormat(d)); - d = DateTools.stringToDate("20040705091055990"); - assertEquals("2004-07-05 09:10:55:990", isoFormat(d)); - - try { - d = DateTools.stringToDate("97"); // no date - fail(); - } catch(ParseException e) { /* expected exception */ } - try { - d = DateTools.stringToDate("200401011235009999"); // no date - fail(); - } catch(ParseException e) { /* expected exception */ } - try { - d = DateTools.stringToDate("aaaa"); // no date - fail(); - } catch(ParseException e) { /* expected exception */ } - - } - - public void testStringtoTime() throws ParseException { - long time = DateTools.stringToTime("197001010000"); - Calendar cal = new GregorianCalendar(); - cal.clear(); - cal.set(1970, 0, 1, // year=1970, month=january, day=1 - 0, 0, 0); // hour, minute, second - cal.set(Calendar.MILLISECOND, 0); - cal.setTimeZone(TimeZone.getTimeZone("GMT")); - assertEquals(cal.getTime().getTime(), time); - cal.set(1980, 1, 2, // year=1980, month=february, day=2 - 11, 5, 0); // hour, minute, second - cal.set(Calendar.MILLISECOND, 0); - time = DateTools.stringToTime("198002021105"); - assertEquals(cal.getTime().getTime(), time); - } - - public void testDateAndTimetoString() throws ParseException { - Calendar cal = new GregorianCalendar(); - cal.clear(); - cal.setTimeZone(TimeZone.getTimeZone("GMT")); - cal.set(2004, 1, 3, // year=2004, month=february(!), day=3 - 22, 8, 56); // hour, minute, second - cal.set(Calendar.MILLISECOND, 333); - - String dateString; - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.YEAR); - assertEquals("2004", dateString); - assertEquals("2004-01-01 00:00:00:000", isoFormat(DateTools.stringToDate(dateString))); - - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.MONTH); - assertEquals("200402", dateString); - assertEquals("2004-02-01 00:00:00:000", isoFormat(DateTools.stringToDate(dateString))); - - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.DAY); - assertEquals("20040203", dateString); - assertEquals("2004-02-03 00:00:00:000", isoFormat(DateTools.stringToDate(dateString))); - - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.HOUR); - assertEquals("2004020322", dateString); - assertEquals("2004-02-03 22:00:00:000", 
isoFormat(DateTools.stringToDate(dateString))); - - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.MINUTE); - assertEquals("200402032208", dateString); - assertEquals("2004-02-03 22:08:00:000", isoFormat(DateTools.stringToDate(dateString))); - - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.SECOND); - assertEquals("20040203220856", dateString); - assertEquals("2004-02-03 22:08:56:000", isoFormat(DateTools.stringToDate(dateString))); - - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.MILLISECOND); - assertEquals("20040203220856333", dateString); - assertEquals("2004-02-03 22:08:56:333", isoFormat(DateTools.stringToDate(dateString))); - - // date before 1970: - cal.set(1961, 2, 5, // year=1961, month=march(!), day=5 - 23, 9, 51); // hour, minute, second - cal.set(Calendar.MILLISECOND, 444); - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.MILLISECOND); - assertEquals("19610305230951444", dateString); - assertEquals("1961-03-05 23:09:51:444", isoFormat(DateTools.stringToDate(dateString))); - - dateString = DateTools.dateToString(cal.getTime(), DateTools.Resolution.HOUR); - assertEquals("1961030523", dateString); - assertEquals("1961-03-05 23:00:00:000", isoFormat(DateTools.stringToDate(dateString))); - - // timeToString: - cal.set(1970, 0, 1, // year=1970, month=january, day=1 - 0, 0, 0); // hour, minute, second - cal.set(Calendar.MILLISECOND, 0); - dateString = DateTools.timeToString(cal.getTime().getTime(), - DateTools.Resolution.MILLISECOND); - assertEquals("19700101000000000", dateString); - - cal.set(1970, 0, 1, // year=1970, month=january, day=1 - 1, 2, 3); // hour, minute, second - cal.set(Calendar.MILLISECOND, 0); - dateString = DateTools.timeToString(cal.getTime().getTime(), - DateTools.Resolution.MILLISECOND); - assertEquals("19700101010203000", dateString); - } - - public void testRound() { - Calendar cal = new GregorianCalendar(); - cal.clear(); - cal.setTimeZone(TimeZone.getTimeZone("GMT")); - cal.set(2004, 1, 3, // year=2004, month=february(!), day=3 - 22, 8, 56); // hour, minute, second - cal.set(Calendar.MILLISECOND, 333); - Date date = cal.getTime(); - assertEquals("2004-02-03 22:08:56:333", isoFormat(date)); - - Date dateYear = DateTools.round(date, DateTools.Resolution.YEAR); - assertEquals("2004-01-01 00:00:00:000", isoFormat(dateYear)); - - Date dateMonth = DateTools.round(date, DateTools.Resolution.MONTH); - assertEquals("2004-02-01 00:00:00:000", isoFormat(dateMonth)); - - Date dateDay = DateTools.round(date, DateTools.Resolution.DAY); - assertEquals("2004-02-03 00:00:00:000", isoFormat(dateDay)); - - Date dateHour = DateTools.round(date, DateTools.Resolution.HOUR); - assertEquals("2004-02-03 22:00:00:000", isoFormat(dateHour)); - - Date dateMinute = DateTools.round(date, DateTools.Resolution.MINUTE); - assertEquals("2004-02-03 22:08:00:000", isoFormat(dateMinute)); - - Date dateSecond = DateTools.round(date, DateTools.Resolution.SECOND); - assertEquals("2004-02-03 22:08:56:000", isoFormat(dateSecond)); - - Date dateMillisecond = DateTools.round(date, DateTools.Resolution.MILLISECOND); - assertEquals("2004-02-03 22:08:56:333", isoFormat(dateMillisecond)); - - // long parameter: - long dateYearLong = DateTools.round(date.getTime(), DateTools.Resolution.YEAR); - assertEquals("2004-01-01 00:00:00:000", isoFormat(new Date(dateYearLong))); - - long dateMillisecondLong = DateTools.round(date.getTime(), DateTools.Resolution.MILLISECOND); - assertEquals("2004-02-03 
22:08:56:333", isoFormat(new Date(dateMillisecondLong))); - } - - private String isoFormat(Date date) { - SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss:SSS", Locale.US); - sdf.setTimeZone(TimeZone.getTimeZone("GMT")); - return sdf.format(date); - } - - public void testDateToolsUTC() throws Exception { - // Sun, 30 Oct 2005 00:00:00 +0000 -- the last second of 2005's DST in Europe/London - long time = 1130630400; - try { - TimeZone.setDefault(TimeZone.getTimeZone(/* "GMT" */ "Europe/London")); - String d1 = DateTools.dateToString(new Date(time*1000), DateTools.Resolution.MINUTE); - String d2 = DateTools.dateToString(new Date((time+3600)*1000), DateTools.Resolution.MINUTE); - assertFalse("different times", d1.equals(d2)); - assertEquals("midnight", DateTools.stringToTime(d1), time*1000); - assertEquals("later", DateTools.stringToTime(d2), (time+3600)*1000); - } finally { - TimeZone.setDefault(null); - } - } - -} \ No newline at end of file Index: lucene/src/test/org/apache/lucene/document2/TestDocument.java =================================================================== --- lucene/src/test/org/apache/lucene/document2/TestDocument.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/document2/TestDocument.java (working copy) @@ -1,282 +0,0 @@ -package org.apache.lucene.document2; - -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexableField; -import org.apache.lucene.index.RandomIndexWriter; -import org.apache.lucene.index.Term; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.ScoreDoc; -import org.apache.lucene.search.TermQuery; -import org.apache.lucene.store.Directory; -import org.apache.lucene.util.LuceneTestCase; - -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Tests {@link Document} class. 
- */ -public class TestDocument extends LuceneTestCase { - - String binaryVal = "this text will be stored as a byte array in the index"; - String binaryVal2 = "this text will be also stored as a byte array in the index"; - - public void testBinaryField() throws Exception { - Document doc = new Document(); - - FieldType ft = new FieldType(); - ft.setStored(true); - IndexableField stringFld = new Field("string", ft, binaryVal); - IndexableField binaryFld = new BinaryField("binary", binaryVal.getBytes()); - IndexableField binaryFld2 = new BinaryField("binary", binaryVal2.getBytes()); - - doc.add(stringFld); - doc.add(binaryFld); - - assertEquals(2, doc.fields.size()); - - assertTrue(binaryFld.binaryValue(null) != null); - assertTrue(binaryFld.stored()); - assertFalse(binaryFld.indexed()); - assertFalse(binaryFld.tokenized()); - - String binaryTest = new String(doc.getBinaryValue("binary")); - assertTrue(binaryTest.equals(binaryVal)); - - String stringTest = doc.get("string"); - assertTrue(binaryTest.equals(stringTest)); - - doc.add(binaryFld2); - - assertEquals(3, doc.fields.size()); - - byte[][] binaryTests = doc.getBinaryValues("binary"); - - assertEquals(2, binaryTests.length); - - binaryTest = new String(binaryTests[0]); - String binaryTest2 = new String(binaryTests[1]); - - assertFalse(binaryTest.equals(binaryTest2)); - - assertTrue(binaryTest.equals(binaryVal)); - assertTrue(binaryTest2.equals(binaryVal2)); - - doc.removeField("string"); - assertEquals(2, doc.fields.size()); - - doc.removeFields("binary"); - assertEquals(0, doc.fields.size()); - } - - /** - * Tests {@link Document#removeField(String)} method for a brand new Document - * that has not been indexed yet. - * - * @throws Exception on error - */ - public void testRemoveForNewDocument() throws Exception { - Document doc = makeDocumentWithFields(); - assertEquals(8, doc.fields.size()); - doc.removeFields("keyword"); - assertEquals(6, doc.fields.size()); - doc.removeFields("doesnotexists"); // removing non-existing fields is - // siltenlty ignored - doc.removeFields("keyword"); // removing a field more than once - assertEquals(6, doc.fields.size()); - doc.removeField("text"); - assertEquals(5, doc.fields.size()); - doc.removeField("text"); - assertEquals(4, doc.fields.size()); - doc.removeField("text"); - assertEquals(4, doc.fields.size()); - doc.removeField("doesnotexists"); // removing non-existing fields is - // siltenlty ignored - assertEquals(4, doc.fields.size()); - doc.removeFields("unindexed"); - assertEquals(2, doc.fields.size()); - doc.removeFields("unstored"); - assertEquals(0, doc.fields.size()); - doc.removeFields("doesnotexists"); // removing non-existing fields is - // siltenlty ignored - assertEquals(0, doc.fields.size()); - } - - public void testConstructorExceptions() { - FieldType ft = new FieldType(); - ft.setStored(true); - new Field("name", ft, "value"); // okay - new StringField("name", "value"); // okay - try { - new Field("name", new FieldType(), "value"); - fail(); - } catch (IllegalArgumentException e) { - // expected exception - } - new Field("name", ft, "value"); // okay - try { - FieldType ft2 = new FieldType(); - ft2.setStored(true); - ft2.setStoreTermVectors(true); - new Field("name", ft2, "value"); - fail(); - } catch (IllegalArgumentException e) { - // expected exception - } - } - - /** - * Tests {@link Document#getValues(String)} method for a brand new Document - * that has not been indexed yet. 
- * - * @throws Exception on error - */ - public void testGetValuesForNewDocument() throws Exception { - doAssert(makeDocumentWithFields(), false); - } - - /** - * Tests {@link Document#getValues(String)} method for a Document retrieved - * from an index. - * - * @throws Exception on error - */ - public void testGetValuesForIndexedDocument() throws Exception { - Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); - writer.addDocument(makeDocumentWithFields()); - IndexReader reader = writer.getReader(); - - IndexSearcher searcher = newSearcher(reader); - - // search for something that does exists - Query query = new TermQuery(new Term("keyword", "test1")); - - // ensure that queries return expected results without DateFilter first - ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; - assertEquals(1, hits.length); - - doAssert(searcher.doc2(hits[0].doc), true); - writer.close(); - searcher.close(); - reader.close(); - dir.close(); - } - - private Document makeDocumentWithFields() { - Document doc = new Document(); - FieldType stored = new FieldType(); - stored.setStored(true); - doc.add(new Field("keyword", StringField.TYPE_STORED, "test1")); - doc.add(new Field("keyword", StringField.TYPE_STORED, "test2")); - doc.add(new Field("text", TextField.TYPE_STORED, "test1")); - doc.add(new Field("text", TextField.TYPE_STORED, "test2")); - doc.add(new Field("unindexed", stored, "test1")); - doc.add(new Field("unindexed", stored, "test2")); - doc - .add(new TextField("unstored", "test1")); - doc - .add(new TextField("unstored", "test2")); - return doc; - } - - private void doAssert(Document doc, boolean fromIndex) { - IndexableField[] keywordFieldValues = doc.getFields("keyword"); - IndexableField[] textFieldValues = doc.getFields("text"); - IndexableField[] unindexedFieldValues = doc.getFields("unindexed"); - IndexableField[] unstoredFieldValues = doc.getFields("unstored"); - - assertTrue(keywordFieldValues.length == 2); - assertTrue(textFieldValues.length == 2); - assertTrue(unindexedFieldValues.length == 2); - // this test cannot work for documents retrieved from the index - // since unstored fields will obviously not be returned - if (!fromIndex) { - assertTrue(unstoredFieldValues.length == 2); - } - - assertTrue(keywordFieldValues[0].stringValue().equals("test1")); - assertTrue(keywordFieldValues[1].stringValue().equals("test2")); - assertTrue(textFieldValues[0].stringValue().equals("test1")); - assertTrue(textFieldValues[1].stringValue().equals("test2")); - assertTrue(unindexedFieldValues[0].stringValue().equals("test1")); - assertTrue(unindexedFieldValues[1].stringValue().equals("test2")); - // this test cannot work for documents retrieved from the index - // since unstored fields will obviously not be returned - if (!fromIndex) { - assertTrue(unstoredFieldValues[0].stringValue().equals("test1")); - assertTrue(unstoredFieldValues[1].stringValue().equals("test2")); - } - } - - public void testFieldSetValue() throws Exception { - - Field field = new Field("id", StringField.TYPE_STORED, "id1"); - Document doc = new Document(); - doc.add(field); - doc.add(new Field("keyword", StringField.TYPE_STORED, "test")); - - Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random, dir); - writer.addDocument(doc); - field.setValue("id2"); - writer.addDocument(doc); - field.setValue("id3"); - writer.addDocument(doc); - - IndexReader reader = writer.getReader(); - IndexSearcher searcher = newSearcher(reader); - - Query 
query = new TermQuery(new Term("keyword", "test")); - - // ensure that queries return expected results without DateFilter first - ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; - assertEquals(3, hits.length); - int result = 0; - for (int i = 0; i < 3; i++) { - Document doc2 = searcher.doc2(hits[i].doc); - Field f = (Field) doc2.getField("id"); - if (f.stringValue().equals("id1")) result |= 1; - else if (f.stringValue().equals("id2")) result |= 2; - else if (f.stringValue().equals("id3")) result |= 4; - else fail("unexpected id field"); - } - writer.close(); - searcher.close(); - reader.close(); - dir.close(); - assertEquals("did not see all IDs", 7, result); - } - - public void testFieldSetValueChangeBinary() { - Field field1 = new BinaryField("field1", new byte[0]); - Field field2 = new Field("field2", TextField.TYPE_STORED, ""); - try { - field1.setValue("abc"); - fail("did not hit expected exception"); - } catch (IllegalArgumentException iae) { - // expected - } - try { - field2.setValue(new byte[0]); - fail("did not hit expected exception"); - } catch (IllegalArgumentException iae) { - // expected - } - } -} Index: lucene/src/test/org/apache/lucene/index/Test2BTerms.java =================================================================== --- lucene/src/test/org/apache/lucene/index/Test2BTerms.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/index/Test2BTerms.java (working copy) @@ -22,7 +22,7 @@ import org.apache.lucene.search.*; import org.apache.lucene.analysis.*; import org.apache.lucene.analysis.tokenattributes.*; -import org.apache.lucene.document2.*; +import org.apache.lucene.document.*; import org.apache.lucene.index.codecs.CodecProvider; import java.io.File; import java.io.IOException; Index: lucene/src/test/org/apache/lucene/index/TestAddIndexes.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestAddIndexes.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/index/TestAddIndexes.java (working copy) @@ -23,11 +23,11 @@ import java.util.List; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.StringField; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.StringField; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.index.codecs.CodecProvider; import org.apache.lucene.index.codecs.mocksep.MockSepCodec; Index: lucene/src/test/org/apache/lucene/index/TestAtomicUpdate.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestAtomicUpdate.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/index/TestAtomicUpdate.java (working copy) @@ -18,7 +18,7 @@ import org.apache.lucene.util.*; import org.apache.lucene.store.*; -import org.apache.lucene.document2.*; +import org.apache.lucene.document.*; import org.apache.lucene.analysis.MockAnalyzer; import java.util.Random; Index: lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (revision 
1153521) +++ lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (working copy) @@ -27,11 +27,11 @@ import java.util.Random; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; -import org.apache.lucene.document2.NumericField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.NumericField; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.search.DefaultSimilarity; import org.apache.lucene.search.DocIdSetIterator; @@ -267,7 +267,7 @@ final int hitCount = hits.length; assertEquals("wrong number of hits", expectedCount, hitCount); for(int i=0;i fields = d.getFields(); if (d.getField("content3") == null) { final int numFields = 5; @@ -319,7 +319,7 @@ // First document should be #21 since it's norm was // increased: - Document d = searcher.getIndexReader().document2(hits[0].doc); + Document d = searcher.getIndexReader().document(hits[0].doc); assertEquals("didn't get the right document first", "21", d.get("id")); doTestHits(hits, 34, searcher.getIndexReader()); @@ -365,7 +365,7 @@ // make sure searching sees right # hits IndexSearcher searcher = new IndexSearcher(dir, true); ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs; - Document d = searcher.getIndexReader().document2(hits[0].doc); + Document d = searcher.getIndexReader().document(hits[0].doc); assertEquals("wrong first document", "21", d.get("id")); doTestHits(hits, 44, searcher.getIndexReader()); searcher.close(); @@ -384,7 +384,7 @@ searcher = new IndexSearcher(dir, true); hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs; assertEquals("wrong number of hits", 43, hits.length); - d = searcher.doc2(hits[0].doc); + d = searcher.doc(hits[0].doc); assertEquals("wrong first document", "22", d.get("id")); doTestHits(hits, 43, searcher.getIndexReader()); searcher.close(); @@ -397,7 +397,7 @@ searcher = new IndexSearcher(dir, true); hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs; assertEquals("wrong number of hits", 43, hits.length); - d = searcher.doc2(hits[0].doc); + d = searcher.doc(hits[0].doc); doTestHits(hits, 43, searcher.getIndexReader()); assertEquals("wrong first document", "22", d.get("id")); searcher.close(); @@ -413,7 +413,7 @@ IndexSearcher searcher = new IndexSearcher(dir, true); ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs; assertEquals("wrong number of hits", 34, hits.length); - Document d = searcher.doc2(hits[0].doc); + Document d = searcher.doc(hits[0].doc); assertEquals("wrong first document", "21", d.get("id")); searcher.close(); @@ -429,7 +429,7 @@ searcher = new IndexSearcher(dir, true); hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs; assertEquals("wrong number of hits", 33, hits.length); - d = searcher.doc2(hits[0].doc); + d = searcher.doc(hits[0].doc); assertEquals("wrong first document", "22", d.get("id")); doTestHits(hits, 33, searcher.getIndexReader()); searcher.close(); @@ -442,7 +442,7 @@ searcher = new IndexSearcher(dir, true); hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 
1000).scoreDocs; assertEquals("wrong number of hits", 33, hits.length); - d = searcher.doc2(hits[0].doc); + d = searcher.doc(hits[0].doc); assertEquals("wrong first document", "22", d.get("id")); doTestHits(hits, 33, searcher.getIndexReader()); searcher.close(); @@ -683,12 +683,12 @@ for (int id=10; id<15; id++) { ScoreDoc[] hits = searcher.search(NumericRangeQuery.newIntRange("trieInt", 4, Integer.valueOf(id), Integer.valueOf(id), true, true), 100).scoreDocs; assertEquals("wrong number of hits", 1, hits.length); - Document d = searcher.doc2(hits[0].doc); + Document d = searcher.doc(hits[0].doc); assertEquals(String.valueOf(id), d.get("id")); hits = searcher.search(NumericRangeQuery.newLongRange("trieLong", 4, Long.valueOf(id), Long.valueOf(id), true, true), 100).scoreDocs; assertEquals("wrong number of hits", 1, hits.length); - d = searcher.doc2(hits[0].doc); + d = searcher.doc(hits[0].doc); assertEquals(String.valueOf(id), d.get("id")); } Index: lucene/src/test/org/apache/lucene/index/TestBinaryTerms.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestBinaryTerms.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/index/TestBinaryTerms.java (working copy) @@ -19,10 +19,10 @@ import java.io.IOException; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.codecs.CodecProvider; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.TermQuery; @@ -67,7 +67,7 @@ bytes.length = 2; TopDocs docs = is.search(new TermQuery(new Term("bytes", bytes)), 5); assertEquals(1, docs.totalHits); - assertEquals("" + i, is.doc2(docs.scoreDocs[0].doc).get("id")); + assertEquals("" + i, is.doc(docs.scoreDocs[0].doc).get("id")); } is.close(); Index: lucene/src/test/org/apache/lucene/index/TestCheckIndex.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestCheckIndex.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/index/TestCheckIndex.java (working copy) @@ -26,10 +26,10 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.store.Directory; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.util.Constants; public class TestCheckIndex extends LuceneTestCase { Index: lucene/src/test/org/apache/lucene/index/TestCodecs.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestCodecs.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/index/TestCodecs.java (working copy) @@ -22,10 +22,10 @@ import java.util.HashSet; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import 
org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.codecs.CodecProvider; import org.apache.lucene.index.codecs.FieldsConsumer; import org.apache.lucene.index.codecs.FieldsProducer; Index: lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java (working copy) @@ -19,10 +19,10 @@ import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.util.LuceneTestCase; Index: lucene/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java (working copy) @@ -20,11 +20,11 @@ import java.io.IOException; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.BinaryField; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.BinaryField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; import org.junit.Test; Index: lucene/src/test/org/apache/lucene/index/TestCrash.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestCrash.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/index/TestCrash.java (working copy) @@ -24,9 +24,9 @@ import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.store.NoLockFactory; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; public class TestCrash extends LuceneTestCase { Index: lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java (working copy) @@ -24,9 +24,9 @@ import java.util.Collection; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import 
org.apache.lucene.document2.Field; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; Index: lucene/src/test/org/apache/lucene/index/TestDirectoryReader.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestDirectoryReader.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/index/TestDirectoryReader.java (working copy) @@ -20,10 +20,10 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; @@ -81,10 +81,10 @@ sis.read(dir); IndexReader reader = openReader(); assertTrue(reader != null); - Document newDoc1 = reader.document2(0); + Document newDoc1 = reader.document(0); assertTrue(newDoc1 != null); assertTrue(DocHelper.numFields(newDoc1) == DocHelper.numFields(doc1) - DocHelper.unstored.size()); - Document newDoc2 = reader.document2(1); + Document newDoc2 = reader.document(1); assertTrue(newDoc2 != null); assertTrue(DocHelper.numFields(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size()); TermFreqVector vector = reader.getTermFreqVector(0, DocHelper.TEXT_FIELD_2_KEY); Index: lucene/src/test/org/apache/lucene/index/TestDoc.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestDoc.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/index/TestDoc.java (working copy) @@ -30,9 +30,9 @@ import junit.textui.TestRunner; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.store.Directory; @@ -224,7 +224,7 @@ SegmentReader reader = SegmentReader.get(true, si, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR); for (int i = 0; i < reader.numDocs(); i++) - out.println(reader.document2(i)); + out.println(reader.document(i)); FieldsEnum fis = reader.fields().iterator(); String field = fis.next(); Index: lucene/src/test/org/apache/lucene/index/TestDocTermOrds.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestDocTermOrds.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/index/TestDocTermOrds.java (working copy) @@ -25,11 +25,11 @@ import java.util.Set; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.NumericField; -import 
org.apache.lucene.document2.StringField; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.NumericField; +import org.apache.lucene.document.StringField; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.DocTermOrds.TermOrdsIterator; import org.apache.lucene.index.codecs.BlockTermsReader; import org.apache.lucene.index.codecs.BlockTermsWriter; Index: lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java (working copy) @@ -21,9 +21,9 @@ import java.util.Arrays; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.IndexReader.ReaderContext; import org.apache.lucene.store.Directory; Index: lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java (working copy) @@ -28,10 +28,10 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.PayloadAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.store.Directory; import org.apache.lucene.util.AttributeSource; import org.apache.lucene.util.BytesRef; @@ -68,7 +68,7 @@ //After adding the document, we should be able to read it back in SegmentReader reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR); assertTrue(reader != null); - Document doc = reader.document2(0); + Document doc = reader.document(0); assertTrue(doc != null); //System.out.println("Document: " + doc); Index: lucene/src/test/org/apache/lucene/index/TestFieldInfos.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestFieldInfos.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/index/TestFieldInfos.java (working copy) @@ -19,7 +19,7 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util._TestUtil; -import org.apache.lucene.document2.Document; +import org.apache.lucene.document.Document; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IndexOutput; Index: lucene/src/test/org/apache/lucene/index/TestFieldsReader.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestFieldsReader.java (revision 1153521) +++ 
lucene/src/test/org/apache/lucene/index/TestFieldsReader.java (working copy) @@ -22,9 +22,9 @@ import java.util.*; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.NumericField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.NumericField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.search.FieldCache; import org.apache.lucene.store.BufferedIndexInput; @@ -38,7 +38,7 @@ public class TestFieldsReader extends LuceneTestCase { private static Directory dir; - private static org.apache.lucene.document2.Document testDoc = new org.apache.lucene.document2.Document(); + private static Document testDoc = new Document(); private static FieldInfos fieldInfos = null; @BeforeClass @@ -67,7 +67,7 @@ assertTrue(dir != null); assertTrue(fieldInfos != null); IndexReader reader = IndexReader.open(dir); - Document doc = reader.document2(0); + Document doc = reader.document(0); assertTrue(doc != null); assertTrue(doc.getField(DocHelper.TEXT_FIELD_1_KEY) != null); @@ -90,7 +90,7 @@ assertTrue(field.storeTermVectorOffsets() == false); assertTrue(field.storeTermVectorPositions() == false); - Document2StoredFieldVisitor visitor = new Document2StoredFieldVisitor(DocHelper.TEXT_FIELD_3_KEY); + DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(DocHelper.TEXT_FIELD_3_KEY); reader.document(0, visitor); final List fields = visitor.getDocument().getFields(); assertEquals(1, fields.size()); @@ -203,13 +203,13 @@ for(int i=0;i<2;i++) { try { - reader.document2(i); + reader.document(i); } catch (IOException ioe) { // expected exc = true; } try { - reader.document2(i); + reader.document(i); } catch (IOException ioe) { // expected exc = true; @@ -231,8 +231,8 @@ final Number[] answers = new Number[numDocs]; final NumericField.DataType[] typeAnswers = new NumericField.DataType[numDocs]; for(int id=0;id fieldable1 = doc1.getFields(); List fieldable2 = doc2.getFields(); assertEquals("Different numbers of fields for doc " + i + ".", fieldable1.size(), fieldable2.size()); Index: lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java (working copy) @@ -21,9 +21,9 @@ import org.apache.lucene.search.DefaultSimilarity; import org.apache.lucene.search.Similarity; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.store.Directory; import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.util.LuceneTestCase; Index: lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java (working copy) @@ -24,11 +24,11 @@ import java.util.concurrent.atomic.AtomicInteger; import 
org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.StringField; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.StringField; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.index.SegmentNorms; import org.apache.lucene.search.DefaultSimilarity; Index: lucene/src/test/org/apache/lucene/index/TestIndexReaderDelete.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestIndexReaderDelete.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/index/TestIndexReaderDelete.java (working copy) @@ -20,9 +20,9 @@ import java.io.IOException; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.Directory; import org.apache.lucene.store.MockDirectoryWrapper; Index: lucene/src/test/org/apache/lucene/index/TestIndexReaderOnDiskFull.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestIndexReaderOnDiskFull.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/index/TestIndexReaderOnDiskFull.java (working copy) @@ -20,9 +20,9 @@ import java.io.IOException; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.search.DefaultSimilarity; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.ScoreDoc; Index: lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java (working copy) @@ -30,11 +30,11 @@ import java.util.Set; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.StringField; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.StringField; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.search.DefaultSimilarity; import org.apache.lucene.search.FieldCache; @@ -188,7 +188,7 @@ if (i>0) { int k = i-1; int n = j + k*M; - Document prevItereationDoc = reader.document2(n); + Document prevItereationDoc = 
reader.document(n); assertNotNull(prevItereationDoc); String id = prevItereationDoc.get("id"); assertEquals(k+"_"+j, id); @@ -777,7 +777,7 @@ new TermQuery(new Term("field1", "a" + rnd.nextInt(refreshed.maxDoc()))), null, 1000).scoreDocs; if (hits.length > 0) { - searcher.doc2(hits[0].doc); + searcher.doc(hits[0].doc); } searcher.close(); if (refreshed != r) { @@ -1105,7 +1105,7 @@ assertTrue(r1 != r3); r1.close(); try { - r1.document2(2); + r1.document(2); fail("did not hit exception"); } catch (AlreadyClosedException ace) { // expected Index: lucene/src/test/org/apache/lucene/index/TestIndexWriter.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestIndexWriter.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/index/TestIndexWriter.java (working copy) @@ -42,11 +42,11 @@ import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -import org.apache.lucene.document2.BinaryField; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.BinaryField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldCache; @@ -1011,7 +1011,7 @@ w.close(); IndexReader ir = IndexReader.open(dir, true); - Document doc2 = ir.document2(0); + Document doc2 = ir.document(0); IndexableField f2 = doc2.getField("binary"); b = f2.binaryValue(null).bytes; assertTrue(b != null); @@ -1282,20 +1282,20 @@ w.close(); IndexReader ir = IndexReader.open(dir, true); - Document doc2 = ir.document2(0); + Document doc2 = ir.document(0); IndexableField f3 = doc2.getField("binary"); b = f3.binaryValue(null).bytes; assertTrue(b != null); assertEquals(17, b.length, 17); assertEquals(87, b[0]); - assertTrue(ir.document2(0).getField("binary").binaryValue(null)!=null); - assertTrue(ir.document2(1).getField("binary").binaryValue(null)!=null); - assertTrue(ir.document2(2).getField("binary").binaryValue(null)!=null); + assertTrue(ir.document(0).getField("binary").binaryValue(null)!=null); + assertTrue(ir.document(1).getField("binary").binaryValue(null)!=null); + assertTrue(ir.document(2).getField("binary").binaryValue(null)!=null); - assertEquals("value", ir.document2(0).get("string")); - assertEquals("value", ir.document2(1).get("string")); - assertEquals("value", ir.document2(2).get("string")); + assertEquals("value", ir.document(0).get("string")); + assertEquals("value", ir.document(1).get("string")); + assertEquals("value", ir.document(2).get("string")); // test that the terms were indexed. 
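Every hunk in this file and in the surrounding test sources applies the same mechanical rename: the org.apache.lucene.document2 package becomes org.apache.lucene.document again, and the doc2/document2 stored-document accessors on IndexSearcher and IndexReader become doc/document. As a hedged sketch only (not part of the patch), the renamed calls as the updated tests use them look roughly like the following; the class name, the "id" field, and the value "42" are invented for illustration.

import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;

class RenamedApiSketch {
  // Returns the stored "id" field of the first hit; field name and value are examples.
  static String firstId(IndexReader reader) throws Exception {
    IndexSearcher searcher = new IndexSearcher(reader);
    ScoreDoc[] hits = searcher.search(new TermQuery(new Term("id", "42")), 10).scoreDocs;
    Document stored = searcher.doc(hits[0].doc);   // was searcher.doc2(hits[0].doc)
    searcher.close();
    return stored.get("id");                       // stored-field access is unchanged
  }
}
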
@@ -1324,7 +1324,7 @@ doc.add(newField("zzz", "1 2 3", customType)); w.addDocument(doc); IndexReader r = w.getReader(); - Document doc2 = r.document2(0); + Document doc2 = r.document(0); Iterator it = doc2.getFields().iterator(); assertTrue(it.hasNext()); Field f = (Field) it.next(); @@ -1668,7 +1668,7 @@ } TopDocs hits = s.search(new TermQuery(new Term("id", testID)), 1); assertEquals(1, hits.totalHits); - Document doc = r.document2(hits.scoreDocs[0].doc); + Document doc = r.document(hits.scoreDocs[0].doc); Document docExp = docs.get(testID); for(int i=0;i= 1); - Document result = reader.document2(0); + Document result = reader.document(0); assertTrue(result != null); //There are 2 unstored fields on the document that are not preserved across writing assertTrue(DocHelper.numFields(result) == DocHelper.numFields(testDoc) - DocHelper.unstored.size()); Index: lucene/src/test/org/apache/lucene/index/TestSegmentTermDocs.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestSegmentTermDocs.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/index/TestSegmentTermDocs.java (working copy) @@ -20,8 +20,8 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.store.Directory; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.TextField; import org.apache.lucene.util.BytesRef; import java.io.IOException; Index: lucene/src/test/org/apache/lucene/index/TestSegmentTermEnum.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestSegmentTermEnum.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/index/TestSegmentTermEnum.java (working copy) @@ -23,8 +23,8 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util._TestUtil; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.Directory; Index: lucene/src/test/org/apache/lucene/index/TestSizeBoundedOptimize.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestSizeBoundedOptimize.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/index/TestSizeBoundedOptimize.java (working copy) @@ -19,7 +19,7 @@ import java.io.IOException; -import org.apache.lucene.document2.Document; +import org.apache.lucene.document.Document; import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.util.LuceneTestCase; Index: lucene/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/index/TestSnapshotDeletionPolicy.java (working copy) @@ -22,9 +22,9 @@ import java.util.Random; import java.io.IOException; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.FieldType; 
+import org.apache.lucene.document.TextField; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IndexInput; import org.apache.lucene.analysis.MockAnalyzer; Index: lucene/src/test/org/apache/lucene/index/TestStressAdvance.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestStressAdvance.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/index/TestStressAdvance.java (working copy) @@ -23,7 +23,7 @@ import org.apache.lucene.util.*; import org.apache.lucene.store.*; -import org.apache.lucene.document2.*; +import org.apache.lucene.document.*; public class TestStressAdvance extends LuceneTestCase { @@ -62,7 +62,7 @@ final IndexReader r = w.getReader(); final int[] idToDocID = new int[r.maxDoc()]; for(int docID=0;docID? " + score1, score > score1); @@ -394,10 +394,10 @@ float score2 = h[2].score; float score3 = h[3].score; - String doc0 = s.doc2(h[0].doc).get("id"); - String doc1 = s.doc2(h[1].doc).get("id"); - String doc2 = s.doc2(h[2].doc).get("id"); - String doc3 = s.doc2(h[3].doc).get("id"); + String doc0 = s.doc(h[0].doc).get("id"); + String doc1 = s.doc(h[1].doc).get("id"); + String doc2 = s.doc(h[2].doc).get("id"); + String doc3 = s.doc(h[3].doc).get("id"); assertTrue("doc0 should be d2 or d4: " + doc0, doc0.equals("d2") || doc0.equals("d4")); @@ -448,10 +448,10 @@ float score2 = h[2].score; float score3 = h[3].score; - String doc0 = s.doc2(h[0].doc).get("id"); - String doc1 = s.doc2(h[1].doc).get("id"); - String doc2 = s.doc2(h[2].doc).get("id"); - String doc3 = s.doc2(h[3].doc).get("id"); + String doc0 = s.doc(h[0].doc).get("id"); + String doc1 = s.doc(h[1].doc).get("id"); + String doc2 = s.doc(h[2].doc).get("id"); + String doc3 = s.doc(h[3].doc).get("id"); assertEquals("doc0 should be d4: ", "d4", doc0); assertEquals("doc1 should be d3: ", "d3", doc1); @@ -491,7 +491,7 @@ DecimalFormat f = new DecimalFormat("0.000000000"); for (int i = 0; i < h.length; i++) { - Document d = searcher.doc2(h[i].doc); + Document d = searcher.doc(h[i].doc); float score = h[i].score; System.err .println("#" + i + ": " + f.format(score) + " - " + d.get("id")); Index: lucene/src/test/org/apache/lucene/search/TestDocBoost.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestDocBoost.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestDocBoost.java (working copy) @@ -20,7 +20,7 @@ import java.io.IOException; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.*; +import org.apache.lucene.document.*; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; Index: lucene/src/test/org/apache/lucene/search/TestDocIdSet.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestDocIdSet.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestDocIdSet.java (working copy) @@ -24,9 +24,9 @@ import junit.framework.Assert; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.StringField; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.StringField; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import 
org.apache.lucene.index.RandomIndexWriter; Index: lucene/src/test/org/apache/lucene/search/TestElevationComparator.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestElevationComparator.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestElevationComparator.java (working copy) @@ -18,9 +18,9 @@ */ import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.*; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.search.FieldValueHitQueue.Entry; Index: lucene/src/test/org/apache/lucene/search/TestExplanations.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestExplanations.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestExplanations.java (working copy) @@ -20,10 +20,10 @@ import org.apache.lucene.queryParser.QueryParser; import org.apache.lucene.queryParser.ParseException; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.StringField; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.StringField; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; Index: lucene/src/test/org/apache/lucene/search/TestFieldCache.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestFieldCache.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestFieldCache.java (working copy) @@ -17,9 +17,9 @@ */ import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.StringField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.StringField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.RandomIndexWriter; Index: lucene/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestFieldCacheRangeFilter.java (working copy) @@ -24,9 +24,9 @@ import org.apache.lucene.index.Term; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.StringField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.StringField; import org.apache.lucene.store.Directory; import org.junit.Test; Index: lucene/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java 
=================================================================== --- lucene/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestFieldCacheTermsFilter.java (working copy) @@ -19,9 +19,9 @@ import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.StringField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.StringField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.store.Directory; Index: lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestFilteredQuery.java (working copy) @@ -20,9 +20,9 @@ import java.util.BitSet; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; Index: lucene/src/test/org/apache/lucene/search/TestFilteredSearch.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestFilteredSearch.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestFilteredSearch.java (working copy) @@ -21,8 +21,8 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.StringField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.StringField; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.IndexWriter; Index: lucene/src/test/org/apache/lucene/search/TestFuzzyQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestFuzzyQuery.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestFuzzyQuery.java (working copy) @@ -22,9 +22,9 @@ import java.io.IOException; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.MultiReader; import org.apache.lucene.index.RandomIndexWriter; @@ -84,7 +84,7 @@ assertEquals("3 documents should match", 3, hits.length); List order = Arrays.asList("bbbbb","abbbb","aabbb"); for (int i = 0; i < hits.length; i++) { - final String term = searcher.doc2(hits[i].doc).get("field"); + final String term = searcher.doc(hits[i].doc).get("field"); //System.out.println(hits[i].score); assertEquals(order.get(i), term); } @@ -96,7 +96,7 @@ assertEquals("only 2 documents should 
match", 2, hits.length); order = Arrays.asList("bbbbb","abbbb"); for (int i = 0; i < hits.length; i++) { - final String term = searcher.doc2(hits[i].doc).get("field"); + final String term = searcher.doc(hits[i].doc).get("field"); //System.out.println(hits[i].score); assertEquals(order.get(i), term); } @@ -113,43 +113,43 @@ query = new FuzzyQuery(new Term("field", "aaaaa"), FuzzyQuery.defaultMinSimilarity, 0); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(3, hits.length); - assertEquals(searcher.doc2(hits[0].doc).get("field"), ("aaaaa")); + assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaa")); // default allows for up to two edits: - assertEquals(searcher.doc2(hits[1].doc).get("field"), ("aaaab")); - assertEquals(searcher.doc2(hits[2].doc).get("field"), ("aaabb")); + assertEquals(searcher.doc(hits[1].doc).get("field"), ("aaaab")); + assertEquals(searcher.doc(hits[2].doc).get("field"), ("aaabb")); // query similar to a word in the index: query = new FuzzyQuery(new Term("field", "aaaac"), FuzzyQuery.defaultMinSimilarity, 0); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(3, hits.length); - assertEquals(searcher.doc2(hits[0].doc).get("field"), ("aaaaa")); - assertEquals(searcher.doc2(hits[1].doc).get("field"), ("aaaab")); - assertEquals(searcher.doc2(hits[2].doc).get("field"), ("aaabb")); + assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaa")); + assertEquals(searcher.doc(hits[1].doc).get("field"), ("aaaab")); + assertEquals(searcher.doc(hits[2].doc).get("field"), ("aaabb")); // now with prefix query = new FuzzyQuery(new Term("field", "aaaac"), FuzzyQuery.defaultMinSimilarity, 1); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(3, hits.length); - assertEquals(searcher.doc2(hits[0].doc).get("field"), ("aaaaa")); - assertEquals(searcher.doc2(hits[1].doc).get("field"), ("aaaab")); - assertEquals(searcher.doc2(hits[2].doc).get("field"), ("aaabb")); + assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaa")); + assertEquals(searcher.doc(hits[1].doc).get("field"), ("aaaab")); + assertEquals(searcher.doc(hits[2].doc).get("field"), ("aaabb")); query = new FuzzyQuery(new Term("field", "aaaac"), FuzzyQuery.defaultMinSimilarity, 2); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(3, hits.length); - assertEquals(searcher.doc2(hits[0].doc).get("field"), ("aaaaa")); - assertEquals(searcher.doc2(hits[1].doc).get("field"), ("aaaab")); - assertEquals(searcher.doc2(hits[2].doc).get("field"), ("aaabb")); + assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaa")); + assertEquals(searcher.doc(hits[1].doc).get("field"), ("aaaab")); + assertEquals(searcher.doc(hits[2].doc).get("field"), ("aaabb")); query = new FuzzyQuery(new Term("field", "aaaac"), FuzzyQuery.defaultMinSimilarity, 3); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(3, hits.length); - assertEquals(searcher.doc2(hits[0].doc).get("field"), ("aaaaa")); - assertEquals(searcher.doc2(hits[1].doc).get("field"), ("aaaab")); - assertEquals(searcher.doc2(hits[2].doc).get("field"), ("aaabb")); + assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaa")); + assertEquals(searcher.doc(hits[1].doc).get("field"), ("aaaab")); + assertEquals(searcher.doc(hits[2].doc).get("field"), ("aaabb")); query = new FuzzyQuery(new Term("field", "aaaac"), FuzzyQuery.defaultMinSimilarity, 4); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(2, hits.length); - assertEquals(searcher.doc2(hits[0].doc).get("field"), ("aaaaa")); - 
assertEquals(searcher.doc2(hits[1].doc).get("field"), ("aaaab")); + assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaa")); + assertEquals(searcher.doc(hits[1].doc).get("field"), ("aaaab")); query = new FuzzyQuery(new Term("field", "aaaac"), FuzzyQuery.defaultMinSimilarity, 5); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(0, hits.length); @@ -158,25 +158,25 @@ query = new FuzzyQuery(new Term("field", "ddddX"), FuzzyQuery.defaultMinSimilarity, 0); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - assertEquals(searcher.doc2(hits[0].doc).get("field"), ("ddddd")); + assertEquals(searcher.doc(hits[0].doc).get("field"), ("ddddd")); // now with prefix query = new FuzzyQuery(new Term("field", "ddddX"), FuzzyQuery.defaultMinSimilarity, 1); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - assertEquals(searcher.doc2(hits[0].doc).get("field"), ("ddddd")); + assertEquals(searcher.doc(hits[0].doc).get("field"), ("ddddd")); query = new FuzzyQuery(new Term("field", "ddddX"), FuzzyQuery.defaultMinSimilarity, 2); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - assertEquals(searcher.doc2(hits[0].doc).get("field"), ("ddddd")); + assertEquals(searcher.doc(hits[0].doc).get("field"), ("ddddd")); query = new FuzzyQuery(new Term("field", "ddddX"), FuzzyQuery.defaultMinSimilarity, 3); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - assertEquals(searcher.doc2(hits[0].doc).get("field"), ("ddddd")); + assertEquals(searcher.doc(hits[0].doc).get("field"), ("ddddd")); query = new FuzzyQuery(new Term("field", "ddddX"), FuzzyQuery.defaultMinSimilarity, 4); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - assertEquals(searcher.doc2(hits[0].doc).get("field"), ("ddddd")); + assertEquals(searcher.doc(hits[0].doc).get("field"), ("ddddd")); query = new FuzzyQuery(new Term("field", "ddddX"), FuzzyQuery.defaultMinSimilarity, 5); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(0, hits.length); @@ -212,17 +212,17 @@ query = new FuzzyQuery(new Term("field", "aaaaccc"), 0.5f, 0); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - assertEquals(searcher.doc2(hits[0].doc).get("field"), ("aaaaaaa")); + assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaaaa")); // now with prefix query = new FuzzyQuery(new Term("field", "aaaaccc"), 0.5f, 1); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - assertEquals(searcher.doc2(hits[0].doc).get("field"), ("aaaaaaa")); + assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaaaa")); query = new FuzzyQuery(new Term("field", "aaaaccc"), 0.5f, 4); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(1, hits.length); - assertEquals(searcher.doc2(hits[0].doc).get("field"), ("aaaaaaa")); + assertEquals(searcher.doc(hits[0].doc).get("field"), ("aaaaaaa")); query = new FuzzyQuery(new Term("field", "aaaaccc"), 0.5f, 5); hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(0, hits.length); @@ -377,9 +377,9 @@ ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs; assertEquals(3, hits.length); // normally, 'Lucenne' would be the first result as IDF will skew the score. 
- assertEquals("Lucene", reader.document2(hits[0].doc).get("field")); - assertEquals("Lucene", reader.document2(hits[1].doc).get("field")); - assertEquals("Lucenne", reader.document2(hits[2].doc).get("field")); + assertEquals("Lucene", reader.document(hits[0].doc).get("field")); + assertEquals("Lucene", reader.document(hits[1].doc).get("field")); + assertEquals("Lucenne", reader.document(hits[2].doc).get("field")); searcher.close(); reader.close(); directory.close(); @@ -417,7 +417,7 @@ IndexSearcher searcher = newSearcher(r); ScoreDoc[] hits = searcher.search(q, 10).scoreDocs; assertEquals(1, hits.length); - assertEquals("Giga byte", searcher.doc2(hits[0].doc).get("field")); + assertEquals("Giga byte", searcher.doc(hits[0].doc).get("field")); searcher.close(); r.close(); index.close(); @@ -443,28 +443,28 @@ FuzzyQuery q = (FuzzyQuery) qp.parse("fouba~2"); ScoreDoc[] hits = searcher.search(q, 10).scoreDocs; assertEquals(1, hits.length); - assertEquals("foobar", searcher.doc2(hits[0].doc).get("field")); + assertEquals("foobar", searcher.doc(hits[0].doc).get("field")); q = (FuzzyQuery) qp.parse("foubara~2"); hits = searcher.search(q, 10).scoreDocs; assertEquals(1, hits.length); - assertEquals("foobar", searcher.doc2(hits[0].doc).get("field")); + assertEquals("foobar", searcher.doc(hits[0].doc).get("field")); q = (FuzzyQuery) qp.parse("t~3"); hits = searcher.search(q, 10).scoreDocs; assertEquals(1, hits.length); - assertEquals("test", searcher.doc2(hits[0].doc).get("field")); + assertEquals("test", searcher.doc(hits[0].doc).get("field")); q = new FuzzyQuery(new Term("field", "a"), 4f, 0, 50); hits = searcher.search(q, 10).scoreDocs; assertEquals(1, hits.length); - assertEquals("test", searcher.doc2(hits[0].doc).get("field")); + assertEquals("test", searcher.doc(hits[0].doc).get("field")); q = new FuzzyQuery(new Term("field", "a"), 6f, 0, 50); hits = searcher.search(q, 10).scoreDocs; assertEquals(2, hits.length); - assertEquals("test", searcher.doc2(hits[0].doc).get("field")); - assertEquals("foobar", searcher.doc2(hits[1].doc).get("field")); + assertEquals("test", searcher.doc(hits[0].doc).get("field")); + assertEquals("foobar", searcher.doc(hits[1].doc).get("field")); searcher.close(); reader.close(); Index: lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestFuzzyQuery2.java (working copy) @@ -23,9 +23,9 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; Index: lucene/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestMatchAllDocsQuery.java (working copy) @@ -20,9 +20,9 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import 
org.apache.lucene.document2.Field; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.index.IndexReader; @@ -55,9 +55,9 @@ hits = is.search(new MatchAllDocsQuery(), null, 1000).scoreDocs; assertEquals(3, hits.length); - assertEquals("one", is.doc2(hits[0].doc).get("key")); - assertEquals("two", is.doc2(hits[1].doc).get("key")); - assertEquals("three four", is.doc2(hits[2].doc).get("key")); + assertEquals("one", is.doc(hits[0].doc).get("key")); + assertEquals("two", is.doc(hits[1].doc).get("key")); + assertEquals("three four", is.doc(hits[2].doc).get("key")); // assert with norms scoring turned on @@ -65,9 +65,9 @@ hits = is.search(normsQuery, null, 1000).scoreDocs; assertEquals(3, hits.length); - assertEquals("three four", is.doc2(hits[0].doc).get("key")); - assertEquals("two", is.doc2(hits[1].doc).get("key")); - assertEquals("one", is.doc2(hits[2].doc).get("key")); + assertEquals("three four", is.doc(hits[0].doc).get("key")); + assertEquals("two", is.doc(hits[1].doc).get("key")); + assertEquals("one", is.doc(hits[2].doc).get("key")); // change norm & retest is.getIndexReader().setNorm(0, "key", is.getSimilarityProvider().get("key").encodeNormValue(400f)); @@ -75,9 +75,9 @@ hits = is.search(normsQuery, null, 1000).scoreDocs; assertEquals(3, hits.length); - assertEquals("one", is.doc2(hits[0].doc).get("key")); - assertEquals("three four", is.doc2(hits[1].doc).get("key")); - assertEquals("two", is.doc2(hits[2].doc).get("key")); + assertEquals("one", is.doc(hits[0].doc).get("key")); + assertEquals("three four", is.doc(hits[1].doc).get("key")); + assertEquals("two", is.doc(hits[2].doc).get("key")); // some artificial queries to trigger the use of skipTo(): Index: lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestMultiPhraseQuery.java (working copy) @@ -33,10 +33,10 @@ import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.StringField; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.StringField; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.store.RAMDirectory; Index: lucene/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestMultiTermConstantScore.java (working copy) @@ -19,10 +19,10 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.StringField; -import 
org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.StringField; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.IndexWriterConfig; Index: lucene/src/test/org/apache/lucene/search/TestMultiTermQueryRewrites.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestMultiTermQueryRewrites.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestMultiTermQueryRewrites.java (working copy) @@ -18,9 +18,9 @@ */ import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.StringField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.StringField; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.MultiReader; Index: lucene/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestMultiThreadTermVectors.java (working copy) @@ -20,7 +20,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.*; +import org.apache.lucene.document.*; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.TermFreqVector; Index: lucene/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java (working copy) @@ -22,9 +22,9 @@ import java.text.DecimalFormatSymbols; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.NumericField; -import org.apache.lucene.document2.StringField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.NumericField; +import org.apache.lucene.document.StringField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.store.Directory; Index: lucene/src/test/org/apache/lucene/search/TestNot.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestNot.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestNot.java (working copy) @@ -24,8 +24,8 @@ import org.apache.lucene.queryParser.QueryParser; import org.apache.lucene.store.Directory; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.TextField; /** Similarity unit test. 
* Index: lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java (working copy) @@ -18,9 +18,9 @@ */ import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.NumericField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.NumericField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.IndexWriter; @@ -150,9 +150,9 @@ ScoreDoc[] sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count"+type, count, sd.length ); - Document doc=searcher.doc2(sd[0].doc); + Document doc=searcher.doc(sd[0].doc); assertEquals("First doc"+type, 2*distance+startOffset, Integer.parseInt(doc.get(field)) ); - doc=searcher.doc2(sd[sd.length-1].doc); + doc=searcher.doc(sd[sd.length-1].doc); assertEquals("Last doc"+type, (1+count)*distance+startOffset, Integer.parseInt(doc.get(field)) ); if (i>0 && (searcher.getIndexReader().getSequentialSubReaders() == null || @@ -211,9 +211,9 @@ ScoreDoc[] sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count", count, sd.length ); - Document doc=searcher.doc2(sd[0].doc); + Document doc=searcher.doc(sd[0].doc); assertEquals("First doc", startOffset, Integer.parseInt(doc.get(field)) ); - doc=searcher.doc2(sd[sd.length-1].doc); + doc=searcher.doc(sd[sd.length-1].doc); assertEquals("Last doc", (count-1)*distance+startOffset, Integer.parseInt(doc.get(field)) ); q=NumericRangeQuery.newIntRange(field, precisionStep, null, upper, false, true); @@ -221,9 +221,9 @@ sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count", count, sd.length ); - doc=searcher.doc2(sd[0].doc); + doc=searcher.doc(sd[0].doc); assertEquals("First doc", startOffset, Integer.parseInt(doc.get(field)) ); - doc=searcher.doc2(sd[sd.length-1].doc); + doc=searcher.doc(sd[sd.length-1].doc); assertEquals("Last doc", (count-1)*distance+startOffset, Integer.parseInt(doc.get(field)) ); } @@ -252,9 +252,9 @@ ScoreDoc[] sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count", noDocs-count, sd.length ); - Document doc=searcher.doc2(sd[0].doc); + Document doc=searcher.doc(sd[0].doc); assertEquals("First doc", count*distance+startOffset, Integer.parseInt(doc.get(field)) ); - doc=searcher.doc2(sd[sd.length-1].doc); + doc=searcher.doc(sd[sd.length-1].doc); assertEquals("Last doc", (noDocs-1)*distance+startOffset, Integer.parseInt(doc.get(field)) ); q=NumericRangeQuery.newIntRange(field, precisionStep, lower, null, true, false); @@ -262,9 +262,9 @@ sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count", noDocs-count, sd.length ); - doc=searcher.doc2(sd[0].doc); + doc=searcher.doc(sd[0].doc); assertEquals("First doc", count*distance+startOffset, Integer.parseInt(doc.get(field)) ); - doc=searcher.doc2(sd[sd.length-1].doc); + doc=searcher.doc(sd[sd.length-1].doc); assertEquals("Last doc", (noDocs-1)*distance+startOffset, Integer.parseInt(doc.get(field)) ); } @@ -502,9 +502,9 @@ if (topDocs.totalHits==0) continue; ScoreDoc[] sd = topDocs.scoreDocs; assertNotNull(sd); - int last=Integer.parseInt(searcher.doc2(sd[0].doc).get(field)); + int 
last=Integer.parseInt(searcher.doc(sd[0].doc).get(field)); for (int j=1; jact ); last=act; } Index: lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java (working copy) @@ -18,9 +18,9 @@ */ import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.NumericField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.NumericField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.RandomIndexWriter; @@ -150,9 +150,9 @@ ScoreDoc[] sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count"+type, count, sd.length ); - Document doc=searcher.doc2(sd[0].doc); + Document doc=searcher.doc(sd[0].doc); assertEquals("First doc"+type, 2*distance+startOffset, Long.parseLong(doc.get(field)) ); - doc=searcher.doc2(sd[sd.length-1].doc); + doc=searcher.doc(sd[sd.length-1].doc); assertEquals("Last doc"+type, (1+count)*distance+startOffset, Long.parseLong(doc.get(field)) ); if (i>0 && (searcher.getIndexReader().getSequentialSubReaders() == null || @@ -217,9 +217,9 @@ ScoreDoc[] sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count", count, sd.length ); - Document doc=searcher.doc2(sd[0].doc); + Document doc=searcher.doc(sd[0].doc); assertEquals("First doc", startOffset, Long.parseLong(doc.get(field)) ); - doc=searcher.doc2(sd[sd.length-1].doc); + doc=searcher.doc(sd[sd.length-1].doc); assertEquals("Last doc", (count-1)*distance+startOffset, Long.parseLong(doc.get(field)) ); q=NumericRangeQuery.newLongRange(field, precisionStep, null, upper, false, true); @@ -227,9 +227,9 @@ sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count", count, sd.length ); - doc=searcher.doc2(sd[0].doc); + doc=searcher.doc(sd[0].doc); assertEquals("First doc", startOffset, Long.parseLong(doc.get(field)) ); - doc=searcher.doc2(sd[sd.length-1].doc); + doc=searcher.doc(sd[sd.length-1].doc); assertEquals("Last doc", (count-1)*distance+startOffset, Long.parseLong(doc.get(field)) ); } @@ -263,9 +263,9 @@ ScoreDoc[] sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count", noDocs-count, sd.length ); - Document doc=searcher.doc2(sd[0].doc); + Document doc=searcher.doc(sd[0].doc); assertEquals("First doc", count*distance+startOffset, Long.parseLong(doc.get(field)) ); - doc=searcher.doc2(sd[sd.length-1].doc); + doc=searcher.doc(sd[sd.length-1].doc); assertEquals("Last doc", (noDocs-1)*distance+startOffset, Long.parseLong(doc.get(field)) ); q=NumericRangeQuery.newLongRange(field, precisionStep, lower, null, true, false); @@ -273,9 +273,9 @@ sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count", noDocs-count, sd.length ); - doc=searcher.doc2(sd[0].doc); + doc=searcher.doc(sd[0].doc); assertEquals("First doc", count*distance+startOffset, Long.parseLong(doc.get(field)) ); - doc=searcher.doc2(sd[sd.length-1].doc); + doc=searcher.doc(sd[sd.length-1].doc); assertEquals("Last doc", (noDocs-1)*distance+startOffset, Long.parseLong(doc.get(field)) ); } @@ -534,9 +534,9 @@ if (topDocs.totalHits==0) continue; ScoreDoc[] sd = topDocs.scoreDocs; assertNotNull(sd); - long 
last=Long.parseLong(searcher.doc2(sd[0].doc).get(field)); + long last=Long.parseLong(searcher.doc(sd[0].doc).get(field)); for (int j=1; jact ); last=act; } Index: lucene/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestPhrasePrefixQuery.java (working copy) @@ -18,8 +18,8 @@ */ import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.index.IndexReader; Index: lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestPhraseQuery.java (working copy) @@ -20,7 +20,7 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.analysis.*; import org.apache.lucene.analysis.tokenattributes.*; -import org.apache.lucene.document2.*; +import org.apache.lucene.document.*; import org.apache.lucene.index.*; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.queryParser.QueryParser; Index: lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestPositionIncrement.java (working copy) @@ -29,8 +29,8 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.MultiFields; import org.apache.lucene.index.DocsAndPositionsEnum; import org.apache.lucene.index.IndexReader; Index: lucene/src/test/org/apache/lucene/search/TestPrefixFilter.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestPrefixFilter.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestPrefixFilter.java (working copy) @@ -22,8 +22,8 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.StringField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.StringField; /** * Tests {@link PrefixFilter} class. 
Index: lucene/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestPrefixInBooleanQuery.java (working copy) @@ -18,9 +18,9 @@ */ import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.StringField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.StringField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; Index: lucene/src/test/org/apache/lucene/search/TestPrefixQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestPrefixQuery.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestPrefixQuery.java (working copy) @@ -24,8 +24,8 @@ import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.StringField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.StringField; /** * Tests {@link PrefixQuery} class. Index: lucene/src/test/org/apache/lucene/search/TestPrefixRandom.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestPrefixRandom.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestPrefixRandom.java (working copy) @@ -21,9 +21,9 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.StringField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.StringField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; Index: lucene/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestQueryWrapperFilter.java (working copy) @@ -17,8 +17,8 @@ * limitations under the License. 
*/ -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; Index: lucene/src/test/org/apache/lucene/search/TestRegexpQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestRegexpQuery.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestRegexpQuery.java (working copy) @@ -20,8 +20,8 @@ import java.io.IOException; import java.util.Arrays; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; Index: lucene/src/test/org/apache/lucene/search/TestRegexpRandom.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestRegexpRandom.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestRegexpRandom.java (working copy) @@ -23,10 +23,10 @@ import java.util.Locale; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; Index: lucene/src/test/org/apache/lucene/search/TestRegexpRandom2.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestRegexpRandom2.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestRegexpRandom2.java (working copy) @@ -24,9 +24,9 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.StringField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.StringField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.MultiFields; import org.apache.lucene.index.Term; Index: lucene/src/test/org/apache/lucene/search/TestScorerPerf.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestScorerPerf.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestScorerPerf.java (working copy) @@ -12,8 +12,8 @@ import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.Directory; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.StringField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.StringField; /** * Licensed to the Apache Software Foundation (ASF) under one or more Index: lucene/src/test/org/apache/lucene/search/TestSearchWithThreads.java 
=================================================================== --- lucene/src/test/org/apache/lucene/search/TestSearchWithThreads.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestSearchWithThreads.java (working copy) @@ -20,9 +20,9 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; Index: lucene/src/test/org/apache/lucene/search/TestSetNorm.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestSetNorm.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestSetNorm.java (working copy) @@ -21,7 +21,7 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.*; +import org.apache.lucene.document.*; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.IndexWriter; Index: lucene/src/test/org/apache/lucene/search/TestSimilarity.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestSimilarity.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestSimilarity.java (working copy) @@ -28,8 +28,8 @@ import org.apache.lucene.index.Term; import org.apache.lucene.store.Directory; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.TextField; import org.apache.lucene.search.Explanation.IDFExplanation; /** Similarity unit test. 
Index: lucene/src/test/org/apache/lucene/search/TestSimilarityProvider.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestSimilarityProvider.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestSimilarityProvider.java (working copy) @@ -18,9 +18,9 @@ */ import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.FieldInvertState; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriterConfig; Index: lucene/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestSloppyPhraseQuery.java (working copy) @@ -20,10 +20,10 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; Index: lucene/src/test/org/apache/lucene/search/TestSort.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestSort.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/TestSort.java (working copy) @@ -25,11 +25,11 @@ import java.util.concurrent.TimeUnit; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.StringField; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.StringField; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader.AtomicReaderContext; @@ -352,7 +352,7 @@ int lastDocId = 0; boolean fail = false; for (int x = 0; x < n; ++x) { - Document doc2 = searcher.doc2(result[x].doc); + Document doc2 = searcher.doc(result[x].doc); IndexableField[] v = doc2.getFields("tracer"); IndexableField[] v2 = doc2.getFields("tracer2"); for (int j = 0; j < v.length; ++j) { @@ -958,7 +958,7 @@ StringBuilder buff = new StringBuilder(10); int n = result.length; for (int i=0; i 0); Index: lucene/src/test/org/apache/lucene/search/function/TestFieldScoreQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/function/TestFieldScoreQuery.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/function/TestFieldScoreQuery.java (working 
copy) @@ -86,7 +86,7 @@ assertEquals("All docs should be matched!",N_DOCS,h.length); String prevID = "ID"+(N_DOCS+1); // greater than all ids of docs in this test for (int i=0; i 7.0 assertEquals("score of " + id + " shuould be " + expectedScore + " != " + score, expectedScore, score, TEST_SCORE_TOLERANCE_DELTA); } Index: lucene/src/test/org/apache/lucene/search/function/TestOrdValues.java =================================================================== --- lucene/src/test/org/apache/lucene/search/function/TestOrdValues.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/function/TestOrdValues.java (working copy) @@ -78,7 +78,7 @@ : "IC"; // smaller than all ids of docs in this test ("ID0001", etc.) for (int i = 0; i < h.length; i++) { - String resID = s.doc2(h[i].doc).get(ID_FIELD); + String resID = s.doc(h[i].doc).get(ID_FIELD); log(i + ". score=" + h[i].score + " - " + resID); log(s.explain(q, h[i].doc)); if (inOrder) { @@ -123,7 +123,7 @@ ScoreDoc sd[] = td.scoreDocs; for (int i = 0; i < sd.length; i++) { float score = sd[i].score; - String id = s.getIndexReader().document2(sd[i].doc).get(ID_FIELD); + String id = s.getIndexReader().document(sd[i].doc).get(ID_FIELD); log("-------- " + i + ". Explain doc " + id); log(s.explain(q, sd[i].doc)); float expectedScore = N_DOCS - i; Index: lucene/src/test/org/apache/lucene/search/function/TestValueSource.java =================================================================== --- lucene/src/test/org/apache/lucene/search/function/TestValueSource.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/function/TestValueSource.java (working copy) @@ -23,7 +23,7 @@ import org.apache.lucene.analysis.*; import org.apache.lucene.index.*; import org.apache.lucene.index.IndexReader.AtomicReaderContext; -import org.apache.lucene.document2.*; +import org.apache.lucene.document.*; public class TestValueSource extends LuceneTestCase { Index: lucene/src/test/org/apache/lucene/search/payloads/PayloadHelper.java =================================================================== --- lucene/src/test/org/apache/lucene/search/payloads/PayloadHelper.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/payloads/PayloadHelper.java (working copy) @@ -22,9 +22,9 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Payload; import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.TextField; import org.apache.lucene.util.English; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.index.IndexReader; Index: lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/payloads/TestPayloadNearQuery.java (working copy) @@ -24,9 +24,9 @@ import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.PayloadAttribute; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import 
org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.FieldInvertState; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Payload; Index: lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/payloads/TestPayloadTermQuery.java (working copy) @@ -43,10 +43,10 @@ import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.store.Directory; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; -import org.apache.lucene.document2.Field; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import java.io.Reader; import java.io.IOException; Index: lucene/src/test/org/apache/lucene/search/spans/TestBasics.java =================================================================== --- lucene/src/test/org/apache/lucene/search/spans/TestBasics.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/spans/TestBasics.java (working copy) @@ -31,9 +31,9 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.PayloadAttribute; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Payload; import org.apache.lucene.index.RandomIndexWriter; Index: lucene/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/spans/TestFieldMaskingSpanQuery.java (working copy) @@ -21,9 +21,9 @@ import java.util.Set; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; Index: lucene/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java =================================================================== --- lucene/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/spans/TestNearSpansOrdered.java (working copy) @@ -18,8 +18,8 @@ */ import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import 
org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.IndexReader.ReaderContext; Index: lucene/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java =================================================================== --- lucene/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java (working copy) @@ -30,9 +30,9 @@ import org.apache.lucene.analysis.tokenattributes.PayloadAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.IndexReader; Index: lucene/src/test/org/apache/lucene/search/spans/TestSpanFirstQuery.java =================================================================== --- lucene/src/test/org/apache/lucene/search/spans/TestSpanFirstQuery.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/spans/TestSpanFirstQuery.java (working copy) @@ -20,8 +20,8 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; Index: lucene/src/test/org/apache/lucene/search/spans/TestSpanMultiTermQueryWrapper.java =================================================================== --- lucene/src/test/org/apache/lucene/search/spans/TestSpanMultiTermQueryWrapper.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/spans/TestSpanMultiTermQueryWrapper.java (working copy) @@ -17,9 +17,9 @@ * limitations under the License. 
*/ -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.TextField; -import org.apache.lucene.document2.Field; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; Index: lucene/src/test/org/apache/lucene/search/spans/TestSpans.java =================================================================== --- lucene/src/test/org/apache/lucene/search/spans/TestSpans.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/spans/TestSpans.java (working copy) @@ -37,10 +37,10 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.StringField; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.StringField; +import org.apache.lucene.document.TextField; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.ReaderUtil; Index: lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java =================================================================== --- lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/search/spans/TestSpansAdvanced.java (working copy) @@ -24,10 +24,10 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenFilter; import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.StringField; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.StringField; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; @@ -165,7 +165,7 @@ int id = topdocs.scoreDocs[i].doc; float score = topdocs.scoreDocs[i].score; - Document doc = s.doc2(id); + Document doc = s.doc(id); assertEquals(expectedIds[i], doc.get(FIELD_ID)); boolean scoreEq = Math.abs(expectedScores[i] - score) < tolerance; if (!scoreEq) { Index: lucene/src/test/org/apache/lucene/store/TestBufferedIndexInput.java =================================================================== --- lucene/src/test/org/apache/lucene/store/TestBufferedIndexInput.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/store/TestBufferedIndexInput.java (working copy) @@ -27,8 +27,8 @@ import java.util.Random; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; Index: lucene/src/test/org/apache/lucene/store/TestLockFactory.java =================================================================== --- lucene/src/test/org/apache/lucene/store/TestLockFactory.java 
(revision 1153521) +++ lucene/src/test/org/apache/lucene/store/TestLockFactory.java (working copy) @@ -24,8 +24,8 @@ import java.util.Map; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; Index: lucene/src/test/org/apache/lucene/store/TestMultiMMap.java =================================================================== --- lucene/src/test/org/apache/lucene/store/TestMultiMMap.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/store/TestMultiMMap.java (working copy) @@ -21,9 +21,9 @@ import java.util.Random; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.StringField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.StringField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.util.LuceneTestCase; @@ -80,7 +80,7 @@ int numAsserts = atLeast(100); for (int i = 0; i < numAsserts; i++) { int docID = random.nextInt(numDocs); - assertEquals("" + docID, reader.document2(docID).get("docid")); + assertEquals("" + docID, reader.document(docID).get("docid")); } reader.close(); dir.close(); Index: lucene/src/test/org/apache/lucene/store/TestRAMDirectory.java =================================================================== --- lucene/src/test/org/apache/lucene/store/TestRAMDirectory.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/store/TestRAMDirectory.java (working copy) @@ -26,9 +26,9 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util._TestUtil; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.StringField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.StringField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; @@ -88,7 +88,7 @@ // search for all documents for (int i = 0; i < docsToAdd; i++) { - Document doc = searcher.doc2(i); + Document doc = searcher.doc(i); assertTrue(doc.getField("content") != null); } Index: lucene/src/test/org/apache/lucene/store/TestWindowsMMap.java =================================================================== --- lucene/src/test/org/apache/lucene/store/TestWindowsMMap.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/store/TestWindowsMMap.java (working copy) @@ -23,8 +23,8 @@ import org.apache.lucene.util._TestUtil; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.IndexWriterConfig.OpenMode; Index: lucene/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java =================================================================== --- 
lucene/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/util/TestFieldCacheSanityChecker.java (working copy) @@ -17,8 +17,8 @@ */ import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.StringField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.StringField; import org.apache.lucene.search.FieldCache; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.MultiReader; Index: lucene/src/test/org/apache/lucene/util/fst/TestFSTs.java =================================================================== --- lucene/src/test/org/apache/lucene/util/fst/TestFSTs.java (revision 1153521) +++ lucene/src/test/org/apache/lucene/util/fst/TestFSTs.java (working copy) @@ -28,7 +28,7 @@ import java.util.*; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; +import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig;
Index: modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestClassicAnalyzer.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestClassicAnalyzer.java (revision 1153521) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestClassicAnalyzer.java (working copy) @@ -3,8 +3,8 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.standard.ClassicAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.DocsAndPositionsEnum; import org.apache.lucene.index.DocsEnum; import org.apache.lucene.index.IndexReader; Index: modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java (revision 1153521) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/core/TestKeywordAnalyzer.java (working copy) @@ -22,10 +22,10 @@ import
org.apache.lucene.analysis.BaseTokenStreamTestCase; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.StringField; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.StringField; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; Index: modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java (revision 1153521) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/miscellaneous/TestLimitTokenCountAnalyzer.java (working copy) @@ -25,8 +25,8 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.core.WhitespaceAnalyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; Index: modules/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java (revision 1153521) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/query/QueryAutoStopWordAnalyzerTest.java (working copy) @@ -28,9 +28,9 @@ import org.apache.lucene.analysis.core.LetterTokenizer; import org.apache.lucene.analysis.core.WhitespaceAnalyzer; import org.apache.lucene.analysis.core.WhitespaceTokenizer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; Index: modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java (revision 1153521) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java (working copy) @@ -27,9 +27,9 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.TextField; import 
org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.Term; Index: modules/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java (revision 1153521) +++ modules/analysis/common/src/test/org/apache/lucene/analysis/sinks/TestTeeSinkTokenFilter.java (working copy) @@ -27,10 +27,10 @@ import org.apache.lucene.analysis.standard.StandardTokenizer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.TermPositionVector; Index: modules/analysis/common/src/test/org/apache/lucene/collation/CollationTestBase.java =================================================================== --- modules/analysis/common/src/test/org/apache/lucene/collation/CollationTestBase.java (revision 1153521) +++ modules/analysis/common/src/test/org/apache/lucene/collation/CollationTestBase.java (working copy) @@ -37,11 +37,11 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.StringField; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.StringField; +import org.apache.lucene.document.TextField; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IndexableBinaryStringTools; import org.apache.lucene.util.LuceneTestCase; @@ -249,7 +249,7 @@ StringBuilder buff = new StringBuilder(10); int n = result.length; for (int i = 0 ; i < n ; ++i) { - Document doc = searcher.doc2(result[i].doc); + Document doc = searcher.doc(result[i].doc); IndexableField[] v = doc.getFields("tracer"); for (int j = 0 ; j < v.length ; ++j) { buff.append(v[j].stringValue()); Index: modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DocData.java =================================================================== --- modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DocData.java (revision 1153521) +++ modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DocData.java (working copy) @@ -20,7 +20,7 @@ import java.util.Date; import java.util.Properties; -import org.apache.lucene.document2.DateTools; +import org.apache.lucene.document.DateTools; /** Output of parsing (e.g. HTML parsing) of an input document. 
*/ public class DocData { Index: modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DocMaker.java =================================================================== --- modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DocMaker.java (revision 1153521) +++ modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/feeds/DocMaker.java (working copy) @@ -32,12 +32,12 @@ import org.apache.lucene.benchmark.byTask.utils.Config; import org.apache.lucene.benchmark.byTask.utils.Format; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.NumericField; -import org.apache.lucene.document2.StringField; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.NumericField; +import org.apache.lucene.document.StringField; +import org.apache.lucene.document.TextField; /** * Creates {@link Document} objects. Uses a {@link ContentSource} to generate Index: modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AddDocTask.java =================================================================== --- modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AddDocTask.java (revision 1153521) +++ modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AddDocTask.java (working copy) @@ -19,7 +19,7 @@ import org.apache.lucene.benchmark.byTask.PerfRunData; import org.apache.lucene.benchmark.byTask.feeds.DocMaker; -import org.apache.lucene.document2.Document; +import org.apache.lucene.document.Document; /** * Add a document, optionally with of a certain size. Index: modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/BenchmarkHighlighter.java =================================================================== --- modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/BenchmarkHighlighter.java (revision 1153521) +++ modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/BenchmarkHighlighter.java (working copy) @@ -18,7 +18,7 @@ */ import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.document2.Document; +import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexReader; public abstract class BenchmarkHighlighter { Index: modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java =================================================================== --- modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java (revision 1153521) +++ modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java (working copy) @@ -27,7 +27,7 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.benchmark.byTask.PerfRunData; import org.apache.lucene.benchmark.byTask.feeds.QueryMaker; -import org.apache.lucene.document2.Document; +import org.apache.lucene.document.Document; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.MultiFields; @@ -99,7 +99,7 @@ Bits delDocs = MultiFields.getDeletedDocs(reader); for (int m = 0; m < reader.maxDoc(); m++) { if (null == delDocs || ! delDocs.get(m)) { - doc = reader.document2(m); + doc = reader.document(m); res += (doc == null ? 
0 : 1); } } @@ -144,7 +144,7 @@ System.out.println("numDocs() = " + reader.numDocs()); for(int i=0;i> topGroups = c1.getTopGroups(groupOffset, fillFields); + Collection<SearchGroup<BytesRef>> topGroups = c1.getTopGroups(groupOffset, fillFields); if (topGroups == null) { // No groups matched Index: modules/grouping/src/test/org/apache/lucene/search/grouping/TermAllGroupsCollectorTest.java =================================================================== --- modules/grouping/src/test/org/apache/lucene/search/grouping/TermAllGroupsCollectorTest.java (revision 1153521) +++ modules/grouping/src/test/org/apache/lucene/search/grouping/TermAllGroupsCollectorTest.java (working copy) @@ -18,10 +18,10 @@ */ import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; Index: modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java =================================================================== --- modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java (revision 1153521) +++ modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java (working copy) @@ -18,12 +18,12 @@ package org.apache.lucene.search.grouping; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.FieldType; -import org.apache.lucene.document2.NumericField; -import org.apache.lucene.document2.StringField; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.NumericField; +import org.apache.lucene.document.StringField; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; Index: modules/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java =================================================================== --- modules/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java (revision 1153521) +++ modules/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java (working copy) @@ -24,9 +24,9 @@ import java.util.List; import org.apache.lucene.analysis.core.WhitespaceAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.StringField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.StringField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; @@ -388,7 +388,7 @@ SuggestWord sugWord = new SuggestWord(); for (int i = 0; i < stop; i++) { - sugWord.string = indexSearcher.doc2(hits[i].doc).get(F_WORD); // get orig word + sugWord.string = indexSearcher.doc(hits[i].doc).get(F_WORD); // get orig word // don't suggest a word for itself, that would 
be silly if (sugWord.string.equals(word)) { Index: modules/suggest/src/test/org/apache/lucene/search/spell/TestDirectSpellChecker.java =================================================================== --- modules/suggest/src/test/org/apache/lucene/search/spell/TestDirectSpellChecker.java (revision 1153521) +++ modules/suggest/src/test/org/apache/lucene/search/spell/TestDirectSpellChecker.java (working copy) @@ -19,9 +19,9 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.Field; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; Index: modules/suggest/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java =================================================================== --- modules/suggest/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java (revision 1153521) +++ modules/suggest/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java (working copy) @@ -22,8 +22,8 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.store.Directory; Index: modules/suggest/src/test/org/apache/lucene/search/spell/TestSpellChecker.java =================================================================== --- modules/suggest/src/test/org/apache/lucene/search/spell/TestSpellChecker.java (revision 1153521) +++ modules/suggest/src/test/org/apache/lucene/search/spell/TestSpellChecker.java (working copy) @@ -27,8 +27,8 @@ import java.util.concurrent.TimeUnit; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document2.Document; -import org.apache.lucene.document2.TextField; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; Index: solr/data/spellchecker3/segments.gen =================================================================== Cannot display: file marked as a binary type. svn:mime-type = application/octet-stream Property changes on: solr\data\spellchecker3\segments.gen ___________________________________________________________________ Added: svn:mime-type + application/octet-stream Index: solr/data/spellchecker3/segments_1 =================================================================== Cannot display: file marked as a binary type. 
Index: solr/data/spellchecker3/segments.gen
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes on: solr\data\spellchecker3\segments.gen
___________________________________________________________________
Added: svn:mime-type
   + application/octet-stream
Index: solr/data/spellchecker3/segments_1
===================================================================
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes on: solr\data\spellchecker3\segments_1
___________________________________________________________________
Added: svn:mime-type
   + application/octet-stream
Index: solr/src/java/org/apache/solr/core/QuerySenderListener.java
===================================================================
--- solr/src/java/org/apache/solr/core/QuerySenderListener.java (revision 1153521)
+++ solr/src/java/org/apache/solr/core/QuerySenderListener.java (working copy)
@@ -65,7 +65,7 @@
       if (o instanceof DocList) {
         DocList docs = (DocList)o;
         for (DocIterator iter = docs.iterator(); iter.hasNext();) {
-          newSearcher.doc2(iter.nextDoc());
+          newSearcher.doc(iter.nextDoc());
         }
       }
     }
Index: solr/src/java/org/apache/solr/handler/MoreLikeThisHandler.java
===================================================================
--- solr/src/java/org/apache/solr/handler/MoreLikeThisHandler.java (revision 1153521)
+++ solr/src/java/org/apache/solr/handler/MoreLikeThisHandler.java (working copy)
@@ -29,7 +29,7 @@
 import java.util.Map;
 import java.util.regex.Pattern;
-import org.apache.lucene.document2.Document;
+import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queryParser.ParseException;
@@ -342,7 +342,7 @@
   public DocListAndSet getMoreLikeThis( int id, int start, int rows, List filters, List terms, int flags ) throws IOException {
-    Document doc = reader.document2(id);
+    Document doc = reader.document(id);
     rawMLTQuery = mlt.like(id);
     boostedMLTQuery = getBoostedQuery( rawMLTQuery );
     if( terms != null ) {
@@ -391,7 +391,7 @@
       int id = iterator.nextDoc();
       DocListAndSet sim = getMoreLikeThis( id, 0, rows, null, null, flags );
-      String name = schema.printableUniqueKey( reader.document2( id ) );
+      String name = schema.printableUniqueKey( reader.document( id ) );
       mlt.add(name, sim.docList);
     }
Index: solr/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
===================================================================
--- solr/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java (revision 1153521)
+++ solr/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java (working copy)
@@ -33,8 +33,8 @@
 import org.slf4j.LoggerFactory;
 import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.document2.Document;
-import org.apache.lucene.document2.Field;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.Term;
@@ -122,7 +122,7 @@
     if( docId != null ) {
       Document doc = null;
       try {
-        doc = reader.document2( docId );
+        doc = reader.document( docId );
       } catch( Exception ex ) {}
       if( doc == null ) {
@@ -321,7 +321,7 @@
     if( top.totalHits > 0 ) {
       // Find a document with this field
       try {
-        Document doc = searcher.doc2( top.scoreDocs[0].doc );
+        Document doc = searcher.doc( top.scoreDocs[0].doc );
         IndexableField fld = doc.getField( fieldName );
         if( fld != null ) {
           f.add( "index", getFieldFlags( fld ) );
Index: solr/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java
===================================================================
--- solr/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java (revision 1153521)
+++ solr/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java (working copy)
@@ -80,7 +80,7 @@
       int id = iterator.nextDoc();
       int rows = p.getInt( MoreLikeThisParams.DOC_COUNT, 5 );
       DocListAndSet sim = mltHelper.getMoreLikeThis( id, 0, rows, null, null, flags );
-      String name = schema.printableUniqueKey( searcher.doc2( id ) );
+      String name = schema.printableUniqueKey( searcher.doc( id ) );
       mlt.add(name, sim.docList);
       if( dbg != null ){
@@ -92,7 +92,7 @@
         DocIterator mltIte = sim.docList.iterator();
         while( mltIte.hasNext() ){
           int mltid = mltIte.nextDoc();
-          String key = schema.printableUniqueKey( searcher.doc2( mltid ) );
+          String key = schema.printableUniqueKey( searcher.doc( mltid ) );
           explains.add( key, searcher.explain( mltHelper.getRealMLTQuery(), mltid ) );
         }
         docDbg.add( "explain", explains );
Index: solr/src/java/org/apache/solr/handler/component/QueryComponent.java
===================================================================
--- solr/src/java/org/apache/solr/handler/component/QueryComponent.java (revision 1153521)
+++ solr/src/java/org/apache/solr/handler/component/QueryComponent.java (working copy)
@@ -17,7 +17,7 @@
 package org.apache.solr.handler.component;
-import org.apache.lucene.document2.Field;
+import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.index.Term;
@@ -465,7 +465,7 @@
     Sort sort = searcher.weightSort(rb.getSortSpec().getSort());
     SortField[] sortFields = sort==null ? new SortField[]{SortField.FIELD_SCORE} : sort.getSort();
     NamedList sortVals = new NamedList(); // order is important for the sort fields
-    org.apache.lucene.document2.FieldType docft = new org.apache.lucene.document2.FieldType();
+    org.apache.lucene.document.FieldType docft = new org.apache.lucene.document.FieldType();
     docft.setStored(true);
     Field field = new Field("dummy", docft, ""); // a dummy Field
     ReaderContext topReaderContext = searcher.getTopReaderContext();
Index: solr/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java
===================================================================
--- solr/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java (revision 1153521)
+++ solr/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java (working copy)
@@ -32,7 +32,7 @@
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.document2.Document;
+import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.highlight.*;
Index: solr/src/java/org/apache/solr/response/BinaryResponseWriter.java
===================================================================
--- solr/src/java/org/apache/solr/response/BinaryResponseWriter.java (revision 1153521)
+++ solr/src/java/org/apache/solr/response/BinaryResponseWriter.java (working copy)
@@ -16,7 +16,7 @@
  */
 package org.apache.solr.response;
-import org.apache.lucene.document2.Document;
+import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexableField;
 import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.SolrDocumentList;
Index: solr/src/java/org/apache/solr/response/CSVResponseWriter.java
===================================================================
--- solr/src/java/org/apache/solr/response/CSVResponseWriter.java (revision 1153521)
+++ solr/src/java/org/apache/solr/response/CSVResponseWriter.java (working copy)
@@ -19,7 +19,7 @@
 import org.apache.commons.csv.CSVPrinter;
 import org.apache.commons.csv.CSVStrategy;
-import org.apache.lucene.document2.Document;
+import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexableField;
 import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.SolrDocumentList;
Index: solr/src/java/org/apache/solr/response/TextResponseWriter.java
===================================================================
--- solr/src/java/org/apache/solr/response/TextResponseWriter.java (revision 1153521)
+++ solr/src/java/org/apache/solr/response/TextResponseWriter.java (working copy)
@@ -21,7 +21,7 @@
 import java.io.Writer;
 import java.util.*;
-import org.apache.lucene.document2.Document;
+import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexableField;
 import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.SolrDocumentList;
Index: solr/src/java/org/apache/solr/schema/BinaryField.java
===================================================================
--- solr/src/java/org/apache/solr/schema/BinaryField.java (revision 1153521)
+++ solr/src/java/org/apache/solr/schema/BinaryField.java (working copy)
@@ -20,7 +20,7 @@
 import java.io.IOException;
 import java.nio.ByteBuffer;
-import org.apache.lucene.document2.Field;
+import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.util.BytesRef;
@@ -81,7 +81,7 @@
       len = buf.length;
     }
-    Field f = new org.apache.lucene.document2.BinaryField(field.getName(), buf, offset, len);
+    Field f = new org.apache.lucene.document.BinaryField(field.getName(), buf, offset, len);
     f.setBoost(boost);
     return f;
   }
Index: solr/src/java/org/apache/solr/schema/FieldType.java
===================================================================
--- solr/src/java/org/apache/solr/schema/FieldType.java (revision 1153521)
+++ solr/src/java/org/apache/solr/schema/FieldType.java (working copy)
@@ -21,7 +21,7 @@
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.document2.Field;
+import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.Query;
@@ -249,7 +249,7 @@
     }
     if (val==null) return null;
-    org.apache.lucene.document2.FieldType newType = new org.apache.lucene.document2.FieldType();
+    org.apache.lucene.document.FieldType newType = new org.apache.lucene.document.FieldType();
     newType.setIndexed(field.indexed());
     newType.setTokenized(field.isTokenized());
     newType.setStored(field.stored());
@@ -301,11 +301,11 @@
    * Fields per SchemaField
    * @param name The name of the field
    * @param val The _internal_ value to index
-   * @param type {@link org.apache.lucene.document2.FieldType}
+   * @param type {@link org.apache.lucene.document.FieldType}
    * @param boost The boost value
    * @return the {@link org.apache.lucene.index.IndexableField}.
    */
-  protected IndexableField createField(String name, String val, org.apache.lucene.document2.FieldType type, float boost){
+  protected IndexableField createField(String name, String val, org.apache.lucene.document.FieldType type, float boost){
     Field f = new Field(name, type, val);
     f.setBoost(boost);
     return f;
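The createField() hunk above shows the pattern the relocated field API expects: configure an org.apache.lucene.document.FieldType, then hand it to the Field constructor. A minimal sketch under that assumption; the class and method names here are illustrative, not part of the patch.

    // Sketch only: building a stored, indexed field the way FieldType.createField() does.
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.FieldType;

    class FieldCreationSketch {
      static Field newStoredIndexedField(String name, String value, float boost) {
        FieldType type = new FieldType();
        type.setIndexed(true);     // mirrors newType.setIndexed(field.indexed())
        type.setTokenized(true);   // mirrors newType.setTokenized(field.isTokenized())
        type.setStored(true);      // mirrors newType.setStored(field.stored())
        Field f = new Field(name, type, value);  // same constructor the hunk above uses
        f.setBoost(boost);
        return f;
      }
    }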
Index: solr/src/java/org/apache/solr/schema/IndexSchema.java
===================================================================
--- solr/src/java/org/apache/solr/schema/IndexSchema.java (revision 1153521)
+++ solr/src/java/org/apache/solr/schema/IndexSchema.java (working copy)
@@ -260,7 +260,7 @@
    * @return null if this schema has no unique key field
    * @see #printableUniqueKey
    */
-  public IndexableField getUniqueKeyField(org.apache.lucene.document2.Document doc) {
+  public IndexableField getUniqueKeyField(org.apache.lucene.document.Document doc) {
     return doc.getField(uniqueKeyFieldName); // this should return null if name is null
   }
@@ -269,7 +269,7 @@
    * the specified Document
    * @return null if this schema has no unique key field
    */
-  public String printableUniqueKey(org.apache.lucene.document2.Document doc) {
+  public String printableUniqueKey(org.apache.lucene.document.Document doc) {
     IndexableField f = doc.getField(uniqueKeyFieldName);
     return f==null ? null : uniqueKeyFieldType.toExternal(f);
   }
Index: solr/src/java/org/apache/solr/schema/LatLonType.java
===================================================================
--- solr/src/java/org/apache/solr/schema/LatLonType.java (revision 1153521)
+++ solr/src/java/org/apache/solr/schema/LatLonType.java (working copy)
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
-import org.apache.lucene.document2.FieldType;
+import org.apache.lucene.document.FieldType;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexReader.AtomicReaderContext;
Index: solr/src/java/org/apache/solr/schema/PointType.java
===================================================================
--- solr/src/java/org/apache/solr/schema/PointType.java (revision 1153521)
+++ solr/src/java/org/apache/solr/schema/PointType.java (working copy)
@@ -87,7 +87,7 @@
     if (field.stored()) {
       String storedVal = externalVal; // normalize or not?
-      org.apache.lucene.document2.FieldType customType = new org.apache.lucene.document2.FieldType();
+      org.apache.lucene.document.FieldType customType = new org.apache.lucene.document.FieldType();
       customType.setStored(true);
       f[f.length - 1] = createField(field.getName(), storedVal, customType, boost);
     }
Index: solr/src/java/org/apache/solr/schema/TrieField.java
===================================================================
--- solr/src/java/org/apache/solr/schema/TrieField.java (revision 1153521)
+++ solr/src/java/org/apache/solr/schema/TrieField.java (working copy)
@@ -16,7 +16,7 @@
  */
 package org.apache.solr.schema;
-import org.apache.lucene.document2.NumericField;
+import org.apache.lucene.document.NumericField;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.search.*;
 import org.apache.lucene.search.cache.CachedArrayCreator;
@@ -407,8 +407,8 @@
   @Override
   public String storedToIndexed(IndexableField f) {
     final BytesRef bytes = new BytesRef(NumericUtils.BUF_SIZE_LONG);
-    if (f instanceof org.apache.lucene.document2.NumericField) {
-      final Number val = ((org.apache.lucene.document2.NumericField) f).numericValue();
+    if (f instanceof org.apache.lucene.document.NumericField) {
+      final Number val = ((org.apache.lucene.document.NumericField) f).numericValue();
       if (val==null) throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Invalid field contents: "+f.name());
       switch (type) {
@@ -477,14 +477,14 @@
       return null;
     }
-    org.apache.lucene.document2.FieldType ft = new org.apache.lucene.document2.FieldType();
+    org.apache.lucene.document.FieldType ft = new org.apache.lucene.document.FieldType();
     ft.setStored(stored);
     ft.setTokenized(true);
     ft.setIndexed(indexed);
     ft.setOmitNorms(field.omitNorms());
     ft.setOmitTermFreqAndPositions(field.omitTf());
-    final org.apache.lucene.document2.NumericField f = new org.apache.lucene.document2.NumericField(field.getName(), precisionStep, ft);
+    final org.apache.lucene.document.NumericField f = new org.apache.lucene.document.NumericField(field.getName(), precisionStep, ft);
     switch (type) {
       case INTEGER:
         int i = (value instanceof Number)
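TrieField.createField() above drives the relocated NumericField through the same FieldType object. A short sketch of that combination; the field name, the precisionStep value, and the setIntValue() call are assumptions for illustration rather than lines taken from the hunk.

    // Sketch only: a numeric field configured via FieldType, as TrieField now does.
    import org.apache.lucene.document.FieldType;
    import org.apache.lucene.document.NumericField;

    class NumericFieldSketch {
      static NumericField newIntField(String name, int value) {
        FieldType ft = new FieldType();
        ft.setIndexed(true);
        ft.setTokenized(true);
        ft.setStored(true);
        ft.setOmitNorms(true);   // TrieField copies these flags from the schema field
        NumericField f = new NumericField(name, 8 /* precisionStep, illustrative */, ft);
        f.setIntValue(value);    // assumed NumericField setter on this branch
        return f;
      }
    }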
Index: solr/src/java/org/apache/solr/search/SolrIndexSearcher.java
===================================================================
--- solr/src/java/org/apache/solr/search/SolrIndexSearcher.java (revision 1153521)
+++ solr/src/java/org/apache/solr/search/SolrIndexSearcher.java (working copy)
@@ -17,10 +17,10 @@
 package org.apache.solr.search;
-import org.apache.lucene.document2.Document;
-import org.apache.lucene.document2.FieldSelector;
-import org.apache.lucene.document2.FieldSelectorResult;
-import org.apache.lucene.document2.FieldSelectorVisitor;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldSelector;
+import org.apache.lucene.document.FieldSelectorResult;
+import org.apache.lucene.document.FieldSelectorVisitor;
 import org.apache.lucene.index.*;
 import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.search.*;
@@ -403,7 +403,7 @@
    * Retrieve the {@link Document} instance corresponding to the document id.
    */
   @Override
-  public Document doc2(int i) throws IOException {
+  public Document doc(int i) throws IOException {
     return doc(i, (Set)null);
   }
@@ -432,7 +432,7 @@
     }
     if(!enableLazyFieldLoading || fields == null) {
-      d = getIndexReader().document2(i);
+      d = getIndexReader().document(i);
     } else {
       final FieldSelectorVisitor visitor = new FieldSelectorVisitor(new SetNonLazyFieldSelector(fields));
       getIndexReader().document(i, visitor);
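The SolrIndexSearcher hunk above keeps lazy field loading by routing a FieldSelector through the relocated FieldSelectorVisitor and the visitor-based IndexReader.document(int, visitor) call. A hedged sketch of that path; SetNonLazyFieldSelector is Solr-internal, so an inline FieldSelector stands in, and how the loaded Document is read back off the visitor is not shown in this hunk.

    // Sketch only: load just the requested stored fields via FieldSelectorVisitor.
    import java.io.IOException;
    import java.util.Set;
    import org.apache.lucene.document.FieldSelector;
    import org.apache.lucene.document.FieldSelectorResult;
    import org.apache.lucene.document.FieldSelectorVisitor;
    import org.apache.lucene.index.IndexReader;

    class SelectiveLoadSketch {
      static void loadOnly(IndexReader reader, int docId, final Set<String> wanted) throws IOException {
        FieldSelector selector = new FieldSelector() {
          public FieldSelectorResult accept(String fieldName) {
            return wanted.contains(fieldName) ? FieldSelectorResult.LOAD : FieldSelectorResult.NO_LOAD;
          }
        };
        FieldSelectorVisitor visitor = new FieldSelectorVisitor(selector);
        reader.document(docId, visitor);  // the visitor collects only the selected stored fields
      }
    }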
Index: solr/src/java/org/apache/solr/spelling/FileBasedSpellChecker.java
===================================================================
--- solr/src/java/org/apache/solr/spelling/FileBasedSpellChecker.java (revision 1153521)
+++ solr/src/java/org/apache/solr/spelling/FileBasedSpellChecker.java (working copy)
@@ -24,9 +24,9 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.lucene.document2.Document;
-import org.apache.lucene.document2.Field;
-import org.apache.lucene.document2.TextField;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.search.spell.HighFrequencyDictionary;
 import org.apache.lucene.search.spell.PlainTextDictionary;
 import org.apache.lucene.store.RAMDirectory;
Index: solr/src/java/org/apache/solr/update/AddUpdateCommand.java
===================================================================
--- solr/src/java/org/apache/solr/update/AddUpdateCommand.java (revision 1153521)
+++ solr/src/java/org/apache/solr/update/AddUpdateCommand.java (working copy)
@@ -17,7 +17,7 @@
 package org.apache.solr.update;
-import org.apache.lucene.document2.Document;
+import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.Term;
 import org.apache.solr.common.SolrInputDocument;
Index: solr/src/java/org/apache/solr/update/DocumentBuilder.java
===================================================================
--- solr/src/java/org/apache/solr/update/DocumentBuilder.java (revision 1153521)
+++ solr/src/java/org/apache/solr/update/DocumentBuilder.java (working copy)
@@ -21,7 +21,7 @@
 import java.util.HashMap;
 import java.util.List;
-import org.apache.lucene.document2.Document;
+import org.apache.lucene.document.Document;
 import org.apache.lucene.index.IndexableField;
 import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.SolrException;
Index: solr/src/java/org/apache/solr/update/UpdateHandler.java
===================================================================
--- solr/src/java/org/apache/solr/update/UpdateHandler.java (revision 1153521)
+++ solr/src/java/org/apache/solr/update/UpdateHandler.java (working copy)
@@ -21,7 +21,7 @@
 import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.document2.Document;
+import org.apache.lucene.document.Document;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.Scorer;
Index: solr/src/java/org/apache/solr/util/SolrPluginUtils.java
===================================================================
--- solr/src/java/org/apache/solr/util/SolrPluginUtils.java (revision 1153521)
+++ solr/src/java/org/apache/solr/util/SolrPluginUtils.java (working copy)
@@ -17,7 +17,7 @@
 package org.apache.solr.util;
-import org.apache.lucene.document2.Document;
+import org.apache.lucene.document.Document;
 import org.apache.lucene.queryParser.ParseException;
 import org.apache.lucene.queryParser.QueryParser;
 import org.apache.lucene.search.*;
@@ -335,7 +335,7 @@
   for (int i=0; i