Index: lucene/contrib/demo/src/java/org/apache/lucene/demo/IndexFiles.java =================================================================== --- lucene/contrib/demo/src/java/org/apache/lucene/demo/IndexFiles.java (revision 1148487) +++ lucene/contrib/demo/src/java/org/apache/lucene/demo/IndexFiles.java (working copy) @@ -19,9 +19,11 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.NumericField; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.NumericField; +import org.apache.lucene.document2.StringField; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.index.IndexWriterConfig; @@ -168,8 +170,7 @@ // field that is indexed (i.e. searchable), but don't tokenize // the field into separate words and don't index term frequency // or positional information: - Field pathField = new Field("path", file.getPath(), Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS); - pathField.setOmitTermFreqAndPositions(true); + Field pathField = new Field("path", StringField.TYPE_STORED, file.getPath()); doc.add(pathField); // Add the last modified date of the file a field named "modified". @@ -187,7 +188,7 @@ // so that the text of the file is tokenized and indexed, but not stored. // Note that FileReader expects the file to be in UTF-8 encoding. // If that's not the case searching for special characters will fail. - doc.add(new Field("contents", new BufferedReader(new InputStreamReader(fis, "UTF-8")))); + doc.add(new TextField("contents", new BufferedReader(new InputStreamReader(fis, "UTF-8")))); if (writer.getConfig().getOpenMode() == OpenMode.CREATE) { // New index, so we just add the document (no old document can be there): Index: lucene/contrib/demo/src/java/org/apache/lucene/demo/SearchFiles.java =================================================================== --- lucene/contrib/demo/src/java/org/apache/lucene/demo/SearchFiles.java (revision 1148487) +++ lucene/contrib/demo/src/java/org/apache/lucene/demo/SearchFiles.java (working copy) @@ -26,7 +26,7 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; -import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; import org.apache.lucene.queryParser.QueryParser; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; @@ -175,7 +175,7 @@ continue; } - Document doc = searcher.doc(hits[i].doc); + Document doc = searcher.doc2(hits[i].doc); String path = doc.get("path"); if (path != null) { System.out.println((i+1) + ". 
" + path); Index: lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java =================================================================== --- lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java (revision 1148487) +++ lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java (working copy) @@ -31,7 +31,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.TermFreqVector; import org.apache.lucene.index.TermPositionVector; @@ -64,6 +64,7 @@ * {@link org.apache.lucene.document.Document} * @throws IOException if there was an error loading */ + public static TokenStream getAnyTokenStream(IndexReader reader, int docId, String field, Document doc, Analyzer analyzer) throws IOException { TokenStream ts = null; @@ -269,7 +270,7 @@ // convenience method public static TokenStream getTokenStream(IndexReader reader, int docId, String field, Analyzer analyzer) throws IOException { - Document doc = reader.document(docId); + Document doc = reader.document2(docId); return getTokenStream(doc, field, analyzer); } Index: lucene/contrib/highlighter/src/java/org/apache/lucene/search/vectorhighlight/BaseFragmentsBuilder.java =================================================================== --- lucene/contrib/highlighter/src/java/org/apache/lucene/search/vectorhighlight/BaseFragmentsBuilder.java (revision 1148487) +++ lucene/contrib/highlighter/src/java/org/apache/lucene/search/vectorhighlight/BaseFragmentsBuilder.java (working copy) @@ -21,7 +21,9 @@ import java.util.ArrayList; import java.util.List; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.TextField; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.StoredFieldVisitor; @@ -117,8 +119,11 @@ if (fieldInfo.name.equals(fieldName)) { final byte[] b = new byte[numUTF8Bytes]; in.readBytes(b, 0, b.length); - Field.TermVector termVector = Field.TermVector.toTermVector(fieldInfo.storeTermVector, fieldInfo.storeOffsetWithTermVector, fieldInfo.storePositionWithTermVector); - fields.add(new Field(fieldInfo.name, false, new String(b, "UTF-8"), Field.Store.YES, Field.Index.ANALYZED, termVector)); + FieldType ft = new FieldType(TextField.TYPE_STORED); + ft.setStoreTermVectors(fieldInfo.storeTermVector); + ft.setStoreTermVectorOffsets(fieldInfo.storeOffsetWithTermVector); + ft.setStoreTermVectorPositions(fieldInfo.storePositionWithTermVector); + fields.add(new Field(fieldInfo.name, ft, new String(b, "UTF-8"))); } else { in.seek(in.getFilePointer() + numUTF8Bytes); } Index: lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java =================================================================== --- lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (revision 1148487) +++ lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (working copy) @@ -112,7 +112,7 @@ for (int i = 0; i < hits.scoreDocs.length; i++) { - org.apache.lucene.document.Document 
doc = searcher.doc(hits.scoreDocs[i].doc); + Document doc = searcher.doc2(hits.scoreDocs[i].doc); String storedField = doc.get(FIELD_NAME); TokenStream stream = TokenSources.getAnyTokenStream(searcher @@ -1568,7 +1568,7 @@ TopDocs hits = searcher.search(query, null, 10); for( int i = 0; i < hits.totalHits; i++ ){ - org.apache.lucene.document.Document doc = searcher.doc( hits.scoreDocs[i].doc ); + Document doc = searcher.doc2( hits.scoreDocs[i].doc ); String result = h.getBestFragment( a, "t_text1", doc.get( "t_text1" )); if (VERBOSE) System.out.println("result:" + result); assertEquals("more random words for second field", result); Index: lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java =================================================================== --- lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java (revision 1148487) +++ lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java (working copy) @@ -275,12 +275,17 @@ public Document document(int n) throws IOException { return getIndex().getDocumentsByNumber()[n].getDocument(); } + + @Override + public org.apache.lucene.document2.Document document2(int n) throws IOException { + return getIndex().getDocumentsByNumber()[n].getDocument2(); + } @Override public void document(int docID, StoredFieldVisitor visitor) throws IOException { throw new UnsupportedOperationException(); } - + /** * never ever touch these values. it is the true values, unless norms have * been touched. Index: lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java =================================================================== --- lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java (revision 1148487) +++ lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java (working copy) @@ -1229,7 +1229,7 @@ if (DEBUG) System.err.println("MemoryIndexReader.document"); // no-op: there are no stored fields } - + @Override public boolean hasDeletions() { if (DEBUG) System.err.println("MemoryIndexReader.hasDeletions"); Index: lucene/contrib/misc/src/java/org/apache/lucene/document2/FieldSelector.java =================================================================== --- lucene/contrib/misc/src/java/org/apache/lucene/document2/FieldSelector.java (revision 0) +++ lucene/contrib/misc/src/java/org/apache/lucene/document2/FieldSelector.java (revision 0) @@ -0,0 +1,33 @@ +package org.apache.lucene.document2; + +/** + * Copyright 2004 The Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * Similar to a {@link java.io.FileFilter}, the FieldSelector allows one to make decisions about + * what Fields get loaded on a {@link Document} by {@link FieldSelectorVisitor} + * + **/ +public interface FieldSelector { + + /** + * + * @param fieldName the field to accept or reject + * @return an instance of {@link FieldSelectorResult} + * if the {@link Field} named fieldName should be loaded. + */ + FieldSelectorResult accept(String fieldName); +} Index: lucene/contrib/misc/src/java/org/apache/lucene/document2/FieldSelectorResult.java =================================================================== --- lucene/contrib/misc/src/java/org/apache/lucene/document2/FieldSelectorResult.java (revision 0) +++ lucene/contrib/misc/src/java/org/apache/lucene/document2/FieldSelectorResult.java (revision 0) @@ -0,0 +1,76 @@ +package org.apache.lucene.document2; + +/** + * Copyright 2004 The Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Provides information about what should be done with this Field + * + **/ +public enum FieldSelectorResult { + + /** + * Load this {@link Field} every time the {@link Document} is loaded, reading in the data as it is encountered. + * {@link Document#getField(String)} and {@link Document#getFieldable(String)} should not return null. + *
+ * {@link Document#add(Fieldable)} should be called by the Reader. + */ + LOAD, + + /** + * Lazily load this {@link Field}. This means the {@link Field} is valid, but it may not actually contain its data until + * invoked. {@link Document#getField(String)} SHOULD NOT BE USED. {@link Document#getFieldable(String)} is safe to use and should + * return a valid instance of a {@link Fieldable}. + *
+ * {@link Document#add(Fieldable)} should be called by the Reader. + */ + LAZY_LOAD, + + /** + * Do not load the {@link Field}. {@link Document#getField(String)} and {@link Document#getFieldable(String)} should return null. + * {@link Document#add(Fieldable)} is not called. + *
+ * {@link Document#add(Fieldable)} should not be called by the Reader. + */ + NO_LOAD, + + /** + * Load this field as in the {@link #LOAD} case, but immediately return from {@link Field} loading for the {@link Document}. Thus, the + * Document may not have its complete set of Fields. {@link Document#getField(String)} and {@link Document#getFieldable(String)} should + * both be valid for this {@link Field} + *
+ * {@link Document#add(Fieldable)} should be called by the Reader. + */ + LOAD_AND_BREAK, + + /** Expert: Load the size of this {@link Field} rather than its value. + * Size is measured as number of bytes required to store the field == bytes for a binary or any compressed value, and 2*chars for a String value. + * The size is stored as a binary value, represented as an int in a byte[], with the higher order byte first in [0] + */ + SIZE, + + /** Expert: Like {@link #SIZE} but immediately break from the field loading loop, i.e., stop loading further fields, after the size is loaded */ + SIZE_AND_BREAK, + + /** + * Lazily load this {@link Field}, but do not cache the result. This means the {@link Field} is valid, but it may not actually contain its data until + * invoked. {@link Document#getField(String)} SHOULD NOT BE USED. {@link Document#getFieldable(String)} is safe to use and should + * return a valid instance of a {@link Fieldable}. + *
+ * {@link Document#add(Fieldable)} should be called by the Reader. + */ + LATENT +} Index: lucene/contrib/misc/src/java/org/apache/lucene/document2/FieldSelectorVisitor.java =================================================================== --- lucene/contrib/misc/src/java/org/apache/lucene/document2/FieldSelectorVisitor.java (revision 0) +++ lucene/contrib/misc/src/java/org/apache/lucene/document2/FieldSelectorVisitor.java (revision 0) @@ -0,0 +1,329 @@ +package org.apache.lucene.document2; + +/** + * Copyright 2004 The Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.io.IOException; +import java.io.Reader; + +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.document.NumericField.DataType; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.FieldReaderException; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.StoredFieldVisitor; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.util.BytesRef; + +/** Create this, passing a legacy {@link FieldSelector} to it, then + * pass this class to {@link IndexReader#document(int, + * StoredFieldVisitor)}, then call {@link #getDocument} to + * retrieve the loaded document. + + *
NOTE: If you use Lazy fields, you should not + * access the returned document after the reader has been + * closed! + */ + +public class FieldSelectorVisitor extends StoredFieldVisitor { + + private final FieldSelector selector; + private final Document doc; + + public FieldSelectorVisitor(FieldSelector selector) { + this.selector = selector; + doc = new Document(); + } + + public Document getDocument() { + return doc; + } + + @Override + public boolean binaryField(FieldInfo fieldInfo, IndexInput in, int numBytes) throws IOException { + final FieldSelectorResult accept = selector.accept(fieldInfo.name); + switch (accept) { + case LOAD: + case LOAD_AND_BREAK: + final byte[] b = new byte[numBytes]; + in.readBytes(b, 0, b.length); + doc.add(new BinaryField(fieldInfo.name, b)); + return accept != FieldSelectorResult.LOAD; + case LAZY_LOAD: + case LATENT: + addFieldLazy(in, fieldInfo, true, accept == FieldSelectorResult.LAZY_LOAD, numBytes); + return false; + case SIZE: + case SIZE_AND_BREAK: + in.seek(in.getFilePointer() + numBytes); + addFieldSize(fieldInfo, numBytes); + return accept != FieldSelectorResult.SIZE; + default: + // skip + in.seek(in.getFilePointer() + numBytes); + return false; + } + } + + @Override + public boolean stringField(FieldInfo fieldInfo, IndexInput in, int numUTF8Bytes) throws IOException { + final FieldSelectorResult accept = selector.accept(fieldInfo.name); + switch (accept) { + case LOAD: + case LOAD_AND_BREAK: + final byte[] b = new byte[numUTF8Bytes]; + in.readBytes(b, 0, b.length); + FieldType ft = new FieldType(TextField.TYPE_STORED); + ft.setStoreTermVectors(fieldInfo.storeTermVector); + ft.setStoreTermVectorOffsets(fieldInfo.storeOffsetWithTermVector); + ft.setStoreTermVectorPositions(fieldInfo.storePositionWithTermVector); + doc.add(new Field(fieldInfo.name, ft, new String(b, "UTF-8"))); + return accept != FieldSelectorResult.LOAD; + case LAZY_LOAD: + case LATENT: + addFieldLazy(in, fieldInfo, false, accept == FieldSelectorResult.LAZY_LOAD, numUTF8Bytes); + return false; + case SIZE: + case SIZE_AND_BREAK: + in.seek(in.getFilePointer() + numUTF8Bytes); + addFieldSize(fieldInfo, 2*numUTF8Bytes); + return accept != FieldSelectorResult.SIZE; + default: + // skip + in.seek(in.getFilePointer() + numUTF8Bytes); + return false; + } + } + + @Override + public boolean intField(FieldInfo fieldInfo, int value) throws IOException { + FieldType ft = new FieldType(NumericField.TYPE_STORED); + ft.setIndexed(fieldInfo.isIndexed); + ft.setOmitNorms(fieldInfo.omitNorms); + ft.setOmitTermFreqAndPositions(fieldInfo.omitTermFreqAndPositions); + return addNumericField(fieldInfo, new NumericField(fieldInfo.name, ft).setIntValue(value)); + } + + @Override + public boolean longField(FieldInfo fieldInfo, long value) throws IOException { + FieldType ft = new FieldType(NumericField.TYPE_STORED); + ft.setIndexed(fieldInfo.isIndexed); + ft.setOmitNorms(fieldInfo.omitNorms); + ft.setOmitTermFreqAndPositions(fieldInfo.omitTermFreqAndPositions); + return addNumericField(fieldInfo, new NumericField(fieldInfo.name, ft).setLongValue(value)); + } + + @Override + public boolean floatField(FieldInfo fieldInfo, float value) throws IOException { + FieldType ft = new FieldType(NumericField.TYPE_STORED); + ft.setIndexed(fieldInfo.isIndexed); + ft.setOmitNorms(fieldInfo.omitNorms); + ft.setOmitTermFreqAndPositions(fieldInfo.omitTermFreqAndPositions); + return addNumericField(fieldInfo, new NumericField(fieldInfo.name, ft).setFloatValue(value)); + } + + @Override + public boolean doubleField(FieldInfo 
fieldInfo, double value) throws IOException { + FieldType ft = new FieldType(NumericField.TYPE_STORED); + ft.setIndexed(fieldInfo.isIndexed); + ft.setOmitNorms(fieldInfo.omitNorms); + ft.setOmitTermFreqAndPositions(fieldInfo.omitTermFreqAndPositions); + return addNumericField(fieldInfo, new NumericField(fieldInfo.name, ft).setDoubleValue(value)); + } + + private boolean addNumericField(FieldInfo fieldInfo, NumericField f) { + doc.add(f); + final FieldSelectorResult accept = selector.accept(fieldInfo.name); + switch (accept) { + case LOAD: + return false; + case LOAD_AND_BREAK: + return true; + case LAZY_LOAD: + case LATENT: + return false; + case SIZE: + return false; + case SIZE_AND_BREAK: + return true; + default: + return false; + } + } + + private void addFieldLazy(IndexInput in, FieldInfo fi, boolean binary, boolean cacheResult, int numBytes) throws IOException { + final IndexableField f; + final long pointer = in.getFilePointer(); + // Need to move the pointer ahead by toRead positions + in.seek(pointer+numBytes); + FieldType ft = new FieldType(); + ft.setStored(true); + ft.setOmitNorms(fi.omitNorms); + ft.setOmitTermFreqAndPositions(fi.omitTermFreqAndPositions); + ft.setLazy(true); + + if (binary) { + f = new LazyField(in, fi.name, ft, numBytes, pointer, binary, cacheResult); + } else { + ft.setStoreTermVectors(fi.storeTermVector); + ft.setStoreTermVectorOffsets(fi.storeOffsetWithTermVector); + ft.setStoreTermVectorPositions(fi.storePositionWithTermVector); + f = new LazyField(in, fi.name, ft, numBytes, pointer, binary, cacheResult); + } + + doc.add(f); + } + + // Add the size of field as a byte[] containing the 4 bytes of the integer byte size (high order byte first; char = 2 bytes) + // Read just the size -- caller must skip the field content to continue reading fields + // Return the size in bytes or chars, depending on field type + private void addFieldSize(FieldInfo fi, int numBytes) throws IOException { + byte[] sizebytes = new byte[4]; + sizebytes[0] = (byte) (numBytes>>>24); + sizebytes[1] = (byte) (numBytes>>>16); + sizebytes[2] = (byte) (numBytes>>> 8); + sizebytes[3] = (byte) numBytes ; + doc.add(new BinaryField(fi.name, sizebytes)); + } + + /** + * A Lazy field implementation that defers loading of fields until asked for, instead of when the Document is + * loaded. + */ + private static class LazyField extends Field { + private int toRead; + private long pointer; + private final boolean cacheResult; + private final IndexInput in; + + public LazyField(IndexInput in, String name, FieldType ft, int toRead, long pointer, boolean isBinary, boolean cacheResult) { + super(name, ft); + this.in = in; + this.toRead = toRead; + this.pointer = pointer; + this.isBinary = isBinary; + this.cacheResult = cacheResult; + if (isBinary) + binaryLength = toRead; + } + + @Override + public Number numericValue() { + return null; + } + + @Override + public DataType numericDataType() { + return null; + } + + private IndexInput localFieldsStream; + + private IndexInput getFieldStream() { + if (localFieldsStream == null) { + localFieldsStream = (IndexInput) in.clone(); + } + return localFieldsStream; + } + + /** The value of the field as a Reader, or null. If null, the String value, + * binary value, or TokenStream value is used. Exactly one of stringValue(), + * readerValue(), getBinaryValue(), and tokenStreamValue() must be set. */ + public Reader readerValue() { + return null; + } + + /** The value of the field as a TokenStream, or null. 
If null, the Reader value, + * String value, or binary value is used. Exactly one of stringValue(), + * readerValue(), getBinaryValue(), and tokenStreamValue() must be set. */ + public TokenStream tokenStreamValue() { + return null; + } + + /** The value of the field as a String, or null. If null, the Reader value, + * binary value, or TokenStream value is used. Exactly one of stringValue(), + * readerValue(), getBinaryValue(), and tokenStreamValue() must be set. */ + synchronized public String stringValue() { + if (isBinary) + return null; + else { + if (fieldsData == null) { + String result = null; + IndexInput localFieldsStream = getFieldStream(); + try { + localFieldsStream.seek(pointer); + byte[] bytes = new byte[toRead]; + localFieldsStream.readBytes(bytes, 0, toRead); + result = new String(bytes, "UTF-8"); + } catch (IOException e) { + throw new FieldReaderException(e); + } + if (cacheResult == true){ + fieldsData = result; + } + return result; + } else { + return (String) fieldsData; + } + } + } + + synchronized private byte[] getBinaryValue(byte[] result) { + if (isBinary) { + if (fieldsData == null) { + // Allocate new buffer if result is null or too small + final byte[] b; + if (result == null || result.length < toRead) + b = new byte[toRead]; + else + b = result; + + IndexInput localFieldsStream = getFieldStream(); + + // Throw this IOException since IndexReader.document does so anyway, so probably not that big of a change for people + // since they are already handling this exception when getting the document + try { + localFieldsStream.seek(pointer); + localFieldsStream.readBytes(b, 0, toRead); + } catch (IOException e) { + throw new FieldReaderException(e); + } + + binaryOffset = 0; + binaryLength = toRead; + if (cacheResult == true){ + fieldsData = b; + } + return b; + } else { + return (byte[]) fieldsData; + } + } else + return null; + } + + @Override + public BytesRef binaryValue(BytesRef reuse) { + final byte[] bytes = getBinaryValue(reuse != null ? reuse.bytes : null); + if (bytes != null) { + return new BytesRef(bytes, 0, bytes.length); + } else { + return null; + } + } + } +} \ No newline at end of file Index: lucene/contrib/misc/src/java/org/apache/lucene/document2/LoadFirstFieldSelector.java =================================================================== --- lucene/contrib/misc/src/java/org/apache/lucene/document2/LoadFirstFieldSelector.java (revision 0) +++ lucene/contrib/misc/src/java/org/apache/lucene/document2/LoadFirstFieldSelector.java (revision 0) @@ -0,0 +1,29 @@ +package org.apache.lucene.document2; +/** + * Copyright 2004 The Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +/** + * Load the First field and break. + *
+ * See {@link FieldSelectorResult#LOAD_AND_BREAK} + */ +public class LoadFirstFieldSelector implements FieldSelector { + + public FieldSelectorResult accept(String fieldName) { + return FieldSelectorResult.LOAD_AND_BREAK; + } +} \ No newline at end of file Index: lucene/contrib/misc/src/java/org/apache/lucene/document2/MapFieldSelector.java =================================================================== --- lucene/contrib/misc/src/java/org/apache/lucene/document2/MapFieldSelector.java (revision 0) +++ lucene/contrib/misc/src/java/org/apache/lucene/document2/MapFieldSelector.java (revision 0) @@ -0,0 +1,67 @@ +package org.apache.lucene.document2; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * A {@link FieldSelector} based on a Map of field names to {@link FieldSelectorResult}s + * + */ +public class MapFieldSelector implements FieldSelector { + + Map fieldSelections; + + /** Create a a MapFieldSelector + * @param fieldSelections maps from field names (String) to {@link FieldSelectorResult}s + */ + public MapFieldSelector(Map fieldSelections) { + this.fieldSelections = fieldSelections; + } + + /** Create a a MapFieldSelector + * @param fields fields to LOAD. List of Strings. All other fields are NO_LOAD. + */ + public MapFieldSelector(List fields) { + fieldSelections = new HashMap(fields.size()*5/3); + for (final String field : fields) + fieldSelections.put(field, FieldSelectorResult.LOAD); + } + + /** Create a a MapFieldSelector + * @param fields fields to LOAD. All other fields are NO_LOAD. + */ + public MapFieldSelector(String... fields) { + this(Arrays.asList(fields)); + } + + + + /** Load field according to its associated value in fieldSelections + * @param field a field name + * @return the fieldSelections value that field maps to or NO_LOAD if none. + */ + public FieldSelectorResult accept(String field) { + FieldSelectorResult selection = fieldSelections.get(field); + return selection!=null ? selection : FieldSelectorResult.NO_LOAD; + } + +} Index: lucene/contrib/misc/src/java/org/apache/lucene/document2/SetBasedFieldSelector.java =================================================================== --- lucene/contrib/misc/src/java/org/apache/lucene/document2/SetBasedFieldSelector.java (revision 0) +++ lucene/contrib/misc/src/java/org/apache/lucene/document2/SetBasedFieldSelector.java (revision 0) @@ -0,0 +1,60 @@ +package org.apache.lucene.document2; + +/** + * Copyright 2004 The Apache Software Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.util.Set; + +/** + * Declare what fields to load normally and what fields to load lazily + * + **/ + +public class SetBasedFieldSelector implements FieldSelector { + + private Set fieldsToLoad; + private Set lazyFieldsToLoad; + + /** + * Pass in the Set of {@link Field} names to load and the Set of {@link Field} names to load lazily. If both are null, the + * Document will not have any {@link Field} on it. + * @param fieldsToLoad A Set of {@link String} field names to load. May be empty, but not null + * @param lazyFieldsToLoad A Set of {@link String} field names to load lazily. May be empty, but not null + */ + public SetBasedFieldSelector(Set fieldsToLoad, Set lazyFieldsToLoad) { + this.fieldsToLoad = fieldsToLoad; + this.lazyFieldsToLoad = lazyFieldsToLoad; + } + + /** + * Indicate whether to load the field with the given name or not. If the {@link Field#name()} is not in either of the + * initializing Sets, then {@link org.apache.lucene.document.FieldSelectorResult#NO_LOAD} is returned. If a Field name + * is in both fieldsToLoad and lazyFieldsToLoad, lazy has precedence. + * + * @param fieldName The {@link Field} name to check + * @return The {@link FieldSelectorResult} + */ + public FieldSelectorResult accept(String fieldName) { + FieldSelectorResult result = FieldSelectorResult.NO_LOAD; + if (fieldsToLoad.contains(fieldName) == true){ + result = FieldSelectorResult.LOAD; + } + if (lazyFieldsToLoad.contains(fieldName) == true){ + result = FieldSelectorResult.LAZY_LOAD; + } + return result; + } +} \ No newline at end of file Index: lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java (revision 1148487) +++ lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java (working copy) @@ -152,7 +152,7 @@ writer.close(); IndexReader reader = IndexReader.open(dir, null, true, 1, new AppendingCodecProvider()); assertEquals(2, reader.numDocs()); - org.apache.lucene.document.Document doc2 = reader.document(0); + Document doc2 = reader.document2(0); assertEquals(text, doc2.get("f")); Fields fields = MultiFields.getFields(reader); Terms terms = fields.terms("f"); Index: lucene/contrib/misc/src/test/org/apache/lucene/store/TestNRTCachingDirectory.java =================================================================== --- lucene/contrib/misc/src/test/org/apache/lucene/store/TestNRTCachingDirectory.java (revision 1148487) +++ lucene/contrib/misc/src/test/org/apache/lucene/store/TestNRTCachingDirectory.java (working copy) @@ -23,7 +23,7 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; Index: 
lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java =================================================================== --- lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java (revision 1148487) +++ lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java (working copy) @@ -93,7 +93,7 @@ ScoreDoc[] hits = searcher.search(tq,df, 1000).scoreDocs; for(int i=0;i0); for(int i=0;i0); for(int i=0;i0); for(int i=0;i0)); - org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc); + Document doc=searcher.doc2(sd[0].doc); assertEquals("Should match most similar not most rare variant", "2",doc.get("id")); } //Test multiple input words are having variants produced @@ -104,7 +104,7 @@ TopDocs topDocs = searcher.search(flt, 1); ScoreDoc[] sd = topDocs.scoreDocs; assertTrue("score docs must match 1 doc", (sd!=null)&&(sd.length>0)); - org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc); + Document doc=searcher.doc2(sd[0].doc); assertEquals("Should match most similar when using 2 words", "2",doc.get("id")); } //Test bug found when first query word does not match anything @@ -119,7 +119,7 @@ TopDocs topDocs = searcher.search(flt, 1); ScoreDoc[] sd = topDocs.scoreDocs; assertTrue("score docs must match 1 doc", (sd!=null)&&(sd.length>0)); - org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc); + Document doc=searcher.doc2(sd[0].doc); assertEquals("Should match most similar when using 2 words", "2",doc.get("id")); } Index: lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java =================================================================== --- lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java (revision 1148487) +++ lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java (working copy) @@ -98,7 +98,7 @@ TopDocs td = searcher.search(q, 10); ScoreDoc[] sd = td.scoreDocs; for (int i = 0; i < sd.length; i++) { - org.apache.lucene.document.Document doc = searcher.doc(sd[i].doc); + Document doc = searcher.doc2(sd[i].doc); String id = doc.get("id"); assertTrue(qString + "matched doc#" + id + " not expected", expecteds .contains(id)); Index: lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java =================================================================== --- lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java (revision 1148487) +++ lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java (working copy) @@ -284,7 +284,7 @@ assertEquals(2, results); double lastDistance = 0; for(int i =0 ; i < results; i++){ - org.apache.lucene.document.Document d = searcher.doc(scoreDocs[i].doc); + Document d = searcher.doc2(scoreDocs[i].doc); String name = d.get("name"); double rsLat = Double.parseDouble(d.get(latField)); @@ -380,7 +380,7 @@ assertEquals(18, results); double lastDistance = 0; for(int i =0 ; i < results; i++){ - org.apache.lucene.document.Document d = searcher.doc(scoreDocs[i].doc); + Document d = searcher.doc2(scoreDocs[i].doc); String name = d.get("name"); double rsLat = Double.parseDouble(d.get(latField)); double rsLng = Double.parseDouble(d.get(lngField)); @@ -475,7 +475,7 @@ assertEquals(expected[x], results); double lastDistance = 0; for(int i =0 ; i < results; i++){ - org.apache.lucene.document.Document d = searcher.doc(scoreDocs[i].doc); + Document 
d = searcher.doc2(scoreDocs[i].doc); String name = d.get("name"); double rsLat = Double.parseDouble(d.get(latField)); @@ -570,7 +570,7 @@ assertEquals(expected[x], results); for(int i =0 ; i < results; i++){ - org.apache.lucene.document.Document d = searcher.doc(scoreDocs[i].doc); + Document d = searcher.doc2(scoreDocs[i].doc); String name = d.get("name"); double rsLat = Double.parseDouble(d.get(latField)); Index: lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java =================================================================== --- lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java (revision 1148487) +++ lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java (working copy) @@ -233,7 +233,7 @@ ScoreDoc[] scoreDocs = hits.scoreDocs; for(int i=0;i fieldsToAdd; + + /** Load only fields named in the provided Set<String>. */ + public Document2StoredFieldVisitor(Set fieldsToAdd) { + this.fieldsToAdd = fieldsToAdd; + } + + /** Load only fields named in the provided Set<String>. */ + public Document2StoredFieldVisitor(String... fields) { + fieldsToAdd = new HashSet(fields.length); + for(String field : fields) { + fieldsToAdd.add(field); + } + } + + /** Load all stored fields. */ + public Document2StoredFieldVisitor() { + this.fieldsToAdd = null; + } + + @Override + public boolean binaryField(FieldInfo fieldInfo, IndexInput in, int numBytes) throws IOException { + if (accept(fieldInfo)) { + final byte[] b = new byte[numBytes]; + in.readBytes(b, 0, b.length); + doc.add(new BinaryField(fieldInfo.name, b)); + } else { + in.seek(in.getFilePointer() + numBytes); + } + return false; + } + + @Override + public boolean stringField(FieldInfo fieldInfo, IndexInput in, int numUTF8Bytes) throws IOException { + if (accept(fieldInfo)) { + final byte[] b = new byte[numUTF8Bytes]; + in.readBytes(b, 0, b.length); + FieldType ft = new FieldType(TextField.TYPE_STORED); + ft.setStoreTermVectors(fieldInfo.storeTermVector); + ft.setStoreTermVectorPositions(fieldInfo.storePositionWithTermVector); + ft.setStoreTermVectorOffsets(fieldInfo.storeOffsetWithTermVector); + ft.setStoreTermVectors(fieldInfo.storeTermVector); + doc.add(new Field(fieldInfo.name, + false, + ft, + new String(b, "UTF-8"))); + } else { + in.seek(in.getFilePointer() + numUTF8Bytes); + } + return false; + } + + @Override + public boolean intField(FieldInfo fieldInfo, int value) { + if (accept(fieldInfo)) { + FieldType ft = new FieldType(NumericField.TYPE_STORED); + ft.setIndexed(fieldInfo.isIndexed); + doc.add(new NumericField(fieldInfo.name, ft).setIntValue(value)); + } + return false; + } + + @Override + public boolean longField(FieldInfo fieldInfo, long value) { + if (accept(fieldInfo)) { + FieldType ft = new FieldType(NumericField.TYPE_STORED); + ft.setIndexed(fieldInfo.isIndexed); + doc.add(new NumericField(fieldInfo.name, ft).setLongValue(value)); + } + return false; + } + + @Override + public boolean floatField(FieldInfo fieldInfo, float value) { + if (accept(fieldInfo)) { + FieldType ft = new FieldType(NumericField.TYPE_STORED); + ft.setIndexed(fieldInfo.isIndexed); + doc.add(new NumericField(fieldInfo.name, ft).setFloatValue(value)); + } + return false; + } + + @Override + public boolean doubleField(FieldInfo fieldInfo, double value) { + if (accept(fieldInfo)) { + FieldType ft = new FieldType(NumericField.TYPE_STORED); + ft.setIndexed(fieldInfo.isIndexed); + doc.add(new NumericField(fieldInfo.name, ft).setDoubleValue(value)); + } + return false; + 
} + + private boolean accept(FieldInfo fieldInfo) { + return fieldsToAdd == null || fieldsToAdd.contains(fieldInfo.name); + } + + public Document getDocument() { + return doc; + } +} \ No newline at end of file Index: lucene/src/java/org/apache/lucene/index/IndexReader.java =================================================================== --- lucene/src/java/org/apache/lucene/index/IndexReader.java (revision 1148487) +++ lucene/src/java/org/apache/lucene/index/IndexReader.java (working copy) @@ -954,7 +954,7 @@ * #document(int)}. If you want to load a subset, use * {@link DocumentStoredFieldVisitor}. */ public abstract void document(int docID, StoredFieldVisitor visitor) throws CorruptIndexException, IOException; - + // nocommit -- the new document(int docID) API should // clearly advertise that only field types/values are // preserved -- index time metadata like boost, omitNorm, @@ -981,6 +981,13 @@ return visitor.getDocument(); } + public org.apache.lucene.document2.Document document2(int docID) throws CorruptIndexException, IOException { + ensureOpen(); + final Document2StoredFieldVisitor visitor = new Document2StoredFieldVisitor(); + document(docID, visitor); + return visitor.getDocument(); + } + /** Returns true if any documents have been deleted */ public abstract boolean hasDeletions(); Index: lucene/src/java/org/apache/lucene/search/IndexSearcher.java =================================================================== --- lucene/src/java/org/apache/lucene/search/IndexSearcher.java (revision 1148487) +++ lucene/src/java/org/apache/lucene/search/IndexSearcher.java (working copy) @@ -243,11 +243,15 @@ return reader.document(docID); } + public org.apache.lucene.document2.Document doc2(int docID) throws CorruptIndexException, IOException { + return reader.document2(docID); + } + /* Sugar for .getIndexReader().document(docID, fieldVisitor) */ public void doc(int docID, StoredFieldVisitor fieldVisitor) throws CorruptIndexException, IOException { reader.document(docID, fieldVisitor); } - + /** Expert: Set the SimilarityProvider implementation used by this Searcher. 
* */ Index: lucene/src/test-framework/org/apache/lucene/index/DocHelper.java =================================================================== --- lucene/src/test-framework/org/apache/lucene/index/DocHelper.java (revision 1148487) +++ lucene/src/test-framework/org/apache/lucene/index/DocHelper.java (working copy) @@ -297,8 +297,4 @@ public static int numFields(Document doc) { return doc.size(); } - - public static int numFields2(org.apache.lucene.document.Document doc) { - return doc.getFields().size(); - } } Index: lucene/src/test-framework/org/apache/lucene/util/LineFileDocs.java =================================================================== --- lucene/src/test-framework/org/apache/lucene/util/LineFileDocs.java (revision 1148487) +++ lucene/src/test-framework/org/apache/lucene/util/LineFileDocs.java (working copy) @@ -28,8 +28,11 @@ import java.util.zip.GZIPInputStream; import java.util.Random; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.FieldType; +import org.apache.lucene.document2.StringField; +import org.apache.lucene.document2.TextField; /** Minimal port of contrib/benchmark's LneDocSource + * DocMaker, so tests can enum docs from a line file created @@ -117,19 +120,24 @@ public DocState() { doc = new Document(); - title = new Field("title", "", Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS); + title = new StringField("title", ""); doc.add(title); - titleTokenized = new Field("titleTokenized", "", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS); + FieldType ft = new FieldType(TextField.TYPE_STORED); + ft.setStoreTermVectors(true); + ft.setStoreTermVectorOffsets(true); + ft.setStoreTermVectorPositions(true); + + titleTokenized = new Field("titleTokenized", ft, ""); doc.add(titleTokenized); - body = new Field("body", "", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS); + body = new Field("body", ft, ""); doc.add(body); - id = new Field("docid", "", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS); + id = new Field("docid", StringField.TYPE_STORED, ""); doc.add(id); - date = new Field("date", "", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS); + date = new Field("date", StringField.TYPE_STORED, ""); doc.add(date); } } Index: lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java =================================================================== --- lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java (revision 1148487) +++ lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java (working copy) @@ -36,10 +36,6 @@ import java.util.regex.Pattern; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.Field.Index; -import org.apache.lucene.document.Field.Store; -import org.apache.lucene.document.Field.TermVector; import org.apache.lucene.document2.FieldType; import org.apache.lucene.index.*; import org.apache.lucene.index.codecs.Codec; @@ -1062,18 +1058,6 @@ return dir; } - /** Returns a new field instance. - * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */ - public static Field newField(String name, String value, Index index) { - return newField(random, name, value, index); - } - - /** Returns a new field instance. 
- * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */ - public static Field newField(String name, String value, Store store, Index index) { - return newField(random, name, value, store, index); - } - public static org.apache.lucene.document2.Field newField(String name, String value, FieldType type) { return newField(random, name, value, type); } @@ -1114,67 +1098,6 @@ return new org.apache.lucene.document2.Field(name, newType, value); } - /** - * Returns a new Field instance. Use this when the test does not - * care about some specific field settings (most tests) - *
- */ - public static Field newField(String name, String value, Store store, Index index, TermVector tv) { - return newField(random, name, value, store, index, tv); - } - - /** Returns a new field instance, using the specified random. - * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */ - public static Field newField(Random random, String name, String value, Index index) { - return newField(random, name, value, Store.NO, index); - } - - /** Returns a new field instance, using the specified random. - * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */ - public static Field newField(Random random, String name, String value, Store store, Index index) { - return newField(random, name, value, store, index, TermVector.NO); - } - - /** Returns a new field instance, using the specified random. - * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */ - public static Field newField(Random random, String name, String value, Store store, Index index, TermVector tv) { - if (usually(random)) { - // most of the time, don't modify the params - return new Field(name, value, store, index, tv); - } - - if (!index.isIndexed()) - return new Field(name, value, store, index, tv); - - if (!store.isStored() && random.nextBoolean()) - store = Store.YES; // randomly store it - - tv = randomTVSetting(random, tv); - - return new Field(name, value, store, index, tv); - } - - static final TermVector tvSettings[] = { - TermVector.NO, TermVector.YES, TermVector.WITH_OFFSETS, - TermVector.WITH_POSITIONS, TermVector.WITH_POSITIONS_OFFSETS - }; - - private static TermVector randomTVSetting(Random random, TermVector minimum) { - switch(minimum) { - case NO: return tvSettings[_TestUtil.nextInt(random, 0, tvSettings.length-1)]; - case YES: return tvSettings[_TestUtil.nextInt(random, 1, tvSettings.length-1)]; - case WITH_OFFSETS: return random.nextBoolean() ? TermVector.WITH_OFFSETS - : TermVector.WITH_POSITIONS_OFFSETS; - case WITH_POSITIONS: return random.nextBoolean() ? 
TermVector.WITH_POSITIONS - : TermVector.WITH_POSITIONS_OFFSETS; - default: return TermVector.WITH_POSITIONS_OFFSETS; - } - } - /** return a random Locale from the available locales on the system */ public static Locale randomLocale(Random random) { Locale locales[] = Locale.getAvailableLocales(); Index: lucene/src/test/org/apache/lucene/TestDemo.java =================================================================== --- lucene/src/test/org/apache/lucene/TestDemo.java (revision 1148487) +++ lucene/src/test/org/apache/lucene/TestDemo.java (working copy) @@ -72,7 +72,7 @@ assertEquals(1, hits.totalHits); // Iterate through the results: for (int i = 0; i < hits.scoreDocs.length; i++) { - org.apache.lucene.document.Document hitDoc = isearcher.doc(hits.scoreDocs[i].doc); + Document hitDoc = isearcher.doc2(hits.scoreDocs[i].doc); assertEquals(text, hitDoc.get("fieldname")); } Index: lucene/src/test/org/apache/lucene/TestSearch.java =================================================================== --- lucene/src/test/org/apache/lucene/TestSearch.java (revision 1148487) +++ lucene/src/test/org/apache/lucene/TestSearch.java (working copy) @@ -127,7 +127,7 @@ out.println(hits.length + " total results"); for (int i = 0 ; i < hits.length && i < 10; i++) { - org.apache.lucene.document.Document d = searcher.doc(hits[i].doc); + Document d = searcher.doc2(hits[i].doc); out.println(i + " " + hits[i].score + " " + d.get("contents")); } } Index: lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java =================================================================== --- lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java (revision 1148487) +++ lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java (working copy) @@ -142,7 +142,7 @@ out.println(hits.length + " total results\n"); for (int i = 0 ; i < hits.length; i++) { if ( i < 10 || (i > 94 && i < 105) ) { - org.apache.lucene.document.Document d = searcher.doc(hits[i].doc); + Document d = searcher.doc2(hits[i].doc); out.println(i + " " + d.get(ID_FIELD)); } } @@ -152,7 +152,7 @@ assertEquals("total results", expectedCount, hits.length); for (int i = 0 ; i < hits.length; i++) { if (i < 10 || (i > 94 && i < 105) ) { - org.apache.lucene.document.Document d = searcher.doc(hits[i].doc); + Document d = searcher.doc2(hits[i].doc); assertEquals("check " + i, String.valueOf(i), d.get(ID_FIELD)); } } Index: lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (revision 1148487) +++ lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (working copy) @@ -31,7 +31,6 @@ import org.apache.lucene.document2.Field; import org.apache.lucene.document2.FieldType; import org.apache.lucene.document2.TextField; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.document2.NumericField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.search.DefaultSimilarity; @@ -287,12 +286,12 @@ for(int i=0;i<35;i++) { if (!delDocs.get(i)) { - org.apache.lucene.document.Document d = reader.document(i); - List fields = d.getFields(); + Document d = reader.document2(i); + List fields = d.getFields(); if (d.getField("content3") == null) { final int numFields = 5; assertEquals(numFields, fields.size()); - org.apache.lucene.document.Field f = d.getField("id"); + IndexableField f = d.getField("id"); assertEquals(""+i, f.stringValue()); f = 
d.getField("utf8"); @@ -320,7 +319,7 @@ // First document should be #21 since it's norm was // increased: - org.apache.lucene.document.Document d = searcher.getIndexReader().document(hits[0].doc); + Document d = searcher.getIndexReader().document2(hits[0].doc); assertEquals("didn't get the right document first", "21", d.get("id")); doTestHits(hits, 34, searcher.getIndexReader()); @@ -366,7 +365,7 @@ // make sure searching sees right # hits IndexSearcher searcher = new IndexSearcher(dir, true); ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs; - org.apache.lucene.document.Document d = searcher.getIndexReader().document(hits[0].doc); + Document d = searcher.getIndexReader().document2(hits[0].doc); assertEquals("wrong first document", "21", d.get("id")); doTestHits(hits, 44, searcher.getIndexReader()); searcher.close(); @@ -385,7 +384,7 @@ searcher = new IndexSearcher(dir, true); hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs; assertEquals("wrong number of hits", 43, hits.length); - d = searcher.doc(hits[0].doc); + d = searcher.doc2(hits[0].doc); assertEquals("wrong first document", "22", d.get("id")); doTestHits(hits, 43, searcher.getIndexReader()); searcher.close(); @@ -398,7 +397,7 @@ searcher = new IndexSearcher(dir, true); hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs; assertEquals("wrong number of hits", 43, hits.length); - d = searcher.doc(hits[0].doc); + d = searcher.doc2(hits[0].doc); doTestHits(hits, 43, searcher.getIndexReader()); assertEquals("wrong first document", "22", d.get("id")); searcher.close(); @@ -414,7 +413,7 @@ IndexSearcher searcher = new IndexSearcher(dir, true); ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs; assertEquals("wrong number of hits", 34, hits.length); - org.apache.lucene.document.Document d = searcher.doc(hits[0].doc); + Document d = searcher.doc2(hits[0].doc); assertEquals("wrong first document", "21", d.get("id")); searcher.close(); @@ -430,7 +429,7 @@ searcher = new IndexSearcher(dir, true); hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs; assertEquals("wrong number of hits", 33, hits.length); - d = searcher.doc(hits[0].doc); + d = searcher.doc2(hits[0].doc); assertEquals("wrong first document", "22", d.get("id")); doTestHits(hits, 33, searcher.getIndexReader()); searcher.close(); @@ -443,7 +442,7 @@ searcher = new IndexSearcher(dir, true); hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs; assertEquals("wrong number of hits", 33, hits.length); - d = searcher.doc(hits[0].doc); + d = searcher.doc2(hits[0].doc); assertEquals("wrong first document", "22", d.get("id")); doTestHits(hits, 33, searcher.getIndexReader()); searcher.close(); @@ -684,12 +683,12 @@ for (int id=10; id<15; id++) { ScoreDoc[] hits = searcher.search(NumericRangeQuery.newIntRange("trieInt", 4, Integer.valueOf(id), Integer.valueOf(id), true, true), 100).scoreDocs; assertEquals("wrong number of hits", 1, hits.length); - org.apache.lucene.document.Document d = searcher.doc(hits[0].doc); + Document d = searcher.doc2(hits[0].doc); assertEquals(String.valueOf(id), d.get("id")); hits = searcher.search(NumericRangeQuery.newLongRange("trieLong", 4, Long.valueOf(id), Long.valueOf(id), true, true), 100).scoreDocs; assertEquals("wrong number of hits", 1, hits.length); - d = searcher.doc(hits[0].doc); + d = searcher.doc2(hits[0].doc); 
assertEquals(String.valueOf(id), d.get("id")); } Index: lucene/src/test/org/apache/lucene/index/TestDirectoryReader.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestDirectoryReader.java (revision 1148487) +++ lucene/src/test/org/apache/lucene/index/TestDirectoryReader.java (working copy) @@ -81,12 +81,12 @@ sis.read(dir); IndexReader reader = openReader(); assertTrue(reader != null); - org.apache.lucene.document.Document newDoc1 = reader.document(0); + Document newDoc1 = reader.document2(0); assertTrue(newDoc1 != null); - assertTrue(DocHelper.numFields2(newDoc1) == DocHelper.numFields(doc1) - DocHelper.unstored.size()); - org.apache.lucene.document.Document newDoc2 = reader.document(1); + assertTrue(DocHelper.numFields(newDoc1) == DocHelper.numFields(doc1) - DocHelper.unstored.size()); + Document newDoc2 = reader.document2(1); assertTrue(newDoc2 != null); - assertTrue(DocHelper.numFields2(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size()); + assertTrue(DocHelper.numFields(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size()); TermFreqVector vector = reader.getTermFreqVector(0, DocHelper.TEXT_FIELD_2_KEY); assertTrue(vector != null); TestSegmentReader.checkNorms(reader); Index: lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java (revision 1148487) +++ lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java (working copy) @@ -28,7 +28,6 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.PayloadAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.document2.Document; import org.apache.lucene.document2.Field; import org.apache.lucene.document2.FieldType; @@ -69,19 +68,19 @@ //After adding the document, we should be able to read it back in SegmentReader reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR); assertTrue(reader != null); - org.apache.lucene.document.Document doc = reader.document(0); + Document doc = reader.document2(0); assertTrue(doc != null); //System.out.println("Document: " + doc); - Fieldable [] fields = doc.getFields("textField2"); + IndexableField [] fields = doc.getFields("textField2"); assertTrue(fields != null && fields.length == 1); assertTrue(fields[0].stringValue().equals(DocHelper.FIELD_2_TEXT)); - assertTrue(fields[0].isTermVectorStored()); + assertTrue(fields[0].storeTermVectors()); fields = doc.getFields("textField1"); assertTrue(fields != null && fields.length == 1); assertTrue(fields[0].stringValue().equals(DocHelper.FIELD_1_TEXT)); - assertFalse(fields[0].isTermVectorStored()); + assertFalse(fields[0].storeTermVectors()); fields = doc.getFields("keyField"); assertTrue(fields != null && fields.length == 1); Index: lucene/src/test/org/apache/lucene/index/TestFieldsReader.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestFieldsReader.java (revision 1148487) +++ lucene/src/test/org/apache/lucene/index/TestFieldsReader.java (working copy) @@ -22,9 +22,9 @@ import java.util.*; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.NumericField; -import 
org.apache.lucene.document.Fieldable; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.NumericField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.search.FieldCache; import org.apache.lucene.store.BufferedIndexInput; @@ -67,32 +67,32 @@ assertTrue(dir != null); assertTrue(fieldInfos != null); IndexReader reader = IndexReader.open(dir); - Document doc = reader.document(0); + Document doc = reader.document2(0); assertTrue(doc != null); assertTrue(doc.getField(DocHelper.TEXT_FIELD_1_KEY) != null); - Fieldable field = doc.getField(DocHelper.TEXT_FIELD_2_KEY); + Field field = (Field) doc.getField(DocHelper.TEXT_FIELD_2_KEY); assertTrue(field != null); - assertTrue(field.isTermVectorStored() == true); + assertTrue(field.storeTermVectors() == true); - assertTrue(field.isStoreOffsetWithTermVector() == true); - assertTrue(field.isStorePositionWithTermVector() == true); + assertTrue(field.storeTermVectorOffsets() == true); + assertTrue(field.storeTermVectorPositions() == true); - field = doc.getField(DocHelper.TEXT_FIELD_3_KEY); + field = (Field) doc.getField(DocHelper.TEXT_FIELD_3_KEY); assertTrue(field != null); - assertTrue(field.isTermVectorStored() == false); - assertTrue(field.isStoreOffsetWithTermVector() == false); - assertTrue(field.isStorePositionWithTermVector() == false); + assertTrue(field.storeTermVectors() == false); + assertTrue(field.storeTermVectorOffsets() == false); + assertTrue(field.storeTermVectorPositions() == false); - field = doc.getField(DocHelper.NO_TF_KEY); + field = (Field) doc.getField(DocHelper.NO_TF_KEY); assertTrue(field != null); - assertTrue(field.isTermVectorStored() == false); - assertTrue(field.isStoreOffsetWithTermVector() == false); - assertTrue(field.isStorePositionWithTermVector() == false); + assertTrue(field.storeTermVectors() == false); + assertTrue(field.storeTermVectorOffsets() == false); + assertTrue(field.storeTermVectorPositions() == false); - DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(DocHelper.TEXT_FIELD_3_KEY); + Document2StoredFieldVisitor visitor = new Document2StoredFieldVisitor(DocHelper.TEXT_FIELD_3_KEY); reader.document(0, visitor); - final List fields = visitor.getDocument().getFields(); + final List fields = visitor.getDocument().getFields(); assertEquals(1, fields.size()); assertEquals(DocHelper.TEXT_FIELD_3_KEY, fields.get(0).name()); @@ -229,25 +229,25 @@ RandomIndexWriter w = new RandomIndexWriter(random, dir); final int numDocs = atLeast(500); final Number[] answers = new Number[numDocs]; - final NumericField.DataType[] typeAnswers = new NumericField.DataType[numDocs]; + final org.apache.lucene.document.NumericField.DataType[] typeAnswers = new org.apache.lucene.document.NumericField.DataType[numDocs]; for(int id=0;id fieldable1 = doc1.getFields(); - List fieldable2 = doc2.getFields(); + Document doc1 = index1.document2(i); + Document doc2 = index2.document2(i); + List fieldable1 = doc1.getFields(); + List fieldable2 = doc2.getFields(); assertEquals("Different numbers of fields for doc " + i + ".", fieldable1.size(), fieldable2.size()); - Iterator itField1 = fieldable1.iterator(); - Iterator itField2 = fieldable2.iterator(); + Iterator itField1 = fieldable1.iterator(); + Iterator itField2 = fieldable2.iterator(); while (itField1.hasNext()) { - org.apache.lucene.document.Field curField1 = (org.apache.lucene.document.Field) itField1.next(); - org.apache.lucene.document.Field curField2 = 
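TestFieldsReader above now casts the retrieved IndexableField back to org.apache.lucene.document2.Field and reads the term-vector flags directly off the field, replacing the old isTermVectorStored()/isStoreOffsetWithTermVector() accessors. A sketch of that inspection under the same assumption (the field name is illustrative):

import java.io.IOException;

import org.apache.lucene.document2.Document;
import org.apache.lucene.document2.Field;
import org.apache.lucene.index.IndexReader;

class TermVectorFlags {
  // Report whether a stored field was indexed with term vectors, offsets and positions.
  static void printFlags(IndexReader reader, int docID, String fieldName) throws IOException {
    Document doc = reader.document2(docID);
    Field field = (Field) doc.getField(fieldName);   // document2.Field exposes the per-field flags
    System.out.println(fieldName
        + " vectors=" + field.storeTermVectors()
        + " offsets=" + field.storeTermVectorOffsets()
        + " positions=" + field.storeTermVectorPositions());
  }
}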
(org.apache.lucene.document.Field) itField2.next(); + Field curField1 = (Field) itField1.next(); + Field curField2 = (Field) itField2.next(); assertEquals("Different fields names for doc " + i + ".", curField1.name(), curField2.name()); assertEquals("Different field values for doc " + i + ".", curField1.stringValue(), curField2.stringValue()); } Index: lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java (revision 1148487) +++ lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java (working copy) @@ -188,7 +188,7 @@ if (i>0) { int k = i-1; int n = j + k*M; - org.apache.lucene.document.Document prevItereationDoc = reader.document(n); + Document prevItereationDoc = reader.document2(n); assertNotNull(prevItereationDoc); String id = prevItereationDoc.get("id"); assertEquals(k+"_"+j, id); Index: lucene/src/test/org/apache/lucene/index/TestIndexWriter.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestIndexWriter.java (revision 1148487) +++ lucene/src/test/org/apache/lucene/index/TestIndexWriter.java (working copy) @@ -42,7 +42,6 @@ import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.document2.BinaryField; import org.apache.lucene.document2.Document; import org.apache.lucene.document2.Field; @@ -1012,8 +1011,8 @@ w.close(); IndexReader ir = IndexReader.open(dir, true); - org.apache.lucene.document.Document doc2 = ir.document(0); - org.apache.lucene.document.Field f2 = doc2.getField("binary"); + Document doc2 = ir.document2(0); + IndexableField f2 = doc2.getField("binary"); b = f2.binaryValue(null).bytes; assertTrue(b != null); assertEquals(17, b.length, 17); @@ -1283,8 +1282,8 @@ w.close(); IndexReader ir = IndexReader.open(dir, true); - org.apache.lucene.document.Document doc2 = ir.document(0); - org.apache.lucene.document.Field f3 = doc2.getField("binary"); + Document doc2 = ir.document2(0); + IndexableField f3 = doc2.getField("binary"); b = f3.binaryValue(null).bytes; assertTrue(b != null); assertEquals(17, b.length, 17); @@ -1325,20 +1324,20 @@ doc.add(newField("zzz", "1 2 3", customType)); w.addDocument(doc); IndexReader r = w.getReader(); - org.apache.lucene.document.Document doc2 = r.document(0); - Iterator it = doc2.getFields().iterator(); + Document doc2 = r.document2(0); + Iterator it = doc2.getFields().iterator(); assertTrue(it.hasNext()); - org.apache.lucene.document.Field f = (org.apache.lucene.document.Field) it.next(); + Field f = (Field) it.next(); assertEquals(f.name(), "zzz"); assertEquals(f.stringValue(), "a b c"); assertTrue(it.hasNext()); - f = (org.apache.lucene.document.Field) it.next(); + f = (Field) it.next(); assertEquals(f.name(), "aaa"); assertEquals(f.stringValue(), "a b c"); assertTrue(it.hasNext()); - f = (org.apache.lucene.document.Field) it.next(); + f = (Field) it.next(); assertEquals(f.name(), "zzz"); assertEquals(f.stringValue(), "1 2 3"); assertFalse(it.hasNext()); @@ -1669,7 +1668,7 @@ } TopDocs hits = s.search(new TermQuery(new Term("id", testID)), 1); assertEquals(1, hits.totalHits); - org.apache.lucene.document.Document doc = r.document(hits.scoreDocs[0].doc); + Document doc = r.document2(hits.scoreDocs[0].doc); 
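TestIndexWriter above reads a stored binary field through IndexableField.binaryValue(null), which yields a BytesRef (or null when the field carries no binary content), instead of the old Field.getBinaryValue(). A sketch under that assumption, reusing the test's "binary" field name:

import java.io.IOException;

import org.apache.lucene.document2.Document;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexableField;

class BinaryFieldRead {
  // Return the raw stored bytes of the "binary" field, or null if the document has none.
  static byte[] readBinary(IndexReader reader, int docID) throws IOException {
    Document doc = reader.document2(docID);
    IndexableField f = doc.getField("binary");
    return (f == null || f.binaryValue(null) == null) ? null : f.binaryValue(null).bytes;
  }
}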
Document docExp = docs.get(testID); for(int i=0;i docIDs = new ArrayList(); final SubDocs subDocs = new SubDocs(packID, docIDs); final List docsList = new ArrayList(); @@ -524,7 +525,7 @@ startDocID = docID; } lastDocID = docID; - final Document doc = s.doc(docID); + final Document doc = s.doc2(docID); assertEquals(subDocs.packID, doc.get("packID")); } Index: lucene/src/test/org/apache/lucene/index/TestParallelReader.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestParallelReader.java (revision 1148487) +++ lucene/src/test/org/apache/lucene/index/TestParallelReader.java (working copy) @@ -212,8 +212,8 @@ assertEquals(parallelHits.length, singleHits.length); for(int i = 0; i < parallelHits.length; i++) { assertEquals(parallelHits[i].score, singleHits[i].score, 0.001f); - org.apache.lucene.document.Document docParallel = parallel.doc(parallelHits[i].doc); - org.apache.lucene.document.Document docSingle = single.doc(singleHits[i].doc); + Document docParallel = parallel.doc2(parallelHits[i].doc); + Document docSingle = single.doc2(singleHits[i].doc); assertEquals(docParallel.get("f1"), docSingle.get("f1")); assertEquals(docParallel.get("f2"), docSingle.get("f2")); assertEquals(docParallel.get("f3"), docSingle.get("f3")); Index: lucene/src/test/org/apache/lucene/index/TestRollingUpdates.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestRollingUpdates.java (revision 1148487) +++ lucene/src/test/org/apache/lucene/index/TestRollingUpdates.java (working copy) @@ -40,14 +40,14 @@ IndexReader r = null; final int numUpdates = (int) (SIZE * (2+random.nextDouble())); for(int docIter=0;docIter= SIZE && random.nextInt(50) == 17) { Index: lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java (revision 1148487) +++ lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java (working copy) @@ -87,13 +87,13 @@ BufferedIndexInput.BUFFER_SIZE, true, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR); assertTrue(mergedReader != null); assertTrue(mergedReader.numDocs() == 2); - org.apache.lucene.document.Document newDoc1 = mergedReader.document(0); + Document newDoc1 = mergedReader.document2(0); assertTrue(newDoc1 != null); //There are 2 unstored fields on the document - assertTrue(DocHelper.numFields2(newDoc1) == DocHelper.numFields(doc1) - DocHelper.unstored.size()); - org.apache.lucene.document.Document newDoc2 = mergedReader.document(1); + assertTrue(DocHelper.numFields(newDoc1) == DocHelper.numFields(doc1) - DocHelper.unstored.size()); + Document newDoc2 = mergedReader.document2(1); assertTrue(newDoc2 != null); - assertTrue(DocHelper.numFields2(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size()); + assertTrue(DocHelper.numFields(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size()); DocsEnum termDocs = MultiFields.getTermDocsEnum(mergedReader, MultiFields.getDeletedDocs(mergedReader), Index: lucene/src/test/org/apache/lucene/index/TestSegmentReader.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestSegmentReader.java (revision 1148487) +++ lucene/src/test/org/apache/lucene/index/TestSegmentReader.java (working copy) @@ -25,7 +25,6 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.BytesRef; -import 
org.apache.lucene.document.Fieldable; import org.apache.lucene.document2.Document; import org.apache.lucene.store.Directory; @@ -61,13 +60,13 @@ public void testDocument() throws IOException { assertTrue(reader.numDocs() == 1); assertTrue(reader.maxDoc() >= 1); - org.apache.lucene.document.Document result = reader.document(0); + Document result = reader.document2(0); assertTrue(result != null); //There are 2 unstored fields on the document that are not preserved across writing - assertTrue(DocHelper.numFields2(result) == DocHelper.numFields(testDoc) - DocHelper.unstored.size()); + assertTrue(DocHelper.numFields(result) == DocHelper.numFields(testDoc) - DocHelper.unstored.size()); - List fields = result.getFields(); - for (final Fieldable field : fields ) { + List fields = result.getFields(); + for (final IndexableField field : fields ) { assertTrue(field != null); assertTrue(DocHelper.nameValues.containsKey(field.name())); } Index: lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java (revision 1148487) +++ lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java (working copy) @@ -30,7 +30,7 @@ import junit.framework.Assert; import org.apache.lucene.analysis.MockAnalyzer; -import org.apache.lucene.document.Fieldable; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.document2.Document; import org.apache.lucene.document2.Field; import org.apache.lucene.document2.FieldType; @@ -133,8 +133,8 @@ static Term idTerm = new Term("id",""); IndexingThread[] threads; - static Comparator fieldNameComparator = new Comparator() { - public int compare(Fieldable o1, Fieldable o2) { + static Comparator fieldNameComparator = new Comparator() { + public int compare(IndexableField o1, IndexableField o2) { return o1.name().compareTo(o2.name()); } }; @@ -294,7 +294,7 @@ Bits delDocs = sub.getDeletedDocs(); System.out.println(" " + ((SegmentReader) sub).getSegmentInfo()); for(int docID=0;docID0 && (searcher.getIndexReader().getSequentialSubReaders() == null || @@ -211,9 +211,9 @@ ScoreDoc[] sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count", count, sd.length ); - org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc); + Document doc=searcher.doc2(sd[0].doc); assertEquals("First doc", startOffset, Integer.parseInt(doc.get(field)) ); - doc=searcher.doc(sd[sd.length-1].doc); + doc=searcher.doc2(sd[sd.length-1].doc); assertEquals("Last doc", (count-1)*distance+startOffset, Integer.parseInt(doc.get(field)) ); q=NumericRangeQuery.newIntRange(field, precisionStep, null, upper, false, true); @@ -221,9 +221,9 @@ sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count", count, sd.length ); - doc=searcher.doc(sd[0].doc); + doc=searcher.doc2(sd[0].doc); assertEquals("First doc", startOffset, Integer.parseInt(doc.get(field)) ); - doc=searcher.doc(sd[sd.length-1].doc); + doc=searcher.doc2(sd[sd.length-1].doc); assertEquals("Last doc", (count-1)*distance+startOffset, Integer.parseInt(doc.get(field)) ); } @@ -252,9 +252,9 @@ ScoreDoc[] sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count", noDocs-count, sd.length ); - org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc); + Document doc=searcher.doc2(sd[0].doc); assertEquals("First doc", count*distance+startOffset, Integer.parseInt(doc.get(field)) ); - doc=searcher.doc(sd[sd.length-1].doc); + 
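TestSegmentReader and TestStressIndexing2 above iterate and compare fields as IndexableField rather than Fieldable. A small sketch of the same name-based ordering, assuming getFields() exposes the document's fields as a List of IndexableField as the patch suggests:

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

import org.apache.lucene.document2.Document;
import org.apache.lucene.index.IndexableField;

class FieldOrdering {
  // Mirror of the comparator in TestStressIndexing2: order fields by name.
  static final Comparator<IndexableField> BY_NAME = new Comparator<IndexableField>() {
    public int compare(IndexableField o1, IndexableField o2) {
      return o1.name().compareTo(o2.name());
    }
  };

  // Copy into a fresh list before sorting so the document's own field list is left untouched.
  static List<IndexableField> sortedFields(Document doc) {
    List<IndexableField> fields = new ArrayList<IndexableField>(doc.getFields());
    Collections.sort(fields, BY_NAME);
    return fields;
  }
}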
doc=searcher.doc2(sd[sd.length-1].doc); assertEquals("Last doc", (noDocs-1)*distance+startOffset, Integer.parseInt(doc.get(field)) ); q=NumericRangeQuery.newIntRange(field, precisionStep, lower, null, true, false); @@ -262,9 +262,9 @@ sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count", noDocs-count, sd.length ); - doc=searcher.doc(sd[0].doc); + doc=searcher.doc2(sd[0].doc); assertEquals("First doc", count*distance+startOffset, Integer.parseInt(doc.get(field)) ); - doc=searcher.doc(sd[sd.length-1].doc); + doc=searcher.doc2(sd[sd.length-1].doc); assertEquals("Last doc", (noDocs-1)*distance+startOffset, Integer.parseInt(doc.get(field)) ); } Index: lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java (revision 1148487) +++ lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java (working copy) @@ -150,9 +150,9 @@ ScoreDoc[] sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count"+type, count, sd.length ); - org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc); + Document doc=searcher.doc2(sd[0].doc); assertEquals("First doc"+type, 2*distance+startOffset, Long.parseLong(doc.get(field)) ); - doc=searcher.doc(sd[sd.length-1].doc); + doc=searcher.doc2(sd[sd.length-1].doc); assertEquals("Last doc"+type, (1+count)*distance+startOffset, Long.parseLong(doc.get(field)) ); if (i>0 && (searcher.getIndexReader().getSequentialSubReaders() == null || @@ -217,9 +217,9 @@ ScoreDoc[] sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count", count, sd.length ); - org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc); + Document doc=searcher.doc2(sd[0].doc); assertEquals("First doc", startOffset, Long.parseLong(doc.get(field)) ); - doc=searcher.doc(sd[sd.length-1].doc); + doc=searcher.doc2(sd[sd.length-1].doc); assertEquals("Last doc", (count-1)*distance+startOffset, Long.parseLong(doc.get(field)) ); q=NumericRangeQuery.newLongRange(field, precisionStep, null, upper, false, true); @@ -227,9 +227,9 @@ sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count", count, sd.length ); - doc=searcher.doc(sd[0].doc); + doc=searcher.doc2(sd[0].doc); assertEquals("First doc", startOffset, Long.parseLong(doc.get(field)) ); - doc=searcher.doc(sd[sd.length-1].doc); + doc=searcher.doc2(sd[sd.length-1].doc); assertEquals("Last doc", (count-1)*distance+startOffset, Long.parseLong(doc.get(field)) ); } @@ -263,9 +263,9 @@ ScoreDoc[] sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count", noDocs-count, sd.length ); - org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc); + Document doc=searcher.doc2(sd[0].doc); assertEquals("First doc", count*distance+startOffset, Long.parseLong(doc.get(field)) ); - doc=searcher.doc(sd[sd.length-1].doc); + doc=searcher.doc2(sd[sd.length-1].doc); assertEquals("Last doc", (noDocs-1)*distance+startOffset, Long.parseLong(doc.get(field)) ); q=NumericRangeQuery.newLongRange(field, precisionStep, lower, null, true, false); @@ -273,9 +273,9 @@ sd = topDocs.scoreDocs; assertNotNull(sd); assertEquals("Score doc count", noDocs-count, sd.length ); - doc=searcher.doc(sd[0].doc); + doc=searcher.doc2(sd[0].doc); assertEquals("First doc", count*distance+startOffset, Long.parseLong(doc.get(field)) ); - doc=searcher.doc(sd[sd.length-1].doc); + doc=searcher.doc2(sd[sd.length-1].doc); assertEquals("Last doc", 
(noDocs-1)*distance+startOffset, Long.parseLong(doc.get(field)) ); } Index: lucene/src/test/org/apache/lucene/search/TestSort.java =================================================================== --- lucene/src/test/org/apache/lucene/search/TestSort.java (revision 1148487) +++ lucene/src/test/org/apache/lucene/search/TestSort.java (working copy) @@ -35,6 +35,7 @@ import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.MultiReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; @@ -350,18 +351,18 @@ int lastDocId = 0; boolean fail = false; for (int x = 0; x < n; ++x) { - org.apache.lucene.document.Document doc2 = searcher.doc(result[x].doc); - String[] v = doc2.getValues("tracer"); - String[] v2 = doc2.getValues("tracer2"); + Document doc2 = searcher.doc2(result[x].doc); + IndexableField[] v = doc2.getFields("tracer"); + IndexableField[] v2 = doc2.getFields("tracer2"); for (int j = 0; j < v.length; ++j) { if (last != null) { - int cmp = v[j].compareTo(last); + int cmp = v[j].stringValue().compareTo(last); if (!(cmp >= 0)) { // ensure first field is in order fail = true; System.out.println("fail:" + v[j] + " < " + last); } if (cmp == 0) { // ensure second field is in reverse order - cmp = v2[j].compareTo(lastSub); + cmp = v2[j].stringValue().compareTo(lastSub); if (cmp > 0) { fail = true; System.out.println("rev field fail:" + v2[j] + " > " + lastSub); @@ -373,8 +374,8 @@ } } } - last = v[j]; - lastSub = v2[j]; + last = v[j].stringValue(); + lastSub = v2[j].stringValue(); lastDocId = result[x].doc; buff.append(v[j] + "(" + v2[j] + ")(" + result[x].doc+") "); } @@ -956,10 +957,10 @@ StringBuilder buff = new StringBuilder(10); int n = result.length; for (int i=0; i(); // Initialize the map with the default fields. - fields.put(BODY_FIELD, new Field(BODY_FIELD, "", bodyStore, bodyIndex, termVector)); - fields.put(TITLE_FIELD, new Field(TITLE_FIELD, "", store, index, termVector)); - fields.put(DATE_FIELD, new Field(DATE_FIELD, "", store, index, termVector)); - fields.put(ID_FIELD, new Field(ID_FIELD, "", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS)); - fields.put(NAME_FIELD, new Field(NAME_FIELD, "", store, index, termVector)); + fields.put(BODY_FIELD, new Field(BODY_FIELD, bodyFt, "")); + fields.put(TITLE_FIELD, new Field(TITLE_FIELD, ft, "")); + fields.put(DATE_FIELD, new Field(DATE_FIELD, ft, "")); + fields.put(ID_FIELD, new Field(ID_FIELD, StringField.TYPE_STORED, "")); + fields.put(NAME_FIELD, new Field(NAME_FIELD, ft, "")); numericFields.put(DATE_MSEC_FIELD, new NumericField(DATE_MSEC_FIELD)); numericFields.put(TIME_SEC_FIELD, new NumericField(TIME_SEC_FIELD)); @@ -125,14 +125,14 @@ * reuseFields was set to true, then it attempts to reuse a * Field instance. If such a field does not exist, it creates a new one. 
*/ - Field getField(String name, Store store, Index index, TermVector termVector) { + Field getField(String name, FieldType ft) { if (!reuseFields) { - return new Field(name, "", store, index, termVector); + return new Field(name, ft, ""); } Field f = fields.get(name); if (f == null) { - f = new Field(name, "", store, index, termVector); + f = new Field(name, ft, ""); fields.put(name, f); } return f; @@ -179,12 +179,9 @@ protected Config config; - protected Store storeVal = Store.NO; - protected Store bodyStoreVal = Store.NO; - protected Index indexVal = Index.ANALYZED_NO_NORMS; - protected Index bodyIndexVal = Index.ANALYZED; - protected TermVector termVecVal = TermVector.NO; - + protected FieldType valType; + protected FieldType bodyValType; + protected ContentSource source; protected boolean reuseFields; protected boolean indexProperties; @@ -196,6 +193,13 @@ private int printNum = 0; + public DocMaker() { + valType = new FieldType(TextField.TYPE_UNSTORED); + valType.setOmitNorms(true); + + bodyValType = new FieldType(TextField.TYPE_UNSTORED); + } + // create a doc // use only part of the body, modify it to keep the rest (or use all if size==0). // reset the docdata properties so they are not added more than once. @@ -206,7 +210,10 @@ doc.getFields().clear(); // Set ID_FIELD - Field idField = ds.getField(ID_FIELD, storeVal, Index.NOT_ANALYZED_NO_NORMS, termVecVal); + FieldType ft = new FieldType(valType); + ft.setIndexed(false); + + Field idField = ds.getField(ID_FIELD, ft); int id; if (r != null) { id = r.nextInt(updateDocIDLimit); @@ -223,7 +230,7 @@ String name = docData.getName(); if (name == null) name = ""; name = cnt < 0 ? name : name + "_" + cnt; - Field nameField = ds.getField(NAME_FIELD, storeVal, indexVal, termVecVal); + Field nameField = ds.getField(NAME_FIELD, valType); nameField.setValue(name); doc.add(nameField); @@ -242,7 +249,7 @@ } else { dateString = ""; } - Field dateStringField = ds.getField(DATE_FIELD, storeVal, indexVal, termVecVal); + Field dateStringField = ds.getField(DATE_FIELD, valType); dateStringField.setValue(dateString); doc.add(dateStringField); @@ -264,7 +271,7 @@ // Set TITLE_FIELD String title = docData.getTitle(); - Field titleField = ds.getField(TITLE_FIELD, storeVal, indexVal, termVecVal); + Field titleField = ds.getField(TITLE_FIELD, valType); titleField.setValue(title == null ? 
"" : title); doc.add(titleField); @@ -285,12 +292,12 @@ bdy = body.substring(0, size); // use part docData.setBody(body.substring(size)); // some left } - Field bodyField = ds.getField(BODY_FIELD, bodyStoreVal, bodyIndexVal, termVecVal); + Field bodyField = ds.getField(BODY_FIELD, bodyValType); bodyField.setValue(bdy); doc.add(bodyField); if (storeBytes) { - Field bytesField = ds.getField(BYTES_FIELD, Store.YES, Index.NOT_ANALYZED_NO_NORMS, TermVector.NO); + Field bytesField = ds.getField(BYTES_FIELD, StringField.TYPE_STORED); bytesField.setValue(bdy.getBytes("UTF-8")); doc.add(bytesField); } @@ -300,7 +307,7 @@ Properties props = docData.getProps(); if (props != null) { for (final Map.Entry entry : props.entrySet()) { - Field f = ds.getField((String) entry.getKey(), storeVal, indexVal, termVecVal); + Field f = ds.getField((String) entry.getKey(), valType); f.setValue((String) entry.getValue()); doc.add(f); } @@ -319,7 +326,7 @@ protected DocState getDocState() { DocState ds = docState.get(); if (ds == null) { - ds = new DocState(reuseFields, storeVal, bodyStoreVal, indexVal, bodyIndexVal, termVecVal); + ds = new DocState(reuseFields, valType, bodyValType); docState.set(ds); } return ds; @@ -455,33 +462,23 @@ boolean norms = config.get("doc.tokenized.norms", false); boolean bodyNorms = config.get("doc.body.tokenized.norms", true); boolean termVec = config.get("doc.term.vector", false); - storeVal = (stored ? Field.Store.YES : Field.Store.NO); - bodyStoreVal = (bodyStored ? Field.Store.YES : Field.Store.NO); - if (tokenized) { - indexVal = norms ? Index.ANALYZED : Index.ANALYZED_NO_NORMS; - } else { - indexVal = norms ? Index.NOT_ANALYZED : Index.NOT_ANALYZED_NO_NORMS; - } - - if (bodyTokenized) { - bodyIndexVal = bodyNorms ? Index.ANALYZED : Index.ANALYZED_NO_NORMS; - } else { - bodyIndexVal = bodyNorms ? 
Index.NOT_ANALYZED : Index.NOT_ANALYZED_NO_NORMS; - } - boolean termVecPositions = config.get("doc.term.vector.positions", false); boolean termVecOffsets = config.get("doc.term.vector.offsets", false); - if (termVecPositions && termVecOffsets) { - termVecVal = TermVector.WITH_POSITIONS_OFFSETS; - } else if (termVecPositions) { - termVecVal = TermVector.WITH_POSITIONS; - } else if (termVecOffsets) { - termVecVal = TermVector.WITH_OFFSETS; - } else if (termVec) { - termVecVal = TermVector.YES; - } else { - termVecVal = TermVector.NO; - } + + valType.setStored(stored); + bodyValType.setStored(bodyStored); + valType.setTokenized(tokenized); + valType.setOmitNorms(!norms); + bodyValType.setTokenized(bodyTokenized); + bodyValType.setOmitNorms(!bodyNorms); + + valType.setStoreTermVectors(termVec); + valType.setStoreTermVectorPositions(termVecPositions); + valType.setStoreTermVectorOffsets(termVecOffsets); + bodyValType.setStoreTermVectors(termVec); + bodyValType.setStoreTermVectorPositions(termVecPositions); + bodyValType.setStoreTermVectorOffsets(termVecOffsets); + storeBytes = config.get("doc.store.body.bytes", false); reuseFields = config.get("doc.reuse.fields", true); Index: modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AddDocTask.java =================================================================== --- modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AddDocTask.java (revision 1148487) +++ modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/AddDocTask.java (working copy) @@ -19,7 +19,7 @@ import org.apache.lucene.benchmark.byTask.PerfRunData; import org.apache.lucene.benchmark.byTask.feeds.DocMaker; -import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; /** * Add a document, optionally with of a certain size. Index: modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/BenchmarkHighlighter.java =================================================================== --- modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/BenchmarkHighlighter.java (revision 1148487) +++ modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/BenchmarkHighlighter.java (working copy) @@ -18,7 +18,7 @@ */ import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; import org.apache.lucene.index.IndexReader; public abstract class BenchmarkHighlighter { Index: modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java =================================================================== --- modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java (revision 1148487) +++ modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ReadTask.java (working copy) @@ -27,9 +27,9 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.benchmark.byTask.PerfRunData; import org.apache.lucene.benchmark.byTask.feeds.QueryMaker; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Fieldable; +import org.apache.lucene.document2.Document; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.MultiFields; import org.apache.lucene.search.Collector; import org.apache.lucene.search.TopDocs; @@ -99,7 +99,7 @@ Bits delDocs = MultiFields.getDeletedDocs(reader); for (int m = 0; m < reader.maxDoc(); m++) { if (null == delDocs || ! 
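The DocMaker rewrite above folds the old Store/Index/TermVector triple into a single FieldType whose setters are driven by the benchmark config. A condensed sketch of that mapping, assuming the FieldType setters shown in the patch (the boolean parameters stand in for the config.get(...) lookups):

import org.apache.lucene.document2.Field;
import org.apache.lucene.document2.FieldType;
import org.apache.lucene.document2.TextField;

class FieldTypeFromConfig {
  // Start from the unstored-text defaults, then apply the flags, as DocMaker now does for valType/bodyValType.
  static FieldType build(boolean stored, boolean tokenized, boolean norms,
                         boolean termVec, boolean termVecPositions, boolean termVecOffsets) {
    FieldType ft = new FieldType(TextField.TYPE_UNSTORED);
    ft.setStored(stored);
    ft.setTokenized(tokenized);
    ft.setOmitNorms(!norms);
    ft.setStoreTermVectors(termVec);
    ft.setStoreTermVectorPositions(termVecPositions);
    ft.setStoreTermVectorOffsets(termVecOffsets);
    return ft;
  }

  // Fields are now created (and reused via setValue) through the (name, type, value) constructor.
  static Field newField(String name, FieldType ft, String value) {
    return new Field(name, ft, value);
  }
}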
delDocs.get(m)) { - doc = reader.document(m); + doc = reader.document2(m); res += (doc == null ? 0 : 1); } } @@ -144,7 +144,7 @@ System.out.println("numDocs() = " + reader.numDocs()); for(int i=0;i0 && sufficientFields[i]; Index: modules/benchmark/src/java/org/apache/lucene/benchmark/utils/ExtractWikipedia.java =================================================================== --- modules/benchmark/src/java/org/apache/lucene/benchmark/utils/ExtractWikipedia.java (revision 1148487) +++ modules/benchmark/src/java/org/apache/lucene/benchmark/utils/ExtractWikipedia.java (working copy) @@ -25,7 +25,7 @@ import org.apache.lucene.benchmark.byTask.feeds.DocMaker; import org.apache.lucene.benchmark.byTask.feeds.NoMoreDataException; import org.apache.lucene.benchmark.byTask.utils.Config; -import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; /** * Extract the downloaded Wikipedia dump into separate files for indexing. Index: modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/DocMakerTest.java =================================================================== --- modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/DocMakerTest.java (revision 1148487) +++ modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/feeds/DocMakerTest.java (working copy) @@ -29,7 +29,7 @@ import org.apache.lucene.benchmark.byTask.tasks.CreateIndexTask; import org.apache.lucene.benchmark.byTask.tasks.TaskSequence; import org.apache.lucene.benchmark.byTask.utils.Config; -import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.TermQuery; @@ -137,28 +137,28 @@ // Don't set anything, use the defaults doc = createTestNormsDocument(false, false, false, false); - assertTrue(doc.getField(DocMaker.TITLE_FIELD).getOmitNorms()); - assertFalse(doc.getField(DocMaker.BODY_FIELD).getOmitNorms()); + assertTrue(doc.getField(DocMaker.TITLE_FIELD).omitNorms()); + assertFalse(doc.getField(DocMaker.BODY_FIELD).omitNorms()); // Set norms to false doc = createTestNormsDocument(true, false, false, false); - assertTrue(doc.getField(DocMaker.TITLE_FIELD).getOmitNorms()); - assertFalse(doc.getField(DocMaker.BODY_FIELD).getOmitNorms()); + assertTrue(doc.getField(DocMaker.TITLE_FIELD).omitNorms()); + assertFalse(doc.getField(DocMaker.BODY_FIELD).omitNorms()); // Set norms to true doc = createTestNormsDocument(true, true, false, false); - assertFalse(doc.getField(DocMaker.TITLE_FIELD).getOmitNorms()); - assertFalse(doc.getField(DocMaker.BODY_FIELD).getOmitNorms()); + assertFalse(doc.getField(DocMaker.TITLE_FIELD).omitNorms()); + assertFalse(doc.getField(DocMaker.BODY_FIELD).omitNorms()); // Set body norms to false doc = createTestNormsDocument(false, false, true, false); - assertTrue(doc.getField(DocMaker.TITLE_FIELD).getOmitNorms()); - assertTrue(doc.getField(DocMaker.BODY_FIELD).getOmitNorms()); + assertTrue(doc.getField(DocMaker.TITLE_FIELD).omitNorms()); + assertTrue(doc.getField(DocMaker.BODY_FIELD).omitNorms()); // Set body norms to true doc = createTestNormsDocument(false, false, true, true); - assertTrue(doc.getField(DocMaker.TITLE_FIELD).getOmitNorms()); - assertFalse(doc.getField(DocMaker.BODY_FIELD).getOmitNorms()); + assertTrue(doc.getField(DocMaker.TITLE_FIELD).omitNorms()); + assertFalse(doc.getField(DocMaker.BODY_FIELD).omitNorms()); } } Index: 
modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/CountingHighlighterTestTask.java =================================================================== --- modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/CountingHighlighterTestTask.java (revision 1148487) +++ modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/CountingHighlighterTestTask.java (working copy) @@ -26,7 +26,7 @@ import org.apache.lucene.search.highlight.QueryScorer; import org.apache.lucene.search.highlight.TokenSources; import org.apache.lucene.search.Query; -import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; import org.apache.lucene.index.IndexReader; import java.io.IOException; @@ -45,7 +45,7 @@ @Override protected Document retrieveDoc(IndexReader ir, int id) throws IOException { - Document document = ir.document(id); + Document document = ir.document2(id); if (document != null) { numDocsRetrieved++; } Index: modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTaskTest.java =================================================================== --- modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTaskTest.java (revision 1148487) +++ modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/tasks/WriteLineDocTaskTest.java (working copy) @@ -32,10 +32,9 @@ import org.apache.lucene.benchmark.byTask.feeds.DocMaker; import org.apache.lucene.benchmark.byTask.utils.Config; import org.apache.lucene.benchmark.byTask.utils.StreamUtils.Type; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.Field.Index; -import org.apache.lucene.document.Field.Store; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.Field; +import org.apache.lucene.document2.StringField; /** Tests the functionality of {@link WriteLineDocTask}. 
*/ public class WriteLineDocTaskTest extends BenchmarkTestCase { @@ -46,9 +45,9 @@ @Override public Document makeDocument() throws Exception { Document doc = new Document(); - doc.add(new Field(BODY_FIELD, "body", Store.NO, Index.NOT_ANALYZED_NO_NORMS)); - doc.add(new Field(TITLE_FIELD, "title", Store.NO, Index.NOT_ANALYZED_NO_NORMS)); - doc.add(new Field(DATE_FIELD, "date", Store.NO, Index.NOT_ANALYZED_NO_NORMS)); + doc.add(new StringField(BODY_FIELD, "body")); + doc.add(new StringField(TITLE_FIELD, "title")); + doc.add(new StringField(DATE_FIELD, "date")); return doc; } @@ -60,9 +59,9 @@ @Override public Document makeDocument() throws Exception { Document doc = new Document(); - doc.add(new Field(BODY_FIELD, "body\r\ntext\ttwo", Store.NO, Index.NOT_ANALYZED_NO_NORMS)); - doc.add(new Field(TITLE_FIELD, "title\r\ntext", Store.NO, Index.NOT_ANALYZED_NO_NORMS)); - doc.add(new Field(DATE_FIELD, "date\r\ntext", Store.NO, Index.NOT_ANALYZED_NO_NORMS)); + doc.add(new StringField(BODY_FIELD, "body\r\ntext\ttwo")); + doc.add(new StringField(TITLE_FIELD, "title\r\ntext")); + doc.add(new StringField(DATE_FIELD, "date\r\ntext")); return doc; } @@ -73,8 +72,8 @@ @Override public Document makeDocument() throws Exception { Document doc = new Document(); - doc.add(new Field(TITLE_FIELD, "title", Store.NO, Index.NOT_ANALYZED_NO_NORMS)); - doc.add(new Field(DATE_FIELD, "date", Store.NO, Index.NOT_ANALYZED_NO_NORMS)); + doc.add(new StringField(TITLE_FIELD, "title")); + doc.add(new StringField(DATE_FIELD, "date")); return doc; } } @@ -84,8 +83,8 @@ @Override public Document makeDocument() throws Exception { Document doc = new Document(); - doc.add(new Field(BODY_FIELD, "body", Store.NO, Index.NOT_ANALYZED_NO_NORMS)); - doc.add(new Field(DATE_FIELD, "date", Store.NO, Index.NOT_ANALYZED_NO_NORMS)); + doc.add(new StringField(BODY_FIELD, "body")); + doc.add(new StringField(DATE_FIELD, "date")); return doc; } } @@ -95,7 +94,7 @@ @Override public Document makeDocument() throws Exception { Document doc = new Document(); - doc.add(new Field(DATE_FIELD, "date", Store.NO, Index.NOT_ANALYZED_NO_NORMS)); + doc.add(new StringField(DATE_FIELD, "date")); return doc; } } @@ -106,7 +105,7 @@ @Override public Document makeDocument() throws Exception { Document doc = new Document(); - doc.add(new Field(DATE_FIELD, "date", Store.NO, Index.NOT_ANALYZED_NO_NORMS)); + doc.add(new StringField(DATE_FIELD, "date")); return doc; } } @@ -126,9 +125,9 @@ public Document makeDocument() throws Exception { Document doc = new Document(); String name = Thread.currentThread().getName(); - doc.add(new Field(BODY_FIELD, "body_" + name, Store.NO, Index.NOT_ANALYZED_NO_NORMS)); - doc.add(new Field(TITLE_FIELD, "title_" + name, Store.NO, Index.NOT_ANALYZED_NO_NORMS)); - doc.add(new Field(DATE_FIELD, "date_" + name, Store.NO, Index.NOT_ANALYZED_NO_NORMS)); + doc.add(new StringField(BODY_FIELD, "body_" + name)); + doc.add(new StringField(TITLE_FIELD, "title_" + name)); + doc.add(new StringField(DATE_FIELD, "date_" + name)); return doc; } Index: solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java =================================================================== --- solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java (revision 1148487) +++ solr/contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java (working copy) @@ -28,6 +28,7 @@ import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute; import org.apache.lucene.collation.ICUCollationKeyAnalyzer; 
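WriteLineDocTaskTest above replaces the verbose Field(name, value, Store.NO, Index.NOT_ANALYZED_NO_NORMS) constructor with the StringField sugar class, while tokenized content elsewhere in the patch goes through TextField-based types. A sketch of building a document with both, under those assumptions (field names are illustrative):

import org.apache.lucene.document2.Document;
import org.apache.lucene.document2.Field;
import org.apache.lucene.document2.StringField;
import org.apache.lucene.document2.TextField;

class Document2Construction {
  static Document make(String title, String body) {
    Document doc = new Document();
    // StringField: indexed as a single token, not stored -- the replacement used by the test above.
    doc.add(new StringField("title", title));
    // Tokenized, unstored text via the generic Field plus a TextField type constant.
    doc.add(new Field("body", TextField.TYPE_UNSTORED, body));
    return doc;
  }
}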
import org.apache.lucene.document.Fieldable; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.Query; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TermRangeQuery; @@ -164,7 +165,7 @@ } @Override - public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { + public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException { writer.writeStr(name, f.stringValue(), true); } Index: solr/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java =================================================================== --- solr/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java (revision 1148487) +++ solr/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java (working copy) @@ -32,7 +32,8 @@ import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; -import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.Query; import org.apache.lucene.search.highlight.*; import org.apache.lucene.search.vectorhighlight.FastVectorHighlighter; @@ -415,7 +416,20 @@ // END: Hack SolrParams params = req.getParams(); - String[] docTexts = doc.getValues(fieldName); + IndexableField[] docFields = doc.getFields(fieldName); + List listFields = new ArrayList(); + for (IndexableField field : docFields) { + listFields.add(field.stringValue()); + } + + String[] docTexts; + if (listFields.size() == 0) { + docTexts = new String[0]; + } + else { + docTexts = (String[]) listFields.toArray(new String[listFields.size()]); + } + // according to Document javadoc, doc.getValues() never returns null. 
check empty instead of null if (docTexts.length == 0) return; @@ -538,7 +552,20 @@ private void alternateField( NamedList docSummaries, SolrParams params, Document doc, String fieldName ){ String alternateField = params.getFieldParam(fieldName, HighlightParams.ALTERNATE_FIELD); if (alternateField != null && alternateField.length() > 0) { - String[] altTexts = doc.getValues(alternateField); + IndexableField[] docFields = doc.getFields(alternateField); + List listFields = new ArrayList(); + for (IndexableField field : docFields) { + if (field.binaryValue(null) == null) + listFields.add(field.stringValue()); + } + + String[] altTexts; + if (listFields.size() == 0) { + altTexts = new String[0]; + } + else { + altTexts = listFields.toArray(new String[listFields.size()]); + } if (altTexts != null && altTexts.length > 0){ int alternateFieldLen = params.getFieldInt(fieldName, HighlightParams.ALTERNATE_FIELD_LENGTH,0); if( alternateFieldLen <= 0 ){ Index: solr/src/java/org/apache/solr/response/BinaryResponseWriter.java =================================================================== --- solr/src/java/org/apache/solr/response/BinaryResponseWriter.java (revision 1148487) +++ solr/src/java/org/apache/solr/response/BinaryResponseWriter.java (working copy) @@ -16,8 +16,8 @@ */ package org.apache.solr.response; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Fieldable; +import org.apache.lucene.document2.Document; +import org.apache.lucene.index.IndexableField; import org.apache.solr.common.SolrDocument; import org.apache.solr.common.SolrDocumentList; import org.apache.solr.common.params.CommonParams; @@ -156,7 +156,7 @@ public SolrDocument getDoc(Document doc) { SolrDocument solrDoc = new SolrDocument(); - for (Fieldable f : doc.getFields()) { + for (IndexableField f : doc) { String fieldName = f.name(); if( !returnFields.wantsField(fieldName) ) continue; @@ -165,7 +165,7 @@ if(sf != null) ft =sf.getType(); Object val; if (ft == null) { // handle fields not in the schema - if (f.isBinary()) val = f.binaryValue(null).bytes; + if (f.binaryValue(null)!=null) val = f.binaryValue(null).bytes; else val = f.stringValue(); } else { try { Index: solr/src/java/org/apache/solr/response/TextResponseWriter.java =================================================================== --- solr/src/java/org/apache/solr/response/TextResponseWriter.java (revision 1148487) +++ solr/src/java/org/apache/solr/response/TextResponseWriter.java (working copy) @@ -21,8 +21,8 @@ import java.io.Writer; import java.util.*; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Fieldable; +import org.apache.lucene.document2.Document; +import org.apache.lucene.index.IndexableField; import org.apache.solr.common.SolrDocument; import org.apache.solr.common.SolrDocumentList; import org.apache.solr.common.util.FastWriter; @@ -120,8 +120,8 @@ } else if (val instanceof String) { writeStr(name, val.toString(), true); // micro-optimization... 
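DefaultSolrHighlighter above can no longer call doc.getValues(name) on the document2 Document, so it rebuilds the String[] from getFields(name), and for the alternate field skips entries that only carry binary content. A standalone sketch of that conversion, assuming the accessors used in the patch:

import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.document2.Document;
import org.apache.lucene.index.IndexableField;

class StoredTextValues {
  // Collect the string values of a (possibly multi-valued) stored field,
  // ignoring fields that only hold binary content.
  static String[] textValues(Document doc, String fieldName) {
    IndexableField[] docFields = doc.getFields(fieldName);
    List<String> values = new ArrayList<String>();
    for (IndexableField field : docFields) {
      if (field.binaryValue(null) == null) {
        values.add(field.stringValue());
      }
    }
    return values.toArray(new String[values.size()]);
  }
}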
using toString() avoids a cast first - } else if (val instanceof Fieldable) { - Fieldable f = (Fieldable)val; + } else if (val instanceof IndexableField) { + IndexableField f = (IndexableField)val; SchemaField sf = schema.getFieldOrNull( f.name() ); if( sf != null ) { sf.getType().write(this, name, f); @@ -202,7 +202,7 @@ public final SolrDocument toSolrDocument( Document doc ) { SolrDocument out = new SolrDocument(); - for( Fieldable f : doc.getFields() ) { + for( IndexableField f : doc) { if( "gack_i".equals( f.name() ) ) { System.out.println( f ); } Index: solr/src/java/org/apache/solr/schema/BCDIntField.java =================================================================== --- solr/src/java/org/apache/solr/schema/BCDIntField.java (revision 1148487) +++ solr/src/java/org/apache/solr/schema/BCDIntField.java (working copy) @@ -77,7 +77,7 @@ } @Override - public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { + public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException { writer.writeInt(name,toExternal(f)); } } Index: solr/src/java/org/apache/solr/schema/BinaryField.java =================================================================== --- solr/src/java/org/apache/solr/schema/BinaryField.java (revision 1148487) +++ solr/src/java/org/apache/solr/schema/BinaryField.java (working copy) @@ -36,7 +36,7 @@ } @Override - public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { + public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException { writer.writeStr(name, toBase64String(toObject(f)), false); } Index: solr/src/java/org/apache/solr/schema/BoolField.java =================================================================== --- solr/src/java/org/apache/solr/schema/BoolField.java (revision 1148487) +++ solr/src/java/org/apache/solr/schema/BoolField.java (working copy) @@ -157,7 +157,7 @@ } @Override - public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { + public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException { writer.writeBool(name, f.stringValue().charAt(0) == 'T'); } } Index: solr/src/java/org/apache/solr/schema/ByteField.java =================================================================== --- solr/src/java/org/apache/solr/schema/ByteField.java (revision 1148487) +++ solr/src/java/org/apache/solr/schema/ByteField.java (working copy) @@ -17,6 +17,7 @@ */ import org.apache.lucene.document.Fieldable; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.search.SortField; import org.apache.lucene.search.cache.ByteValuesCreator; import org.apache.lucene.search.cache.CachedArrayCreator; @@ -52,7 +53,7 @@ } @Override - public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { + public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException { String s = f.stringValue(); // these values may be from a legacy lucene index, which may Index: solr/src/java/org/apache/solr/schema/CollationField.java =================================================================== --- solr/src/java/org/apache/solr/schema/CollationField.java (revision 1148487) +++ solr/src/java/org/apache/solr/schema/CollationField.java (working copy) @@ -186,7 +186,7 @@ } @Override - public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { + public void write(TextResponseWriter writer, String 
name, IndexableField f) throws IOException { writer.writeStr(name, f.stringValue(), true); } Index: solr/src/java/org/apache/solr/schema/DateField.java =================================================================== --- solr/src/java/org/apache/solr/schema/DateField.java (revision 1148487) +++ solr/src/java/org/apache/solr/schema/DateField.java (working copy) @@ -245,7 +245,7 @@ } @Override - public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { + public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException { writer.writeDate(name, toExternal(f)); } Index: solr/src/java/org/apache/solr/schema/DoubleField.java =================================================================== --- solr/src/java/org/apache/solr/schema/DoubleField.java (revision 1148487) +++ solr/src/java/org/apache/solr/schema/DoubleField.java (working copy) @@ -53,7 +53,7 @@ } @Override - public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { + public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException { String s = f.stringValue(); // these values may be from a legacy lucene index, which may Index: solr/src/java/org/apache/solr/schema/ExternalFileField.java =================================================================== --- solr/src/java/org/apache/solr/schema/ExternalFileField.java (revision 1148487) +++ solr/src/java/org/apache/solr/schema/ExternalFileField.java (working copy) @@ -77,7 +77,7 @@ } @Override - public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { + public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException { throw new UnsupportedOperationException(); } Index: solr/src/java/org/apache/solr/schema/FieldType.java =================================================================== --- solr/src/java/org/apache/solr/schema/FieldType.java (revision 1148487) +++ solr/src/java/org/apache/solr/schema/FieldType.java (working copy) @@ -600,7 +600,7 @@ /** * calls back to TextResponseWriter to write the field value */ - public abstract void write(TextResponseWriter writer, String name, Fieldable f) throws IOException; + public abstract void write(TextResponseWriter writer, String name, IndexableField f) throws IOException; /** Index: solr/src/java/org/apache/solr/schema/FloatField.java =================================================================== --- solr/src/java/org/apache/solr/schema/FloatField.java (revision 1148487) +++ solr/src/java/org/apache/solr/schema/FloatField.java (working copy) @@ -51,7 +51,7 @@ } @Override - public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { + public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException { String s = f.stringValue(); // these values may be from a legacy lucene index, which may Index: solr/src/java/org/apache/solr/schema/GeoHashField.java =================================================================== --- solr/src/java/org/apache/solr/schema/GeoHashField.java (revision 1148487) +++ solr/src/java/org/apache/solr/schema/GeoHashField.java (working copy) @@ -69,7 +69,7 @@ } @Override - public void write(TextResponseWriter writer, String name, Fieldable f) + public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException { writer.writeStr(name, toExternal(f), false); } Index: solr/src/java/org/apache/solr/schema/IntField.java 
=================================================================== --- solr/src/java/org/apache/solr/schema/IntField.java (revision 1148487) +++ solr/src/java/org/apache/solr/schema/IntField.java (working copy) @@ -51,7 +51,7 @@ } @Override - public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { + public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException { String s = f.stringValue(); // these values may be from a legacy lucene index, which may Index: solr/src/java/org/apache/solr/schema/LatLonType.java =================================================================== --- solr/src/java/org/apache/solr/schema/LatLonType.java (revision 1148487) +++ solr/src/java/org/apache/solr/schema/LatLonType.java (working copy) @@ -269,7 +269,7 @@ } @Override - public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { + public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException { writer.writeStr(name, f.stringValue(), false); } Index: solr/src/java/org/apache/solr/schema/LongField.java =================================================================== --- solr/src/java/org/apache/solr/schema/LongField.java (revision 1148487) +++ solr/src/java/org/apache/solr/schema/LongField.java (working copy) @@ -53,7 +53,7 @@ } @Override - public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { + public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException { String s = f.stringValue(); // these values may be from a legacy lucene index, which may Index: solr/src/java/org/apache/solr/schema/PointType.java =================================================================== --- solr/src/java/org/apache/solr/schema/PointType.java (revision 1148487) +++ solr/src/java/org/apache/solr/schema/PointType.java (working copy) @@ -119,7 +119,7 @@ } @Override - public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { + public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException { writer.writeStr(name, f.stringValue(), false); } Index: solr/src/java/org/apache/solr/schema/RandomSortField.java =================================================================== --- solr/src/java/org/apache/solr/schema/RandomSortField.java (revision 1148487) +++ solr/src/java/org/apache/solr/schema/RandomSortField.java (working copy) @@ -98,7 +98,7 @@ } @Override - public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { } + public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException { } private static FieldComparatorSource randomComparatorSource = new FieldComparatorSource() { Index: solr/src/java/org/apache/solr/schema/SchemaField.java =================================================================== --- solr/src/java/org/apache/solr/schema/SchemaField.java (revision 1148487) +++ solr/src/java/org/apache/solr/schema/SchemaField.java (working copy) @@ -119,7 +119,7 @@ + "}"; } - public void write(TextResponseWriter writer, String name, Fieldable val) throws IOException { + public void write(TextResponseWriter writer, String name, IndexableField val) throws IOException { // name is passed in because it may be null if name should not be used. 
type.write(writer,name,val); } Index: solr/src/java/org/apache/solr/schema/ShortField.java =================================================================== --- solr/src/java/org/apache/solr/schema/ShortField.java (revision 1148487) +++ solr/src/java/org/apache/solr/schema/ShortField.java (working copy) @@ -56,7 +56,7 @@ } @Override - public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { + public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException { String s = f.stringValue(); // these values may be from a legacy lucene index, which may Index: solr/src/java/org/apache/solr/schema/SortableDoubleField.java =================================================================== --- solr/src/java/org/apache/solr/schema/SortableDoubleField.java (revision 1148487) +++ solr/src/java/org/apache/solr/schema/SortableDoubleField.java (working copy) @@ -91,7 +91,7 @@ } @Override - public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { + public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException { String sval = f.stringValue(); writer.writeDouble(name, NumberUtils.SortableStr2double(sval)); } Index: solr/src/java/org/apache/solr/schema/SortableFloatField.java =================================================================== --- solr/src/java/org/apache/solr/schema/SortableFloatField.java (revision 1148487) +++ solr/src/java/org/apache/solr/schema/SortableFloatField.java (working copy) @@ -93,7 +93,7 @@ } @Override - public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { + public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException { String sval = f.stringValue(); writer.writeFloat(name, NumberUtils.SortableStr2float(sval)); } Index: solr/src/java/org/apache/solr/schema/SortableIntField.java =================================================================== --- solr/src/java/org/apache/solr/schema/SortableIntField.java (revision 1148487) +++ solr/src/java/org/apache/solr/schema/SortableIntField.java (working copy) @@ -96,7 +96,7 @@ } @Override - public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { + public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException { String sval = f.stringValue(); writer.writeInt(name, NumberUtils.SortableStr2int(sval,0,sval.length())); } Index: solr/src/java/org/apache/solr/schema/SortableLongField.java =================================================================== --- solr/src/java/org/apache/solr/schema/SortableLongField.java (revision 1148487) +++ solr/src/java/org/apache/solr/schema/SortableLongField.java (working copy) @@ -94,7 +94,7 @@ } @Override - public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { + public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException { String sval = f.stringValue(); writer.writeLong(name, NumberUtils.SortableStr2long(sval,0,sval.length())); } Index: solr/src/java/org/apache/solr/schema/StrField.java =================================================================== --- solr/src/java/org/apache/solr/schema/StrField.java (revision 1148487) +++ solr/src/java/org/apache/solr/schema/StrField.java (working copy) @@ -42,7 +42,7 @@ } @Override - public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { + public void write(TextResponseWriter writer, 
String name, IndexableField f) throws IOException { writer.writeStr(name, f.stringValue(), true); } Index: solr/src/java/org/apache/solr/schema/TextField.java =================================================================== --- solr/src/java/org/apache/solr/schema/TextField.java (revision 1148487) +++ solr/src/java/org/apache/solr/schema/TextField.java (working copy) @@ -24,6 +24,7 @@ import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.MultiPhraseQuery; +import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; @@ -74,7 +75,7 @@ } @Override - public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { + public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException { writer.writeStr(name, f.stringValue(), true); } Index: solr/src/java/org/apache/solr/schema/TrieDateField.java =================================================================== --- solr/src/java/org/apache/solr/schema/TrieDateField.java (revision 1148487) +++ solr/src/java/org/apache/solr/schema/TrieDateField.java (working copy) @@ -78,7 +78,7 @@ @Override - public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { + public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException { wrappedField.write(writer, name, f); } Index: solr/src/java/org/apache/solr/schema/TrieField.java =================================================================== --- solr/src/java/org/apache/solr/schema/TrieField.java (revision 1148487) +++ solr/src/java/org/apache/solr/schema/TrieField.java (working copy) @@ -232,7 +232,7 @@ @Override - public void write(TextResponseWriter writer, String name, Fieldable f) throws IOException { + public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException { writer.writeVal(name, toObject(f)); } Index: solr/src/java/org/apache/solr/schema/UUIDField.java =================================================================== --- solr/src/java/org/apache/solr/schema/UUIDField.java (revision 1148487) +++ solr/src/java/org/apache/solr/schema/UUIDField.java (working copy) @@ -54,7 +54,7 @@ } @Override - public void write(TextResponseWriter writer, String name, Fieldable f) + public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException { writer.writeStr(name, f.stringValue(), false); } Index: solr/src/java/org/apache/solr/search/SolrIndexSearcher.java =================================================================== --- solr/src/java/org/apache/solr/search/SolrIndexSearcher.java (revision 1148487) +++ solr/src/java/org/apache/solr/search/SolrIndexSearcher.java (working copy) @@ -17,10 +17,10 @@ package org.apache.solr.search; -import org.apache.lucene.document.Document; -import org.apache.lucene.document.FieldSelector; -import org.apache.lucene.document.FieldSelectorResult; -import org.apache.lucene.document.FieldSelectorVisitor; +import org.apache.lucene.document2.Document; +import org.apache.lucene.document2.FieldSelector; +import org.apache.lucene.document2.FieldSelectorResult; +import org.apache.lucene.document2.FieldSelectorVisitor; import org.apache.lucene.index.*; import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.search.*; @@ -403,7 
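The long run of Solr schema changes above only touches the write(...) signature: every field type now receives an IndexableField instead of a Fieldable and keeps writing f.stringValue(). A sketch of what a custom type looks like after the change, extending the StrField shown above (the subclass itself is hypothetical):

import java.io.IOException;

import org.apache.lucene.index.IndexableField;
import org.apache.solr.response.TextResponseWriter;
import org.apache.solr.schema.StrField;

public class TrimmedStrField extends StrField {
  @Override
  public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException {
    // Same shape as the patched StrField.write, with a trivial twist so the override is visible.
    writer.writeStr(name, f.stringValue().trim(), true);
  }
}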
+403,7 @@ * Retrieve the {@link Document} instance corresponding to the document id. */ @Override - public Document doc(int i) throws IOException { + public Document doc2(int i) throws IOException { return doc(i, (Set)null); } @@ -432,7 +432,7 @@ } if(!enableLazyFieldLoading || fields == null) { - d = getIndexReader().document(i); + d = getIndexReader().document2(i); } else { final FieldSelectorVisitor visitor = new FieldSelectorVisitor(new SetNonLazyFieldSelector(fields)); getIndexReader().document(i, visitor); Index: solr/src/java/org/apache/solr/update/DocumentBuilder.java =================================================================== --- solr/src/java/org/apache/solr/update/DocumentBuilder.java (revision 1148487) +++ solr/src/java/org/apache/solr/update/DocumentBuilder.java (working copy) @@ -338,10 +338,10 @@ * * @since solr 1.3 */ - public SolrDocument loadStoredFields( SolrDocument doc, org.apache.lucene.document.Document luceneDoc ) + public SolrDocument loadStoredFields( SolrDocument doc, Document luceneDoc ) { - for( Fieldable field : luceneDoc.getFields() ) { - if( field.isStored() ) { + for( IndexableField field : luceneDoc) { + if( field.stored() ) { SchemaField sf = schema.getField( field.name() ); if( !schema.isCopyFieldTarget( sf ) ) { doc.addField( field.name(), sf.getType().toObject( field ) ); Index: solr/src/java/org/apache/solr/util/SolrPluginUtils.java =================================================================== --- solr/src/java/org/apache/solr/util/SolrPluginUtils.java (revision 1148487) +++ solr/src/java/org/apache/solr/util/SolrPluginUtils.java (working copy) @@ -17,7 +17,7 @@ package org.apache.solr.util; -import org.apache.lucene.document.Document; +import org.apache.lucene.document2.Document; import org.apache.lucene.queryParser.ParseException; import org.apache.lucene.queryParser.QueryParser; import org.apache.lucene.search.*; @@ -335,7 +335,7 @@ for (int i=0; i
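DocumentBuilder above now iterates the document2 Document directly (for IndexableField field : luceneDoc) and filters on field.stored(). A sketch of that stored-field traversal, assuming Document is iterable over its IndexableFields as those hunks imply:

import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.document2.Document;
import org.apache.lucene.index.IndexableField;

class StoredFieldNames {
  // Names of all fields that actually carry stored content, in document order.
  static List<String> storedFieldNames(Document doc) {
    List<String> names = new ArrayList<String>();
    for (IndexableField field : doc) {
      if (field.stored()) {
        names.add(field.name());
      }
    }
    return names;
  }
}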