Index: lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java
===================================================================
--- lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java (revision 1148487)
+++ lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/TokenSources.java (working copy)
@@ -81,6 +81,23 @@
return ts;
}
+ public static TokenStream getAnyTokenStream(IndexReader reader, int docId,
+ String field, org.apache.lucene.document2.Document doc, Analyzer analyzer) throws IOException {
+ TokenStream ts = null;
+
+ TermFreqVector tfv = reader.getTermFreqVector(docId, field);
+ if (tfv != null) {
+ if (tfv instanceof TermPositionVector) {
+ ts = getTokenStream((TermPositionVector) tfv);
+ }
+ }
+ // No token info stored so fall back to analyzing raw content
+ if (ts == null) {
+ ts = getTokenStream(doc, field, analyzer);
+ }
+ return ts;
+ }
+
/**
* A convenience method that tries a number of approaches to getting a token
* stream. The cost of finding there are no termVectors in the index is
@@ -283,6 +300,16 @@
return getTokenStream(field, contents, analyzer);
}
+ public static TokenStream getTokenStream(org.apache.lucene.document2.Document doc, String field,
+ Analyzer analyzer) {
+ String contents = doc.get(field);
+ if (contents == null) {
+ throw new IllegalArgumentException("Field " + field
+ + " in document is not stored and cannot be analyzed");
+ }
+ return getTokenStream(field, contents, analyzer);
+ }
+
// convenience method
public static TokenStream getTokenStream(String field, String contents,
Analyzer analyzer) {
Index: lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
===================================================================
--- lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (revision 1148487)
+++ lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (working copy)
@@ -112,7 +112,7 @@
for (int i = 0; i < hits.scoreDocs.length; i++) {
- org.apache.lucene.document.Document doc = searcher.doc(hits.scoreDocs[i].doc);
+ Document doc = searcher.doc2(hits.scoreDocs[i].doc);
String storedField = doc.get(FIELD_NAME);
TokenStream stream = TokenSources.getAnyTokenStream(searcher
@@ -1568,7 +1568,7 @@
TopDocs hits = searcher.search(query, null, 10);
for( int i = 0; i < hits.totalHits; i++ ){
- org.apache.lucene.document.Document doc = searcher.doc( hits.scoreDocs[i].doc );
+ Document doc = searcher.doc2( hits.scoreDocs[i].doc );
String result = h.getBestFragment( a, "t_text1", doc.get( "t_text1" ));
if (VERBOSE) System.out.println("result:" + result);
assertEquals("more random words for second field", result);
Index: lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java
===================================================================
--- lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java (revision 1148487)
+++ lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java (working copy)
@@ -275,12 +275,17 @@
public Document document(int n) throws IOException {
return getIndex().getDocumentsByNumber()[n].getDocument();
}
+
+ @Override
+ public org.apache.lucene.document2.Document document2(int n) throws IOException {
+ return getIndex().getDocumentsByNumber()[n].getDocument2();
+ }
@Override
public void document(int docID, StoredFieldVisitor visitor) throws IOException {
throw new UnsupportedOperationException();
}
-
+
/**
* never ever touch these values. it is the true values, unless norms have
* been touched.
Index: lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
===================================================================
--- lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java (revision 1148487)
+++ lucene/contrib/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java (working copy)
@@ -1229,7 +1229,7 @@
if (DEBUG) System.err.println("MemoryIndexReader.document");
// no-op: there are no stored fields
}
-
+
@Override
public boolean hasDeletions() {
if (DEBUG) System.err.println("MemoryIndexReader.hasDeletions");
Index: lucene/contrib/misc/src/java/org/apache/lucene/document2/FieldSelector.java
===================================================================
--- lucene/contrib/misc/src/java/org/apache/lucene/document2/FieldSelector.java (revision 0)
+++ lucene/contrib/misc/src/java/org/apache/lucene/document2/FieldSelector.java (revision 0)
@@ -0,0 +1,33 @@
+package org.apache.lucene.document2;
+
+/**
+ * Copyright 2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Similar to a {@link java.io.FileFilter}, the FieldSelector allows one to make decisions about
+ * what Fields get loaded on a {@link Document} by {@link FieldSelectorVisitor}
+ *
+ **/
+public interface FieldSelector {
+
+ /**
+ *
+ * @param fieldName the field to accept or reject
+ * @return an instance of {@link FieldSelectorResult}
+ * if the {@link Field} named fieldName should be loaded.
+ */
+ FieldSelectorResult accept(String fieldName);
+}
Index: lucene/contrib/misc/src/java/org/apache/lucene/document2/FieldSelectorResult.java
===================================================================
--- lucene/contrib/misc/src/java/org/apache/lucene/document2/FieldSelectorResult.java (revision 0)
+++ lucene/contrib/misc/src/java/org/apache/lucene/document2/FieldSelectorResult.java (revision 0)
@@ -0,0 +1,76 @@
+package org.apache.lucene.document2;
+
+/**
+ * Copyright 2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Provides information about what should be done with this Field
+ *
+ **/
+public enum FieldSelectorResult {
+
+ /**
+ * Load this {@link Field} every time the {@link Document} is loaded, reading in the data as it is encountered.
+ * {@link Document#getField(String)} and {@link Document#getFieldable(String)} should not return null.
+ *
+ * {@link Document#add(Fieldable)} should be called by the Reader.
+ */
+ LOAD,
+
+ /**
+ * Lazily load this {@link Field}. This means the {@link Field} is valid, but it may not actually contain its data until
+ * invoked. {@link Document#getField(String)} SHOULD NOT BE USED. {@link Document#getFieldable(String)} is safe to use and should
+ * return a valid instance of a {@link Fieldable}.
+ *
+ * {@link Document#add(Fieldable)} should be called by the Reader.
+ */
+ LAZY_LOAD,
+
+ /**
+ * Do not load the {@link Field}. {@link Document#getField(String)} and {@link Document#getFieldable(String)} should return null.
+ * {@link Document#add(Fieldable)} is not called.
+ *
+ * {@link Document#add(Fieldable)} should not be called by the Reader.
+ */
+ NO_LOAD,
+
+ /**
+ * Load this field as in the {@link #LOAD} case, but immediately return from {@link Field} loading for the {@link Document}. Thus, the
+ * Document may not have its complete set of Fields. {@link Document#getField(String)} and {@link Document#getFieldable(String)} should
+ * both be valid for this {@link Field}
+ *
+ * {@link Document#add(Fieldable)} should be called by the Reader.
+ */
+ LOAD_AND_BREAK,
+
+ /** Expert: Load the size of this {@link Field} rather than its value.
+ * Size is measured as number of bytes required to store the field == bytes for a binary or any compressed value, and 2*chars for a String value.
+ * The size is stored as a binary value, represented as an int in a byte[], with the higher order byte first in [0]
+ */
+ SIZE,
+
+ /** Expert: Like {@link #SIZE} but immediately break from the field loading loop, i.e., stop loading further fields, after the size is loaded */
+ SIZE_AND_BREAK,
+
+ /**
+ * Lazily load this {@link Field}, but do not cache the result. This means the {@link Field} is valid, but it may not actually contain its data until
+ * invoked. {@link Document#getField(String)} SHOULD NOT BE USED. {@link Document#getFieldable(String)} is safe to use and should
+ * return a valid instance of a {@link Fieldable}.
+ *
+ * {@link Document#add(Fieldable)} should be called by the Reader.
+ */
+ LATENT
+}
Index: lucene/contrib/misc/src/java/org/apache/lucene/document2/FieldSelectorVisitor.java
===================================================================
--- lucene/contrib/misc/src/java/org/apache/lucene/document2/FieldSelectorVisitor.java (revision 0)
+++ lucene/contrib/misc/src/java/org/apache/lucene/document2/FieldSelectorVisitor.java (revision 0)
@@ -0,0 +1,329 @@
+package org.apache.lucene.document2;
+
+/**
+ * Copyright 2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.Reader;
+
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.document.NumericField.DataType;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldReaderException;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.index.StoredFieldVisitor;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.util.BytesRef;
+
+/** Create this, passing a legacy {@link FieldSelector} to it, then
+ * pass this class to {@link IndexReader#document(int,
+ * StoredFieldVisitor)}, then call {@link #getDocument} to
+ * retrieve the loaded document.
+
+ * <p><b>NOTE</b>: If you use Lazy fields, you should not
+ * access the returned document after the reader has been
+ * closed!
+ */
+
+public class FieldSelectorVisitor extends StoredFieldVisitor {
+
+ private final FieldSelector selector;
+ private final Document doc;
+
+ public FieldSelectorVisitor(FieldSelector selector) {
+ this.selector = selector;
+ doc = new Document();
+ }
+
+ public Document getDocument() {
+ return doc;
+ }
+
+ @Override
+ public boolean binaryField(FieldInfo fieldInfo, IndexInput in, int numBytes) throws IOException {
+ final FieldSelectorResult accept = selector.accept(fieldInfo.name);
+ switch (accept) {
+ case LOAD:
+ case LOAD_AND_BREAK:
+ final byte[] b = new byte[numBytes];
+ in.readBytes(b, 0, b.length);
+ doc.add(new BinaryField(fieldInfo.name, b));
+ return accept != FieldSelectorResult.LOAD;
+ case LAZY_LOAD:
+ case LATENT:
+ addFieldLazy(in, fieldInfo, true, accept == FieldSelectorResult.LAZY_LOAD, numBytes);
+ return false;
+ case SIZE:
+ case SIZE_AND_BREAK:
+ in.seek(in.getFilePointer() + numBytes);
+ addFieldSize(fieldInfo, numBytes);
+ return accept != FieldSelectorResult.SIZE;
+ default:
+ // skip
+ in.seek(in.getFilePointer() + numBytes);
+ return false;
+ }
+ }
+
+ @Override
+ public boolean stringField(FieldInfo fieldInfo, IndexInput in, int numUTF8Bytes) throws IOException {
+ final FieldSelectorResult accept = selector.accept(fieldInfo.name);
+ switch (accept) {
+ case LOAD:
+ case LOAD_AND_BREAK:
+ final byte[] b = new byte[numUTF8Bytes];
+ in.readBytes(b, 0, b.length);
+ FieldType ft = new FieldType(TextField.TYPE_STORED);
+ ft.setStoreTermVectors(fieldInfo.storeTermVector);
+ ft.setStoreTermVectorOffsets(fieldInfo.storeOffsetWithTermVector);
+ ft.setStoreTermVectorPositions(fieldInfo.storePositionWithTermVector);
+ doc.add(new Field(fieldInfo.name, ft, new String(b, "UTF-8")));
+ return accept != FieldSelectorResult.LOAD;
+ case LAZY_LOAD:
+ case LATENT:
+ addFieldLazy(in, fieldInfo, false, accept == FieldSelectorResult.LAZY_LOAD, numUTF8Bytes);
+ return false;
+ case SIZE:
+ case SIZE_AND_BREAK:
+ in.seek(in.getFilePointer() + numUTF8Bytes);
+ addFieldSize(fieldInfo, 2*numUTF8Bytes);
+ return accept != FieldSelectorResult.SIZE;
+ default:
+ // skip
+ in.seek(in.getFilePointer() + numUTF8Bytes);
+ return false;
+ }
+ }
+
+ @Override
+ public boolean intField(FieldInfo fieldInfo, int value) throws IOException {
+ FieldType ft = new FieldType(NumericField.TYPE_STORED);
+ ft.setIndexed(fieldInfo.isIndexed);
+ ft.setOmitNorms(fieldInfo.omitNorms);
+ ft.setOmitTermFreqAndPositions(fieldInfo.omitTermFreqAndPositions);
+ return addNumericField(fieldInfo, new NumericField(fieldInfo.name, ft).setIntValue(value));
+ }
+
+ @Override
+ public boolean longField(FieldInfo fieldInfo, long value) throws IOException {
+ FieldType ft = new FieldType(NumericField.TYPE_STORED);
+ ft.setIndexed(fieldInfo.isIndexed);
+ ft.setOmitNorms(fieldInfo.omitNorms);
+ ft.setOmitTermFreqAndPositions(fieldInfo.omitTermFreqAndPositions);
+ return addNumericField(fieldInfo, new NumericField(fieldInfo.name, ft).setLongValue(value));
+ }
+
+ @Override
+ public boolean floatField(FieldInfo fieldInfo, float value) throws IOException {
+ FieldType ft = new FieldType(NumericField.TYPE_STORED);
+ ft.setIndexed(fieldInfo.isIndexed);
+ ft.setOmitNorms(fieldInfo.omitNorms);
+ ft.setOmitTermFreqAndPositions(fieldInfo.omitTermFreqAndPositions);
+ return addNumericField(fieldInfo, new NumericField(fieldInfo.name, ft).setFloatValue(value));
+ }
+
+ @Override
+ public boolean doubleField(FieldInfo fieldInfo, double value) throws IOException {
+ FieldType ft = new FieldType(NumericField.TYPE_STORED);
+ ft.setIndexed(fieldInfo.isIndexed);
+ ft.setOmitNorms(fieldInfo.omitNorms);
+ ft.setOmitTermFreqAndPositions(fieldInfo.omitTermFreqAndPositions);
+ return addNumericField(fieldInfo, new NumericField(fieldInfo.name, ft).setDoubleValue(value));
+ }
+
+ private boolean addNumericField(FieldInfo fieldInfo, NumericField f) {
+ doc.add(f);
+ final FieldSelectorResult accept = selector.accept(fieldInfo.name);
+ switch (accept) {
+ case LOAD:
+ return false;
+ case LOAD_AND_BREAK:
+ return true;
+ case LAZY_LOAD:
+ case LATENT:
+ return false;
+ case SIZE:
+ return false;
+ case SIZE_AND_BREAK:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ private void addFieldLazy(IndexInput in, FieldInfo fi, boolean binary, boolean cacheResult, int numBytes) throws IOException {
+ final IndexableField f;
+ final long pointer = in.getFilePointer();
+ // Need to move the pointer ahead by toRead positions
+ in.seek(pointer+numBytes);
+ FieldType ft = new FieldType();
+ ft.setStored(true);
+ ft.setOmitNorms(fi.omitNorms);
+ ft.setOmitTermFreqAndPositions(fi.omitTermFreqAndPositions);
+ ft.setLazy(true);
+
+ if (binary) {
+ f = new LazyField(in, fi.name, ft, numBytes, pointer, binary, cacheResult);
+ } else {
+ ft.setStoreTermVectors(fi.storeTermVector);
+ ft.setStoreTermVectorOffsets(fi.storeOffsetWithTermVector);
+ ft.setStoreTermVectorPositions(fi.storePositionWithTermVector);
+ f = new LazyField(in, fi.name, ft, numBytes, pointer, binary, cacheResult);
+ }
+
+ doc.add(f);
+ }
+
+ // Add the size of field as a byte[] containing the 4 bytes of the integer byte size (high order byte first; char = 2 bytes)
+ // Read just the size -- caller must skip the field content to continue reading fields
+ // Return the size in bytes or chars, depending on field type
+ private void addFieldSize(FieldInfo fi, int numBytes) throws IOException {
+ byte[] sizebytes = new byte[4];
+ sizebytes[0] = (byte) (numBytes>>>24);
+ sizebytes[1] = (byte) (numBytes>>>16);
+ sizebytes[2] = (byte) (numBytes>>> 8);
+ sizebytes[3] = (byte) numBytes ;
+ doc.add(new BinaryField(fi.name, sizebytes));
+ }
+
+ /**
+ * A Lazy field implementation that defers loading of fields until asked for, instead of when the Document is
+ * loaded.
+ */
+ private static class LazyField extends Field {
+ private int toRead;
+ private long pointer;
+ private final boolean cacheResult;
+ private final IndexInput in;
+
+ public LazyField(IndexInput in, String name, FieldType ft, int toRead, long pointer, boolean isBinary, boolean cacheResult) {
+ super(name, ft);
+ this.in = in;
+ this.toRead = toRead;
+ this.pointer = pointer;
+ this.isBinary = isBinary;
+ this.cacheResult = cacheResult;
+ if (isBinary)
+ binaryLength = toRead;
+ }
+
+ @Override
+ public Number numericValue() {
+ return null;
+ }
+
+ @Override
+ public DataType numericDataType() {
+ return null;
+ }
+
+ private IndexInput localFieldsStream;
+
+ private IndexInput getFieldStream() {
+ if (localFieldsStream == null) {
+ localFieldsStream = (IndexInput) in.clone();
+ }
+ return localFieldsStream;
+ }
+
+ /** The value of the field as a Reader, or null. If null, the String value,
+ * binary value, or TokenStream value is used. Exactly one of stringValue(),
+ * readerValue(), getBinaryValue(), and tokenStreamValue() must be set. */
+ public Reader readerValue() {
+ return null;
+ }
+
+ /** The value of the field as a TokenStream, or null. If null, the Reader value,
+ * String value, or binary value is used. Exactly one of stringValue(),
+ * readerValue(), getBinaryValue(), and tokenStreamValue() must be set. */
+ public TokenStream tokenStreamValue() {
+ return null;
+ }
+
+ /** The value of the field as a String, or null. If null, the Reader value,
+ * binary value, or TokenStream value is used. Exactly one of stringValue(),
+ * readerValue(), getBinaryValue(), and tokenStreamValue() must be set. */
+ synchronized public String stringValue() {
+ if (isBinary)
+ return null;
+ else {
+ if (fieldsData == null) {
+ String result = null;
+ IndexInput localFieldsStream = getFieldStream();
+ try {
+ localFieldsStream.seek(pointer);
+ byte[] bytes = new byte[toRead];
+ localFieldsStream.readBytes(bytes, 0, toRead);
+ result = new String(bytes, "UTF-8");
+ } catch (IOException e) {
+ throw new FieldReaderException(e);
+ }
+ if (cacheResult == true){
+ fieldsData = result;
+ }
+ return result;
+ } else {
+ return (String) fieldsData;
+ }
+ }
+ }
+
+ synchronized private byte[] getBinaryValue(byte[] result) {
+ if (isBinary) {
+ if (fieldsData == null) {
+ // Allocate new buffer if result is null or too small
+ final byte[] b;
+ if (result == null || result.length < toRead)
+ b = new byte[toRead];
+ else
+ b = result;
+
+ IndexInput localFieldsStream = getFieldStream();
+
+ // Throw this IOException since IndexReader.document does so anyway, so probably not that big of a change for people
+ // since they are already handling this exception when getting the document
+ try {
+ localFieldsStream.seek(pointer);
+ localFieldsStream.readBytes(b, 0, toRead);
+ } catch (IOException e) {
+ throw new FieldReaderException(e);
+ }
+
+ binaryOffset = 0;
+ binaryLength = toRead;
+ if (cacheResult == true){
+ fieldsData = b;
+ }
+ return b;
+ } else {
+ return (byte[]) fieldsData;
+ }
+ } else
+ return null;
+ }
+
+ @Override
+ public BytesRef binaryValue(BytesRef reuse) {
+ final byte[] bytes = getBinaryValue(reuse != null ? reuse.bytes : null);
+ if (bytes != null) {
+ return new BytesRef(bytes, 0, bytes.length);
+ } else {
+ return null;
+ }
+ }
+ }
+}
\ No newline at end of file
Index: lucene/contrib/misc/src/java/org/apache/lucene/document2/LoadFirstFieldSelector.java
===================================================================
--- lucene/contrib/misc/src/java/org/apache/lucene/document2/LoadFirstFieldSelector.java (revision 0)
+++ lucene/contrib/misc/src/java/org/apache/lucene/document2/LoadFirstFieldSelector.java (revision 0)
@@ -0,0 +1,29 @@
+package org.apache.lucene.document2;
+/**
+ * Copyright 2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/**
+ * Load the First field and break.
+ *
+ * See {@link FieldSelectorResult#LOAD_AND_BREAK}
+ */
+public class LoadFirstFieldSelector implements FieldSelector {
+
+ public FieldSelectorResult accept(String fieldName) {
+ return FieldSelectorResult.LOAD_AND_BREAK;
+ }
+}
\ No newline at end of file
Index: lucene/contrib/misc/src/java/org/apache/lucene/document2/MapFieldSelector.java
===================================================================
--- lucene/contrib/misc/src/java/org/apache/lucene/document2/MapFieldSelector.java (revision 0)
+++ lucene/contrib/misc/src/java/org/apache/lucene/document2/MapFieldSelector.java (revision 0)
@@ -0,0 +1,67 @@
+package org.apache.lucene.document2;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A {@link FieldSelector} based on a Map of field names to {@link FieldSelectorResult}s
+ *
+ */
+public class MapFieldSelector implements FieldSelector {
+
+ Map<String,FieldSelectorResult> fieldSelections;
+
+ /** Create a a MapFieldSelector
+ * @param fieldSelections maps from field names (String) to {@link FieldSelectorResult}s
+ */
+ public MapFieldSelector(Map<String,FieldSelectorResult> fieldSelections) {
+ this.fieldSelections = fieldSelections;
+ }
+
+ /** Create a a MapFieldSelector
+ * @param fields fields to LOAD. List of Strings. All other fields are NO_LOAD.
+ */
+ public MapFieldSelector(List<String> fields) {
+ fieldSelections = new HashMap<String,FieldSelectorResult>(fields.size()*5/3);
+ for (final String field : fields)
+ fieldSelections.put(field, FieldSelectorResult.LOAD);
+ }
+
+ /** Create a a MapFieldSelector
+ * @param fields fields to LOAD. All other fields are NO_LOAD.
+ */
+ public MapFieldSelector(String... fields) {
+ this(Arrays.asList(fields));
+ }
+
+
+
+ /** Load field according to its associated value in fieldSelections
+ * @param field a field name
+ * @return the fieldSelections value that field maps to or NO_LOAD if none.
+ */
+ public FieldSelectorResult accept(String field) {
+ FieldSelectorResult selection = fieldSelections.get(field);
+ return selection!=null ? selection : FieldSelectorResult.NO_LOAD;
+ }
+
+}
Index: lucene/contrib/misc/src/java/org/apache/lucene/document2/SetBasedFieldSelector.java
===================================================================
--- lucene/contrib/misc/src/java/org/apache/lucene/document2/SetBasedFieldSelector.java (revision 0)
+++ lucene/contrib/misc/src/java/org/apache/lucene/document2/SetBasedFieldSelector.java (revision 0)
@@ -0,0 +1,60 @@
+package org.apache.lucene.document2;
+
+/**
+ * Copyright 2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Set;
+
+/**
+ * Declare what fields to load normally and what fields to load lazily
+ *
+ **/
+
+public class SetBasedFieldSelector implements FieldSelector {
+
+ private Set<String> fieldsToLoad;
+ private Set<String> lazyFieldsToLoad;
+
+ /**
+ * Pass in the Set of {@link Field} names to load and the Set of {@link Field} names to load lazily. If both are null, the
+ * Document will not have any {@link Field} on it.
+ * @param fieldsToLoad A Set of {@link String} field names to load. May be empty, but not null
+ * @param lazyFieldsToLoad A Set of {@link String} field names to load lazily. May be empty, but not null
+ */
+ public SetBasedFieldSelector(Set<String> fieldsToLoad, Set<String> lazyFieldsToLoad) {
+ this.fieldsToLoad = fieldsToLoad;
+ this.lazyFieldsToLoad = lazyFieldsToLoad;
+ }
+
+ /**
+ * Indicate whether to load the field with the given name or not. If the {@link Field#name()} is not in either of the
+ * initializing Sets, then {@link org.apache.lucene.document.FieldSelectorResult#NO_LOAD} is returned. If a Field name
+ * is in both fieldsToLoad and lazyFieldsToLoad, lazy has precedence.
+ *
+ * @param fieldName The {@link Field} name to check
+ * @return The {@link FieldSelectorResult}
+ */
+ public FieldSelectorResult accept(String fieldName) {
+ FieldSelectorResult result = FieldSelectorResult.NO_LOAD;
+ if (fieldsToLoad.contains(fieldName) == true){
+ result = FieldSelectorResult.LOAD;
+ }
+ if (lazyFieldsToLoad.contains(fieldName) == true){
+ result = FieldSelectorResult.LAZY_LOAD;
+ }
+ return result;
+ }
+}
\ No newline at end of file
Index: lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java
===================================================================
--- lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java (revision 1148487)
+++ lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java (working copy)
@@ -152,7 +152,7 @@
writer.close();
IndexReader reader = IndexReader.open(dir, null, true, 1, new AppendingCodecProvider());
assertEquals(2, reader.numDocs());
- org.apache.lucene.document.Document doc2 = reader.document(0);
+ Document doc2 = reader.document2(0);
assertEquals(text, doc2.get("f"));
Fields fields = MultiFields.getFields(reader);
Terms terms = fields.terms("f");
Index: lucene/contrib/misc/src/test/org/apache/lucene/store/TestNRTCachingDirectory.java
===================================================================
--- lucene/contrib/misc/src/test/org/apache/lucene/store/TestNRTCachingDirectory.java (revision 1148487)
+++ lucene/contrib/misc/src/test/org/apache/lucene/store/TestNRTCachingDirectory.java (working copy)
@@ -23,7 +23,7 @@
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
+import org.apache.lucene.document2.Document;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
Index: lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java
===================================================================
--- lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java (revision 1148487)
+++ lucene/contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java (working copy)
@@ -93,7 +93,7 @@
ScoreDoc[] hits = searcher.search(tq,df, 1000).scoreDocs;
for(int i=0;i0);
for(int i=0;i0);
for(int i=0;i0);
for(int i=0;i0));
- org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc);
+ Document doc=searcher.doc2(sd[0].doc);
assertEquals("Should match most similar not most rare variant", "2",doc.get("id"));
}
//Test multiple input words are having variants produced
@@ -104,7 +104,7 @@
TopDocs topDocs = searcher.search(flt, 1);
ScoreDoc[] sd = topDocs.scoreDocs;
assertTrue("score docs must match 1 doc", (sd!=null)&&(sd.length>0));
- org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc);
+ Document doc=searcher.doc2(sd[0].doc);
assertEquals("Should match most similar when using 2 words", "2",doc.get("id"));
}
//Test bug found when first query word does not match anything
@@ -119,7 +119,7 @@
TopDocs topDocs = searcher.search(flt, 1);
ScoreDoc[] sd = topDocs.scoreDocs;
assertTrue("score docs must match 1 doc", (sd!=null)&&(sd.length>0));
- org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc);
+ Document doc=searcher.doc2(sd[0].doc);
assertEquals("Should match most similar when using 2 words", "2",doc.get("id"));
}
Index: lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java
===================================================================
--- lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java (revision 1148487)
+++ lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/complexPhrase/TestComplexPhraseQuery.java (working copy)
@@ -98,7 +98,7 @@
TopDocs td = searcher.search(q, 10);
ScoreDoc[] sd = td.scoreDocs;
for (int i = 0; i < sd.length; i++) {
- org.apache.lucene.document.Document doc = searcher.doc(sd[i].doc);
+ Document doc = searcher.doc2(sd[i].doc);
String id = doc.get("id");
assertTrue(qString + "matched doc#" + id + " not expected", expecteds
.contains(id));
Index: lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java
===================================================================
--- lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java (revision 1148487)
+++ lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java (working copy)
@@ -284,7 +284,7 @@
assertEquals(2, results);
double lastDistance = 0;
for(int i =0 ; i < results; i++){
- org.apache.lucene.document.Document d = searcher.doc(scoreDocs[i].doc);
+ Document d = searcher.doc2(scoreDocs[i].doc);
String name = d.get("name");
double rsLat = Double.parseDouble(d.get(latField));
@@ -380,7 +380,7 @@
assertEquals(18, results);
double lastDistance = 0;
for(int i =0 ; i < results; i++){
- org.apache.lucene.document.Document d = searcher.doc(scoreDocs[i].doc);
+ Document d = searcher.doc2(scoreDocs[i].doc);
String name = d.get("name");
double rsLat = Double.parseDouble(d.get(latField));
double rsLng = Double.parseDouble(d.get(lngField));
@@ -475,7 +475,7 @@
assertEquals(expected[x], results);
double lastDistance = 0;
for(int i =0 ; i < results; i++){
- org.apache.lucene.document.Document d = searcher.doc(scoreDocs[i].doc);
+ Document d = searcher.doc2(scoreDocs[i].doc);
String name = d.get("name");
double rsLat = Double.parseDouble(d.get(latField));
@@ -570,7 +570,7 @@
assertEquals(expected[x], results);
for(int i =0 ; i < results; i++){
- org.apache.lucene.document.Document d = searcher.doc(scoreDocs[i].doc);
+ Document d = searcher.doc2(scoreDocs[i].doc);
String name = d.get("name");
double rsLat = Double.parseDouble(d.get(latField));
Index: lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java
===================================================================
--- lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java (revision 1148487)
+++ lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java (working copy)
@@ -233,7 +233,7 @@
ScoreDoc[] scoreDocs = hits.scoreDocs;
for(int i=0;i fieldsToAdd;
+
+ /** Load only fields named in the provided Set<String>. */
+ public Document2StoredFieldVisitor(Set<String> fieldsToAdd) {
+ this.fieldsToAdd = fieldsToAdd;
+ }
+
+ /** Load only fields named in the provided field names. */
+ public Document2StoredFieldVisitor(String... fields) {
+ fieldsToAdd = new HashSet<String>(fields.length);
+ for(String field : fields) {
+ fieldsToAdd.add(field);
+ }
+ }
+
+ /** Load all stored fields. */
+ public Document2StoredFieldVisitor() {
+ this.fieldsToAdd = null;
+ }
+
+ @Override
+ public boolean binaryField(FieldInfo fieldInfo, IndexInput in, int numBytes) throws IOException {
+ if (accept(fieldInfo)) {
+ final byte[] b = new byte[numBytes];
+ in.readBytes(b, 0, b.length);
+ doc.add(new BinaryField(fieldInfo.name, b));
+ } else {
+ in.seek(in.getFilePointer() + numBytes);
+ }
+ return false;
+ }
+
+ @Override
+ public boolean stringField(FieldInfo fieldInfo, IndexInput in, int numUTF8Bytes) throws IOException {
+ if (accept(fieldInfo)) {
+ final byte[] b = new byte[numUTF8Bytes];
+ in.readBytes(b, 0, b.length);
+ FieldType ft = new FieldType(TextField.TYPE_STORED);
+ ft.setStoreTermVectors(fieldInfo.storeTermVector);
+ ft.setStoreTermVectorPositions(fieldInfo.storePositionWithTermVector);
+ ft.setStoreTermVectorOffsets(fieldInfo.storeOffsetWithTermVector);
+ ft.setStoreTermVectors(fieldInfo.storeTermVector);
+ doc.add(new Field(fieldInfo.name,
+ false,
+ ft,
+ new String(b, "UTF-8")));
+ } else {
+ in.seek(in.getFilePointer() + numUTF8Bytes);
+ }
+ return false;
+ }
+
+ @Override
+ public boolean intField(FieldInfo fieldInfo, int value) {
+ if (accept(fieldInfo)) {
+ FieldType ft = new FieldType(NumericField.TYPE_STORED);
+ ft.setIndexed(fieldInfo.isIndexed);
+ doc.add(new NumericField(fieldInfo.name, ft).setIntValue(value));
+ }
+ return false;
+ }
+
+ @Override
+ public boolean longField(FieldInfo fieldInfo, long value) {
+ if (accept(fieldInfo)) {
+ FieldType ft = new FieldType(NumericField.TYPE_STORED);
+ ft.setIndexed(fieldInfo.isIndexed);
+ doc.add(new NumericField(fieldInfo.name, ft).setLongValue(value));
+ }
+ return false;
+ }
+
+ @Override
+ public boolean floatField(FieldInfo fieldInfo, float value) {
+ if (accept(fieldInfo)) {
+ FieldType ft = new FieldType(NumericField.TYPE_STORED);
+ ft.setIndexed(fieldInfo.isIndexed);
+ doc.add(new NumericField(fieldInfo.name, ft).setFloatValue(value));
+ }
+ return false;
+ }
+
+ @Override
+ public boolean doubleField(FieldInfo fieldInfo, double value) {
+ if (accept(fieldInfo)) {
+ FieldType ft = new FieldType(NumericField.TYPE_STORED);
+ ft.setIndexed(fieldInfo.isIndexed);
+ doc.add(new NumericField(fieldInfo.name, ft).setDoubleValue(value));
+ }
+ return false;
+ }
+
+ private boolean accept(FieldInfo fieldInfo) {
+ return fieldsToAdd == null || fieldsToAdd.contains(fieldInfo.name);
+ }
+
+ public Document getDocument() {
+ return doc;
+ }
+}
\ No newline at end of file
Index: lucene/src/java/org/apache/lucene/index/IndexReader.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/IndexReader.java (revision 1148487)
+++ lucene/src/java/org/apache/lucene/index/IndexReader.java (working copy)
@@ -954,7 +954,7 @@
* #document(int)}. If you want to load a subset, use
* {@link DocumentStoredFieldVisitor}. */
public abstract void document(int docID, StoredFieldVisitor visitor) throws CorruptIndexException, IOException;
-
+
// nocommit -- the new document(int docID) API should
// clearly advertise that only field types/values are
// preserved -- index time metadata like boost, omitNorm,
@@ -981,6 +981,13 @@
return visitor.getDocument();
}
+ public org.apache.lucene.document2.Document document2(int docID) throws CorruptIndexException, IOException {
+ ensureOpen();
+ final Document2StoredFieldVisitor visitor = new Document2StoredFieldVisitor();
+ document(docID, visitor);
+ return visitor.getDocument();
+ }
+
/** Returns true if any documents have been deleted */
public abstract boolean hasDeletions();
Index: lucene/src/java/org/apache/lucene/search/IndexSearcher.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/IndexSearcher.java (revision 1148487)
+++ lucene/src/java/org/apache/lucene/search/IndexSearcher.java (working copy)
@@ -243,10 +243,18 @@
return reader.document(docID);
}
+ public org.apache.lucene.document2.Document doc2(int docID) throws CorruptIndexException, IOException {
+ return reader.document2(docID);
+ }
+
/* Sugar for .getIndexReader().document(docID, fieldVisitor) */
public void doc(int docID, StoredFieldVisitor fieldVisitor) throws CorruptIndexException, IOException {
reader.document(docID, fieldVisitor);
}
+
+ public void doc2(int docID, StoredFieldVisitor fieldVisitor) throws CorruptIndexException, IOException {
+ reader.document(docID, fieldVisitor);
+ }
/** Expert: Set the SimilarityProvider implementation used by this Searcher.
*
Index: lucene/src/test-framework/org/apache/lucene/index/DocHelper.java
===================================================================
--- lucene/src/test-framework/org/apache/lucene/index/DocHelper.java (revision 1148487)
+++ lucene/src/test-framework/org/apache/lucene/index/DocHelper.java (working copy)
@@ -297,8 +297,4 @@
public static int numFields(Document doc) {
return doc.size();
}
-
- public static int numFields2(org.apache.lucene.document.Document doc) {
- return doc.getFields().size();
- }
}
Index: lucene/src/test-framework/org/apache/lucene/util/LineFileDocs.java
===================================================================
--- lucene/src/test-framework/org/apache/lucene/util/LineFileDocs.java (revision 1148487)
+++ lucene/src/test-framework/org/apache/lucene/util/LineFileDocs.java (working copy)
@@ -28,8 +28,11 @@
import java.util.zip.GZIPInputStream;
import java.util.Random;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
+import org.apache.lucene.document2.Document;
+import org.apache.lucene.document2.Field;
+import org.apache.lucene.document2.FieldType;
+import org.apache.lucene.document2.StringField;
+import org.apache.lucene.document2.TextField;
/** Minimal port of contrib/benchmark's LneDocSource +
* DocMaker, so tests can enum docs from a line file created
@@ -117,19 +120,24 @@
public DocState() {
doc = new Document();
- title = new Field("title", "", Field.Store.NO, Field.Index.NOT_ANALYZED_NO_NORMS);
+ title = new StringField("title", "");
doc.add(title);
- titleTokenized = new Field("titleTokenized", "", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
+ FieldType ft = new FieldType(TextField.TYPE_STORED);
+ ft.setStoreTermVectors(true);
+ ft.setStoreTermVectorOffsets(true);
+ ft.setStoreTermVectorPositions(true);
+
+ titleTokenized = new Field("titleTokenized", ft, "");
doc.add(titleTokenized);
- body = new Field("body", "", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
+ body = new Field("body", ft, "");
doc.add(body);
- id = new Field("docid", "", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
+ id = new Field("docid", StringField.TYPE_STORED, "");
doc.add(id);
- date = new Field("date", "", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
+ date = new Field("date", StringField.TYPE_STORED, "");
doc.add(date);
}
}
Index: lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java
===================================================================
--- lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java (revision 1148487)
+++ lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java (working copy)
@@ -36,10 +36,6 @@
import java.util.regex.Pattern;
import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.Field.Index;
-import org.apache.lucene.document.Field.Store;
-import org.apache.lucene.document.Field.TermVector;
import org.apache.lucene.document2.FieldType;
import org.apache.lucene.index.*;
import org.apache.lucene.index.codecs.Codec;
@@ -1062,18 +1058,6 @@
return dir;
}
- /** Returns a new field instance.
- * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */
- public static Field newField(String name, String value, Index index) {
- return newField(random, name, value, index);
- }
-
- /** Returns a new field instance.
- * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */
- public static Field newField(String name, String value, Store store, Index index) {
- return newField(random, name, value, store, index);
- }
-
public static org.apache.lucene.document2.Field newField(String name, String value, FieldType type) {
return newField(random, name, value, type);
}
@@ -1114,67 +1098,6 @@
return new org.apache.lucene.document2.Field(name, newType, value);
}
- /**
- * Returns a new Field instance. Use this when the test does not
- * care about some specific field settings (most tests)
- * <ul>
- *  <li>If the store value is set to Store.NO, sometimes the field will be randomly stored.
- *  <li>More term vector data than you ask for might be indexed, for example if you choose YES
- * it might index term vectors with offsets too.
- * </ul>
- */
- public static Field newField(String name, String value, Store store, Index index, TermVector tv) {
- return newField(random, name, value, store, index, tv);
- }
-
- /** Returns a new field instance, using the specified random.
- * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */
- public static Field newField(Random random, String name, String value, Index index) {
- return newField(random, name, value, Store.NO, index);
- }
-
- /** Returns a new field instance, using the specified random.
- * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */
- public static Field newField(Random random, String name, String value, Store store, Index index) {
- return newField(random, name, value, store, index, TermVector.NO);
- }
-
- /** Returns a new field instance, using the specified random.
- * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */
- public static Field newField(Random random, String name, String value, Store store, Index index, TermVector tv) {
- if (usually(random)) {
- // most of the time, don't modify the params
- return new Field(name, value, store, index, tv);
- }
-
- if (!index.isIndexed())
- return new Field(name, value, store, index, tv);
-
- if (!store.isStored() && random.nextBoolean())
- store = Store.YES; // randomly store it
-
- tv = randomTVSetting(random, tv);
-
- return new Field(name, value, store, index, tv);
- }
-
- static final TermVector tvSettings[] = {
- TermVector.NO, TermVector.YES, TermVector.WITH_OFFSETS,
- TermVector.WITH_POSITIONS, TermVector.WITH_POSITIONS_OFFSETS
- };
-
- private static TermVector randomTVSetting(Random random, TermVector minimum) {
- switch(minimum) {
- case NO: return tvSettings[_TestUtil.nextInt(random, 0, tvSettings.length-1)];
- case YES: return tvSettings[_TestUtil.nextInt(random, 1, tvSettings.length-1)];
- case WITH_OFFSETS: return random.nextBoolean() ? TermVector.WITH_OFFSETS
- : TermVector.WITH_POSITIONS_OFFSETS;
- case WITH_POSITIONS: return random.nextBoolean() ? TermVector.WITH_POSITIONS
- : TermVector.WITH_POSITIONS_OFFSETS;
- default: return TermVector.WITH_POSITIONS_OFFSETS;
- }
- }
-
/** return a random Locale from the available locales on the system */
public static Locale randomLocale(Random random) {
Locale locales[] = Locale.getAvailableLocales();
Index: lucene/src/test/org/apache/lucene/TestDemo.java
===================================================================
--- lucene/src/test/org/apache/lucene/TestDemo.java (revision 1148487)
+++ lucene/src/test/org/apache/lucene/TestDemo.java (working copy)
@@ -72,7 +72,7 @@
assertEquals(1, hits.totalHits);
// Iterate through the results:
for (int i = 0; i < hits.scoreDocs.length; i++) {
- org.apache.lucene.document.Document hitDoc = isearcher.doc(hits.scoreDocs[i].doc);
+ Document hitDoc = isearcher.doc2(hits.scoreDocs[i].doc);
assertEquals(text, hitDoc.get("fieldname"));
}
Index: lucene/src/test/org/apache/lucene/TestSearch.java
===================================================================
--- lucene/src/test/org/apache/lucene/TestSearch.java (revision 1148487)
+++ lucene/src/test/org/apache/lucene/TestSearch.java (working copy)
@@ -127,7 +127,7 @@
out.println(hits.length + " total results");
for (int i = 0 ; i < hits.length && i < 10; i++) {
- org.apache.lucene.document.Document d = searcher.doc(hits[i].doc);
+ Document d = searcher.doc2(hits[i].doc);
out.println(i + " " + hits[i].score + " " + d.get("contents"));
}
}
Index: lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java
===================================================================
--- lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java (revision 1148487)
+++ lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java (working copy)
@@ -142,7 +142,7 @@
out.println(hits.length + " total results\n");
for (int i = 0 ; i < hits.length; i++) {
if ( i < 10 || (i > 94 && i < 105) ) {
- org.apache.lucene.document.Document d = searcher.doc(hits[i].doc);
+ Document d = searcher.doc2(hits[i].doc);
out.println(i + " " + d.get(ID_FIELD));
}
}
@@ -152,7 +152,7 @@
assertEquals("total results", expectedCount, hits.length);
for (int i = 0 ; i < hits.length; i++) {
if (i < 10 || (i > 94 && i < 105) ) {
- org.apache.lucene.document.Document d = searcher.doc(hits[i].doc);
+ Document d = searcher.doc2(hits[i].doc);
assertEquals("check " + i, String.valueOf(i), d.get(ID_FIELD));
}
}
Index: lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (revision 1148487)
+++ lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (working copy)
@@ -31,7 +31,6 @@
import org.apache.lucene.document2.Field;
import org.apache.lucene.document2.FieldType;
import org.apache.lucene.document2.TextField;
-import org.apache.lucene.document.Fieldable;
import org.apache.lucene.document2.NumericField;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DefaultSimilarity;
@@ -287,12 +286,12 @@
for(int i=0;i<35;i++) {
if (!delDocs.get(i)) {
- org.apache.lucene.document.Document d = reader.document(i);
- List<Fieldable> fields = d.getFields();
+ Document d = reader.document2(i);
+ List<IndexableField> fields = d.getFields();
if (d.getField("content3") == null) {
final int numFields = 5;
assertEquals(numFields, fields.size());
- org.apache.lucene.document.Field f = d.getField("id");
+ IndexableField f = d.getField("id");
assertEquals(""+i, f.stringValue());
f = d.getField("utf8");
@@ -320,7 +319,7 @@
// First document should be #21 since it's norm was
// increased:
- org.apache.lucene.document.Document d = searcher.getIndexReader().document(hits[0].doc);
+ Document d = searcher.getIndexReader().document2(hits[0].doc);
assertEquals("didn't get the right document first", "21", d.get("id"));
doTestHits(hits, 34, searcher.getIndexReader());
@@ -366,7 +365,7 @@
// make sure searching sees right # hits
IndexSearcher searcher = new IndexSearcher(dir, true);
ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
- org.apache.lucene.document.Document d = searcher.getIndexReader().document(hits[0].doc);
+ Document d = searcher.getIndexReader().document2(hits[0].doc);
assertEquals("wrong first document", "21", d.get("id"));
doTestHits(hits, 44, searcher.getIndexReader());
searcher.close();
@@ -385,7 +384,7 @@
searcher = new IndexSearcher(dir, true);
hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
assertEquals("wrong number of hits", 43, hits.length);
- d = searcher.doc(hits[0].doc);
+ d = searcher.doc2(hits[0].doc);
assertEquals("wrong first document", "22", d.get("id"));
doTestHits(hits, 43, searcher.getIndexReader());
searcher.close();
@@ -398,7 +397,7 @@
searcher = new IndexSearcher(dir, true);
hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
assertEquals("wrong number of hits", 43, hits.length);
- d = searcher.doc(hits[0].doc);
+ d = searcher.doc2(hits[0].doc);
doTestHits(hits, 43, searcher.getIndexReader());
assertEquals("wrong first document", "22", d.get("id"));
searcher.close();
@@ -414,7 +413,7 @@
IndexSearcher searcher = new IndexSearcher(dir, true);
ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
assertEquals("wrong number of hits", 34, hits.length);
- org.apache.lucene.document.Document d = searcher.doc(hits[0].doc);
+ Document d = searcher.doc2(hits[0].doc);
assertEquals("wrong first document", "21", d.get("id"));
searcher.close();
@@ -430,7 +429,7 @@
searcher = new IndexSearcher(dir, true);
hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
assertEquals("wrong number of hits", 33, hits.length);
- d = searcher.doc(hits[0].doc);
+ d = searcher.doc2(hits[0].doc);
assertEquals("wrong first document", "22", d.get("id"));
doTestHits(hits, 33, searcher.getIndexReader());
searcher.close();
@@ -443,7 +442,7 @@
searcher = new IndexSearcher(dir, true);
hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs;
assertEquals("wrong number of hits", 33, hits.length);
- d = searcher.doc(hits[0].doc);
+ d = searcher.doc2(hits[0].doc);
assertEquals("wrong first document", "22", d.get("id"));
doTestHits(hits, 33, searcher.getIndexReader());
searcher.close();
@@ -684,12 +683,12 @@
for (int id=10; id<15; id++) {
ScoreDoc[] hits = searcher.search(NumericRangeQuery.newIntRange("trieInt", 4, Integer.valueOf(id), Integer.valueOf(id), true, true), 100).scoreDocs;
assertEquals("wrong number of hits", 1, hits.length);
- org.apache.lucene.document.Document d = searcher.doc(hits[0].doc);
+ Document d = searcher.doc2(hits[0].doc);
assertEquals(String.valueOf(id), d.get("id"));
hits = searcher.search(NumericRangeQuery.newLongRange("trieLong", 4, Long.valueOf(id), Long.valueOf(id), true, true), 100).scoreDocs;
assertEquals("wrong number of hits", 1, hits.length);
- d = searcher.doc(hits[0].doc);
+ d = searcher.doc2(hits[0].doc);
assertEquals(String.valueOf(id), d.get("id"));
}
Index: lucene/src/test/org/apache/lucene/index/TestDirectoryReader.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestDirectoryReader.java (revision 1148487)
+++ lucene/src/test/org/apache/lucene/index/TestDirectoryReader.java (working copy)
@@ -81,12 +81,12 @@
sis.read(dir);
IndexReader reader = openReader();
assertTrue(reader != null);
- org.apache.lucene.document.Document newDoc1 = reader.document(0);
+ Document newDoc1 = reader.document2(0);
assertTrue(newDoc1 != null);
- assertTrue(DocHelper.numFields2(newDoc1) == DocHelper.numFields(doc1) - DocHelper.unstored.size());
- org.apache.lucene.document.Document newDoc2 = reader.document(1);
+ assertTrue(DocHelper.numFields(newDoc1) == DocHelper.numFields(doc1) - DocHelper.unstored.size());
+ Document newDoc2 = reader.document2(1);
assertTrue(newDoc2 != null);
- assertTrue(DocHelper.numFields2(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size());
+ assertTrue(DocHelper.numFields(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size());
TermFreqVector vector = reader.getTermFreqVector(0, DocHelper.TEXT_FIELD_2_KEY);
assertTrue(vector != null);
TestSegmentReader.checkNorms(reader);
Index: lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java (revision 1148487)
+++ lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java (working copy)
@@ -28,7 +28,6 @@
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.document.Fieldable;
import org.apache.lucene.document2.Document;
import org.apache.lucene.document2.Field;
import org.apache.lucene.document2.FieldType;
@@ -69,19 +68,19 @@
//After adding the document, we should be able to read it back in
SegmentReader reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
assertTrue(reader != null);
- org.apache.lucene.document.Document doc = reader.document(0);
+ Document doc = reader.document2(0);
assertTrue(doc != null);
//System.out.println("Document: " + doc);
- Fieldable [] fields = doc.getFields("textField2");
+ IndexableField [] fields = doc.getFields("textField2");
assertTrue(fields != null && fields.length == 1);
assertTrue(fields[0].stringValue().equals(DocHelper.FIELD_2_TEXT));
- assertTrue(fields[0].isTermVectorStored());
+ assertTrue(fields[0].storeTermVectors());
fields = doc.getFields("textField1");
assertTrue(fields != null && fields.length == 1);
assertTrue(fields[0].stringValue().equals(DocHelper.FIELD_1_TEXT));
- assertFalse(fields[0].isTermVectorStored());
+ assertFalse(fields[0].storeTermVectors());
fields = doc.getFields("keyField");
assertTrue(fields != null && fields.length == 1);
Index: lucene/src/test/org/apache/lucene/index/TestFieldsReader.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestFieldsReader.java (revision 1148487)
+++ lucene/src/test/org/apache/lucene/index/TestFieldsReader.java (working copy)
@@ -22,9 +22,9 @@
import java.util.*;
import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.NumericField;
-import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.document2.Document;
+import org.apache.lucene.document2.Field;
+import org.apache.lucene.document2.NumericField;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.store.BufferedIndexInput;
@@ -67,32 +67,32 @@
assertTrue(dir != null);
assertTrue(fieldInfos != null);
IndexReader reader = IndexReader.open(dir);
- Document doc = reader.document(0);
+ Document doc = reader.document2(0);
assertTrue(doc != null);
assertTrue(doc.getField(DocHelper.TEXT_FIELD_1_KEY) != null);
- Fieldable field = doc.getField(DocHelper.TEXT_FIELD_2_KEY);
+ Field field = (Field) doc.getField(DocHelper.TEXT_FIELD_2_KEY);
assertTrue(field != null);
- assertTrue(field.isTermVectorStored() == true);
+ assertTrue(field.storeTermVectors() == true);
- assertTrue(field.isStoreOffsetWithTermVector() == true);
- assertTrue(field.isStorePositionWithTermVector() == true);
+ assertTrue(field.storeTermVectorOffsets() == true);
+ assertTrue(field.storeTermVectorPositions() == true);
- field = doc.getField(DocHelper.TEXT_FIELD_3_KEY);
+ field = (Field) doc.getField(DocHelper.TEXT_FIELD_3_KEY);
assertTrue(field != null);
- assertTrue(field.isTermVectorStored() == false);
- assertTrue(field.isStoreOffsetWithTermVector() == false);
- assertTrue(field.isStorePositionWithTermVector() == false);
+ assertTrue(field.storeTermVectors() == false);
+ assertTrue(field.storeTermVectorOffsets() == false);
+ assertTrue(field.storeTermVectorPositions() == false);
- field = doc.getField(DocHelper.NO_TF_KEY);
+ field = (Field) doc.getField(DocHelper.NO_TF_KEY);
assertTrue(field != null);
- assertTrue(field.isTermVectorStored() == false);
- assertTrue(field.isStoreOffsetWithTermVector() == false);
- assertTrue(field.isStorePositionWithTermVector() == false);
+ assertTrue(field.storeTermVectors() == false);
+ assertTrue(field.storeTermVectorOffsets() == false);
+ assertTrue(field.storeTermVectorPositions() == false);
- DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(DocHelper.TEXT_FIELD_3_KEY);
+ Document2StoredFieldVisitor visitor = new Document2StoredFieldVisitor(DocHelper.TEXT_FIELD_3_KEY);
reader.document(0, visitor);
- final List<Fieldable> fields = visitor.getDocument().getFields();
+ final List<IndexableField> fields = visitor.getDocument().getFields();
assertEquals(1, fields.size());
assertEquals(DocHelper.TEXT_FIELD_3_KEY, fields.get(0).name());
@@ -229,25 +229,25 @@
RandomIndexWriter w = new RandomIndexWriter(random, dir);
final int numDocs = atLeast(500);
final Number[] answers = new Number[numDocs];
- final NumericField.DataType[] typeAnswers = new NumericField.DataType[numDocs];
+ final org.apache.lucene.document.NumericField.DataType[] typeAnswers = new org.apache.lucene.document.NumericField.DataType[numDocs];
for(int id=0;id fieldable1 = doc1.getFields();
- List<Fieldable> fieldable2 = doc2.getFields();
+ Document doc1 = index1.document2(i);
+ Document doc2 = index2.document2(i);
+ List<IndexableField> fieldable1 = doc1.getFields();
+ List<IndexableField> fieldable2 = doc2.getFields();
assertEquals("Different numbers of fields for doc " + i + ".", fieldable1.size(), fieldable2.size());
- Iterator itField1 = fieldable1.iterator();
- Iterator itField2 = fieldable2.iterator();
+ Iterator itField1 = fieldable1.iterator();
+ Iterator itField2 = fieldable2.iterator();
while (itField1.hasNext()) {
- org.apache.lucene.document.Field curField1 = (org.apache.lucene.document.Field) itField1.next();
- org.apache.lucene.document.Field curField2 = (org.apache.lucene.document.Field) itField2.next();
+ Field curField1 = (Field) itField1.next();
+ Field curField2 = (Field) itField2.next();
assertEquals("Different fields names for doc " + i + ".", curField1.name(), curField2.name());
assertEquals("Different field values for doc " + i + ".", curField1.stringValue(), curField2.stringValue());
}
Index: lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java (revision 1148487)
+++ lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java (working copy)
@@ -188,7 +188,7 @@
if (i>0) {
int k = i-1;
int n = j + k*M;
- org.apache.lucene.document.Document prevItereationDoc = reader.document(n);
+ Document prevItereationDoc = reader.document2(n);
assertNotNull(prevItereationDoc);
String id = prevItereationDoc.get("id");
assertEquals(k+"_"+j, id);
Index: lucene/src/test/org/apache/lucene/index/TestIndexWriter.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestIndexWriter.java (revision 1148487)
+++ lucene/src/test/org/apache/lucene/index/TestIndexWriter.java (working copy)
@@ -42,7 +42,6 @@
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.document.Fieldable;
import org.apache.lucene.document2.BinaryField;
import org.apache.lucene.document2.Document;
import org.apache.lucene.document2.Field;
@@ -1012,8 +1011,8 @@
w.close();
IndexReader ir = IndexReader.open(dir, true);
- org.apache.lucene.document.Document doc2 = ir.document(0);
- org.apache.lucene.document.Field f2 = doc2.getField("binary");
+ Document doc2 = ir.document2(0);
+ IndexableField f2 = doc2.getField("binary");
b = f2.binaryValue(null).bytes;
assertTrue(b != null);
assertEquals(17, b.length, 17);
@@ -1283,8 +1282,8 @@
w.close();
IndexReader ir = IndexReader.open(dir, true);
- org.apache.lucene.document.Document doc2 = ir.document(0);
- org.apache.lucene.document.Field f3 = doc2.getField("binary");
+ Document doc2 = ir.document2(0);
+ IndexableField f3 = doc2.getField("binary");
b = f3.binaryValue(null).bytes;
assertTrue(b != null);
assertEquals(17, b.length, 17);
@@ -1325,20 +1324,20 @@
doc.add(newField("zzz", "1 2 3", customType));
w.addDocument(doc);
IndexReader r = w.getReader();
- org.apache.lucene.document.Document doc2 = r.document(0);
- Iterator it = doc2.getFields().iterator();
+ Document doc2 = r.document2(0);
+ Iterator it = doc2.getFields().iterator();
assertTrue(it.hasNext());
- org.apache.lucene.document.Field f = (org.apache.lucene.document.Field) it.next();
+ Field f = (Field) it.next();
assertEquals(f.name(), "zzz");
assertEquals(f.stringValue(), "a b c");
assertTrue(it.hasNext());
- f = (org.apache.lucene.document.Field) it.next();
+ f = (Field) it.next();
assertEquals(f.name(), "aaa");
assertEquals(f.stringValue(), "a b c");
assertTrue(it.hasNext());
- f = (org.apache.lucene.document.Field) it.next();
+ f = (Field) it.next();
assertEquals(f.name(), "zzz");
assertEquals(f.stringValue(), "1 2 3");
assertFalse(it.hasNext());
@@ -1669,7 +1668,7 @@
}
TopDocs hits = s.search(new TermQuery(new Term("id", testID)), 1);
assertEquals(1, hits.totalHits);
- org.apache.lucene.document.Document doc = r.document(hits.scoreDocs[0].doc);
+ Document doc = r.document2(hits.scoreDocs[0].doc);
Document docExp = docs.get(testID);
for(int i=0;i docIDs = new ArrayList();
final SubDocs subDocs = new SubDocs(packID, docIDs);
final List docsList = new ArrayList();
@@ -524,7 +525,7 @@
startDocID = docID;
}
lastDocID = docID;
- final Document doc = s.doc(docID);
+ final Document doc = s.doc2(docID);
assertEquals(subDocs.packID, doc.get("packID"));
}
Index: lucene/src/test/org/apache/lucene/index/TestParallelReader.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestParallelReader.java (revision 1148487)
+++ lucene/src/test/org/apache/lucene/index/TestParallelReader.java (working copy)
@@ -212,8 +212,8 @@
assertEquals(parallelHits.length, singleHits.length);
for(int i = 0; i < parallelHits.length; i++) {
assertEquals(parallelHits[i].score, singleHits[i].score, 0.001f);
- org.apache.lucene.document.Document docParallel = parallel.doc(parallelHits[i].doc);
- org.apache.lucene.document.Document docSingle = single.doc(singleHits[i].doc);
+ Document docParallel = parallel.doc2(parallelHits[i].doc);
+ Document docSingle = single.doc2(singleHits[i].doc);
assertEquals(docParallel.get("f1"), docSingle.get("f1"));
assertEquals(docParallel.get("f2"), docSingle.get("f2"));
assertEquals(docParallel.get("f3"), docSingle.get("f3"));
Index: lucene/src/test/org/apache/lucene/index/TestRollingUpdates.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestRollingUpdates.java (revision 1148487)
+++ lucene/src/test/org/apache/lucene/index/TestRollingUpdates.java (working copy)
@@ -40,14 +40,14 @@
IndexReader r = null;
final int numUpdates = (int) (SIZE * (2+random.nextDouble()));
for(int docIter=0;docIter= SIZE && random.nextInt(50) == 17) {
Index: lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java (revision 1148487)
+++ lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java (working copy)
@@ -87,13 +87,13 @@
BufferedIndexInput.BUFFER_SIZE, true, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
assertTrue(mergedReader != null);
assertTrue(mergedReader.numDocs() == 2);
- org.apache.lucene.document.Document newDoc1 = mergedReader.document(0);
+ Document newDoc1 = mergedReader.document2(0);
assertTrue(newDoc1 != null);
//There are 2 unstored fields on the document
- assertTrue(DocHelper.numFields2(newDoc1) == DocHelper.numFields(doc1) - DocHelper.unstored.size());
- org.apache.lucene.document.Document newDoc2 = mergedReader.document(1);
+ assertTrue(DocHelper.numFields(newDoc1) == DocHelper.numFields(doc1) - DocHelper.unstored.size());
+ Document newDoc2 = mergedReader.document2(1);
assertTrue(newDoc2 != null);
- assertTrue(DocHelper.numFields2(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size());
+ assertTrue(DocHelper.numFields(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size());
DocsEnum termDocs = MultiFields.getTermDocsEnum(mergedReader,
MultiFields.getDeletedDocs(mergedReader),
Index: lucene/src/test/org/apache/lucene/index/TestSegmentReader.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestSegmentReader.java (revision 1148487)
+++ lucene/src/test/org/apache/lucene/index/TestSegmentReader.java (working copy)
@@ -25,7 +25,6 @@
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.document.Fieldable;
import org.apache.lucene.document2.Document;
import org.apache.lucene.store.Directory;
@@ -61,13 +60,13 @@
public void testDocument() throws IOException {
assertTrue(reader.numDocs() == 1);
assertTrue(reader.maxDoc() >= 1);
- org.apache.lucene.document.Document result = reader.document(0);
+ Document result = reader.document2(0);
assertTrue(result != null);
//There are 2 unstored fields on the document that are not preserved across writing
- assertTrue(DocHelper.numFields2(result) == DocHelper.numFields(testDoc) - DocHelper.unstored.size());
+ assertTrue(DocHelper.numFields(result) == DocHelper.numFields(testDoc) - DocHelper.unstored.size());
- List<Fieldable> fields = result.getFields();
- for (final Fieldable field : fields ) {
+ List<IndexableField> fields = result.getFields();
+ for (final IndexableField field : fields ) {
assertTrue(field != null);
assertTrue(DocHelper.nameValues.containsKey(field.name()));
}
Index: lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java (revision 1148487)
+++ lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java (working copy)
@@ -30,7 +30,7 @@
import junit.framework.Assert;
import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.index.IndexableField;
import org.apache.lucene.document2.Document;
import org.apache.lucene.document2.Field;
import org.apache.lucene.document2.FieldType;
@@ -133,8 +133,8 @@
static Term idTerm = new Term("id","");
IndexingThread[] threads;
- static Comparator<Fieldable> fieldNameComparator = new Comparator<Fieldable>() {
- public int compare(Fieldable o1, Fieldable o2) {
+ static Comparator<IndexableField> fieldNameComparator = new Comparator<IndexableField>() {
+ public int compare(IndexableField o1, IndexableField o2) {
return o1.name().compareTo(o2.name());
}
};
@@ -294,7 +294,7 @@
Bits delDocs = sub.getDeletedDocs();
System.out.println(" " + ((SegmentReader) sub).getSegmentInfo());
for(int docID=0;docID<sub.maxDoc();docID++) {
[NOTE(review): diff content lost during extraction — the stripped span includes the remainder of TestStressIndexing2.java and the Index/hunk header of TestNumericRangeQuery32.java; recover from the original patch]
if (i>0 &&
(searcher.getIndexReader().getSequentialSubReaders() == null ||
@@ -211,9 +211,9 @@
ScoreDoc[] sd = topDocs.scoreDocs;
assertNotNull(sd);
assertEquals("Score doc count", count, sd.length );
- org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc);
+ Document doc=searcher.doc2(sd[0].doc);
assertEquals("First doc", startOffset, Integer.parseInt(doc.get(field)) );
- doc=searcher.doc(sd[sd.length-1].doc);
+ doc=searcher.doc2(sd[sd.length-1].doc);
assertEquals("Last doc", (count-1)*distance+startOffset, Integer.parseInt(doc.get(field)) );
q=NumericRangeQuery.newIntRange(field, precisionStep, null, upper, false, true);
@@ -221,9 +221,9 @@
sd = topDocs.scoreDocs;
assertNotNull(sd);
assertEquals("Score doc count", count, sd.length );
- doc=searcher.doc(sd[0].doc);
+ doc=searcher.doc2(sd[0].doc);
assertEquals("First doc", startOffset, Integer.parseInt(doc.get(field)) );
- doc=searcher.doc(sd[sd.length-1].doc);
+ doc=searcher.doc2(sd[sd.length-1].doc);
assertEquals("Last doc", (count-1)*distance+startOffset, Integer.parseInt(doc.get(field)) );
}
@@ -252,9 +252,9 @@
ScoreDoc[] sd = topDocs.scoreDocs;
assertNotNull(sd);
assertEquals("Score doc count", noDocs-count, sd.length );
- org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc);
+ Document doc=searcher.doc2(sd[0].doc);
assertEquals("First doc", count*distance+startOffset, Integer.parseInt(doc.get(field)) );
- doc=searcher.doc(sd[sd.length-1].doc);
+ doc=searcher.doc2(sd[sd.length-1].doc);
assertEquals("Last doc", (noDocs-1)*distance+startOffset, Integer.parseInt(doc.get(field)) );
q=NumericRangeQuery.newIntRange(field, precisionStep, lower, null, true, false);
@@ -262,9 +262,9 @@
sd = topDocs.scoreDocs;
assertNotNull(sd);
assertEquals("Score doc count", noDocs-count, sd.length );
- doc=searcher.doc(sd[0].doc);
+ doc=searcher.doc2(sd[0].doc);
assertEquals("First doc", count*distance+startOffset, Integer.parseInt(doc.get(field)) );
- doc=searcher.doc(sd[sd.length-1].doc);
+ doc=searcher.doc2(sd[sd.length-1].doc);
assertEquals("Last doc", (noDocs-1)*distance+startOffset, Integer.parseInt(doc.get(field)) );
}
Index: lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java (revision 1148487)
+++ lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java (working copy)
@@ -150,9 +150,9 @@
ScoreDoc[] sd = topDocs.scoreDocs;
assertNotNull(sd);
assertEquals("Score doc count"+type, count, sd.length );
- org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc);
+ Document doc=searcher.doc2(sd[0].doc);
assertEquals("First doc"+type, 2*distance+startOffset, Long.parseLong(doc.get(field)) );
- doc=searcher.doc(sd[sd.length-1].doc);
+ doc=searcher.doc2(sd[sd.length-1].doc);
assertEquals("Last doc"+type, (1+count)*distance+startOffset, Long.parseLong(doc.get(field)) );
if (i>0 &&
(searcher.getIndexReader().getSequentialSubReaders() == null ||
@@ -217,9 +217,9 @@
ScoreDoc[] sd = topDocs.scoreDocs;
assertNotNull(sd);
assertEquals("Score doc count", count, sd.length );
- org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc);
+ Document doc=searcher.doc2(sd[0].doc);
assertEquals("First doc", startOffset, Long.parseLong(doc.get(field)) );
- doc=searcher.doc(sd[sd.length-1].doc);
+ doc=searcher.doc2(sd[sd.length-1].doc);
assertEquals("Last doc", (count-1)*distance+startOffset, Long.parseLong(doc.get(field)) );
q=NumericRangeQuery.newLongRange(field, precisionStep, null, upper, false, true);
@@ -227,9 +227,9 @@
sd = topDocs.scoreDocs;
assertNotNull(sd);
assertEquals("Score doc count", count, sd.length );
- doc=searcher.doc(sd[0].doc);
+ doc=searcher.doc2(sd[0].doc);
assertEquals("First doc", startOffset, Long.parseLong(doc.get(field)) );
- doc=searcher.doc(sd[sd.length-1].doc);
+ doc=searcher.doc2(sd[sd.length-1].doc);
assertEquals("Last doc", (count-1)*distance+startOffset, Long.parseLong(doc.get(field)) );
}
@@ -263,9 +263,9 @@
ScoreDoc[] sd = topDocs.scoreDocs;
assertNotNull(sd);
assertEquals("Score doc count", noDocs-count, sd.length );
- org.apache.lucene.document.Document doc=searcher.doc(sd[0].doc);
+ Document doc=searcher.doc2(sd[0].doc);
assertEquals("First doc", count*distance+startOffset, Long.parseLong(doc.get(field)) );
- doc=searcher.doc(sd[sd.length-1].doc);
+ doc=searcher.doc2(sd[sd.length-1].doc);
assertEquals("Last doc", (noDocs-1)*distance+startOffset, Long.parseLong(doc.get(field)) );
q=NumericRangeQuery.newLongRange(field, precisionStep, lower, null, true, false);
@@ -273,9 +273,9 @@
sd = topDocs.scoreDocs;
assertNotNull(sd);
assertEquals("Score doc count", noDocs-count, sd.length );
- doc=searcher.doc(sd[0].doc);
+ doc=searcher.doc2(sd[0].doc);
assertEquals("First doc", count*distance+startOffset, Long.parseLong(doc.get(field)) );
- doc=searcher.doc(sd[sd.length-1].doc);
+ doc=searcher.doc2(sd[sd.length-1].doc);
assertEquals("Last doc", (noDocs-1)*distance+startOffset, Long.parseLong(doc.get(field)) );
}
Index: lucene/src/test/org/apache/lucene/search/TestSort.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestSort.java (revision 1148487)
+++ lucene/src/test/org/apache/lucene/search/TestSort.java (working copy)
@@ -35,6 +35,7 @@
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
@@ -350,18 +351,18 @@
int lastDocId = 0;
boolean fail = false;
for (int x = 0; x < n; ++x) {
- org.apache.lucene.document.Document doc2 = searcher.doc(result[x].doc);
- String[] v = doc2.getValues("tracer");
- String[] v2 = doc2.getValues("tracer2");
+ Document doc2 = searcher.doc2(result[x].doc);
+ IndexableField[] v = doc2.getFields("tracer");
+ IndexableField[] v2 = doc2.getFields("tracer2");
for (int j = 0; j < v.length; ++j) {
if (last != null) {
- int cmp = v[j].compareTo(last);
+ int cmp = v[j].stringValue().compareTo(last);
if (!(cmp >= 0)) { // ensure first field is in order
fail = true;
System.out.println("fail:" + v[j] + " < " + last);
}
if (cmp == 0) { // ensure second field is in reverse order
- cmp = v2[j].compareTo(lastSub);
+ cmp = v2[j].stringValue().compareTo(lastSub);
if (cmp > 0) {
fail = true;
System.out.println("rev field fail:" + v2[j] + " > " + lastSub);
@@ -373,8 +374,8 @@
}
}
}
- last = v[j];
- lastSub = v2[j];
+ last = v[j].stringValue();
+ lastSub = v2[j].stringValue();
lastDocId = result[x].doc;
buff.append(v[j] + "(" + v2[j] + ")(" + result[x].doc+") ");
}
@@ -956,10 +957,10 @@
StringBuilder buff = new StringBuilder(10);
int n = result.length;
for (int i=0; i<n; i++) {
[NOTE(review): diff content lost during extraction — the stripped span includes the rest of the TestSort.java hunk and the Index/hunk header of the benchmark DocMaker.java; recover from the original patch]
fields = new HashMap<String,Field>();
// Initialize the map with the default fields.
- fields.put(BODY_FIELD, new Field(BODY_FIELD, "", bodyStore, bodyIndex, termVector));
- fields.put(TITLE_FIELD, new Field(TITLE_FIELD, "", store, index, termVector));
- fields.put(DATE_FIELD, new Field(DATE_FIELD, "", store, index, termVector));
- fields.put(ID_FIELD, new Field(ID_FIELD, "", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
- fields.put(NAME_FIELD, new Field(NAME_FIELD, "", store, index, termVector));
+ fields.put(BODY_FIELD, new Field(BODY_FIELD, bodyFt, ""));
+ fields.put(TITLE_FIELD, new Field(TITLE_FIELD, ft, ""));
+ fields.put(DATE_FIELD, new Field(DATE_FIELD, ft, ""));
+ fields.put(ID_FIELD, new Field(ID_FIELD, StringField.TYPE_STORED, ""));
+ fields.put(NAME_FIELD, new Field(NAME_FIELD, ft, ""));
numericFields.put(DATE_MSEC_FIELD, new NumericField(DATE_MSEC_FIELD));
numericFields.put(TIME_SEC_FIELD, new NumericField(TIME_SEC_FIELD));
@@ -125,14 +125,14 @@
* reuseFields was set to true, then it attempts to reuse a
* Field instance. If such a field does not exist, it creates a new one.
*/
- Field getField(String name, Store store, Index index, TermVector termVector) {
+ Field getField(String name, FieldType ft) {
if (!reuseFields) {
- return new Field(name, "", store, index, termVector);
+ return new Field(name, ft, "");
}
Field f = fields.get(name);
if (f == null) {
- f = new Field(name, "", store, index, termVector);
+ f = new Field(name, ft, "");
fields.put(name, f);
}
return f;
@@ -179,12 +179,9 @@
protected Config config;
- protected Store storeVal = Store.NO;
- protected Store bodyStoreVal = Store.NO;
- protected Index indexVal = Index.ANALYZED_NO_NORMS;
- protected Index bodyIndexVal = Index.ANALYZED;
- protected TermVector termVecVal = TermVector.NO;
-
+ protected FieldType valType;
+ protected FieldType bodyValType;
+
protected ContentSource source;
protected boolean reuseFields;
protected boolean indexProperties;
@@ -196,6 +193,13 @@
private int printNum = 0;
+ public DocMaker() {
+ valType = new FieldType(TextField.TYPE_UNSTORED);
+ valType.setOmitNorms(true);
+
+ bodyValType = new FieldType(TextField.TYPE_UNSTORED);
+ }
+
// create a doc
// use only part of the body, modify it to keep the rest (or use all if size==0).
// reset the docdata properties so they are not added more than once.
@@ -206,7 +210,10 @@
doc.getFields().clear();
// Set ID_FIELD
- Field idField = ds.getField(ID_FIELD, storeVal, Index.NOT_ANALYZED_NO_NORMS, termVecVal);
+ FieldType ft = new FieldType(valType);
+ ft.setIndexed(false);
+
+ Field idField = ds.getField(ID_FIELD, ft);
int id;
if (r != null) {
id = r.nextInt(updateDocIDLimit);
@@ -223,7 +230,7 @@
String name = docData.getName();
if (name == null) name = "";
name = cnt < 0 ? name : name + "_" + cnt;
- Field nameField = ds.getField(NAME_FIELD, storeVal, indexVal, termVecVal);
+ Field nameField = ds.getField(NAME_FIELD, valType);
nameField.setValue(name);
doc.add(nameField);
@@ -242,7 +249,7 @@
} else {
dateString = "";
}
- Field dateStringField = ds.getField(DATE_FIELD, storeVal, indexVal, termVecVal);
+ Field dateStringField = ds.getField(DATE_FIELD, valType);
dateStringField.setValue(dateString);
doc.add(dateStringField);
@@ -264,7 +271,7 @@
// Set TITLE_FIELD
String title = docData.getTitle();
- Field titleField = ds.getField(TITLE_FIELD, storeVal, indexVal, termVecVal);
+ Field titleField = ds.getField(TITLE_FIELD, valType);
titleField.setValue(title == null ? "" : title);
doc.add(titleField);
@@ -285,12 +292,12 @@
bdy = body.substring(0, size); // use part
docData.setBody(body.substring(size)); // some left
}
- Field bodyField = ds.getField(BODY_FIELD, bodyStoreVal, bodyIndexVal, termVecVal);
+ Field bodyField = ds.getField(BODY_FIELD, bodyValType);
bodyField.setValue(bdy);
doc.add(bodyField);
if (storeBytes) {
- Field bytesField = ds.getField(BYTES_FIELD, Store.YES, Index.NOT_ANALYZED_NO_NORMS, TermVector.NO);
+ Field bytesField = ds.getField(BYTES_FIELD, StringField.TYPE_STORED);
bytesField.setValue(bdy.getBytes("UTF-8"));
doc.add(bytesField);
}
@@ -300,7 +307,7 @@
Properties props = docData.getProps();
if (props != null) {
for (final Map.Entry