Index: core/src/java/org/apache/lucene/codecs/DocValuesConsumer.java =================================================================== --- core/src/java/org/apache/lucene/codecs/DocValuesConsumer.java (revision 1340366) +++ core/src/java/org/apache/lucene/codecs/DocValuesConsumer.java (working copy) @@ -32,16 +32,16 @@ import org.apache.lucene.index.DocValues.Source; import org.apache.lucene.index.DocValues.Type; import org.apache.lucene.index.DocValues; -import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.StorableField; import org.apache.lucene.index.MergeState; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; /** - * Abstract API that consumes {@link IndexableField}s. + * Abstract API that consumes {@link StorableField}s. * {@link DocValuesConsumer} are always associated with a specific field and * segments. Concrete implementations of this API write the given - * {@link IndexableField} into a implementation specific format depending on + * {@link StorableField} into a implementation specific format depending on * the fields meta-data. * * @lucene.experimental @@ -52,7 +52,7 @@ protected abstract Type getType(); /** - * Adds the given {@link IndexableField} instance to this + * Adds the given {@link StorableField} instance to this * {@link DocValuesConsumer} * * @param docID @@ -63,7 +63,7 @@ * @throws IOException * if an {@link IOException} occurs */ - public abstract void add(int docID, IndexableField value) + public abstract void add(int docID, StorableField value) throws IOException; /** @@ -72,7 +72,7 @@ * @param docCount * the total number of documents in this {@link DocValuesConsumer}. * Must be greater than or equal the last given docID to - * {@link #add(int, IndexableField)}. + * {@link #add(int, StorableField)}. 
* @throws IOException */ public abstract void finish(int docCount) throws IOException; Index: core/src/java/org/apache/lucene/codecs/lucene40/Lucene40StoredFieldsWriter.java =================================================================== --- core/src/java/org/apache/lucene/codecs/lucene40/Lucene40StoredFieldsWriter.java (revision 1340366) +++ core/src/java/org/apache/lucene/codecs/lucene40/Lucene40StoredFieldsWriter.java (working copy) @@ -21,6 +21,7 @@ import org.apache.lucene.codecs.StoredFieldsReader; import org.apache.lucene.codecs.StoredFieldsWriter; import org.apache.lucene.document.Document; +import org.apache.lucene.document.StoredDocument; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.IndexFileNames; @@ -28,6 +29,7 @@ import org.apache.lucene.index.MergePolicy.MergeAbortedException; import org.apache.lucene.index.MergeState; import org.apache.lucene.index.SegmentReader; +import org.apache.lucene.index.StorableField; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; @@ -131,7 +133,7 @@ IndexFileNames.segmentFileName(segment, "", FIELDS_INDEX_EXTENSION)); } - public void writeField(FieldInfo info, IndexableField field) throws IOException { + public void writeField(FieldInfo info, StorableField field) throws IOException { fieldsStream.writeVInt(info.number); int bits = 0; final BytesRef bytes; @@ -297,7 +299,7 @@ // on the fly? 
// NOTE: it's very important to first assign to doc then pass it to // fieldsWriter.addDocument; see LUCENE-1282 - Document doc = reader.reader.document(j); + StoredDocument doc = reader.reader.document(j); addDocument(doc, mergeState.fieldInfos); docCount++; mergeState.checkAbort.work(300); @@ -324,7 +326,7 @@ for (; docCount < maxDoc; docCount++) { // NOTE: it's very important to first assign to doc then pass it to // fieldsWriter.addDocument; see LUCENE-1282 - Document doc = reader.reader.document(docCount); + StoredDocument doc = reader.reader.document(docCount); addDocument(doc, mergeState.fieldInfos); mergeState.checkAbort.work(300); } Index: core/src/java/org/apache/lucene/codecs/lucene40/values/Bytes.java =================================================================== --- core/src/java/org/apache/lucene/codecs/lucene40/values/Bytes.java (revision 1340366) +++ core/src/java/org/apache/lucene/codecs/lucene40/values/Bytes.java (working copy) @@ -29,6 +29,7 @@ import org.apache.lucene.index.DocValues; import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.StorableField; import org.apache.lucene.store.DataOutput; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; @@ -415,7 +416,7 @@ } @Override - public void add(int docID, IndexableField value) throws IOException { + public void add(int docID, StorableField value) throws IOException { BytesRef bytes = value.binaryValue(); assert bytes != null; if (bytes.length == 0) { // default value - skip it Index: core/src/java/org/apache/lucene/codecs/lucene40/values/FixedStraightBytesImpl.java =================================================================== --- core/src/java/org/apache/lucene/codecs/lucene40/values/FixedStraightBytesImpl.java (revision 1340366) +++ core/src/java/org/apache/lucene/codecs/lucene40/values/FixedStraightBytesImpl.java (working copy) @@ -28,6 +28,7 @@ import 
org.apache.lucene.index.DocValues.Type; import org.apache.lucene.index.DocValues; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.StorableField; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; @@ -74,7 +75,7 @@ } @Override - public void add(int docID, IndexableField value) throws IOException { + public void add(int docID, StorableField value) throws IOException { final BytesRef bytes = value.binaryValue(); assert bytes != null; assert lastDocID < docID; Index: core/src/java/org/apache/lucene/codecs/lucene40/values/Floats.java =================================================================== --- core/src/java/org/apache/lucene/codecs/lucene40/values/Floats.java (revision 1340366) +++ core/src/java/org/apache/lucene/codecs/lucene40/values/Floats.java (working copy) @@ -24,6 +24,7 @@ import org.apache.lucene.index.DocValues.Type; import org.apache.lucene.index.DocValues; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.StorableField; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; @@ -88,7 +89,7 @@ } @Override - public void add(int docID, IndexableField value) throws IOException { + public void add(int docID, StorableField value) throws IOException { template.toBytes(value.numericValue().doubleValue(), bytesRef); bytesSpareField.setBytesValue(bytesRef); super.add(docID, bytesSpareField); Index: core/src/java/org/apache/lucene/codecs/lucene40/values/Ints.java =================================================================== --- core/src/java/org/apache/lucene/codecs/lucene40/values/Ints.java (revision 1340366) +++ core/src/java/org/apache/lucene/codecs/lucene40/values/Ints.java (working copy) @@ -25,6 +25,7 @@ import org.apache.lucene.index.DocValues.Type; import org.apache.lucene.index.DocValues; import org.apache.lucene.index.IndexableField; 
+import org.apache.lucene.index.StorableField; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; @@ -112,7 +113,7 @@ } @Override - public void add(int docID, IndexableField value) throws IOException { + public void add(int docID, StorableField value) throws IOException { template.toBytes(value.numericValue().longValue(), bytesRef); bytesSpareField.setBytesValue(bytesRef); super.add(docID, bytesSpareField); Index: core/src/java/org/apache/lucene/codecs/lucene40/values/PackedIntValues.java =================================================================== --- core/src/java/org/apache/lucene/codecs/lucene40/values/PackedIntValues.java (revision 1340366) +++ core/src/java/org/apache/lucene/codecs/lucene40/values/PackedIntValues.java (working copy) @@ -25,6 +25,7 @@ import org.apache.lucene.index.DocValues; import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.StorableField; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; @@ -126,7 +127,7 @@ } @Override - public void add(int docID, IndexableField docValue) throws IOException { + public void add(int docID, StorableField docValue) throws IOException { final long v = docValue.numericValue().longValue(); assert lastDocId < docID; if (!started) { Index: core/src/java/org/apache/lucene/codecs/lucene40/values/VarStraightBytesImpl.java =================================================================== --- core/src/java/org/apache/lucene/codecs/lucene40/values/VarStraightBytesImpl.java (revision 1340366) +++ core/src/java/org/apache/lucene/codecs/lucene40/values/VarStraightBytesImpl.java (working copy) @@ -27,6 +27,7 @@ import org.apache.lucene.index.DocValues.Type; import org.apache.lucene.index.DocValues; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.StorableField; 
import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; @@ -85,7 +86,7 @@ } @Override - public void add(int docID, IndexableField value) throws IOException { + public void add(int docID, StorableField value) throws IOException { final BytesRef bytes = value.binaryValue(); assert bytes != null; assert !merge; Index: core/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDocValuesConsumer.java =================================================================== --- core/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDocValuesConsumer.java (revision 1340366) +++ core/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDocValuesConsumer.java (working copy) @@ -21,7 +21,7 @@ import org.apache.lucene.codecs.DocValuesConsumer; import org.apache.lucene.index.DocValues.Type; import org.apache.lucene.index.IndexFileNames; -import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.StorableField; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexOutput; @@ -72,7 +72,7 @@ } @Override - public void add(int docID, IndexableField value) throws IOException { + public void add(int docID, StorableField value) throws IOException { assert docID >= 0; final int ord, vSize; switch (type) { Index: core/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsWriter.java =================================================================== --- core/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsWriter.java (revision 1340366) +++ core/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsWriter.java (working copy) @@ -22,7 +22,7 @@ import org.apache.lucene.codecs.StoredFieldsWriter; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.IndexFileNames; -import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.StorableField; import 
org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexOutput; @@ -88,7 +88,7 @@ } @Override - public void writeField(FieldInfo info, IndexableField field) throws IOException { + public void writeField(FieldInfo info, StorableField field) throws IOException { write(FIELD); write(Integer.toString(info.number)); newLine(); Index: core/src/java/org/apache/lucene/codecs/StoredFieldsWriter.java =================================================================== --- core/src/java/org/apache/lucene/codecs/StoredFieldsWriter.java (revision 1340366) +++ core/src/java/org/apache/lucene/codecs/StoredFieldsWriter.java (working copy) @@ -4,10 +4,12 @@ import java.io.IOException; import org.apache.lucene.document.Document; +import org.apache.lucene.document.StoredDocument; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.FieldInfos; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.MergeState; +import org.apache.lucene.index.StorableField; import org.apache.lucene.util.Bits; /** @@ -32,7 +34,7 @@ *
    *
  1. For every document, {@link #startDocument(int)} is called, * informing the Codec how many fields will be written. - *
  2. {@link #writeField(FieldInfo, IndexableField)} is called for + *
  3. {@link #writeField(FieldInfo, StorableField)} is called for * each field in the document. *
  4. After all documents have been written, {@link #finish(int)} * is called for verification/sanity-checks. @@ -44,14 +46,14 @@ public abstract class StoredFieldsWriter implements Closeable { /** Called before writing the stored fields of the document. - * {@link #writeField(FieldInfo, IndexableField)} will be called + * {@link #writeField(FieldInfo, StorableField)} will be called * numStoredFields times. Note that this is * called even if the document has no stored fields, in * this case numStoredFields will be zero. */ public abstract void startDocument(int numStoredFields) throws IOException; /** Writes a single stored field. */ - public abstract void writeField(FieldInfo info, IndexableField field) throws IOException; + public abstract void writeField(FieldInfo info, StorableField field) throws IOException; /** Aborts writing entirely, implementation should remove * any partially-written files, etc. */ @@ -68,7 +70,7 @@ /** Merges in the stored fields from the readers in * mergeState. The default implementation skips * over deleted documents, and uses {@link #startDocument(int)}, - * {@link #writeField(FieldInfo, IndexableField)}, and {@link #finish(int)}, + * {@link #writeField(FieldInfo, StorableField)}, and {@link #finish(int)}, * returning the number of documents that were written. * Implementations can override this method for more sophisticated * merging (bulk-byte copying, etc). */ @@ -88,7 +90,7 @@ // on the fly? 
// NOTE: it's very important to first assign to doc then pass it to // fieldsWriter.addDocument; see LUCENE-1282 - Document doc = reader.reader.document(i); + StoredDocument doc = reader.reader.document(i); addDocument(doc, mergeState.fieldInfos); docCount++; mergeState.checkAbort.work(300); @@ -99,20 +101,16 @@ } /** sugar method for startDocument() + writeField() for every stored field in the document */ - protected final void addDocument(Iterable doc, FieldInfos fieldInfos) throws IOException { + protected final void addDocument(Iterable doc, FieldInfos fieldInfos) throws IOException { int storedCount = 0; - for (IndexableField field : doc) { - if (field.fieldType().stored()) { - storedCount++; - } + for (StorableField field : doc) { + storedCount++; } startDocument(storedCount); - for (IndexableField field : doc) { - if (field.fieldType().stored()) { + for (StorableField field : doc) { writeField(fieldInfos.fieldInfo(field.name()), field); - } } } } Index: core/src/java/org/apache/lucene/document/Document.java =================================================================== --- core/src/java/org/apache/lucene/document/Document.java (revision 1340366) +++ core/src/java/org/apache/lucene/document/Document.java (working copy) @@ -21,6 +21,7 @@ import org.apache.lucene.index.IndexReader; // for javadoc import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.StorableField; import org.apache.lucene.search.IndexSearcher; // for javadoc import org.apache.lucene.search.ScoreDoc; // for javadoc import org.apache.lucene.util.BytesRef; Index: core/src/java/org/apache/lucene/document/DocumentStoredFieldVisitor.java =================================================================== --- core/src/java/org/apache/lucene/document/DocumentStoredFieldVisitor.java (revision 1340366) +++ core/src/java/org/apache/lucene/document/DocumentStoredFieldVisitor.java (working copy) @@ -34,7 +34,7 @@ * @lucene.experimental */ public class DocumentStoredFieldVisitor 
extends StoredFieldVisitor { - private final Document doc = new Document(); + private final StoredDocument doc = new StoredDocument(); private final Set fieldsToAdd; /** Load only fields named in the provided Set<String>. */ @@ -62,13 +62,16 @@ @Override public void stringField(FieldInfo fieldInfo, String value) throws IOException { + /* final FieldType ft = new FieldType(TextField.TYPE_STORED); ft.setStoreTermVectors(fieldInfo.storeTermVector); ft.setStoreTermVectors(fieldInfo.storeTermVector); ft.setIndexed(fieldInfo.isIndexed); ft.setOmitNorms(fieldInfo.omitNorms); ft.setIndexOptions(fieldInfo.indexOptions); - doc.add(new Field(fieldInfo.name, value, ft)); + */ + doc.add(new StoredField(fieldInfo.name, value)); + //doc.add(new Field(fieldInfo.name, value, ft)); } @Override @@ -96,7 +99,7 @@ return fieldsToAdd == null || fieldsToAdd.contains(fieldInfo.name) ? Status.YES : Status.NO; } - public Document getDocument() { + public StoredDocument getDocument() { return doc; } } Index: core/src/java/org/apache/lucene/document/Field.java =================================================================== --- core/src/java/org/apache/lucene/document/Field.java (revision 1340366) +++ core/src/java/org/apache/lucene/document/Field.java (working copy) @@ -31,6 +31,7 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.IndexableFieldType; import org.apache.lucene.index.Norm; // javadocs +import org.apache.lucene.index.StorableField; import org.apache.lucene.util.BytesRef; import org.apache.lucene.index.FieldInvertState; // javadocs Index: core/src/java/org/apache/lucene/document/StoredDocument.java =================================================================== --- core/src/java/org/apache/lucene/document/StoredDocument.java (revision 0) +++ core/src/java/org/apache/lucene/document/StoredDocument.java (working copy) @@ -0,0 +1,192 @@ +package org.apache.lucene.document; + +import java.util.ArrayList; +import java.util.Iterator; +import 
java.util.List; + +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.StorableField; +import org.apache.lucene.index.StorableField; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.util.BytesRef; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +public class StoredDocument implements Iterable{ + + private final List fields = new ArrayList(); + + + public final void add(StorableField field) { + fields.add(field); + } + + public StorableField[] getFields(String name) { + List result = new ArrayList(); + for (StorableField field : fields) { + if (field.name().equals(name)) { + result.add(field); + } + } + + return result.toArray(new StorableField[result.size()]); + } + + public final StorableField getField(String name) { + for (StorableField field : fields) { + if (field.name().equals(name)) { + return field; + } + } + return null; + } + + public final void removeField(String name) { + Iterator it = fields.iterator(); + while (it.hasNext()) { + StorableField field = it.next(); + if (field.name().equals(name)) { + it.remove(); + return; + } + } + } + + /** + *

    Removes all fields with the given name from the document. + * If there is no field with the specified name, the document remains unchanged.

    + *

    Note that the removeField(s) methods, like the add method, only make sense + * prior to adding a document to an index. These methods cannot + * be used to change the content of an existing index! In order to achieve this, + * a document has to be deleted from an index and a new changed version of that + * document has to be added.

    + */ + public final void removeFields(String name) { + Iterator it = fields.iterator(); + while (it.hasNext()) { + StorableField field = it.next(); + if (field.name().equals(name)) { + it.remove(); + } + } + } + + public final List getFields() { + return fields; + } + + @Override + public Iterator iterator() { + return this.fields.iterator(); + } + + /** + * Returns an array of byte arrays for of the fields that have the name specified + * as the method parameter. This method returns an empty + * array when there are no matching fields. It never + * returns null. + * + * @param name the name of the field + * @return a byte[][] of binary field values + */ + public final BytesRef[] getBinaryValues(String name) { + final List result = new ArrayList(); + for (StorableField field : fields) { + if (field.name().equals(name)) { + final BytesRef bytes = field.binaryValue(); + if (bytes != null) { + result.add(bytes); + } + } + } + + return result.toArray(new BytesRef[result.size()]); + } + + /** + * Returns an array of bytes for the first (or only) field that has the name + * specified as the method parameter. This method will return null + * if no binary fields with the specified name are available. + * There may be non-binary fields with the same name. + * + * @param name the name of the field. + * @return a byte[] containing the binary field value or null + */ + public final BytesRef getBinaryValue(String name) { + for (StorableField field : fields) { + if (field.name().equals(name)) { + final BytesRef bytes = field.binaryValue(); + if (bytes != null) { + return bytes; + } + } + } + return null; + } + private final static String[] NO_STRINGS = new String[0]; + + /** + * Returns an array of values of the field specified as the method parameter. + * This method returns an empty array when there are no + * matching fields. It never returns null. 
+ * For {@link IntField}, {@link LongField}, {@link + * FloatField} and {@link DoubleField} it returns the string value of the number. If you want + * the actual numeric field instances back, use {@link #getFields}. + * @param name the name of the field + * @return a String[] of field values + */ + public final String[] getValues(String name) { + List result = new ArrayList(); + for (StorableField field : fields) { + if (field.name().equals(name) && field.stringValue() != null) { + result.add(field.stringValue()); + } + } + + if (result.size() == 0) { + return NO_STRINGS; + } + + return result.toArray(new String[result.size()]); + } + + /** Returns the string value of the field with the given name if any exist in + * this document, or null. If multiple fields exist with this name, this + * method returns the first value added. If only binary fields with this name + * exist, returns null. + * For {@link IntField}, {@link LongField}, {@link + * FloatField} and {@link DoubleField} it returns the string value of the number. If you want + * the actual numeric field instance back, use {@link #getField}. 
+ */ + public final String get(String name) { + for (StorableField field : fields) { + if (field.name().equals(name) && field.stringValue() != null) { + return field.stringValue(); + } + } + return null; + } + + public Document asIndexable() { + Document doc = new Document(); + /* TODO */ + + return doc; + } +} Index: core/src/java/org/apache/lucene/document/StoredField.java =================================================================== --- core/src/java/org/apache/lucene/document/StoredField.java (revision 1340366) +++ core/src/java/org/apache/lucene/document/StoredField.java (working copy) @@ -1,6 +1,7 @@ package org.apache.lucene.document; import org.apache.lucene.index.IndexReader; // javadocs +import org.apache.lucene.index.StorableField; import org.apache.lucene.search.IndexSearcher; // javadocs import org.apache.lucene.util.BytesRef; @@ -24,7 +25,7 @@ /** A field whose value is stored so that {@link * IndexSearcher#doc} and {@link IndexReader#document} will * return the field and its value. 
*/ -public final class StoredField extends Field { +public final class StoredField extends Field implements StorableField { public final static FieldType TYPE; static { Index: core/src/java/org/apache/lucene/index/CheckIndex.java =================================================================== --- core/src/java/org/apache/lucene/index/CheckIndex.java (revision 1340366) +++ core/src/java/org/apache/lucene/index/CheckIndex.java (working copy) @@ -31,6 +31,7 @@ import org.apache.lucene.codecs.Codec; import org.apache.lucene.document.Document; import org.apache.lucene.document.FieldType; // for javadocs +import org.apache.lucene.document.StoredDocument; import org.apache.lucene.index.DocValues.SortedSource; import org.apache.lucene.index.DocValues.Source; import org.apache.lucene.search.DocIdSetIterator; @@ -1199,7 +1200,7 @@ for (int j = 0; j < info.docCount; ++j) { // Intentionally pull even deleted documents to // make sure they too are not corrupt: - Document doc = reader.document(j); + StoredDocument doc = reader.document(j); if (liveDocs == null || liveDocs.get(j)) { status.docCount++; status.totFields += doc.getFields().size(); Index: core/src/java/org/apache/lucene/index/IndexableField.java =================================================================== --- core/src/java/org/apache/lucene/index/IndexableField.java (revision 1340366) +++ core/src/java/org/apache/lucene/index/IndexableField.java (working copy) @@ -33,30 +33,12 @@ * * @lucene.experimental */ -public interface IndexableField { +public interface IndexableField extends StorableField { - /** Field name */ - public String name(); - /** {@link IndexableFieldType} describing the properties * of this field. */ public IndexableFieldType fieldType(); - - /** Field boost (you must pre-multiply in any doc boost). 
*/ - public float boost(); - /** Non-null if this field has a binary value */ - public BytesRef binaryValue(); - - /** Non-null if this field has a string value */ - public String stringValue(); - - /** Non-null if this field has a Reader value */ - public Reader readerValue(); - - /** Non-null if this field has a numeric value */ - public Number numericValue(); - /** * Creates the TokenStream used for indexing this field. If appropriate, * implementations should use the given Analyzer to create the TokenStreams. Index: core/src/java/org/apache/lucene/index/IndexReader.java =================================================================== --- core/src/java/org/apache/lucene/index/IndexReader.java (revision 1340366) +++ core/src/java/org/apache/lucene/index/IndexReader.java (working copy) @@ -27,116 +27,130 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.DocumentStoredFieldVisitor; +import org.apache.lucene.document.StoredDocument; import org.apache.lucene.search.SearcherManager; // javadocs import org.apache.lucene.store.*; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.ReaderUtil; // for javadocs -/** IndexReader is an abstract class, providing an interface for accessing an - index. Search of an index is done entirely through this abstract interface, - so that any subclass which implements it is searchable. - -

    There are two different types of IndexReaders: -

      -
    • {@link AtomicReader}: These indexes do not consist of several sub-readers, - they are atomic. They support retrieval of stored fields, doc values, terms, - and postings. -
    • {@link CompositeReader}: Instances (like {@link DirectoryReader}) - of this reader can only - be used to get stored fields from the underlying AtomicReaders, - but it is not possible to directly retrieve postings. To do that, get - the sub-readers via {@link CompositeReader#getSequentialSubReaders}. - Alternatively, you can mimic an {@link AtomicReader} (with a serious slowdown), - by wrapping composite readers with {@link SlowCompositeReaderWrapper}. -
    - -

    IndexReader instances for indexes on disk are usually constructed - with a call to one of the static DirectoryReader,open() methods, - e.g. {@link DirectoryReader#open(Directory)}. {@link DirectoryReader} implements - the {@link CompositeReader} interface, it is not possible to directly get postings. - -

    For efficiency, in this API documents are often referred to via - document numbers, non-negative integers which each name a unique - document in the index. These document numbers are ephemeral -- they may change - as documents are added to and deleted from an index. Clients should thus not - rely on a given document having the same number between sessions. - -

    -

    NOTE: {@link - IndexReader} instances are completely thread - safe, meaning multiple threads can call any of its methods, - concurrently. If your application requires external - synchronization, you should not synchronize on the - IndexReader instance; use your own - (non-Lucene) objects instead. -*/ +/** + * IndexReader is an abstract class, providing an interface for accessing an + * index. Search of an index is done entirely through this abstract interface, + * so that any subclass which implements it is searchable. + * + *

    + * There are two different types of IndexReaders: + *

      + *
    • {@link AtomicReader}: These indexes do not consist of several + * sub-readers, they are atomic. They support retrieval of stored fields, doc + * values, terms, and postings. + *
    • {@link CompositeReader}: Instances (like {@link DirectoryReader}) of this + * reader can only be used to get stored fields from the underlying + * AtomicReaders, but it is not possible to directly retrieve postings. To do + * that, get the sub-readers via {@link CompositeReader#getSequentialSubReaders} + * . Alternatively, you can mimic an {@link AtomicReader} (with a serious + * slowdown), by wrapping composite readers with + * {@link SlowCompositeReaderWrapper}. + *
    + * + *

    + * IndexReader instances for indexes on disk are usually constructed with a call + * to one of the static DirectoryReader,open() methods, e.g. + * {@link DirectoryReader#open(Directory)}. {@link DirectoryReader} implements + * the {@link CompositeReader} interface, it is not possible to directly get + * postings. + * + *

    + * For efficiency, in this API documents are often referred to via document + * numbers, non-negative integers which each name a unique document in the + * index. These document numbers are ephemeral -- they may change as documents + * are added to and deleted from an index. Clients should thus not rely on a + * given document having the same number between sessions. + * + *

    + * + *

    + * NOTE: {@link IndexReader} instances are completely thread safe, + * meaning multiple threads can call any of its methods, concurrently. If your + * application requires external synchronization, you should not + * synchronize on the IndexReader instance; use your own + * (non-Lucene) objects instead. + */ public abstract class IndexReader implements Closeable { private boolean closed = false; private boolean closedByChild = false; private final AtomicInteger refCount = new AtomicInteger(1); - + IndexReader() { - if (!(this instanceof CompositeReader || this instanceof AtomicReader)) - throw new Error("IndexReader should never be directly extended, subclass AtomicReader or CompositeReader instead."); + if (!(this instanceof CompositeReader || this instanceof AtomicReader)) throw new Error( + "IndexReader should never be directly extended, subclass AtomicReader or CompositeReader instead."); } /** - * A custom listener that's invoked when the IndexReader - * is closed. - * + * A custom listener that's invoked when the IndexReader is closed. + * * @lucene.experimental */ public static interface ReaderClosedListener { public void onClose(IndexReader reader); } - - private final Set readerClosedListeners = - Collections.synchronizedSet(new LinkedHashSet()); - - private final Set parentReaders = - Collections.synchronizedSet(Collections.newSetFromMap(new WeakHashMap())); - - /** Expert: adds a {@link ReaderClosedListener}. The - * provided listener will be invoked when this reader is closed. - * - * @lucene.experimental */ + + private final Set readerClosedListeners = Collections + .synchronizedSet(new LinkedHashSet()); + + private final Set parentReaders = Collections + .synchronizedSet(Collections + .newSetFromMap(new WeakHashMap())); + + /** + * Expert: adds a {@link ReaderClosedListener}. The provided listener will be + * invoked when this reader is closed. 
+ * + * @lucene.experimental + */ public final void addReaderClosedListener(ReaderClosedListener listener) { ensureOpen(); readerClosedListeners.add(listener); } - - /** Expert: remove a previously added {@link ReaderClosedListener}. - * - * @lucene.experimental */ + + /** + * Expert: remove a previously added {@link ReaderClosedListener}. + * + * @lucene.experimental + */ public final void removeReaderClosedListener(ReaderClosedListener listener) { ensureOpen(); readerClosedListeners.remove(listener); } - /** Expert: This method is called by {@code IndexReader}s which wrap other readers - * (e.g. {@link CompositeReader} or {@link FilterAtomicReader}) to register the parent - * at the child (this reader) on construction of the parent. When this reader is closed, - * it will mark all registered parents as closed, too. The references to parent readers - * are weak only, so they can be GCed once they are no longer in use. - * @lucene.experimental */ + /** + * Expert: This method is called by {@code IndexReader}s which wrap other + * readers (e.g. {@link CompositeReader} or {@link FilterAtomicReader}) to + * register the parent at the child (this reader) on construction of the + * parent. When this reader is closed, it will mark all registered parents as + * closed, too. The references to parent readers are weak only, so they can be + * GCed once they are no longer in use. 
+ * + * @lucene.experimental + */ public final void registerParentReader(IndexReader reader) { ensureOpen(); parentReaders.add(reader); } - + private void notifyReaderClosedListeners() { - synchronized(readerClosedListeners) { - for(ReaderClosedListener listener : readerClosedListeners) { + synchronized (readerClosedListeners) { + for (ReaderClosedListener listener : readerClosedListeners) { listener.onClose(this); } } } - + private void reportCloseToParentReaders() { - synchronized(parentReaders) { - for(IndexReader parent : parentReaders) { + synchronized (parentReaders) { + for (IndexReader parent : parentReaders) { parent.closedByChild = true; // cross memory barrier by a fake write: parent.refCount.addAndGet(0); @@ -145,7 +159,7 @@ } } } - + /** Expert: returns the current refCount for this reader */ public final int getRefCount() { // NOTE: don't ensureOpen, so that callers can see @@ -154,17 +168,14 @@ } /** - * Expert: increments the refCount of this IndexReader - * instance. RefCounts are used to determine when a - * reader can be closed safely, i.e. as soon as there are - * no more references. Be sure to always call a - * corresponding {@link #decRef}, in a finally clause; - * otherwise the reader may never be closed. Note that - * {@link #close} simply calls decRef(), which means that - * the IndexReader will not really be closed until {@link - * #decRef} has been called for all outstanding - * references. - * + * Expert: increments the refCount of this IndexReader instance. RefCounts are + * used to determine when a reader can be closed safely, i.e. as soon as there + * are no more references. Be sure to always call a corresponding + * {@link #decRef}, in a finally clause; otherwise the reader may never be + * closed. Note that {@link #close} simply calls decRef(), which means that + * the IndexReader will not really be closed until {@link #decRef} has been + * called for all outstanding references. 
+ * * @see #decRef * @see #tryIncRef */ @@ -174,46 +185,41 @@ } /** - * Expert: increments the refCount of this IndexReader - * instance only if the IndexReader has not been closed yet - * and returns true iff the refCount was - * successfully incremented, otherwise false. - * If this method returns false the reader is either - * already closed or is currently been closed. Either way this - * reader instance shouldn't be used by an application unless - * true is returned. + * Expert: increments the refCount of this IndexReader instance only if the + * IndexReader has not been closed yet and returns true iff the + * refCount was successfully incremented, otherwise false. If + * this method returns false the reader is either already closed + * or is currently been closed. Either way this reader instance shouldn't be + * used by an application unless true is returned. *

    - * RefCounts are used to determine when a - * reader can be closed safely, i.e. as soon as there are - * no more references. Be sure to always call a - * corresponding {@link #decRef}, in a finally clause; - * otherwise the reader may never be closed. Note that - * {@link #close} simply calls decRef(), which means that - * the IndexReader will not really be closed until {@link - * #decRef} has been called for all outstanding - * references. - * + * RefCounts are used to determine when a reader can be closed safely, i.e. as + * soon as there are no more references. Be sure to always call a + * corresponding {@link #decRef}, in a finally clause; otherwise the reader + * may never be closed. Note that {@link #close} simply calls decRef(), which + * means that the IndexReader will not really be closed until {@link #decRef} + * has been called for all outstanding references. + * * @see #decRef * @see #incRef */ public final boolean tryIncRef() { int count; while ((count = refCount.get()) > 0) { - if (refCount.compareAndSet(count, count+1)) { + if (refCount.compareAndSet(count, count + 1)) { return true; } } return false; } - + /** - * Expert: decreases the refCount of this IndexReader - * instance. If the refCount drops to 0, then this - * reader is closed. If an exception is hit, the refCount - * is unchanged. - * - * @throws IOException in case an IOException occurs in doClose() - * + * Expert: decreases the refCount of this IndexReader instance. If the + * refCount drops to 0, then this reader is closed. If an exception is hit, + * the refCount is unchanged. 
+ * + * @throws IOException + * in case an IOException occurs in doClose() + * * @see #incRef */ public final void decRef() throws IOException { @@ -238,28 +244,34 @@ reportCloseToParentReaders(); notifyReaderClosedListeners(); } else if (rc < 0) { - throw new IllegalStateException("too many decRef calls: refCount is " + rc + " after decrement"); + throw new IllegalStateException("too many decRef calls: refCount is " + + rc + " after decrement"); } } /** - * @throws AlreadyClosedException if this IndexReader is closed + * @throws AlreadyClosedException + * if this IndexReader is closed */ protected final void ensureOpen() throws AlreadyClosedException { if (refCount.get() <= 0) { throw new AlreadyClosedException("this IndexReader is closed"); } - // the happens before rule on reading the refCount, which must be after the fake write, + // the happens before rule on reading the refCount, which must be after the + // fake write, // ensures that we see the value: if (closedByChild) { - throw new AlreadyClosedException("this IndexReader cannot be used anymore as one of its child readers was closed"); + throw new AlreadyClosedException( + "this IndexReader cannot be used anymore as one of its child readers was closed"); } } - /** {@inheritDoc} - *

    For caching purposes, {@code IndexReader} subclasses are not allowed - * to implement equals/hashCode, so methods are declared final. - * To lookup instances from caches use {@link #getCoreCacheKey} and + /** + * {@inheritDoc} + *

    + * For caching purposes, {@code IndexReader} subclasses are not allowed to + * implement equals/hashCode, so methods are declared final. To lookup + * instances from caches use {@link #getCoreCacheKey} and * {@link #getCombinedCoreAndDeletesKey}. */ @Override @@ -267,10 +279,12 @@ return (this == obj); } - /** {@inheritDoc} - *

    For caching purposes, {@code IndexReader} subclasses are not allowed - * to implement equals/hashCode, so methods are declared final. - * To lookup instances from caches use {@link #getCoreCacheKey} and + /** + * {@inheritDoc} + *

    + * For caching purposes, {@code IndexReader} subclasses are not allowed to + * implement equals/hashCode, so methods are declared final. To lookup + * instances from caches use {@link #getCoreCacheKey} and * {@link #getCombinedCoreAndDeletesKey}. */ @Override @@ -278,188 +292,222 @@ return System.identityHashCode(this); } - /** Returns a IndexReader reading the index in the given - * Directory - * @param directory the index directory - * @throws CorruptIndexException if the index is corrupt - * @throws IOException if there is a low-level IO error + /** + * Returns a IndexReader reading the index in the given Directory + * + * @param directory + * the index directory + * @throws CorruptIndexException + * if the index is corrupt + * @throws IOException + * if there is a low-level IO error * @deprecated Use {@link DirectoryReader#open(Directory)} */ @Deprecated - public static DirectoryReader open(final Directory directory) throws CorruptIndexException, IOException { + public static DirectoryReader open(final Directory directory) + throws CorruptIndexException, IOException { return DirectoryReader.open(directory); } - /** Expert: Returns a IndexReader reading the index in the given - * Directory with the given termInfosIndexDivisor. - * @param directory the index directory - * @param termInfosIndexDivisor Subsamples which indexed - * terms are loaded into RAM. This has the same effect as {@link - * IndexWriterConfig#setTermIndexInterval} except that setting - * must be done at indexing time while this setting can be - * set per reader. When set to N, then one in every - * N*termIndexInterval terms in the index is loaded into - * memory. By setting this to a value > 1 you can reduce - * memory usage, at the expense of higher latency when - * loading a TermInfo. The default value is 1. Set this - * to -1 to skip loading the terms index entirely. 
- * @throws CorruptIndexException if the index is corrupt - * @throws IOException if there is a low-level IO error + /** + * Expert: Returns a IndexReader reading the index in the given Directory with + * the given termInfosIndexDivisor. + * + * @param directory + * the index directory + * @param termInfosIndexDivisor + * Subsamples which indexed terms are loaded into RAM. This has the + * same effect as {@link IndexWriterConfig#setTermIndexInterval} + * except that setting must be done at indexing time while this + * setting can be set per reader. When set to N, then one in every + * N*termIndexInterval terms in the index is loaded into memory. By + * setting this to a value > 1 you can reduce memory usage, at the + * expense of higher latency when loading a TermInfo. The default + * value is 1. Set this to -1 to skip loading the terms index + * entirely. + * @throws CorruptIndexException + * if the index is corrupt + * @throws IOException + * if there is a low-level IO error * @deprecated Use {@link DirectoryReader#open(Directory,int)} */ @Deprecated - public static DirectoryReader open(final Directory directory, int termInfosIndexDivisor) throws CorruptIndexException, IOException { + public static DirectoryReader open(final Directory directory, + int termInfosIndexDivisor) throws CorruptIndexException, IOException { return DirectoryReader.open(directory, termInfosIndexDivisor); } /** - * Open a near real time IndexReader from the {@link org.apache.lucene.index.IndexWriter}. - * - * @param writer The IndexWriter to open from - * @param applyAllDeletes If true, all buffered deletes will - * be applied (made visible) in the returned reader. If - * false, the deletes are not applied but remain buffered - * (in IndexWriter) so that they will be applied in the - * future. Applying deletes can be costly, so if your app - * can tolerate deleted documents being returned you might - * gain some performance by passing false. 
+ * Open a near real time IndexReader from the + * {@link org.apache.lucene.index.IndexWriter}. + * + * @param writer + * The IndexWriter to open from + * @param applyAllDeletes + * If true, all buffered deletes will be applied (made visible) in + * the returned reader. If false, the deletes are not applied but + * remain buffered (in IndexWriter) so that they will be applied in + * the future. Applying deletes can be costly, so if your app can + * tolerate deleted documents being returned you might gain some + * performance by passing false. * @return The new IndexReader * @throws CorruptIndexException - * @throws IOException if there is a low-level IO error - * + * @throws IOException + * if there is a low-level IO error + * * @see DirectoryReader#openIfChanged(DirectoryReader,IndexWriter,boolean) - * + * * @lucene.experimental * @deprecated Use {@link DirectoryReader#open(IndexWriter,boolean)} */ @Deprecated - public static DirectoryReader open(final IndexWriter writer, boolean applyAllDeletes) throws CorruptIndexException, IOException { + public static DirectoryReader open(final IndexWriter writer, + boolean applyAllDeletes) throws CorruptIndexException, IOException { return DirectoryReader.open(writer, applyAllDeletes); } - - /** Expert: returns an IndexReader reading the index in the given - * {@link IndexCommit}. - * @param commit the commit point to open - * @throws CorruptIndexException if the index is corrupt - * @throws IOException if there is a low-level IO error + + /** + * Expert: returns an IndexReader reading the index in the given + * {@link IndexCommit}. 
+ * + * @param commit + * the commit point to open + * @throws CorruptIndexException + * if the index is corrupt + * @throws IOException + * if there is a low-level IO error * @deprecated Use {@link DirectoryReader#open(IndexCommit)} */ @Deprecated - public static DirectoryReader open(final IndexCommit commit) throws CorruptIndexException, IOException { + public static DirectoryReader open(final IndexCommit commit) + throws CorruptIndexException, IOException { return DirectoryReader.open(commit); } - - - /** Expert: returns an IndexReader reading the index in the given - * {@link IndexCommit} and termInfosIndexDivisor. - * @param commit the commit point to open - * @param termInfosIndexDivisor Subsamples which indexed - * terms are loaded into RAM. This has the same effect as {@link - * IndexWriterConfig#setTermIndexInterval} except that setting - * must be done at indexing time while this setting can be - * set per reader. When set to N, then one in every - * N*termIndexInterval terms in the index is loaded into - * memory. By setting this to a value > 1 you can reduce - * memory usage, at the expense of higher latency when - * loading a TermInfo. The default value is 1. Set this - * to -1 to skip loading the terms index entirely. - * @throws CorruptIndexException if the index is corrupt - * @throws IOException if there is a low-level IO error + + /** + * Expert: returns an IndexReader reading the index in the given + * {@link IndexCommit} and termInfosIndexDivisor. + * + * @param commit + * the commit point to open + * @param termInfosIndexDivisor + * Subsamples which indexed terms are loaded into RAM. This has the + * same effect as {@link IndexWriterConfig#setTermIndexInterval} + * except that setting must be done at indexing time while this + * setting can be set per reader. When set to N, then one in every + * N*termIndexInterval terms in the index is loaded into memory. 
By + * setting this to a value > 1 you can reduce memory usage, at the + * expense of higher latency when loading a TermInfo. The default + * value is 1. Set this to -1 to skip loading the terms index + * entirely. + * @throws CorruptIndexException + * if the index is corrupt + * @throws IOException + * if there is a low-level IO error * @deprecated Use {@link DirectoryReader#open(IndexCommit,int)} */ @Deprecated - public static DirectoryReader open(final IndexCommit commit, int termInfosIndexDivisor) throws CorruptIndexException, IOException { + public static DirectoryReader open(final IndexCommit commit, + int termInfosIndexDivisor) throws CorruptIndexException, IOException { return DirectoryReader.open(commit, termInfosIndexDivisor); } - - /** Retrieve term vectors for this document, or null if - * term vectors were not indexed. The returned Fields - * instance acts like a single-document inverted index - * (the docID will be 0). */ - public abstract Fields getTermVectors(int docID) - throws IOException; - - /** Retrieve term vector for this document and field, or - * null if term vectors were not indexed. The returned - * Fields instance acts like a single-document inverted - * index (the docID will be 0). */ - public final Terms getTermVector(int docID, String field) - throws IOException { + + /** + * Retrieve term vectors for this document, or null if term vectors were not + * indexed. The returned Fields instance acts like a single-document inverted + * index (the docID will be 0). + */ + public abstract Fields getTermVectors(int docID) throws IOException; + + /** + * Retrieve term vector for this document and field, or null if term vectors + * were not indexed. The returned Fields instance acts like a single-document + * inverted index (the docID will be 0). 
+ */ + public final Terms getTermVector(int docID, String field) throws IOException { Fields vectors = getTermVectors(docID); if (vectors == null) { return null; } return vectors.terms(field); } - + /** Returns the number of documents in this index. */ public abstract int numDocs(); - - /** Returns one greater than the largest possible document number. - * This may be used to, e.g., determine how big to allocate an array which - * will have an element for every document number in an index. + + /** + * Returns one greater than the largest possible document number. This may be + * used to, e.g., determine how big to allocate an array which will have an + * element for every document number in an index. */ public abstract int maxDoc(); - + /** Returns the number of deleted documents. */ public final int numDeletedDocs() { return maxDoc() - numDocs(); } - - /** Expert: visits the fields of a stored document, for - * custom processing/loading of each field. If you - * simply want to load all fields, use {@link - * #document(int)}. If you want to load a subset, use - * {@link DocumentStoredFieldVisitor}. */ - public abstract void document(int docID, StoredFieldVisitor visitor) throws CorruptIndexException, IOException; /** + * Expert: visits the fields of a stored document, for custom + * processing/loading of each field. If you simply want to load all fields, + * use {@link #document(int)}. If you want to load a subset, use + * {@link DocumentStoredFieldVisitor}. + */ + public abstract void document(int docID, StoredFieldVisitor visitor) + throws CorruptIndexException, IOException; + + /** * Returns the stored fields of the nth - * Document in this index. This is just - * sugar for using {@link DocumentStoredFieldVisitor}. + * Document in this index. This is just sugar for using + * {@link DocumentStoredFieldVisitor}. *

    * NOTE: for performance reasons, this method does not check if the * requested document is deleted, and therefore asking for a deleted document * may yield unspecified results. Usually this is not required, however you - * can test if the doc is deleted by checking the {@link - * Bits} returned from {@link MultiFields#getLiveDocs}. - * - * NOTE: only the content of a field is returned, - * if that field was stored during indexing. Metadata - * like boost, omitNorm, IndexOptions, tokenized, etc., - * are not preserved. + * can test if the doc is deleted by checking the {@link Bits} returned from + * {@link MultiFields#getLiveDocs}. * - * @throws CorruptIndexException if the index is corrupt - * @throws IOException if there is a low-level IO error + * NOTE: only the content of a field is returned, if that field was + * stored during indexing. Metadata like boost, omitNorm, IndexOptions, + * tokenized, etc., are not preserved. + * + * @throws CorruptIndexException + * if the index is corrupt + * @throws IOException + * if there is a low-level IO error */ // TODO: we need a separate StoredField, so that the // Document returned here contains that class not // IndexableField - public final Document document(int docID) throws CorruptIndexException, IOException { + public final StoredDocument document(int docID) throws CorruptIndexException, + IOException { final DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(); document(docID, visitor); return visitor.getDocument(); } - + /** - * Like {@link #document(int)} but only loads the specified - * fields. Note that this is simply sugar for {@link - * DocumentStoredFieldVisitor#DocumentStoredFieldVisitor(Set)}. + * Like {@link #document(int)} but only loads the specified fields. Note that + * this is simply sugar for + * {@link DocumentStoredFieldVisitor#DocumentStoredFieldVisitor(Set)}. 
*/ - public final Document document(int docID, Set fieldsToLoad) throws CorruptIndexException, IOException { - final DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(fieldsToLoad); + public final StoredDocument document(int docID, Set fieldsToLoad) + throws CorruptIndexException, IOException { + final DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor( + fieldsToLoad); document(docID, visitor); return visitor.getDocument(); } - + /** Returns true if any documents have been deleted */ public abstract boolean hasDeletions(); - + /** - * Closes files associated with this index. - * Also saves any new deletions to disk. - * No other methods should be called after this has been called. - * @throws IOException if there is a low-level IO error + * Closes files associated with this index. Also saves any new deletions to + * disk. No other methods should be called after this has been called. + * + * @throws IOException + * if there is a low-level IO error */ public final synchronized void close() throws IOException { if (!closed) { @@ -470,59 +518,65 @@ /** Implements close. */ protected abstract void doClose() throws IOException; - + /** * Expert: Returns a the root {@link IndexReaderContext} for this * {@link IndexReader}'s sub-reader tree. Iff this reader is composed of sub * readers ,ie. this reader being a composite reader, this method returns a - * {@link CompositeReaderContext} holding the reader's direct children as well as a - * view of the reader tree's atomic leaf contexts. All sub- + * {@link CompositeReaderContext} holding the reader's direct children as well + * as a view of the reader tree's atomic leaf contexts. All sub- * {@link IndexReaderContext} instances referenced from this readers top-level * context are private to this reader and are not shared with another context * tree. For example, IndexSearcher uses this API to drive searching by one * atomic leaf reader at a time. 
If this reader is not composed of child * readers, this method returns an {@link AtomicReaderContext}. *

    - * Note: Any of the sub-{@link CompositeReaderContext} instances reference from this - * top-level context holds a null {@link CompositeReaderContext#leaves()} - * reference. Only the top-level context maintains the convenience leaf-view - * for performance reasons. + * Note: Any of the sub-{@link CompositeReaderContext} instances reference + * from this top-level context holds a null + * {@link CompositeReaderContext#leaves()} reference. Only the top-level + * context maintains the convenience leaf-view for performance reasons. * * @lucene.experimental */ public abstract IndexReaderContext getTopReaderContext(); - - /** Expert: Returns a key for this IndexReader, so FieldCache/CachingWrapperFilter can find - * it again. - * This key must not have equals()/hashCode() methods, so "equals" means "identical". */ + + /** + * Expert: Returns a key for this IndexReader, so + * FieldCache/CachingWrapperFilter can find it again. This key must not have + * equals()/hashCode() methods, so "equals" means + * "identical". + */ public Object getCoreCacheKey() { // Don't can ensureOpen since FC calls this (to evict) // on close return this; } - - /** Expert: Returns a key for this IndexReader that also includes deletions, - * so FieldCache/CachingWrapperFilter can find it again. - * This key must not have equals()/hashCode() methods, so "equals" means "identical". */ + + /** + * Expert: Returns a key for this IndexReader that also includes deletions, so + * FieldCache/CachingWrapperFilter can find it again. This key must not have + * equals()/hashCode() methods, so "equals" means + * "identical". + */ public Object getCombinedCoreAndDeletesKey() { // Don't can ensureOpen since FC calls this (to evict) // on close return this; } - /** Returns the number of documents containing the - * term. This method returns 0 if the term or - * field does not exists. This method does not take into - * account deleted documents that have not yet been merged - * away. 
*/ + /** + * Returns the number of documents containing the term. This + * method returns 0 if the term or field does not exists. This method does not + * take into account deleted documents that have not yet been merged away. + */ public final int docFreq(Term term) throws IOException { return docFreq(term.field(), term.bytes()); } - - /** Returns the number of documents containing the - * term. This method returns 0 if the term or - * field does not exists. This method does not take into - * account deleted documents that have not yet been merged - * away. */ + + /** + * Returns the number of documents containing the term. This + * method returns 0 if the term or field does not exists. This method does not + * take into account deleted documents that have not yet been merged away. + */ public abstract int docFreq(String field, BytesRef term) throws IOException; } Index: core/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java =================================================================== --- core/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java (revision 1340366) +++ core/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java (working copy) @@ -26,6 +26,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.StoredDocument; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.Directory; import org.apache.lucene.store.LockObtainFailedException; @@ -68,12 +69,12 @@ int numDocs = r.numDocs(); // index is allowed to have exactly one document or 0. 
if (numDocs == 1) { - Document doc = r.document(r.maxDoc() - 1); + StoredDocument doc = r.document(r.maxDoc() - 1); if (doc.getField(SNAPSHOTS_ID) == null) { throw new IllegalStateException("directory is not a valid snapshots store!"); } doc.removeField(SNAPSHOTS_ID); - for (IndexableField f : doc) { + for (StorableField f : doc) { snapshots.put(f.name(), f.stringValue()); } } else if (numDocs != 0) { Index: core/src/java/org/apache/lucene/index/StorableField.java =================================================================== --- core/src/java/org/apache/lucene/index/StorableField.java (revision 0) +++ core/src/java/org/apache/lucene/index/StorableField.java (working copy) @@ -0,0 +1,43 @@ +package org.apache.lucene.index; + +import java.io.Reader; + +import org.apache.lucene.util.BytesRef; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +public interface StorableField { + + /** Field name */ + public String name(); + + /** Field boost (you must pre-multiply in any doc boost). 
*/ + public float boost(); + + /** Non-null if this field has a binary value */ + public BytesRef binaryValue(); + + /** Non-null if this field has a string value */ + public String stringValue(); + + /** Non-null if this field has a Reader value */ + public Reader readerValue(); + + /** Non-null if this field has a numeric value */ + public Number numericValue(); +} Index: core/src/java/org/apache/lucene/index/StoredFieldsConsumer.java =================================================================== --- core/src/java/org/apache/lucene/index/StoredFieldsConsumer.java (revision 1340366) +++ core/src/java/org/apache/lucene/index/StoredFieldsConsumer.java (working copy) @@ -44,12 +44,12 @@ } private int numStoredFields; - private IndexableField[] storedFields; + private StorableField[] storedFields; private FieldInfo[] fieldInfos; public void reset() { numStoredFields = 0; - storedFields = new IndexableField[1]; + storedFields = new StorableField[1]; fieldInfos = new FieldInfo[1]; } @@ -125,10 +125,10 @@ assert docWriter.writer.testPoint("StoredFieldsWriter.finishDocument end"); } - public void addField(IndexableField field, FieldInfo fieldInfo) throws IOException { + public void addField(StorableField field, FieldInfo fieldInfo) throws IOException { if (numStoredFields == storedFields.length) { int newSize = ArrayUtil.oversize(numStoredFields + 1, RamUsageEstimator.NUM_BYTES_OBJECT_REF); - IndexableField[] newArray = new IndexableField[newSize]; + StorableField[] newArray = new StorableField[newSize]; System.arraycopy(storedFields, 0, newArray, 0, numStoredFields); storedFields = newArray; Index: core/src/java/org/apache/lucene/search/IndexSearcher.java =================================================================== --- core/src/java/org/apache/lucene/search/IndexSearcher.java (revision 1340366) +++ core/src/java/org/apache/lucene/search/IndexSearcher.java (working copy) @@ -31,6 +31,7 @@ import java.util.concurrent.locks.ReentrantLock; import 
org.apache.lucene.document.Document; +import org.apache.lucene.document.StoredDocument; import org.apache.lucene.index.AtomicReaderContext; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.DirectoryReader; // javadocs @@ -180,7 +181,7 @@ } /** Sugar for .getIndexReader().document(docID) */ - public Document doc(int docID) throws CorruptIndexException, IOException { + public StoredDocument doc(int docID) throws CorruptIndexException, IOException { return reader.document(docID); } @@ -190,7 +191,7 @@ } /** Sugar for .getIndexReader().document(docID, fieldsToLoad) */ - public final Document document(int docID, Set fieldsToLoad) throws CorruptIndexException, IOException { + public final StoredDocument document(int docID, Set fieldsToLoad) throws CorruptIndexException, IOException { return reader.document(docID, fieldsToLoad); } Index: core/src/test/org/apache/lucene/codecs/appending/TestAppendingCodec.java =================================================================== --- core/src/test/org/apache/lucene/codecs/appending/TestAppendingCodec.java (revision 1340366) +++ core/src/test/org/apache/lucene/codecs/appending/TestAppendingCodec.java (working copy) @@ -24,6 +24,7 @@ import org.apache.lucene.codecs.appending.AppendingCodec; import org.apache.lucene.document.Document; import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.StoredDocument; import org.apache.lucene.document.TextField; import org.apache.lucene.index.DocsEnum; import org.apache.lucene.index.Fields; @@ -127,7 +128,7 @@ writer.close(); IndexReader reader = IndexReader.open(dir, 1); assertEquals(2, reader.numDocs()); - Document doc2 = reader.document(0); + StoredDocument doc2 = reader.document(0); assertEquals(text, doc2.get("f")); Fields fields = MultiFields.getFields(reader); Terms terms = fields.terms("f"); Index: core/src/test/org/apache/lucene/document/TestBinaryDocument.java 
=================================================================== --- core/src/test/org/apache/lucene/document/TestBinaryDocument.java (revision 1340366) +++ core/src/test/org/apache/lucene/document/TestBinaryDocument.java (working copy) @@ -56,7 +56,7 @@ /** open a reader and fetch the document */ IndexReader reader = writer.getReader(); - Document docFromReader = reader.document(0); + StoredDocument docFromReader = reader.document(0); assertTrue(docFromReader != null); /** fetch the binary stored field and compare it's content with the original one */ @@ -90,7 +90,7 @@ /** open a reader and fetch the document */ IndexReader reader = writer.getReader(); - Document docFromReader = reader.document(0); + StoredDocument docFromReader = reader.document(0); assertTrue(docFromReader != null); /** fetch the binary compressed field and compare it's content with the original one */ Index: core/src/test/org/apache/lucene/document/TestDocument.java =================================================================== --- core/src/test/org/apache/lucene/document/TestDocument.java (revision 1340366) +++ core/src/test/org/apache/lucene/document/TestDocument.java (working copy) @@ -29,6 +29,7 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.index.StorableField; import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; @@ -219,11 +220,11 @@ return doc; } - private void doAssert(Document doc, boolean fromIndex) { - IndexableField[] keywordFieldValues = doc.getFields("keyword"); - IndexableField[] textFieldValues = doc.getFields("text"); - IndexableField[] unindexedFieldValues = doc.getFields("unindexed"); - IndexableField[] unstoredFieldValues = doc.getFields("unstored"); + private void doAssert(StoredDocument storedDocument, boolean fromIndex) { + StorableField[] keywordFieldValues = 
storedDocument.getFields("keyword"); + StorableField[] textFieldValues = storedDocument.getFields("text"); + StorableField[] unindexedFieldValues = storedDocument.getFields("unindexed"); + StorableField[] unstoredFieldValues = storedDocument.getFields("unstored"); assertTrue(keywordFieldValues.length == 2); assertTrue(textFieldValues.length == 2); @@ -248,6 +249,35 @@ } } + private void doAssert(Document storedDocument, boolean fromIndex) { + StorableField[] keywordFieldValues = storedDocument.getFields("keyword"); + StorableField[] textFieldValues = storedDocument.getFields("text"); + StorableField[] unindexedFieldValues = storedDocument.getFields("unindexed"); + StorableField[] unstoredFieldValues = storedDocument.getFields("unstored"); + + assertTrue(keywordFieldValues.length == 2); + assertTrue(textFieldValues.length == 2); + assertTrue(unindexedFieldValues.length == 2); + // this test cannot work for documents retrieved from the index + // since unstored fields will obviously not be returned + if (!fromIndex) { + assertTrue(unstoredFieldValues.length == 2); + } + + assertTrue(keywordFieldValues[0].stringValue().equals("test1")); + assertTrue(keywordFieldValues[1].stringValue().equals("test2")); + assertTrue(textFieldValues[0].stringValue().equals("test1")); + assertTrue(textFieldValues[1].stringValue().equals("test2")); + assertTrue(unindexedFieldValues[0].stringValue().equals("test1")); + assertTrue(unindexedFieldValues[1].stringValue().equals("test2")); + // this test cannot work for documents retrieved from the index + // since unstored fields will obviously not be returned + if (!fromIndex) { + assertTrue(unstoredFieldValues[0].stringValue().equals("test1")); + assertTrue(unstoredFieldValues[1].stringValue().equals("test2")); + } + } + public void testFieldSetValue() throws Exception { Field field = new Field("id", "id1", StringField.TYPE_STORED); @@ -273,7 +303,7 @@ assertEquals(3, hits.length); int result = 0; for (int i = 0; i < 3; i++) { - Document 
doc2 = searcher.doc(hits[i].doc); + StoredDocument doc2 = searcher.doc(hits[i].doc); Field f = (Field) doc2.getField("id"); if (f.stringValue().equals("id1")) result |= 1; else if (f.stringValue().equals("id2")) result |= 2; @@ -318,13 +348,13 @@ IndexReader r = w.getReader(); w.close(); - doc = r.document(0); + StoredDocument sdoc = r.document(0); // 4 stored fields - assertEquals(4, doc.getFields().size()); - assertEquals("abc", doc.get("stored")); - assertEquals("abc xyz", doc.get("stored_indexed")); - assertEquals("abc xyz", doc.get("stored_tokenized")); - final BytesRef br = doc.getBinaryValue("binary"); + assertEquals(4, sdoc.getFields().size()); + assertEquals("abc", sdoc.get("stored")); + assertEquals("abc xyz", sdoc.get("stored_indexed")); + assertEquals("abc xyz", sdoc.get("stored_tokenized")); + final BytesRef br = sdoc.getBinaryValue("binary"); assertNotNull(br); assertEquals(10, br.length); Index: core/src/test/org/apache/lucene/index/TestAddIndexes.java =================================================================== --- core/src/test/org/apache/lucene/index/TestAddIndexes.java (revision 1340366) +++ core/src/test/org/apache/lucene/index/TestAddIndexes.java (working copy) @@ -46,6 +46,7 @@ import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.document.PackedLongDocValuesField; +import org.apache.lucene.document.StoredDocument; import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; @@ -1246,7 +1247,7 @@ w.close(); assertEquals(2, r3.numDocs()); for(int docID=0;docID<2;docID++) { - Document d = r3.document(docID); + StoredDocument d = r3.document(docID); if (d.get("id").equals("1")) { assertEquals("doc1 field1", d.get("f1")); } else { Index: core/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java =================================================================== --- 
core/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (revision 1340366) +++ core/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (working copy) @@ -34,6 +34,7 @@ import org.apache.lucene.document.FieldType; import org.apache.lucene.document.IntField; import org.apache.lucene.document.LongField; +import org.apache.lucene.document.StoredDocument; import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; import org.apache.lucene.index.FieldInfo.IndexOptions; @@ -302,12 +303,12 @@ for(int i=0;i<35;i++) { if (liveDocs.get(i)) { - Document d = reader.document(i); - List fields = d.getFields(); + StoredDocument d = reader.document(i); + List fields = d.getFields(); if (d.getField("content3") == null) { final int numFields = 5; assertEquals(numFields, fields.size()); - IndexableField f = d.getField("id"); + StorableField f = d.getField("id"); assertEquals(""+i, f.stringValue()); f = d.getField("utf8"); @@ -334,7 +335,7 @@ // First document should be #21 since it's norm was // increased: - Document d = searcher.getIndexReader().document(hits[0].doc); + StoredDocument d = searcher.getIndexReader().document(hits[0].doc); assertEquals("didn't get the right document first", "21", d.get("id")); doTestHits(hits, 34, searcher.getIndexReader()); @@ -377,7 +378,7 @@ IndexReader reader = IndexReader.open(dir); IndexSearcher searcher = new IndexSearcher(reader); ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs; - Document d = searcher.getIndexReader().document(hits[0].doc); + StoredDocument d = searcher.getIndexReader().document(hits[0].doc); assertEquals("wrong first document", "21", d.get("id")); doTestHits(hits, 44, searcher.getIndexReader()); reader.close(); @@ -403,7 +404,7 @@ IndexSearcher searcher = new IndexSearcher(reader); ScoreDoc[] hits = searcher.search(new TermQuery(new Term("content", "aaa")), null, 1000).scoreDocs; assertEquals("wrong number of hits", 
34, hits.length); - Document d = searcher.doc(hits[0].doc); + StoredDocument d = searcher.doc(hits[0].doc); assertEquals("wrong first document", "21", d.get("id")); reader.close(); @@ -634,7 +635,7 @@ for (int id=10; id<15; id++) { ScoreDoc[] hits = searcher.search(NumericRangeQuery.newIntRange("trieInt", 4, Integer.valueOf(id), Integer.valueOf(id), true, true), 100).scoreDocs; assertEquals("wrong number of hits", 1, hits.length); - Document d = searcher.doc(hits[0].doc); + StoredDocument d = searcher.doc(hits[0].doc); assertEquals(String.valueOf(id), d.get("id")); hits = searcher.search(NumericRangeQuery.newLongRange("trieLong", 4, Long.valueOf(id), Long.valueOf(id), true, true), 100).scoreDocs; Index: core/src/test/org/apache/lucene/index/TestCustomNorms.java =================================================================== --- core/src/test/org/apache/lucene/index/TestCustomNorms.java (revision 1340366) +++ core/src/test/org/apache/lucene/index/TestCustomNorms.java (working copy) @@ -22,6 +22,7 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; +import org.apache.lucene.document.StoredDocument; import org.apache.lucene.document.TextField; import org.apache.lucene.index.DocValues.Source; import org.apache.lucene.index.DocValues.Type; @@ -78,7 +79,7 @@ assertEquals(Type.FLOAT_32, normValues.getType()); float[] norms = (float[]) source.getArray(); for (int i = 0; i < open.maxDoc(); i++) { - Document document = open.document(i); + StoredDocument document = open.document(i); float expected = Float.parseFloat(document.get(floatTestField)); assertEquals(expected, norms[i], 0.0f); } Index: core/src/test/org/apache/lucene/index/TestDirectoryReader.java =================================================================== --- core/src/test/org/apache/lucene/index/TestDirectoryReader.java (revision 1340366) +++ core/src/test/org/apache/lucene/index/TestDirectoryReader.java (working copy) 
@@ -32,6 +32,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.StoredDocument; import org.apache.lucene.document.StoredField; import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; @@ -62,10 +63,10 @@ assertTrue(reader != null); assertTrue(reader instanceof StandardDirectoryReader); - Document newDoc1 = reader.document(0); + StoredDocument newDoc1 = reader.document(0); assertTrue(newDoc1 != null); assertTrue(DocHelper.numFields(newDoc1) == DocHelper.numFields(doc1) - DocHelper.unstored.size()); - Document newDoc2 = reader.document(1); + StoredDocument newDoc2 = reader.document(1); assertTrue(newDoc2 != null); assertTrue(DocHelper.numFields(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size()); Terms vector = reader.getTermVectors(0).terms(DocHelper.TEXT_FIELD_2_KEY); @@ -387,11 +388,11 @@ writer.addDocument(doc); writer.close(); DirectoryReader reader = DirectoryReader.open(dir); - Document doc2 = reader.document(reader.maxDoc() - 1); - IndexableField[] fields = doc2.getFields("bin1"); + StoredDocument doc2 = reader.document(reader.maxDoc() - 1); + StorableField[] fields = doc2.getFields("bin1"); assertNotNull(fields); assertEquals(1, fields.length); - IndexableField b1 = fields[0]; + StorableField b1 = fields[0]; assertTrue(b1.binaryValue() != null); BytesRef bytesRef = b1.binaryValue(); assertEquals(bin.length, bytesRef.length); @@ -596,13 +597,13 @@ // check stored fields for (int i = 0; i < index1.maxDoc(); i++) { if (liveDocs1 == null || liveDocs1.get(i)) { - Document doc1 = index1.document(i); - Document doc2 = index2.document(i); - List field1 = doc1.getFields(); - List field2 = doc2.getFields(); + StoredDocument doc1 = index1.document(i); + StoredDocument doc2 = index2.document(i); + List field1 = doc1.getFields(); + List field2 = doc2.getFields(); assertEquals("Different numbers of fields 
for doc " + i + ".", field1.size(), field2.size()); - Iterator itField1 = field1.iterator(); - Iterator itField2 = field2.iterator(); + Iterator itField1 = field1.iterator(); + Iterator itField2 = field2.iterator(); while (itField1.hasNext()) { Field curField1 = (Field) itField1.next(); Field curField2 = (Field) itField2.next(); @@ -1081,7 +1082,7 @@ Set fieldsToLoad = new HashSet(); assertEquals(0, r.document(0, fieldsToLoad).getFields().size()); fieldsToLoad.add("field1"); - Document doc2 = r.document(0, fieldsToLoad); + StoredDocument doc2 = r.document(0, fieldsToLoad); assertEquals(1, doc2.getFields().size()); assertEquals("foobar", doc2.get("field1")); r.close(); Index: core/src/test/org/apache/lucene/index/TestDirectoryReaderReopen.java =================================================================== --- core/src/test/org/apache/lucene/index/TestDirectoryReaderReopen.java (revision 1340366) +++ core/src/test/org/apache/lucene/index/TestDirectoryReaderReopen.java (working copy) @@ -32,6 +32,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.StoredDocument; import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; @@ -123,7 +124,7 @@ if (i>0) { int k = i-1; int n = j + k*M; - Document prevItereationDoc = reader.document(n); + StoredDocument prevItereationDoc = reader.document(n); assertNotNull(prevItereationDoc); String id = prevItereationDoc.get("id"); assertEquals(k+"_"+j, id); Index: core/src/test/org/apache/lucene/index/TestDocumentWriter.java =================================================================== --- core/src/test/org/apache/lucene/index/TestDocumentWriter.java (revision 1340366) +++ core/src/test/org/apache/lucene/index/TestDocumentWriter.java (working copy) @@ -27,6 +27,7 @@ import org.apache.lucene.document.Document; import 
org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.StoredDocument; import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; import org.apache.lucene.index.FieldInfo.IndexOptions; @@ -67,19 +68,19 @@ //After adding the document, we should be able to read it back in SegmentReader reader = new SegmentReader(info, DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random())); assertTrue(reader != null); - Document doc = reader.document(0); + StoredDocument doc = reader.document(0); assertTrue(doc != null); //System.out.println("Document: " + doc); - IndexableField [] fields = doc.getFields("textField2"); + StorableField [] fields = doc.getFields("textField2"); assertTrue(fields != null && fields.length == 1); assertTrue(fields[0].stringValue().equals(DocHelper.FIELD_2_TEXT)); - assertTrue(fields[0].fieldType().storeTermVectors()); + //assertTrue(fields[0].fieldType().storeTermVectors()); fields = doc.getFields("textField1"); assertTrue(fields != null && fields.length == 1); assertTrue(fields[0].stringValue().equals(DocHelper.FIELD_1_TEXT)); - assertFalse(fields[0].fieldType().storeTermVectors()); + //assertFalse(fields[0].fieldType().storeTermVectors()); fields = doc.getFields("keyField"); assertTrue(fields != null && fields.length == 1); Index: core/src/test/org/apache/lucene/index/TestDuelingCodecs.java =================================================================== --- core/src/test/org/apache/lucene/index/TestDuelingCodecs.java (revision 1340366) +++ core/src/test/org/apache/lucene/index/TestDuelingCodecs.java (working copy) @@ -31,6 +31,7 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.codecs.Codec; import org.apache.lucene.document.Document; +import org.apache.lucene.document.StoredDocument; import org.apache.lucene.index.TermsEnum.SeekStatus; import org.apache.lucene.search.DocIdSetIterator; import 
org.apache.lucene.store.Directory; @@ -548,25 +549,25 @@ public void assertStoredFields(IndexReader leftReader, IndexReader rightReader) throws Exception { assert leftReader.maxDoc() == rightReader.maxDoc(); for (int i = 0; i < leftReader.maxDoc(); i++) { - Document leftDoc = leftReader.document(i); - Document rightDoc = rightReader.document(i); + StoredDocument leftDoc = leftReader.document(i); + StoredDocument rightDoc = rightReader.document(i); // TODO: I think this is bogus because we don't document what the order should be // from these iterators, etc. I think the codec/IndexReader should be free to order this stuff // in whatever way it wants (e.g. maybe it packs related fields together or something) // To fix this, we sort the fields in both documents by name, but // we still assume that all instances with same name are in order: - Comparator comp = new Comparator() { + Comparator comp = new Comparator() { @Override - public int compare(IndexableField arg0, IndexableField arg1) { + public int compare(StorableField arg0, StorableField arg1) { return arg0.name().compareTo(arg1.name()); } }; Collections.sort(leftDoc.getFields(), comp); Collections.sort(rightDoc.getFields(), comp); - Iterator leftIterator = leftDoc.iterator(); - Iterator rightIterator = rightDoc.iterator(); + Iterator leftIterator = leftDoc.iterator(); + Iterator rightIterator = rightDoc.iterator(); while (leftIterator.hasNext()) { assertTrue(info, rightIterator.hasNext()); assertStoredField(leftIterator.next(), rightIterator.next()); @@ -578,7 +579,7 @@ /** * checks that two stored fields are equivalent */ - public void assertStoredField(IndexableField leftField, IndexableField rightField) { + public void assertStoredField(StorableField leftField, StorableField rightField) { assertEquals(info, leftField.name(), rightField.name()); assertEquals(info, leftField.binaryValue(), rightField.binaryValue()); assertEquals(info, leftField.stringValue(), rightField.stringValue()); Index: 
core/src/test/org/apache/lucene/index/TestFieldsReader.java =================================================================== --- core/src/test/org/apache/lucene/index/TestFieldsReader.java (revision 1340366) +++ core/src/test/org/apache/lucene/index/TestFieldsReader.java (working copy) @@ -31,6 +31,7 @@ import org.apache.lucene.document.FloatField; import org.apache.lucene.document.IntField; import org.apache.lucene.document.LongField; +import org.apache.lucene.document.StoredDocument; import org.apache.lucene.document.StoredField; import org.apache.lucene.document.StringField; import org.apache.lucene.index.FieldInfo.IndexOptions; @@ -77,7 +78,7 @@ assertTrue(dir != null); assertTrue(fieldInfos != null); IndexReader reader = IndexReader.open(dir); - Document doc = reader.document(0); + StoredDocument doc = reader.document(0); assertTrue(doc != null); assertTrue(doc.getField(DocHelper.TEXT_FIELD_1_KEY) != null); @@ -102,7 +103,7 @@ DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(DocHelper.TEXT_FIELD_3_KEY); reader.document(0, visitor); - final List fields = visitor.getDocument().getFields(); + final List fields = visitor.getDocument().getFields(); assertEquals(1, fields.size()); assertEquals(DocHelper.TEXT_FIELD_3_KEY, fields.get(0).name()); reader.close(); @@ -290,7 +291,7 @@ for(IndexReader sub : r.getSequentialSubReaders()) { final int[] ids = FieldCache.DEFAULT.getInts((AtomicReader) sub, "id", false); for(int docID=0;docID it = doc2.getFields().iterator(); + StoredDocument doc2 = r.document(0); + Iterator it = doc2.getFields().iterator(); assertTrue(it.hasNext()); Field f = (Field) it.next(); assertEquals(f.name(), "zzz"); Index: core/src/test/org/apache/lucene/index/TestIndexWriterMerging.java =================================================================== --- core/src/test/org/apache/lucene/index/TestIndexWriterMerging.java (revision 1340366) +++ core/src/test/org/apache/lucene/index/TestIndexWriterMerging.java (working copy) @@ 
-21,6 +21,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.StoredDocument; import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexWriterConfig.OpenMode; @@ -85,7 +86,7 @@ int max = reader.maxDoc(); for (int i = 0; i < max; i++) { - Document temp = reader.document(i); + StoredDocument temp = reader.document(i); //System.out.println("doc "+i+"="+temp.getField("count").stringValue()); //compare the index doc number to the value that it should be if (!temp.getField("count").stringValue().equals((i + startAt) + "")) Index: core/src/test/org/apache/lucene/index/TestIndexWriterReader.java =================================================================== --- core/src/test/org/apache/lucene/index/TestIndexWriterReader.java (revision 1340366) +++ core/src/test/org/apache/lucene/index/TestIndexWriterReader.java (working copy) @@ -29,6 +29,7 @@ import org.apache.lucene.codecs.Codec; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; +import org.apache.lucene.document.StoredDocument; import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; import org.apache.lucene.search.DocIdSetIterator; @@ -143,7 +144,7 @@ String id10 = r1.document(10).getField("id").stringValue(); - Document newDoc = r1.document(10); + Document newDoc = r1.document(10).asIndexable(); newDoc.removeField("id"); newDoc.add(newField("id", Integer.toString(8000), StringField.TYPE_STORED)); writer.updateDocument(new Term("id", id10), newDoc); @@ -273,9 +274,9 @@ assertEquals(100, index2df); // verify the docs are from different indexes - Document doc5 = r1.document(5); + StoredDocument doc5 = r1.document(5); assertEquals("index1", doc5.get("indexname")); - Document doc150 = r1.document(150); + StoredDocument doc150 = r1.document(150); 
assertEquals("index2", doc150.get("indexname")); r1.close(); writer.close(); Index: core/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java =================================================================== --- core/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java (revision 1340366) +++ core/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java (working copy) @@ -26,6 +26,7 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; +import org.apache.lucene.document.StoredDocument; import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; import org.apache.lucene.store.Directory; @@ -262,7 +263,7 @@ w.close(); IndexReader ir = IndexReader.open(dir); - Document doc2 = ir.document(0); + StoredDocument doc2 = ir.document(0); for(int i=0;i= 1); - Document result = reader.document(0); + StoredDocument result = reader.document(0); assertTrue(result != null); //There are 2 unstored fields on the document that are not preserved across writing assertTrue(DocHelper.numFields(result) == DocHelper.numFields(testDoc) - DocHelper.unstored.size()); - List fields = result.getFields(); - for (final IndexableField field : fields ) { + List fields = result.getFields(); + for (final StorableField field : fields ) { assertTrue(field != null); assertTrue(DocHelper.nameValues.containsKey(field.name())); } Index: core/src/test/org/apache/lucene/index/TestStressIndexing2.java =================================================================== --- core/src/test/org/apache/lucene/index/TestStressIndexing2.java (revision 1340366) +++ core/src/test/org/apache/lucene/index/TestStressIndexing2.java (working copy) @@ -32,6 +32,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.StoredDocument; import org.apache.lucene.document.TextField; 
import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.search.DocIdSetIterator; @@ -133,8 +134,8 @@ static Term idTerm = new Term("id",""); IndexingThread[] threads; - static Comparator fieldNameComparator = new Comparator() { - public int compare(IndexableField o1, IndexableField o2) { + static Comparator fieldNameComparator = new Comparator() { + public int compare(StorableField o1, StorableField o2) { return o1.name().compareTo(o2.name()); } }; @@ -287,7 +288,7 @@ Bits liveDocs = ((AtomicReader)sub).getLiveDocs(); System.out.println(" " + ((SegmentReader) sub).getSegmentInfo()); for(int docID=0;docID ff1 = d1.getFields(); - List ff2 = d2.getFields(); + public static void verifyEquals(StoredDocument d1, StoredDocument d2) { + List ff1 = d1.getFields(); + List ff2 = d2.getFields(); Collections.sort(ff1, fieldNameComparator); Collections.sort(ff2, fieldNameComparator); @@ -586,8 +587,8 @@ assertEquals(ff1 + " : " + ff2, ff1.size(), ff2.size()); for (int i=0; i 94 && i < 105) ) { - Document d = searcher.doc(hits[i].doc); + StoredDocument d = searcher.doc(hits[i].doc); out.println(i + " " + d.get(ID_FIELD)); } } @@ -137,7 +137,7 @@ assertEquals("total results", expectedCount, hits.length); for (int i = 0 ; i < hits.length; i++) { if (i < 10 || (i > 94 && i < 105) ) { - Document d = searcher.doc(hits[i].doc); + StoredDocument d = searcher.doc(hits[i].doc); assertEquals("check " + i, String.valueOf(i), d.get(ID_FIELD)); } } Index: test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java =================================================================== --- test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java (revision 1340366) +++ test-framework/src/java/org/apache/lucene/analysis/CollationTestBase.java (working copy) @@ -27,12 +27,14 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; +import 
org.apache.lucene.document.StoredDocument; import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.StorableField; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; @@ -256,8 +258,8 @@ StringBuilder buff = new StringBuilder(10); int n = result.length; for (int i = 0 ; i < n ; ++i) { - Document doc = searcher.doc(result[i].doc); - IndexableField[] v = doc.getFields("tracer"); + StoredDocument doc = searcher.doc(result[i].doc); + StorableField[] v = doc.getFields("tracer"); for (int j = 0 ; j < v.length ; ++j) { buff.append(v[j].stringValue()); } Index: test-framework/src/java/org/apache/lucene/codecs/lucene3x/PreFlexRWNormsConsumer.java =================================================================== --- test-framework/src/java/org/apache/lucene/codecs/lucene3x/PreFlexRWNormsConsumer.java (revision 1340366) +++ test-framework/src/java/org/apache/lucene/codecs/lucene3x/PreFlexRWNormsConsumer.java (working copy) @@ -29,6 +29,7 @@ import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.MergeState; +import org.apache.lucene.index.StorableField; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexOutput; @@ -145,7 +146,7 @@ } @Override - public void add(int docID, IndexableField docValue) throws IOException { + public void add(int docID, StorableField docValue) throws IOException { add(docID, docValue.numericValue().longValue()); } Index: test-framework/src/java/org/apache/lucene/codecs/lucene3x/PreFlexRWStoredFieldsWriter.java =================================================================== --- 
test-framework/src/java/org/apache/lucene/codecs/lucene3x/PreFlexRWStoredFieldsWriter.java (revision 1340366) +++ test-framework/src/java/org/apache/lucene/codecs/lucene3x/PreFlexRWStoredFieldsWriter.java (working copy) @@ -22,6 +22,7 @@ import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.StorableField; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexOutput; @@ -82,7 +83,7 @@ IndexFileNames.segmentFileName(segment, "", Lucene3xStoredFieldsReader.FIELDS_INDEX_EXTENSION)); } - public void writeField(FieldInfo info, IndexableField field) throws IOException { + public void writeField(FieldInfo info, StorableField field) throws IOException { fieldsStream.writeVInt(info.number); int bits = 0; final BytesRef bytes; Index: test-framework/src/java/org/apache/lucene/index/DocHelper.java =================================================================== --- test-framework/src/java/org/apache/lucene/index/DocHelper.java (revision 1340366) +++ test-framework/src/java/org/apache/lucene/index/DocHelper.java (working copy) @@ -29,6 +29,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.StoredDocument; import org.apache.lucene.document.StoredField; import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; @@ -291,6 +292,10 @@ return doc.getFields().size(); } + public static int numFields(StoredDocument doc) { + return doc.getFields().size(); + } + public static Document createDocument(int n, String indexName, int numFields) { StringBuilder sb = new StringBuilder(); FieldType customType = new FieldType(TextField.TYPE_STORED); Index: test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java 
=================================================================== --- test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java (revision 1340366) +++ test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java (working copy) @@ -29,6 +29,7 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; +import org.apache.lucene.document.StoredDocument; import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; import org.apache.lucene.search.IndexSearcher; @@ -448,7 +449,7 @@ final int inc = Math.max(1, maxDoc/50); for(int docID=0;docID