Index: lucene/core/src/java/org/apache/lucene/codecs/DocValuesConsumer.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/DocValuesConsumer.java (revision 1340366) +++ lucene/core/src/java/org/apache/lucene/codecs/DocValuesConsumer.java (working copy) @@ -32,16 +32,16 @@ import org.apache.lucene.index.DocValues.Source; import org.apache.lucene.index.DocValues.Type; import org.apache.lucene.index.DocValues; -import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.StorableField; import org.apache.lucene.index.MergeState; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; /** - * Abstract API that consumes {@link IndexableField}s. + * Abstract API that consumes {@link StorableField}s. * {@link DocValuesConsumer} are always associated with a specific field and * segments. Concrete implementations of this API write the given - * {@link IndexableField} into a implementation specific format depending on + * {@link StorableField} into a implementation specific format depending on * the fields meta-data. * * @lucene.experimental @@ -52,7 +52,7 @@ protected abstract Type getType(); /** - * Adds the given {@link IndexableField} instance to this + * Adds the given {@link StorableField} instance to this * {@link DocValuesConsumer} * * @param docID @@ -63,7 +63,7 @@ * @throws IOException * if an {@link IOException} occurs */ - public abstract void add(int docID, IndexableField value) + public abstract void add(int docID, StorableField value) throws IOException; /** @@ -72,7 +72,7 @@ * @param docCount * the total number of documents in this {@link DocValuesConsumer}. * Must be greater than or equal the last given docID to - * {@link #add(int, IndexableField)}. + * {@link #add(int, StorableField)}. 
* @throws IOException */ public abstract void finish(int docCount) throws IOException; Index: lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40StoredFieldsWriter.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40StoredFieldsWriter.java (revision 1340366) +++ lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40StoredFieldsWriter.java (working copy) @@ -21,6 +21,7 @@ import org.apache.lucene.codecs.StoredFieldsReader; import org.apache.lucene.codecs.StoredFieldsWriter; import org.apache.lucene.document.Document; +import org.apache.lucene.document.StoredDocument; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.IndexFileNames; @@ -28,6 +29,7 @@ import org.apache.lucene.index.MergePolicy.MergeAbortedException; import org.apache.lucene.index.MergeState; import org.apache.lucene.index.SegmentReader; +import org.apache.lucene.index.StorableField; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; @@ -131,7 +133,7 @@ IndexFileNames.segmentFileName(segment, "", FIELDS_INDEX_EXTENSION)); } - public void writeField(FieldInfo info, IndexableField field) throws IOException { + public void writeField(FieldInfo info, StorableField field) throws IOException { fieldsStream.writeVInt(info.number); int bits = 0; final BytesRef bytes; @@ -297,7 +299,7 @@ // on the fly? 
// NOTE: it's very important to first assign to doc then pass it to // fieldsWriter.addDocument; see LUCENE-1282 - Document doc = reader.reader.document(j); + StoredDocument doc = reader.reader.document(j); addDocument(doc, mergeState.fieldInfos); docCount++; mergeState.checkAbort.work(300); @@ -324,7 +326,7 @@ for (; docCount < maxDoc; docCount++) { // NOTE: it's very important to first assign to doc then pass it to // fieldsWriter.addDocument; see LUCENE-1282 - Document doc = reader.reader.document(docCount); + StoredDocument doc = reader.reader.document(docCount); addDocument(doc, mergeState.fieldInfos); mergeState.checkAbort.work(300); } Index: lucene/core/src/java/org/apache/lucene/codecs/lucene40/values/Bytes.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/lucene40/values/Bytes.java (revision 1340366) +++ lucene/core/src/java/org/apache/lucene/codecs/lucene40/values/Bytes.java (working copy) @@ -29,6 +29,7 @@ import org.apache.lucene.index.DocValues; import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.StorableField; import org.apache.lucene.store.DataOutput; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; @@ -415,7 +416,7 @@ } @Override - public void add(int docID, IndexableField value) throws IOException { + public void add(int docID, StorableField value) throws IOException { BytesRef bytes = value.binaryValue(); assert bytes != null; if (bytes.length == 0) { // default value - skip it Index: lucene/core/src/java/org/apache/lucene/codecs/lucene40/values/FixedStraightBytesImpl.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/lucene40/values/FixedStraightBytesImpl.java (revision 1340366) +++ lucene/core/src/java/org/apache/lucene/codecs/lucene40/values/FixedStraightBytesImpl.java (working copy) @@ -28,6 
+28,7 @@ import org.apache.lucene.index.DocValues.Type; import org.apache.lucene.index.DocValues; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.StorableField; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; @@ -74,7 +75,7 @@ } @Override - public void add(int docID, IndexableField value) throws IOException { + public void add(int docID, StorableField value) throws IOException { final BytesRef bytes = value.binaryValue(); assert bytes != null; assert lastDocID < docID; Index: lucene/core/src/java/org/apache/lucene/codecs/lucene40/values/Floats.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/lucene40/values/Floats.java (revision 1340366) +++ lucene/core/src/java/org/apache/lucene/codecs/lucene40/values/Floats.java (working copy) @@ -24,6 +24,7 @@ import org.apache.lucene.index.DocValues.Type; import org.apache.lucene.index.DocValues; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.StorableField; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; @@ -88,7 +89,7 @@ } @Override - public void add(int docID, IndexableField value) throws IOException { + public void add(int docID, StorableField value) throws IOException { template.toBytes(value.numericValue().doubleValue(), bytesRef); bytesSpareField.setBytesValue(bytesRef); super.add(docID, bytesSpareField); Index: lucene/core/src/java/org/apache/lucene/codecs/lucene40/values/Ints.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/lucene40/values/Ints.java (revision 1340366) +++ lucene/core/src/java/org/apache/lucene/codecs/lucene40/values/Ints.java (working copy) @@ -25,6 +25,7 @@ import org.apache.lucene.index.DocValues.Type; import 
org.apache.lucene.index.DocValues; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.StorableField; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; @@ -112,7 +113,7 @@ } @Override - public void add(int docID, IndexableField value) throws IOException { + public void add(int docID, StorableField value) throws IOException { template.toBytes(value.numericValue().longValue(), bytesRef); bytesSpareField.setBytesValue(bytesRef); super.add(docID, bytesSpareField); Index: lucene/core/src/java/org/apache/lucene/codecs/lucene40/values/PackedIntValues.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/lucene40/values/PackedIntValues.java (revision 1340366) +++ lucene/core/src/java/org/apache/lucene/codecs/lucene40/values/PackedIntValues.java (working copy) @@ -25,6 +25,7 @@ import org.apache.lucene.index.DocValues; import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.StorableField; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; @@ -126,7 +127,7 @@ } @Override - public void add(int docID, IndexableField docValue) throws IOException { + public void add(int docID, StorableField docValue) throws IOException { final long v = docValue.numericValue().longValue(); assert lastDocId < docID; if (!started) { Index: lucene/core/src/java/org/apache/lucene/codecs/lucene40/values/VarStraightBytesImpl.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/lucene40/values/VarStraightBytesImpl.java (revision 1340366) +++ lucene/core/src/java/org/apache/lucene/codecs/lucene40/values/VarStraightBytesImpl.java (working copy) @@ -27,6 +27,7 @@ import org.apache.lucene.index.DocValues.Type; import 
org.apache.lucene.index.DocValues; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.StorableField; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; @@ -85,7 +86,7 @@ } @Override - public void add(int docID, IndexableField value) throws IOException { + public void add(int docID, StorableField value) throws IOException { final BytesRef bytes = value.binaryValue(); assert bytes != null; assert !merge; Index: lucene/core/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDocValuesConsumer.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDocValuesConsumer.java (revision 1340366) +++ lucene/core/src/java/org/apache/lucene/codecs/simpletext/SimpleTextDocValuesConsumer.java (working copy) @@ -21,7 +21,7 @@ import org.apache.lucene.codecs.DocValuesConsumer; import org.apache.lucene.index.DocValues.Type; import org.apache.lucene.index.IndexFileNames; -import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.StorableField; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexOutput; @@ -72,7 +72,7 @@ } @Override - public void add(int docID, IndexableField value) throws IOException { + public void add(int docID, StorableField value) throws IOException { assert docID >= 0; final int ord, vSize; switch (type) { Index: lucene/core/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsWriter.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsWriter.java (revision 1340366) +++ lucene/core/src/java/org/apache/lucene/codecs/simpletext/SimpleTextStoredFieldsWriter.java (working copy) @@ -22,7 +22,7 @@ import org.apache.lucene.codecs.StoredFieldsWriter; import 
org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.IndexFileNames; -import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.StorableField; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexOutput; @@ -88,7 +88,7 @@ } @Override - public void writeField(FieldInfo info, IndexableField field) throws IOException { + public void writeField(FieldInfo info, StorableField field) throws IOException { write(FIELD); write(Integer.toString(info.number)); newLine(); Index: lucene/core/src/java/org/apache/lucene/codecs/StoredFieldsWriter.java =================================================================== --- lucene/core/src/java/org/apache/lucene/codecs/StoredFieldsWriter.java (revision 1340366) +++ lucene/core/src/java/org/apache/lucene/codecs/StoredFieldsWriter.java (working copy) @@ -4,10 +4,12 @@ import java.io.IOException; import org.apache.lucene.document.Document; +import org.apache.lucene.document.StoredDocument; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.FieldInfos; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.MergeState; +import org.apache.lucene.index.StorableField; import org.apache.lucene.util.Bits; /** @@ -32,7 +34,7 @@ *
    *
  1. For every document, {@link #startDocument(int)} is called, * informing the Codec how many fields will be written. - *
  2. {@link #writeField(FieldInfo, IndexableField)} is called for + *
  3. {@link #writeField(FieldInfo, StorableField)} is called for * each field in the document. *
  4. After all documents have been written, {@link #finish(int)} * is called for verification/sanity-checks. @@ -44,14 +46,14 @@ public abstract class StoredFieldsWriter implements Closeable { /** Called before writing the stored fields of the document. - * {@link #writeField(FieldInfo, IndexableField)} will be called + * {@link #writeField(FieldInfo, StorableField)} will be called * numStoredFields times. Note that this is * called even if the document has no stored fields, in * this case numStoredFields will be zero. */ public abstract void startDocument(int numStoredFields) throws IOException; /** Writes a single stored field. */ - public abstract void writeField(FieldInfo info, IndexableField field) throws IOException; + public abstract void writeField(FieldInfo info, StorableField field) throws IOException; /** Aborts writing entirely, implementation should remove * any partially-written files, etc. */ @@ -68,7 +70,7 @@ /** Merges in the stored fields from the readers in * mergeState. The default implementation skips * over deleted documents, and uses {@link #startDocument(int)}, - * {@link #writeField(FieldInfo, IndexableField)}, and {@link #finish(int)}, + * {@link #writeField(FieldInfo, StorableField)}, and {@link #finish(int)}, * returning the number of documents that were written. * Implementations can override this method for more sophisticated * merging (bulk-byte copying, etc). */ @@ -88,7 +90,7 @@ // on the fly? 
// NOTE: it's very important to first assign to doc then pass it to // fieldsWriter.addDocument; see LUCENE-1282 - Document doc = reader.reader.document(i); + StoredDocument doc = reader.reader.document(i); addDocument(doc, mergeState.fieldInfos); docCount++; mergeState.checkAbort.work(300); @@ -99,20 +101,16 @@ } /** sugar method for startDocument() + writeField() for every stored field in the document */ - protected final void addDocument(Iterable doc, FieldInfos fieldInfos) throws IOException { + protected final void addDocument(Iterable doc, FieldInfos fieldInfos) throws IOException { int storedCount = 0; - for (IndexableField field : doc) { - if (field.fieldType().stored()) { - storedCount++; - } + for (StorableField field : doc) { + storedCount++; } startDocument(storedCount); - for (IndexableField field : doc) { - if (field.fieldType().stored()) { + for (StorableField field : doc) { writeField(fieldInfos.fieldInfo(field.name()), field); - } } } } Index: lucene/core/src/java/org/apache/lucene/document/Document.java =================================================================== --- lucene/core/src/java/org/apache/lucene/document/Document.java (revision 1340366) +++ lucene/core/src/java/org/apache/lucene/document/Document.java (working copy) @@ -105,49 +105,7 @@ } - /** - * Returns an array of byte arrays for of the fields that have the name specified - * as the method parameter. This method returns an empty - * array when there are no matching fields. It never - * returns null. 
- * - * @param name the name of the field - * @return a byte[][] of binary field values - */ - public final BytesRef[] getBinaryValues(String name) { - final List result = new ArrayList(); - for (IndexableField field : fields) { - if (field.name().equals(name)) { - final BytesRef bytes = field.binaryValue(); - if (bytes != null) { - result.add(bytes); - } - } - } - return result.toArray(new BytesRef[result.size()]); - } - - /** - * Returns an array of bytes for the first (or only) field that has the name - * specified as the method parameter. This method will return null - * if no binary fields with the specified name are available. - * There may be non-binary fields with the same name. - * - * @param name the name of the field. - * @return a byte[] containing the binary field value or null - */ - public final BytesRef getBinaryValue(String name) { - for (IndexableField field : fields) { - if (field.name().equals(name)) { - final BytesRef bytes = field.binaryValue(); - if (bytes != null) { - return bytes; - } - } - } - return null; - } /** Returns a field with the given name if any exist in this document, or * null. If multiple fields exists with this name, this method returns the @@ -191,50 +149,6 @@ return fields; } - private final static String[] NO_STRINGS = new String[0]; - - /** - * Returns an array of values of the field specified as the method parameter. - * This method returns an empty array when there are no - * matching fields. It never returns null. - * For {@link IntField}, {@link LongField}, {@link - * FloatField} and {@link DoubleField} it returns the string value of the number. If you want - * the actual numeric field instances back, use {@link #getFields}. 
- * @param name the name of the field - * @return a String[] of field values - */ - public final String[] getValues(String name) { - List result = new ArrayList(); - for (IndexableField field : fields) { - if (field.name().equals(name) && field.stringValue() != null) { - result.add(field.stringValue()); - } - } - - if (result.size() == 0) { - return NO_STRINGS; - } - - return result.toArray(new String[result.size()]); - } - - /** Returns the string value of the field with the given name if any exist in - * this document, or null. If multiple fields exist with this name, this - * method returns the first value added. If only binary fields with this name - * exist, returns null. - * For {@link IntField}, {@link LongField}, {@link - * FloatField} and {@link DoubleField} it returns the string value of the number. If you want - * the actual numeric field instance back, use {@link #getField}. - */ - public final String get(String name) { - for (IndexableField field : fields) { - if (field.name().equals(name) && field.stringValue() != null) { - return field.stringValue(); - } - } - return null; - } - /** Prints the fields of a document for human consumption. */ @Override public final String toString() { Index: lucene/core/src/java/org/apache/lucene/document/DocumentStoredFieldVisitor.java =================================================================== --- lucene/core/src/java/org/apache/lucene/document/DocumentStoredFieldVisitor.java (revision 1340366) +++ lucene/core/src/java/org/apache/lucene/document/DocumentStoredFieldVisitor.java (working copy) @@ -34,7 +34,7 @@ * @lucene.experimental */ public class DocumentStoredFieldVisitor extends StoredFieldVisitor { - private final Document doc = new Document(); + private final StoredDocument doc = new StoredDocument(); private final Set fieldsToAdd; /** Load only fields named in the provided Set<String>. 
*/ @@ -62,13 +62,16 @@ @Override public void stringField(FieldInfo fieldInfo, String value) throws IOException { + /* final FieldType ft = new FieldType(TextField.TYPE_STORED); ft.setStoreTermVectors(fieldInfo.storeTermVector); ft.setStoreTermVectors(fieldInfo.storeTermVector); ft.setIndexed(fieldInfo.isIndexed); ft.setOmitNorms(fieldInfo.omitNorms); ft.setIndexOptions(fieldInfo.indexOptions); - doc.add(new Field(fieldInfo.name, value, ft)); + */ + doc.add(new StoredField(fieldInfo.name, value)); + //doc.add(new Field(fieldInfo.name, value, ft)); } @Override @@ -96,7 +99,7 @@ return fieldsToAdd == null || fieldsToAdd.contains(fieldInfo.name) ? Status.YES : Status.NO; } - public Document getDocument() { + public StoredDocument getDocument() { return doc; } } Index: lucene/core/src/java/org/apache/lucene/document/Field.java =================================================================== --- lucene/core/src/java/org/apache/lucene/document/Field.java (revision 1340366) +++ lucene/core/src/java/org/apache/lucene/document/Field.java (working copy) @@ -31,6 +31,7 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.IndexableFieldType; import org.apache.lucene.index.Norm; // javadocs +import org.apache.lucene.index.StorableField; import org.apache.lucene.util.BytesRef; import org.apache.lucene.index.FieldInvertState; // javadocs Index: lucene/core/src/java/org/apache/lucene/document/StoredDocument.java =================================================================== --- lucene/core/src/java/org/apache/lucene/document/StoredDocument.java (revision 0) +++ lucene/core/src/java/org/apache/lucene/document/StoredDocument.java (working copy) @@ -0,0 +1,77 @@ +package org.apache.lucene.document; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; + +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.StorableField; +import 
org.apache.lucene.search.IndexSearcher; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +public class StoredDocument implements Iterable<StorableField>{ + + private final List<StorableField> fields = new ArrayList<StorableField>(); + + + public final void add(StorableField field) { + fields.add(field); + } + + public StorableField[] getFields(String name) { + List<StorableField> result = new ArrayList<StorableField>(); + for (StorableField field : fields) { + if (field.name().equals(name)) { + result.add(field); + } + } + + return result.toArray(new StorableField[result.size()]); + } + + public final StorableField getField(String name) { + for (StorableField field : fields) { + if (field.name().equals(name)) { + return field; + } + } + return null; + } + + public final void removeField(String name) { + Iterator<StorableField> it = fields.iterator(); + while (it.hasNext()) { + StorableField field = it.next(); + if (field.name().equals(name)) { + it.remove(); + return; + } + } + } + + public final List<StorableField> getFields() { + return fields; + } + + @Override + public Iterator<StorableField> iterator() { + return this.fields.iterator(); + } +} Index: lucene/core/src/java/org/apache/lucene/document/StoredField.java =================================================================== --- 
lucene/core/src/java/org/apache/lucene/document/StoredField.java (revision 1340366) +++ lucene/core/src/java/org/apache/lucene/document/StoredField.java (working copy) @@ -1,6 +1,7 @@ package org.apache.lucene.document; import org.apache.lucene.index.IndexReader; // javadocs +import org.apache.lucene.index.StorableField; import org.apache.lucene.search.IndexSearcher; // javadocs import org.apache.lucene.util.BytesRef; @@ -24,7 +25,7 @@ /** A field whose value is stored so that {@link * IndexSearcher#doc} and {@link IndexReader#document} will * return the field and its value. */ -public final class StoredField extends Field { +public final class StoredField extends Field implements StorableField { public final static FieldType TYPE; static { Index: lucene/core/src/java/org/apache/lucene/index/CheckIndex.java =================================================================== --- lucene/core/src/java/org/apache/lucene/index/CheckIndex.java (revision 1340366) +++ lucene/core/src/java/org/apache/lucene/index/CheckIndex.java (working copy) @@ -31,6 +31,7 @@ import org.apache.lucene.codecs.Codec; import org.apache.lucene.document.Document; import org.apache.lucene.document.FieldType; // for javadocs +import org.apache.lucene.document.StoredDocument; import org.apache.lucene.index.DocValues.SortedSource; import org.apache.lucene.index.DocValues.Source; import org.apache.lucene.search.DocIdSetIterator; @@ -1199,7 +1200,7 @@ for (int j = 0; j < info.docCount; ++j) { // Intentionally pull even deleted documents to // make sure they too are not corrupt: - Document doc = reader.document(j); + StoredDocument doc = reader.document(j); if (liveDocs == null || liveDocs.get(j)) { status.docCount++; status.totFields += doc.getFields().size(); Index: lucene/core/src/java/org/apache/lucene/index/IndexableField.java =================================================================== --- lucene/core/src/java/org/apache/lucene/index/IndexableField.java (revision 1340366) +++ 
lucene/core/src/java/org/apache/lucene/index/IndexableField.java (working copy) @@ -33,30 +33,12 @@ * * @lucene.experimental */ -public interface IndexableField { +public interface IndexableField extends StorableField { - /** Field name */ - public String name(); - /** {@link IndexableFieldType} describing the properties * of this field. */ public IndexableFieldType fieldType(); - - /** Field boost (you must pre-multiply in any doc boost). */ - public float boost(); - /** Non-null if this field has a binary value */ - public BytesRef binaryValue(); - - /** Non-null if this field has a string value */ - public String stringValue(); - - /** Non-null if this field has a Reader value */ - public Reader readerValue(); - - /** Non-null if this field has a numeric value */ - public Number numericValue(); - /** * Creates the TokenStream used for indexing this field. If appropriate, * implementations should use the given Analyzer to create the TokenStreams. Index: lucene/core/src/java/org/apache/lucene/index/IndexReader.java =================================================================== --- lucene/core/src/java/org/apache/lucene/index/IndexReader.java (revision 1340366) +++ lucene/core/src/java/org/apache/lucene/index/IndexReader.java (working copy) @@ -27,116 +27,130 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.DocumentStoredFieldVisitor; +import org.apache.lucene.document.StoredDocument; import org.apache.lucene.search.SearcherManager; // javadocs import org.apache.lucene.store.*; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.ReaderUtil; // for javadocs -/** IndexReader is an abstract class, providing an interface for accessing an - index. Search of an index is done entirely through this abstract interface, - so that any subclass which implements it is searchable. - -

    There are two different types of IndexReaders: -

      -
    • {@link AtomicReader}: These indexes do not consist of several sub-readers, - they are atomic. They support retrieval of stored fields, doc values, terms, - and postings. -
    • {@link CompositeReader}: Instances (like {@link DirectoryReader}) - of this reader can only - be used to get stored fields from the underlying AtomicReaders, - but it is not possible to directly retrieve postings. To do that, get - the sub-readers via {@link CompositeReader#getSequentialSubReaders}. - Alternatively, you can mimic an {@link AtomicReader} (with a serious slowdown), - by wrapping composite readers with {@link SlowCompositeReaderWrapper}. -
    - -

    IndexReader instances for indexes on disk are usually constructed - with a call to one of the static DirectoryReader,open() methods, - e.g. {@link DirectoryReader#open(Directory)}. {@link DirectoryReader} implements - the {@link CompositeReader} interface, it is not possible to directly get postings. - -

    For efficiency, in this API documents are often referred to via - document numbers, non-negative integers which each name a unique - document in the index. These document numbers are ephemeral -- they may change - as documents are added to and deleted from an index. Clients should thus not - rely on a given document having the same number between sessions. - -

    -

    NOTE: {@link - IndexReader} instances are completely thread - safe, meaning multiple threads can call any of its methods, - concurrently. If your application requires external - synchronization, you should not synchronize on the - IndexReader instance; use your own - (non-Lucene) objects instead. -*/ +/** + * IndexReader is an abstract class, providing an interface for accessing an + * index. Search of an index is done entirely through this abstract interface, + * so that any subclass which implements it is searchable. + * + *

    + * There are two different types of IndexReaders: + *

      + *
    • {@link AtomicReader}: These indexes do not consist of several + * sub-readers, they are atomic. They support retrieval of stored fields, doc + * values, terms, and postings. + *
    • {@link CompositeReader}: Instances (like {@link DirectoryReader}) of this + * reader can only be used to get stored fields from the underlying + * AtomicReaders, but it is not possible to directly retrieve postings. To do + * that, get the sub-readers via {@link CompositeReader#getSequentialSubReaders} + * . Alternatively, you can mimic an {@link AtomicReader} (with a serious + * slowdown), by wrapping composite readers with + * {@link SlowCompositeReaderWrapper}. + *
    + * + *

    + * IndexReader instances for indexes on disk are usually constructed with a call + * to one of the static DirectoryReader,open() methods, e.g. + * {@link DirectoryReader#open(Directory)}. {@link DirectoryReader} implements + * the {@link CompositeReader} interface, it is not possible to directly get + * postings. + * + *

    + * For efficiency, in this API documents are often referred to via document + * numbers, non-negative integers which each name a unique document in the + * index. These document numbers are ephemeral -- they may change as documents + * are added to and deleted from an index. Clients should thus not rely on a + * given document having the same number between sessions. + * + *

    + * + *

    + * NOTE: {@link IndexReader} instances are completely thread safe, + * meaning multiple threads can call any of its methods, concurrently. If your + * application requires external synchronization, you should not + * synchronize on the IndexReader instance; use your own + * (non-Lucene) objects instead. + */ public abstract class IndexReader implements Closeable { private boolean closed = false; private boolean closedByChild = false; private final AtomicInteger refCount = new AtomicInteger(1); - + IndexReader() { - if (!(this instanceof CompositeReader || this instanceof AtomicReader)) - throw new Error("IndexReader should never be directly extended, subclass AtomicReader or CompositeReader instead."); + if (!(this instanceof CompositeReader || this instanceof AtomicReader)) throw new Error( + "IndexReader should never be directly extended, subclass AtomicReader or CompositeReader instead."); } /** - * A custom listener that's invoked when the IndexReader - * is closed. - * + * A custom listener that's invoked when the IndexReader is closed. + * * @lucene.experimental */ public static interface ReaderClosedListener { public void onClose(IndexReader reader); } - - private final Set readerClosedListeners = - Collections.synchronizedSet(new LinkedHashSet()); - - private final Set parentReaders = - Collections.synchronizedSet(Collections.newSetFromMap(new WeakHashMap())); - - /** Expert: adds a {@link ReaderClosedListener}. The - * provided listener will be invoked when this reader is closed. - * - * @lucene.experimental */ + + private final Set readerClosedListeners = Collections + .synchronizedSet(new LinkedHashSet()); + + private final Set parentReaders = Collections + .synchronizedSet(Collections + .newSetFromMap(new WeakHashMap())); + + /** + * Expert: adds a {@link ReaderClosedListener}. The provided listener will be + * invoked when this reader is closed. 
+ * + * @lucene.experimental + */ public final void addReaderClosedListener(ReaderClosedListener listener) { ensureOpen(); readerClosedListeners.add(listener); } - - /** Expert: remove a previously added {@link ReaderClosedListener}. - * - * @lucene.experimental */ + + /** + * Expert: remove a previously added {@link ReaderClosedListener}. + * + * @lucene.experimental + */ public final void removeReaderClosedListener(ReaderClosedListener listener) { ensureOpen(); readerClosedListeners.remove(listener); } - /** Expert: This method is called by {@code IndexReader}s which wrap other readers - * (e.g. {@link CompositeReader} or {@link FilterAtomicReader}) to register the parent - * at the child (this reader) on construction of the parent. When this reader is closed, - * it will mark all registered parents as closed, too. The references to parent readers - * are weak only, so they can be GCed once they are no longer in use. - * @lucene.experimental */ + /** + * Expert: This method is called by {@code IndexReader}s which wrap other + * readers (e.g. {@link CompositeReader} or {@link FilterAtomicReader}) to + * register the parent at the child (this reader) on construction of the + * parent. When this reader is closed, it will mark all registered parents as + * closed, too. The references to parent readers are weak only, so they can be + * GCed once they are no longer in use. 
+ * + * @lucene.experimental + */ public final void registerParentReader(IndexReader reader) { ensureOpen(); parentReaders.add(reader); } - + private void notifyReaderClosedListeners() { - synchronized(readerClosedListeners) { - for(ReaderClosedListener listener : readerClosedListeners) { + synchronized (readerClosedListeners) { + for (ReaderClosedListener listener : readerClosedListeners) { listener.onClose(this); } } } - + private void reportCloseToParentReaders() { - synchronized(parentReaders) { - for(IndexReader parent : parentReaders) { + synchronized (parentReaders) { + for (IndexReader parent : parentReaders) { parent.closedByChild = true; // cross memory barrier by a fake write: parent.refCount.addAndGet(0); @@ -145,7 +159,7 @@ } } } - + /** Expert: returns the current refCount for this reader */ public final int getRefCount() { // NOTE: don't ensureOpen, so that callers can see @@ -154,17 +168,14 @@ } /** - * Expert: increments the refCount of this IndexReader - * instance. RefCounts are used to determine when a - * reader can be closed safely, i.e. as soon as there are - * no more references. Be sure to always call a - * corresponding {@link #decRef}, in a finally clause; - * otherwise the reader may never be closed. Note that - * {@link #close} simply calls decRef(), which means that - * the IndexReader will not really be closed until {@link - * #decRef} has been called for all outstanding - * references. - * + * Expert: increments the refCount of this IndexReader instance. RefCounts are + * used to determine when a reader can be closed safely, i.e. as soon as there + * are no more references. Be sure to always call a corresponding + * {@link #decRef}, in a finally clause; otherwise the reader may never be + * closed. Note that {@link #close} simply calls decRef(), which means that + * the IndexReader will not really be closed until {@link #decRef} has been + * called for all outstanding references. 
+ * * @see #decRef * @see #tryIncRef */ @@ -174,46 +185,41 @@ } /** - * Expert: increments the refCount of this IndexReader - * instance only if the IndexReader has not been closed yet - * and returns true iff the refCount was - * successfully incremented, otherwise false. - * If this method returns false the reader is either - * already closed or is currently been closed. Either way this - * reader instance shouldn't be used by an application unless - * true is returned. + * Expert: increments the refCount of this IndexReader instance only if the + * IndexReader has not been closed yet and returns true iff the + * refCount was successfully incremented, otherwise false. If + * this method returns false the reader is either already closed + * or is currently been closed. Either way this reader instance shouldn't be + * used by an application unless true is returned. *

    - * RefCounts are used to determine when a - * reader can be closed safely, i.e. as soon as there are - * no more references. Be sure to always call a - * corresponding {@link #decRef}, in a finally clause; - * otherwise the reader may never be closed. Note that - * {@link #close} simply calls decRef(), which means that - * the IndexReader will not really be closed until {@link - * #decRef} has been called for all outstanding - * references. - * + * RefCounts are used to determine when a reader can be closed safely, i.e. as + * soon as there are no more references. Be sure to always call a + * corresponding {@link #decRef}, in a finally clause; otherwise the reader + * may never be closed. Note that {@link #close} simply calls decRef(), which + * means that the IndexReader will not really be closed until {@link #decRef} + * has been called for all outstanding references. + * * @see #decRef * @see #incRef */ public final boolean tryIncRef() { int count; while ((count = refCount.get()) > 0) { - if (refCount.compareAndSet(count, count+1)) { + if (refCount.compareAndSet(count, count + 1)) { return true; } } return false; } - + /** - * Expert: decreases the refCount of this IndexReader - * instance. If the refCount drops to 0, then this - * reader is closed. If an exception is hit, the refCount - * is unchanged. - * - * @throws IOException in case an IOException occurs in doClose() - * + * Expert: decreases the refCount of this IndexReader instance. If the + * refCount drops to 0, then this reader is closed. If an exception is hit, + * the refCount is unchanged. 
+ * + * @throws IOException + * in case an IOException occurs in doClose() + * * @see #incRef */ public final void decRef() throws IOException { @@ -238,28 +244,34 @@ reportCloseToParentReaders(); notifyReaderClosedListeners(); } else if (rc < 0) { - throw new IllegalStateException("too many decRef calls: refCount is " + rc + " after decrement"); + throw new IllegalStateException("too many decRef calls: refCount is " + + rc + " after decrement"); } } /** - * @throws AlreadyClosedException if this IndexReader is closed + * @throws AlreadyClosedException + * if this IndexReader is closed */ protected final void ensureOpen() throws AlreadyClosedException { if (refCount.get() <= 0) { throw new AlreadyClosedException("this IndexReader is closed"); } - // the happens before rule on reading the refCount, which must be after the fake write, + // the happens before rule on reading the refCount, which must be after the + // fake write, // ensures that we see the value: if (closedByChild) { - throw new AlreadyClosedException("this IndexReader cannot be used anymore as one of its child readers was closed"); + throw new AlreadyClosedException( + "this IndexReader cannot be used anymore as one of its child readers was closed"); } } - /** {@inheritDoc} - *

    For caching purposes, {@code IndexReader} subclasses are not allowed - * to implement equals/hashCode, so methods are declared final. - * To lookup instances from caches use {@link #getCoreCacheKey} and + /** + * {@inheritDoc} + *

    + * For caching purposes, {@code IndexReader} subclasses are not allowed to + * implement equals/hashCode, so methods are declared final. To lookup + * instances from caches use {@link #getCoreCacheKey} and * {@link #getCombinedCoreAndDeletesKey}. */ @Override @@ -267,10 +279,12 @@ return (this == obj); } - /** {@inheritDoc} - *

    For caching purposes, {@code IndexReader} subclasses are not allowed - * to implement equals/hashCode, so methods are declared final. - * To lookup instances from caches use {@link #getCoreCacheKey} and + /** + * {@inheritDoc} + *

    + * For caching purposes, {@code IndexReader} subclasses are not allowed to + * implement equals/hashCode, so methods are declared final. To lookup + * instances from caches use {@link #getCoreCacheKey} and * {@link #getCombinedCoreAndDeletesKey}. */ @Override @@ -278,188 +292,222 @@ return System.identityHashCode(this); } - /** Returns a IndexReader reading the index in the given - * Directory - * @param directory the index directory - * @throws CorruptIndexException if the index is corrupt - * @throws IOException if there is a low-level IO error + /** + * Returns a IndexReader reading the index in the given Directory + * + * @param directory + * the index directory + * @throws CorruptIndexException + * if the index is corrupt + * @throws IOException + * if there is a low-level IO error * @deprecated Use {@link DirectoryReader#open(Directory)} */ @Deprecated - public static DirectoryReader open(final Directory directory) throws CorruptIndexException, IOException { + public static DirectoryReader open(final Directory directory) + throws CorruptIndexException, IOException { return DirectoryReader.open(directory); } - /** Expert: Returns a IndexReader reading the index in the given - * Directory with the given termInfosIndexDivisor. - * @param directory the index directory - * @param termInfosIndexDivisor Subsamples which indexed - * terms are loaded into RAM. This has the same effect as {@link - * IndexWriterConfig#setTermIndexInterval} except that setting - * must be done at indexing time while this setting can be - * set per reader. When set to N, then one in every - * N*termIndexInterval terms in the index is loaded into - * memory. By setting this to a value > 1 you can reduce - * memory usage, at the expense of higher latency when - * loading a TermInfo. The default value is 1. Set this - * to -1 to skip loading the terms index entirely. 
- * @throws CorruptIndexException if the index is corrupt - * @throws IOException if there is a low-level IO error + /** + * Expert: Returns a IndexReader reading the index in the given Directory with + * the given termInfosIndexDivisor. + * + * @param directory + * the index directory + * @param termInfosIndexDivisor + * Subsamples which indexed terms are loaded into RAM. This has the + * same effect as {@link IndexWriterConfig#setTermIndexInterval} + * except that setting must be done at indexing time while this + * setting can be set per reader. When set to N, then one in every + * N*termIndexInterval terms in the index is loaded into memory. By + * setting this to a value > 1 you can reduce memory usage, at the + * expense of higher latency when loading a TermInfo. The default + * value is 1. Set this to -1 to skip loading the terms index + * entirely. + * @throws CorruptIndexException + * if the index is corrupt + * @throws IOException + * if there is a low-level IO error * @deprecated Use {@link DirectoryReader#open(Directory,int)} */ @Deprecated - public static DirectoryReader open(final Directory directory, int termInfosIndexDivisor) throws CorruptIndexException, IOException { + public static DirectoryReader open(final Directory directory, + int termInfosIndexDivisor) throws CorruptIndexException, IOException { return DirectoryReader.open(directory, termInfosIndexDivisor); } /** - * Open a near real time IndexReader from the {@link org.apache.lucene.index.IndexWriter}. - * - * @param writer The IndexWriter to open from - * @param applyAllDeletes If true, all buffered deletes will - * be applied (made visible) in the returned reader. If - * false, the deletes are not applied but remain buffered - * (in IndexWriter) so that they will be applied in the - * future. Applying deletes can be costly, so if your app - * can tolerate deleted documents being returned you might - * gain some performance by passing false. 
+ * Open a near real time IndexReader from the + * {@link org.apache.lucene.index.IndexWriter}. + * + * @param writer + * The IndexWriter to open from + * @param applyAllDeletes + * If true, all buffered deletes will be applied (made visible) in + * the returned reader. If false, the deletes are not applied but + * remain buffered (in IndexWriter) so that they will be applied in + * the future. Applying deletes can be costly, so if your app can + * tolerate deleted documents being returned you might gain some + * performance by passing false. * @return The new IndexReader * @throws CorruptIndexException - * @throws IOException if there is a low-level IO error - * + * @throws IOException + * if there is a low-level IO error + * * @see DirectoryReader#openIfChanged(DirectoryReader,IndexWriter,boolean) - * + * * @lucene.experimental * @deprecated Use {@link DirectoryReader#open(IndexWriter,boolean)} */ @Deprecated - public static DirectoryReader open(final IndexWriter writer, boolean applyAllDeletes) throws CorruptIndexException, IOException { + public static DirectoryReader open(final IndexWriter writer, + boolean applyAllDeletes) throws CorruptIndexException, IOException { return DirectoryReader.open(writer, applyAllDeletes); } - - /** Expert: returns an IndexReader reading the index in the given - * {@link IndexCommit}. - * @param commit the commit point to open - * @throws CorruptIndexException if the index is corrupt - * @throws IOException if there is a low-level IO error + + /** + * Expert: returns an IndexReader reading the index in the given + * {@link IndexCommit}. 
+ * + * @param commit + * the commit point to open + * @throws CorruptIndexException + * if the index is corrupt + * @throws IOException + * if there is a low-level IO error * @deprecated Use {@link DirectoryReader#open(IndexCommit)} */ @Deprecated - public static DirectoryReader open(final IndexCommit commit) throws CorruptIndexException, IOException { + public static DirectoryReader open(final IndexCommit commit) + throws CorruptIndexException, IOException { return DirectoryReader.open(commit); } - - - /** Expert: returns an IndexReader reading the index in the given - * {@link IndexCommit} and termInfosIndexDivisor. - * @param commit the commit point to open - * @param termInfosIndexDivisor Subsamples which indexed - * terms are loaded into RAM. This has the same effect as {@link - * IndexWriterConfig#setTermIndexInterval} except that setting - * must be done at indexing time while this setting can be - * set per reader. When set to N, then one in every - * N*termIndexInterval terms in the index is loaded into - * memory. By setting this to a value > 1 you can reduce - * memory usage, at the expense of higher latency when - * loading a TermInfo. The default value is 1. Set this - * to -1 to skip loading the terms index entirely. - * @throws CorruptIndexException if the index is corrupt - * @throws IOException if there is a low-level IO error + + /** + * Expert: returns an IndexReader reading the index in the given + * {@link IndexCommit} and termInfosIndexDivisor. + * + * @param commit + * the commit point to open + * @param termInfosIndexDivisor + * Subsamples which indexed terms are loaded into RAM. This has the + * same effect as {@link IndexWriterConfig#setTermIndexInterval} + * except that setting must be done at indexing time while this + * setting can be set per reader. When set to N, then one in every + * N*termIndexInterval terms in the index is loaded into memory. 
By + * setting this to a value > 1 you can reduce memory usage, at the + * expense of higher latency when loading a TermInfo. The default + * value is 1. Set this to -1 to skip loading the terms index + * entirely. + * @throws CorruptIndexException + * if the index is corrupt + * @throws IOException + * if there is a low-level IO error * @deprecated Use {@link DirectoryReader#open(IndexCommit,int)} */ @Deprecated - public static DirectoryReader open(final IndexCommit commit, int termInfosIndexDivisor) throws CorruptIndexException, IOException { + public static DirectoryReader open(final IndexCommit commit, + int termInfosIndexDivisor) throws CorruptIndexException, IOException { return DirectoryReader.open(commit, termInfosIndexDivisor); } - - /** Retrieve term vectors for this document, or null if - * term vectors were not indexed. The returned Fields - * instance acts like a single-document inverted index - * (the docID will be 0). */ - public abstract Fields getTermVectors(int docID) - throws IOException; - - /** Retrieve term vector for this document and field, or - * null if term vectors were not indexed. The returned - * Fields instance acts like a single-document inverted - * index (the docID will be 0). */ - public final Terms getTermVector(int docID, String field) - throws IOException { + + /** + * Retrieve term vectors for this document, or null if term vectors were not + * indexed. The returned Fields instance acts like a single-document inverted + * index (the docID will be 0). + */ + public abstract Fields getTermVectors(int docID) throws IOException; + + /** + * Retrieve term vector for this document and field, or null if term vectors + * were not indexed. The returned Fields instance acts like a single-document + * inverted index (the docID will be 0). 
+ */ + public final Terms getTermVector(int docID, String field) throws IOException { Fields vectors = getTermVectors(docID); if (vectors == null) { return null; } return vectors.terms(field); } - + /** Returns the number of documents in this index. */ public abstract int numDocs(); - - /** Returns one greater than the largest possible document number. - * This may be used to, e.g., determine how big to allocate an array which - * will have an element for every document number in an index. + + /** + * Returns one greater than the largest possible document number. This may be + * used to, e.g., determine how big to allocate an array which will have an + * element for every document number in an index. */ public abstract int maxDoc(); - + /** Returns the number of deleted documents. */ public final int numDeletedDocs() { return maxDoc() - numDocs(); } - - /** Expert: visits the fields of a stored document, for - * custom processing/loading of each field. If you - * simply want to load all fields, use {@link - * #document(int)}. If you want to load a subset, use - * {@link DocumentStoredFieldVisitor}. */ - public abstract void document(int docID, StoredFieldVisitor visitor) throws CorruptIndexException, IOException; /** + * Expert: visits the fields of a stored document, for custom + * processing/loading of each field. If you simply want to load all fields, + * use {@link #document(int)}. If you want to load a subset, use + * {@link DocumentStoredFieldVisitor}. + */ + public abstract void document(int docID, StoredFieldVisitor visitor) + throws CorruptIndexException, IOException; + + /** * Returns the stored fields of the nth - * Document in this index. This is just - * sugar for using {@link DocumentStoredFieldVisitor}. + * Document in this index. This is just sugar for using + * {@link DocumentStoredFieldVisitor}. *

    * NOTE: for performance reasons, this method does not check if the * requested document is deleted, and therefore asking for a deleted document * may yield unspecified results. Usually this is not required, however you - * can test if the doc is deleted by checking the {@link - * Bits} returned from {@link MultiFields#getLiveDocs}. - * - * NOTE: only the content of a field is returned, - * if that field was stored during indexing. Metadata - * like boost, omitNorm, IndexOptions, tokenized, etc., - * are not preserved. + * can test if the doc is deleted by checking the {@link Bits} returned from + * {@link MultiFields#getLiveDocs}. * - * @throws CorruptIndexException if the index is corrupt - * @throws IOException if there is a low-level IO error + * NOTE: only the content of a field is returned, if that field was + * stored during indexing. Metadata like boost, omitNorm, IndexOptions, + * tokenized, etc., are not preserved. + * + * @throws CorruptIndexException + * if the index is corrupt + * @throws IOException + * if there is a low-level IO error */ // TODO: we need a separate StoredField, so that the // Document returned here contains that class not // IndexableField - public final Document document(int docID) throws CorruptIndexException, IOException { + public final StoredDocument document(int docID) throws CorruptIndexException, + IOException { final DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(); document(docID, visitor); return visitor.getDocument(); } - + /** - * Like {@link #document(int)} but only loads the specified - * fields. Note that this is simply sugar for {@link - * DocumentStoredFieldVisitor#DocumentStoredFieldVisitor(Set)}. + * Like {@link #document(int)} but only loads the specified fields. Note that + * this is simply sugar for + * {@link DocumentStoredFieldVisitor#DocumentStoredFieldVisitor(Set)}. 
*/ - public final Document document(int docID, Set fieldsToLoad) throws CorruptIndexException, IOException { - final DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(fieldsToLoad); + public final StoredDocument document(int docID, Set fieldsToLoad) + throws CorruptIndexException, IOException { + final DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor( + fieldsToLoad); document(docID, visitor); return visitor.getDocument(); } - + /** Returns true if any documents have been deleted */ public abstract boolean hasDeletions(); - + /** - * Closes files associated with this index. - * Also saves any new deletions to disk. - * No other methods should be called after this has been called. - * @throws IOException if there is a low-level IO error + * Closes files associated with this index. Also saves any new deletions to + * disk. No other methods should be called after this has been called. + * + * @throws IOException + * if there is a low-level IO error */ public final synchronized void close() throws IOException { if (!closed) { @@ -470,59 +518,65 @@ /** Implements close. */ protected abstract void doClose() throws IOException; - + /** * Expert: Returns a the root {@link IndexReaderContext} for this * {@link IndexReader}'s sub-reader tree. Iff this reader is composed of sub * readers ,ie. this reader being a composite reader, this method returns a - * {@link CompositeReaderContext} holding the reader's direct children as well as a - * view of the reader tree's atomic leaf contexts. All sub- + * {@link CompositeReaderContext} holding the reader's direct children as well + * as a view of the reader tree's atomic leaf contexts. All sub- * {@link IndexReaderContext} instances referenced from this readers top-level * context are private to this reader and are not shared with another context * tree. For example, IndexSearcher uses this API to drive searching by one * atomic leaf reader at a time. 
If this reader is not composed of child * readers, this method returns an {@link AtomicReaderContext}. *

    - * Note: Any of the sub-{@link CompositeReaderContext} instances reference from this - * top-level context holds a null {@link CompositeReaderContext#leaves()} - * reference. Only the top-level context maintains the convenience leaf-view - * for performance reasons. + * Note: Any of the sub-{@link CompositeReaderContext} instances reference + * from this top-level context holds a null + * {@link CompositeReaderContext#leaves()} reference. Only the top-level + * context maintains the convenience leaf-view for performance reasons. * * @lucene.experimental */ public abstract IndexReaderContext getTopReaderContext(); - - /** Expert: Returns a key for this IndexReader, so FieldCache/CachingWrapperFilter can find - * it again. - * This key must not have equals()/hashCode() methods, so "equals" means "identical". */ + + /** + * Expert: Returns a key for this IndexReader, so + * FieldCache/CachingWrapperFilter can find it again. This key must not have + * equals()/hashCode() methods, so "equals" means + * "identical". + */ public Object getCoreCacheKey() { // Don't can ensureOpen since FC calls this (to evict) // on close return this; } - - /** Expert: Returns a key for this IndexReader that also includes deletions, - * so FieldCache/CachingWrapperFilter can find it again. - * This key must not have equals()/hashCode() methods, so "equals" means "identical". */ + + /** + * Expert: Returns a key for this IndexReader that also includes deletions, so + * FieldCache/CachingWrapperFilter can find it again. This key must not have + * equals()/hashCode() methods, so "equals" means + * "identical". + */ public Object getCombinedCoreAndDeletesKey() { // Don't can ensureOpen since FC calls this (to evict) // on close return this; } - /** Returns the number of documents containing the - * term. This method returns 0 if the term or - * field does not exists. This method does not take into - * account deleted documents that have not yet been merged - * away. 
*/ + /** + * Returns the number of documents containing the term. This + * method returns 0 if the term or field does not exists. This method does not + * take into account deleted documents that have not yet been merged away. + */ public final int docFreq(Term term) throws IOException { return docFreq(term.field(), term.bytes()); } - - /** Returns the number of documents containing the - * term. This method returns 0 if the term or - * field does not exists. This method does not take into - * account deleted documents that have not yet been merged - * away. */ + + /** + * Returns the number of documents containing the term. This + * method returns 0 if the term or field does not exists. This method does not + * take into account deleted documents that have not yet been merged away. + */ public abstract int docFreq(String field, BytesRef term) throws IOException; } Index: lucene/core/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java =================================================================== --- lucene/core/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java (revision 1340366) +++ lucene/core/src/java/org/apache/lucene/index/PersistentSnapshotDeletionPolicy.java (working copy) @@ -26,6 +26,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.StoredDocument; import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.store.Directory; import org.apache.lucene.store.LockObtainFailedException; @@ -68,12 +69,12 @@ int numDocs = r.numDocs(); // index is allowed to have exactly one document or 0. 
if (numDocs == 1) { - Document doc = r.document(r.maxDoc() - 1); + StoredDocument doc = r.document(r.maxDoc() - 1); if (doc.getField(SNAPSHOTS_ID) == null) { throw new IllegalStateException("directory is not a valid snapshots store!"); } doc.removeField(SNAPSHOTS_ID); - for (IndexableField f : doc) { + for (StorableField f : doc) { snapshots.put(f.name(), f.stringValue()); } } else if (numDocs != 0) { Index: lucene/core/src/java/org/apache/lucene/index/StorableField.java =================================================================== --- lucene/core/src/java/org/apache/lucene/index/StorableField.java (revision 0) +++ lucene/core/src/java/org/apache/lucene/index/StorableField.java (working copy) @@ -0,0 +1,43 @@ +package org.apache.lucene.index; + +import java.io.Reader; + +import org.apache.lucene.util.BytesRef; + +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +public interface StorableField { + + /** Field name */ + public String name(); + + /** Field boost (you must pre-multiply in any doc boost). 
*/ + public float boost(); + + /** Non-null if this field has a binary value */ + public BytesRef binaryValue(); + + /** Non-null if this field has a string value */ + public String stringValue(); + + /** Non-null if this field has a Reader value */ + public Reader readerValue(); + + /** Non-null if this field has a numeric value */ + public Number numericValue(); +} Index: lucene/core/src/java/org/apache/lucene/index/StoredFieldsConsumer.java =================================================================== --- lucene/core/src/java/org/apache/lucene/index/StoredFieldsConsumer.java (revision 1340366) +++ lucene/core/src/java/org/apache/lucene/index/StoredFieldsConsumer.java (working copy) @@ -44,12 +44,12 @@ } private int numStoredFields; - private IndexableField[] storedFields; + private StorableField[] storedFields; private FieldInfo[] fieldInfos; public void reset() { numStoredFields = 0; - storedFields = new IndexableField[1]; + storedFields = new StorableField[1]; fieldInfos = new FieldInfo[1]; } @@ -125,10 +125,10 @@ assert docWriter.writer.testPoint("StoredFieldsWriter.finishDocument end"); } - public void addField(IndexableField field, FieldInfo fieldInfo) throws IOException { + public void addField(StorableField field, FieldInfo fieldInfo) throws IOException { if (numStoredFields == storedFields.length) { int newSize = ArrayUtil.oversize(numStoredFields + 1, RamUsageEstimator.NUM_BYTES_OBJECT_REF); - IndexableField[] newArray = new IndexableField[newSize]; + StorableField[] newArray = new StorableField[newSize]; System.arraycopy(storedFields, 0, newArray, 0, numStoredFields); storedFields = newArray; Index: lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java =================================================================== --- lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java (revision 1340366) +++ lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java (working copy) @@ -31,6 +31,7 @@ import 
java.util.concurrent.locks.ReentrantLock; import org.apache.lucene.document.Document; +import org.apache.lucene.document.StoredDocument; import org.apache.lucene.index.AtomicReaderContext; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.DirectoryReader; // javadocs @@ -180,7 +181,7 @@ } /** Sugar for .getIndexReader().document(docID) */ - public Document doc(int docID) throws CorruptIndexException, IOException { + public StoredDocument doc(int docID) throws CorruptIndexException, IOException { return reader.document(docID); } @@ -190,7 +191,7 @@ } /** Sugar for .getIndexReader().document(docID, fieldsToLoad) */ - public final Document document(int docID, Set fieldsToLoad) throws CorruptIndexException, IOException { + public final StoredDocument document(int docID, Set fieldsToLoad) throws CorruptIndexException, IOException { return reader.document(docID, fieldsToLoad); }