Index: lucene/src/java/org/apache/lucene/document/Field.java =================================================================== --- lucene/src/java/org/apache/lucene/document/Field.java (revision 1158404) +++ lucene/src/java/org/apache/lucene/document/Field.java (working copy) @@ -20,15 +20,14 @@ import java.io.Reader; import org.apache.lucene.analysis.TokenStream; -<<<<<<< import org.apache.lucene.index.FieldInfo.IndexOptions; import org.apache.lucene.index.IndexWriter; -======= import org.apache.lucene.document.NumericField; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.values.PerDocFieldValues; +import org.apache.lucene.index.values.ValueType; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.StringHelper; ->>>>>>> /** * A field is a section of a Document. Each field has two parts, a name and a @@ -50,6 +49,7 @@ // length/offset for all primitive types protected int binaryLength; protected int binaryOffset; + protected PerDocFieldValues docValues; protected float boost = 1.0f; @@ -64,7 +64,7 @@ if (reader == null) throw new NullPointerException("reader cannot be null"); - this.name = StringHelper.intern(name); // field names are interned + this.name = name; // field names are interned this.fieldsData = reader; this.type = type; } @@ -75,7 +75,7 @@ if (tokenStream == null) throw new NullPointerException("tokenStream cannot be null"); - this.name = StringHelper.intern(name); // field names are interned + this.name = name; // field names are interned this.fieldsData = null; this.tokenStream = tokenStream; this.type = type; @@ -91,7 +91,7 @@ this.type = type; this.binaryOffset = offset; this.binaryLength = length; - this.name = StringHelper.intern(name); + this.name = name; } public Field(String name, FieldType type, String value) { @@ -117,9 +117,6 @@ this.type = type; this.name = name; this.fieldsData = value; - - if (internName) // field names are optionally interned - name = StringHelper.intern(name); } public boolean isNumeric() { @@ -237,63 +234,6 @@ return name; } -<<<<<<< - /** - * Create a field by specifying its name, value and how it will - * be saved in the index. 
- * - * @param name The name of the field - * @param value The string to process - * @param store Whether value should be stored in the index - * @param index Whether the field should be indexed, and if so, if it should - * be tokenized before indexing - * @param termVector Whether term vector should be stored - * @throws NullPointerException if name or value is null - * @throws IllegalArgumentException in any of the following situations: - * - */ - public Field(String name, String value, Store store, Index index, TermVector termVector) { - if (name == null) - throw new NullPointerException("name cannot be null"); - if (value == null) - throw new NullPointerException("value cannot be null"); - if (name.length() == 0 && value.length() == 0) - throw new IllegalArgumentException("name and value cannot both be empty"); - if (index == Index.NO && store == Store.NO) - throw new IllegalArgumentException("it doesn't make sense to have a field that " - + "is neither indexed nor stored"); - if (index == Index.NO && termVector != TermVector.NO) - throw new IllegalArgumentException("cannot store term vector information " - + "for a field that is not indexed"); - - this.name = name; - - this.fieldsData = value; - - this.isStored = store.isStored(); - - this.isIndexed = index.isIndexed(); - this.isTokenized = index.isAnalyzed(); - this.omitNorms = index.omitNorms(); - if (index == Index.NO) { - // note: now this reads even wierder than before - this.indexOptions = IndexOptions.DOCS_AND_FREQS_AND_POSITIONS; - } - - this.isBinary = false; - - setStoreTermVector(termVector); - } - - /** - * Create a tokenized and indexed field that is not stored. Term vectors will - * not be stored. The Reader is read only when the Document is added to the index, - * i.e. you may not close the Reader until {@link IndexWriter#addDocument(Document)} - * has been called. -======= public float boost() { return boost; } @@ -310,7 +250,6 @@ * {@link org.apache.lucene.search.Similarity#encodeNormValue(float)} before * it is stored in the index. One should attempt to ensure that this product * does not overflow the range of that encoding. ->>>>>>> * * @see org.apache.lucene.search.Similarity#computeNorm(FieldInvertState) * @see org.apache.lucene.search.Similarity#encodeNormValue(float) @@ -323,83 +262,20 @@ return false; } -<<<<<<< - /** - * Create a tokenized and indexed field that is not stored, optionally with - * storing term vectors. The Reader is read only when the Document is added to the index, - * i.e. you may not close the Reader until {@link IndexWriter#addDocument(Document)} - * has been called. - * - * @param name The name of the field - * @param reader The reader with the content - * @param termVector Whether term vector should be stored - * @throws NullPointerException if name or reader is null - */ - public Field(String name, Reader reader, TermVector termVector) { - if (name == null) - throw new NullPointerException("name cannot be null"); - if (reader == null) - throw new NullPointerException("reader cannot be null"); - - this.name = name; - this.fieldsData = reader; - - this.isStored = false; - - this.isIndexed = true; - this.isTokenized = true; - - this.isBinary = false; - - setStoreTermVector(termVector); -======= public Number numericValue() { return null; ->>>>>>> } public NumericField.DataType numericDataType() { return null; } -<<<<<<< - /** - * Create a tokenized and indexed field that is not stored, optionally with - * storing term vectors. This is useful for pre-analyzed fields. 
- * The TokenStream is read only when the Document is added to the index, - * i.e. you may not close the TokenStream until {@link IndexWriter#addDocument(Document)} - * has been called. - * - * @param name The name of the field - * @param tokenStream The TokenStream with the content - * @param termVector Whether term vector should be stored - * @throws NullPointerException if name or tokenStream is null - */ - public Field(String name, TokenStream tokenStream, TermVector termVector) { - if (name == null) - throw new NullPointerException("name cannot be null"); - if (tokenStream == null) - throw new NullPointerException("tokenStream cannot be null"); - - this.name = name; - this.fieldsData = null; - this.tokenStream = tokenStream; - - this.isStored = false; - - this.isIndexed = true; - this.isTokenized = true; - - this.isBinary = false; - - setStoreTermVector(termVector); -======= private byte[] getBinaryValue(byte[] result /* unused */) { if (isBinary || fieldsData instanceof byte[]) return (byte[]) fieldsData; else return null; } - private byte[] getBinaryValue() { + protected byte[] getBinaryValue() { return getBinaryValue(null); } @@ -417,7 +293,6 @@ } else { return null; } ->>>>>>> } /** @@ -426,7 +301,7 @@ * * @return length of byte[] segment that represents this Field value */ - private int getBinaryLength() { + protected int getBinaryLength() { if (isBinary) { return binaryLength; } else if (fieldsData instanceof byte[]) return ((byte[]) fieldsData).length; @@ -466,8 +341,8 @@ return type.omitNorms(); } - public boolean omitTermFreqAndPositions() { - return type.omitTermFreqAndPositions(); + public IndexOptions getIndexOptions() { + return type.indexOptions(); } public boolean storeTermVectors() { @@ -495,33 +370,32 @@ result.append(name); result.append(':'); -<<<<<<< - if (name == null) - throw new IllegalArgumentException("name cannot be null"); - if (value == null) - throw new IllegalArgumentException("value cannot be null"); - - this.name = name; - fieldsData = value; - - isStored = true; - isIndexed = false; - isTokenized = false; - indexOptions = IndexOptions.DOCS_AND_FREQS_AND_POSITIONS; - omitNorms = true; - - isBinary = true; - binaryLength = length; - binaryOffset = offset; - - setStoreTermVector(TermVector.NO); -======= if (fieldsData != null && type.lazy() == false) { result.append(fieldsData); } result.append('>'); return result.toString(); ->>>>>>> } + + public PerDocFieldValues getDocValues() { + return docValues; + } + + public void setDocValues(PerDocFieldValues docValues) { + this.docValues = docValues; + } + + public boolean hasDocValues() { + return docValues != null && docValues.type() != null; + } + + public ValueType docValuesType() { + return docValues == null? null : docValues.type(); + } + + public FieldType getFieldType() { + // get a copy + return new FieldType(type); + } } Index: lucene/src/java/org/apache/lucene/document/FieldType.java =================================================================== --- lucene/src/java/org/apache/lucene/document/FieldType.java (revision 1158404) +++ lucene/src/java/org/apache/lucene/document/FieldType.java (working copy) @@ -17,6 +17,8 @@ * limitations under the License. 
*/ +import org.apache.lucene.index.FieldInfo.IndexOptions; + public class FieldType { private boolean indexed; @@ -26,7 +28,7 @@ private boolean storeTermVectorOffsets; private boolean storeTermVectorPositions; private boolean omitNorms; - private boolean omitTermFreqsAndPositions; + private IndexOptions indexOptions = IndexOptions.DOCS_AND_FREQS_AND_POSITIONS; private boolean lazy; private boolean frozen; @@ -38,7 +40,7 @@ this.storeTermVectorOffsets = ref.storeTermVectorOffsets(); this.storeTermVectorPositions = ref.storeTermVectorPositions(); this.omitNorms = ref.omitNorms(); - this.omitTermFreqsAndPositions = ref.omitTermFreqAndPositions(); + this.indexOptions = ref.indexOptions(); this.lazy = ref.lazy(); } @@ -118,13 +120,13 @@ this.omitNorms = value; } - public boolean omitTermFreqAndPositions() { - return this.omitTermFreqsAndPositions; + public IndexOptions indexOptions() { + return this.indexOptions; } - public void setOmitTermFreqAndPositions(boolean value) { + public void setIndexOptions(IndexOptions value) { checkIfFrozen(); - this.omitTermFreqsAndPositions = value; + this.indexOptions = value; } public boolean lazy() { @@ -171,8 +173,9 @@ if (omitNorms()) { result.append(",omitNorms"); } - if (omitTermFreqAndPositions()) { - result.append(",omitTermFreqAndPositions"); + if (indexOptions != IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) { + result.append(",indexOptions="); + result.append(indexOptions); } if (lazy()){ result.append(",lazy"); Index: lucene/src/java/org/apache/lucene/document/IndexDocValuesField.java =================================================================== --- lucene/src/java/org/apache/lucene/document/IndexDocValuesField.java (revision 1158404) +++ lucene/src/java/org/apache/lucene/document/IndexDocValuesField.java (working copy) @@ -20,9 +20,6 @@ import java.util.Comparator; import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.document.Field.Index; -import org.apache.lucene.document.Field.Store; -import org.apache.lucene.document.Field.TermVector; import org.apache.lucene.index.values.PerDocFieldValues; import org.apache.lucene.index.values.ValueType; import org.apache.lucene.util.BytesRef; @@ -73,7 +70,7 @@ * * * */ -public class IndexDocValuesField extends AbstractField implements PerDocFieldValues { +public class IndexDocValuesField extends Field implements PerDocFieldValues { protected BytesRef bytes; protected double doubleValue; @@ -85,7 +82,7 @@ * Creates a new {@link IndexDocValuesField} with the given name. */ public IndexDocValuesField(String name) { - super(name, Store.NO, Index.NO, TermVector.NO); + super(name, new FieldType()); setDocValues(this); } @@ -329,7 +326,7 @@ * returns the given field. Any modifications to this instance will be visible * to the given field. */ - public T set(T field) { + public T set(T field) { field.setDocValues(this); return field; } @@ -339,7 +336,7 @@ * given type and returns it. 
* */ - public static T set(T field, ValueType type) { + public static T set(T field, ValueType type) { if (field instanceof IndexDocValuesField) return field; final IndexDocValuesField valField = new IndexDocValuesField(); Index: lucene/src/java/org/apache/lucene/document/NumericField.java =================================================================== --- lucene/src/java/org/apache/lucene/document/NumericField.java (revision 1158404) +++ lucene/src/java/org/apache/lucene/document/NumericField.java (working copy) @@ -21,11 +21,8 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.NumericTokenStream; -<<<<<<< import org.apache.lucene.index.FieldInfo.IndexOptions; -======= import org.apache.lucene.document.NumericField.DataType; ->>>>>>> import org.apache.lucene.util.NumericUtils; import org.apache.lucene.search.NumericRangeQuery; // javadocs import org.apache.lucene.search.NumericRangeFilter; // javadocs @@ -56,7 +53,6 @@ * ... * } * -<<<<<<< * *

 The java native types int, long,
 * float and double are
@@ -133,85 +129,6 @@
 * class is a wrapper around this token stream type for
 * easier, more intuitive usage.
 *
-=======
- *
- *
- * The java native types int, long, float
- * and double are directly supported. However, any value that can
- * be converted into these native types can also be indexed. For example,
- * date/time values represented by a {@link java.util.Date} can be translated
- * into a long value using the {@link java.util.Date#getTime} method. If you
- * don't need millisecond precision, you can quantize the value, either by
- * dividing the result of {@link java.util.Date#getTime} or using the separate
- * getters (for year, month, etc.) to construct an int or
- * long value.
- *
- *
- *
- * To perform range querying or filtering against a NumericField,
- * use {@link NumericRangeQuery} or {@link NumericRangeFilter}. To sort
- * according to a NumericField, use the normal numeric sort types,
- * eg {@link SortField#INT}. NumericField values can also be loaded
- * directly from {@link FieldCache}.
- *
- *
- *
- * By default, a NumericField's value is not stored but is indexed
- * for range filtering and sorting. You can use the
- * {@link #NumericField(String,FieldType)} constructor if you need to
- * change these defaults, and alter the default field type (set it to stored).
- *
- *
- *
- * You may add the same field name as a NumericField to the same
- * document more than once. Range querying and filtering will be the logical OR
- * of all values; so a range query will hit all documents that have at least one
- * value in the range. However sort behavior is not defined. If you need to
- * sort, you should separately index a single-valued NumericField.
- *
- *
- *
- * A NumericField will consume somewhat more disk space in the
- * index than an ordinary single-valued field. However, for a typical index that
- * includes substantial textual content per document, this increase will likely
- * be in the noise.
- *
- *
- *
- * Within Lucene, each numeric value is indexed as a trie structure,
- * where each term is logically assigned to larger and larger pre-defined
- * brackets (which are simply lower-precision representations of the value). The
- * step size between each successive bracket is called the
- * precisionStep, measured in bits. Smaller
- * precisionStep values result in larger number of brackets, which
- * consumes more disk space in the index but may result in faster range search
- * performance. The default value, 4, was selected for a reasonable tradeoff of
- * disk space consumption versus performance. You can use the expert constructor
- * {@link #NumericField(String,int,FieldType)} if you'd like to change
- * the value. Note that you must also specify a congruent value when creating
- * {@link NumericRangeQuery} or {@link NumericRangeFilter}. For low cardinality
- * fields larger precision steps are good. If the cardinality is < 100, it is
- * fair to use {@link Integer#MAX_VALUE}, which produces one term per value.
- *
- *
- * For more information on the internals of numeric trie indexing, including the
- *
- * precisionStep configuration, see {@link NumericRangeQuery}.
- * The format of indexed values is described in {@link NumericUtils}.
- *
- *
- * If you only need to sort by numeric value, and never run range
- * querying/filtering, you can index using a precisionStep of
- * {@link Integer#MAX_VALUE}. This will minimize disk space consumed.
- *
- *
- *
- * More advanced users can instead use {@link NumericTokenStream} directly, when
- * indexing numbers. This class is a wrapper around this token stream type for
- * easier, more intuitive usage.
- *

- * ->>>>>>> * @since 2.9 */ public final class NumericField extends Field { @@ -227,14 +144,14 @@ TYPE_UNSTORED.setIndexed(true); TYPE_UNSTORED.setTokenized(true); TYPE_UNSTORED.setOmitNorms(true); - TYPE_UNSTORED.setOmitTermFreqAndPositions(true); + TYPE_UNSTORED.setIndexOptions(IndexOptions.DOCS_ONLY); TYPE_UNSTORED.freeze(); TYPE_STORED.setIndexed(true); TYPE_STORED.setStored(true); TYPE_STORED.setTokenized(true); TYPE_STORED.setOmitNorms(true); - TYPE_STORED.setOmitTermFreqAndPositions(true); + TYPE_STORED.setIndexOptions(IndexOptions.DOCS_ONLY); TYPE_STORED.freeze(); } @@ -317,10 +234,6 @@ public NumericField(String name, int precisionStep, FieldType type) { super(name, type); this.precisionStep = precisionStep; -<<<<<<< - setIndexOptions(IndexOptions.DOCS_ONLY); -======= ->>>>>>> } /** Returns a {@link NumericTokenStream} for indexing the numeric value. */ Index: lucene/src/java/org/apache/lucene/document/StringField.java =================================================================== --- lucene/src/java/org/apache/lucene/document/StringField.java (revision 1158404) +++ lucene/src/java/org/apache/lucene/document/StringField.java (working copy) @@ -1,5 +1,7 @@ package org.apache.lucene.document; +import org.apache.lucene.index.FieldInfo.IndexOptions; + /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with @@ -24,13 +26,13 @@ static { TYPE_UNSTORED.setIndexed(true); TYPE_UNSTORED.setOmitNorms(true); - TYPE_UNSTORED.setOmitTermFreqAndPositions(true); + TYPE_UNSTORED.setIndexOptions(IndexOptions.DOCS_ONLY); TYPE_UNSTORED.freeze(); TYPE_STORED.setIndexed(true); TYPE_STORED.setStored(true); TYPE_STORED.setOmitNorms(true); - TYPE_STORED.setOmitTermFreqAndPositions(true); + TYPE_STORED.setIndexOptions(IndexOptions.DOCS_ONLY); TYPE_STORED.freeze(); } Index: lucene/src/java/org/apache/lucene/index/CheckIndex.java =================================================================== --- lucene/src/java/org/apache/lucene/index/CheckIndex.java (revision 1158404) +++ lucene/src/java/org/apache/lucene/index/CheckIndex.java (working copy) @@ -25,12 +25,9 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; -<<<<<<< -======= import org.apache.lucene.document.Document; import org.apache.lucene.index.codecs.CodecProvider; import org.apache.lucene.index.codecs.DefaultSegmentInfosWriter; ->>>>>>> import java.io.File; import java.io.IOException; import java.io.PrintStream; @@ -41,7 +38,6 @@ import java.util.List; import java.util.Map; -import org.apache.lucene.document.AbstractField; // for javadocs import org.apache.lucene.document.Document; import org.apache.lucene.index.codecs.CodecProvider; import org.apache.lucene.index.codecs.DefaultSegmentInfosWriter; @@ -193,13 +189,8 @@ int numFields; /** True if at least one of the fields in this segment -<<<<<<< * has position data - * @see AbstractField#setIndexOptions(org.apache.lucene.index.FieldInfo.IndexOptions) */ -======= - * does not omitTermFreqAndPositions. 
- * @see FieldType#setOmitTermFreqAndPositions */ ->>>>>>> + * @see FieldType#setIndexOptions(org.apache.lucene.index.FieldInfo.IndexOptions) */ public boolean hasProx; /** Map that includes certain Index: lucene/src/java/org/apache/lucene/index/DocFieldProcessor.java =================================================================== --- lucene/src/java/org/apache/lucene/index/DocFieldProcessor.java (revision 1158404) +++ lucene/src/java/org/apache/lucene/index/DocFieldProcessor.java (working copy) @@ -24,19 +24,12 @@ import java.util.HashSet; import java.util.Map; -<<<<<<< import org.apache.lucene.document.Document; -import org.apache.lucene.document.Fieldable; -======= import org.apache.lucene.util.ArrayUtil; - - ->>>>>>> import org.apache.lucene.index.DocumentsWriterPerThread.DocState; import org.apache.lucene.index.codecs.Codec; import org.apache.lucene.index.codecs.PerDocConsumer; import org.apache.lucene.index.codecs.DocValuesConsumer; -import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.IOUtils; @@ -230,15 +223,9 @@ // needs to be more "pluggable" such that if I want // to have a new "thing" my Fields can do, I can // easily add it -<<<<<<< - FieldInfo fi = fieldInfos.addOrUpdate(fieldName, field.isIndexed(), field.isTermVectorStored(), - field.isStorePositionWithTermVector(), field.isStoreOffsetWithTermVector(), - field.getOmitNorms(), false, field.getIndexOptions(), field.docValuesType()); -======= FieldInfo fi = fieldInfos.addOrUpdate(fieldName, field.indexed(), field.storeTermVectors(), field.storeTermVectorPositions(), field.storeTermVectorOffsets(), - field.omitNorms(), false, field.omitTermFreqAndPositions()); ->>>>>>> + field.omitNorms(), false, field.getIndexOptions(), field.docValuesType()); fp = new DocFieldProcessorPerField(this, fi); fp.next = fieldHash[hashPos]; @@ -249,15 +236,9 @@ rehash(); } } else { -<<<<<<< - fieldInfos.addOrUpdate(fp.fieldInfo.name, field.isIndexed(), field.isTermVectorStored(), - field.isStorePositionWithTermVector(), field.isStoreOffsetWithTermVector(), - field.getOmitNorms(), false, field.getIndexOptions(), field.docValuesType()); -======= fieldInfos.addOrUpdate(fp.fieldInfo.name, field.indexed(), field.storeTermVectors(), field.storeTermVectorPositions(), field.storeTermVectorOffsets(), - field.omitNorms(), false, field.omitTermFreqAndPositions()); ->>>>>>> + field.omitNorms(), false, field.getIndexOptions(), field.docValuesType()); } if (thisFieldGen != fp.lastGen) { Index: lucene/src/java/org/apache/lucene/index/DocInverterPerField.java =================================================================== --- lucene/src/java/org/apache/lucene/index/DocInverterPerField.java (revision 1158404) +++ lucene/src/java/org/apache/lucene/index/DocInverterPerField.java (working copy) @@ -190,13 +190,8 @@ } } -<<<<<<< fieldState.offset += docState.analyzer == null ? 
0 : docState.analyzer.getOffsetGap(field); - fieldState.boost *= field.getBoost(); -======= - fieldState.offset += docState.analyzer.getOffsetGap(field); fieldState.boost *= field.boost(); ->>>>>>> } // LUCENE-2387: don't hang onto the field, so GC can Index: lucene/src/java/org/apache/lucene/index/FieldInvertState.java =================================================================== --- lucene/src/java/org/apache/lucene/index/FieldInvertState.java (revision 1158404) +++ lucene/src/java/org/apache/lucene/index/FieldInvertState.java (working copy) @@ -56,12 +56,8 @@ numOverlap = 0; offset = 0; maxTermFrequency = 0; -<<<<<<< uniqueTermCount = 0; - boost = docBoost; -======= boost = 1.0f; ->>>>>>> attributeSource = null; } Index: lucene/src/java/org/apache/lucene/index/FieldsReader.java =================================================================== --- lucene/src/java/org/apache/lucene/index/FieldsReader.java (revision 1158404) +++ lucene/src/java/org/apache/lucene/index/FieldsReader.java (working copy) @@ -19,12 +19,12 @@ import java.io.IOException; +import org.apache.lucene.document.NumericField; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.BufferedIndexInput; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; -<<<<<<< import org.apache.lucene.util.CloseableThreadLocal; import org.apache.lucene.util.IOUtils; @@ -32,8 +32,6 @@ import java.io.IOException; import java.io.Reader; import java.util.ArrayList; -======= ->>>>>>> /** * Class responsible for access to stored document fields. @@ -46,7 +44,8 @@ private final static int FORMAT_SIZE = 4; private final FieldInfos fieldInfos; - + private CloseableThreadLocal fieldsStreamTL = new CloseableThreadLocal(); + // The main fieldStream, used only for cloning. private final IndexInput cloneableFieldsStream; @@ -269,7 +268,6 @@ return fieldsStream; } -<<<<<<< /** * Skip the field. We still have to read some of the information about the field, but can skip past the actual content. @@ -300,6 +298,7 @@ fieldsStream.seek(fieldsStream.getFilePointer() + toRead); } + /* private NumericField loadNumericField(FieldInfo fi, int numeric) throws IOException { assert numeric != 0; switch(numeric) { @@ -315,7 +314,9 @@ throw new FieldReaderException("Invalid numeric type: " + Integer.toHexString(numeric)); } } - + */ + + /* private void addFieldLazy(Document doc, FieldInfo fi, boolean binary, boolean tokenize, boolean cacheResult, int numeric) throws IOException { final AbstractField f; if (binary) { @@ -397,11 +398,12 @@ doc.add(new Field(fi.name, sizebytes)); return size; } + */ /** * A Lazy implementation of Fieldable that defers loading of fields until asked for, instead of when the Document is * loaded. - */ + * private class LazyField extends AbstractField implements Fieldable { private int toRead; private long pointer; @@ -438,9 +440,9 @@ return localFieldsStream; } - /** The value of the field as a Reader, or null. If null, the String value, + ** The value of the field as a Reader, or null. If null, the String value, * binary value, or TokenStream value is used. Exactly one of stringValue(), - * readerValue(), getBinaryValue(), and tokenStreamValue() must be set. */ + * readerValue(), getBinaryValue(), and tokenStreamValue() must be set. * public Reader readerValue() { ensureOpen(); return null; @@ -448,7 +450,7 @@ /** The value of the field as a TokenStream, or null. 
If null, the Reader value, * String value, or binary value is used. Exactly one of stringValue(), - * readerValue(), getBinaryValue(), and tokenStreamValue() must be set. */ + * readerValue(), getBinaryValue(), and tokenStreamValue() must be set. * public TokenStream tokenStreamValue() { ensureOpen(); return null; @@ -456,7 +458,7 @@ /** The value of the field as a String, or null. If null, the Reader value, * binary value, or TokenStream value is used. Exactly one of stringValue(), - * readerValue(), getBinaryValue(), and tokenStreamValue() must be set. */ + * readerValue(), getBinaryValue(), and tokenStreamValue() must be set. * public String stringValue() { ensureOpen(); if (isBinary) @@ -520,6 +522,5 @@ return null; } } -======= ->>>>>>> + */ } Index: lucene/src/java/org/apache/lucene/index/FilterIndexReader.java =================================================================== --- lucene/src/java/org/apache/lucene/index/FilterIndexReader.java (revision 1158404) +++ lucene/src/java/org/apache/lucene/index/FilterIndexReader.java (working copy) @@ -17,13 +17,10 @@ * limitations under the License. */ -<<<<<<< import org.apache.lucene.document.Document; import org.apache.lucene.document.FieldSelector; import org.apache.lucene.index.codecs.PerDocValues; -======= import org.apache.lucene.index.IndexReader.ReaderContext; ->>>>>>> import org.apache.lucene.store.Directory; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; Index: lucene/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java =================================================================== --- lucene/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java (revision 1158404) +++ lucene/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java (working copy) @@ -22,11 +22,8 @@ import java.util.Map; import org.apache.lucene.analysis.tokenattributes.PayloadAttribute; -<<<<<<< import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.FieldInfo.IndexOptions; -======= ->>>>>>> import org.apache.lucene.index.codecs.FieldsConsumer; import org.apache.lucene.index.codecs.PostingsConsumer; import org.apache.lucene.index.codecs.TermStats; Index: lucene/src/java/org/apache/lucene/index/IndexReader.java =================================================================== --- lucene/src/java/org/apache/lucene/index/IndexReader.java (revision 1158404) +++ lucene/src/java/org/apache/lucene/index/IndexReader.java (working copy) @@ -27,15 +27,13 @@ import java.util.concurrent.atomic.AtomicInteger; import org.apache.lucene.document.Document; +import org.apache.lucene.document.FieldSelector; import org.apache.lucene.index.codecs.Codec; import org.apache.lucene.index.codecs.CodecProvider; -<<<<<<< import org.apache.lucene.index.codecs.PerDocValues; import org.apache.lucene.index.values.IndexDocValues; -======= import org.apache.lucene.search.FieldCache; // javadocs import org.apache.lucene.search.Similarity; ->>>>>>> import org.apache.lucene.store.*; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.Bits; @@ -979,7 +977,6 @@ return visitor.getDocument(); } -<<<<<<< /** * Get the {@link org.apache.lucene.document.Document} at the n * th position. 
The {@link FieldSelector} may be used to determine @@ -1013,8 +1010,6 @@ // TODO (1.5): When we convert to JDK 1.5 make this Set public abstract Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException; -======= ->>>>>>> /** Returns true if any documents have been deleted */ public abstract boolean hasDeletions(); Index: lucene/src/java/org/apache/lucene/index/IndexableField.java =================================================================== --- lucene/src/java/org/apache/lucene/index/IndexableField.java (revision 1158404) +++ lucene/src/java/org/apache/lucene/index/IndexableField.java (working copy) @@ -22,6 +22,9 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.document.NumericField; import org.apache.lucene.document.NumericField.DataType; +import org.apache.lucene.index.FieldInfo.IndexOptions; +import org.apache.lucene.index.values.PerDocFieldValues; +import org.apache.lucene.index.values.ValueType; import org.apache.lucene.util.BytesRef; // nocommit jdocs @@ -78,9 +81,15 @@ // yet decoupled) public boolean tokenized(); public boolean omitNorms(); - public boolean omitTermFreqAndPositions(); + public IndexOptions getIndexOptions(); public boolean storeTermVectors(); public boolean storeTermVectorOffsets(); public boolean storeTermVectorPositions(); + + // doc values + public PerDocFieldValues getDocValues(); + public void setDocValues(PerDocFieldValues docValues); + public boolean hasDocValues(); + public ValueType docValuesType(); } \ No newline at end of file Index: lucene/src/java/org/apache/lucene/index/MultiReader.java =================================================================== --- lucene/src/java/org/apache/lucene/index/MultiReader.java (revision 1158404) +++ lucene/src/java/org/apache/lucene/index/MultiReader.java (working copy) @@ -22,12 +22,9 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; -<<<<<<< import org.apache.lucene.document.Document; import org.apache.lucene.document.FieldSelector; import org.apache.lucene.index.codecs.PerDocValues; -======= ->>>>>>> import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.ReaderUtil; Index: lucene/src/java/org/apache/lucene/index/ParallelReader.java =================================================================== --- lucene/src/java/org/apache/lucene/index/ParallelReader.java (revision 1158404) +++ lucene/src/java/org/apache/lucene/index/ParallelReader.java (working copy) @@ -17,15 +17,12 @@ * limitations under the License. 
*/ -<<<<<<< import org.apache.lucene.document.Document; import org.apache.lucene.document.FieldSelector; import org.apache.lucene.document.FieldSelectorResult; import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.codecs.PerDocValues; import org.apache.lucene.index.values.IndexDocValues; -======= ->>>>>>> import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.MapBackedSet; Index: lucene/src/java/org/apache/lucene/index/SegmentReader.java =================================================================== --- lucene/src/java/org/apache/lucene/index/SegmentReader.java (revision 1158404) +++ lucene/src/java/org/apache/lucene/index/SegmentReader.java (working copy) @@ -27,14 +27,11 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; -<<<<<<< import org.apache.lucene.document.Document; import org.apache.lucene.document.FieldSelector; -======= import org.apache.lucene.store.BufferedIndexInput; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IndexInput; ->>>>>>> import org.apache.lucene.index.FieldInfo.IndexOptions; import org.apache.lucene.index.codecs.PerDocValues; import org.apache.lucene.store.Directory; Index: lucene/src/java/org/apache/lucene/index/StoredFieldsWriter.java =================================================================== --- lucene/src/java/org/apache/lucene/index/StoredFieldsWriter.java (revision 1158404) +++ lucene/src/java/org/apache/lucene/index/StoredFieldsWriter.java (working copy) @@ -19,11 +19,8 @@ import java.io.IOException; -<<<<<<< import org.apache.lucene.document.Fieldable; import org.apache.lucene.store.IOContext; -======= ->>>>>>> import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.RamUsageEstimator; Index: lucene/src/java/org/apache/lucene/search/Similarity.java =================================================================== --- lucene/src/java/org/apache/lucene/search/Similarity.java (revision 1158404) +++ lucene/src/java/org/apache/lucene/search/Similarity.java (working copy) @@ -221,44 +221,4 @@ */ public abstract void normalize(float queryNorm, float topLevelBoost); } -} -<<<<<<< -======= - * norm(t,d) encapsulates a few (indexing time) boost and length factors: - * - *
    - *
- *   • Field boost - set by calling
- *     {@link org.apache.lucene.document.Field#setBoost(float) field.setBoost()}
- *     before adding the field to a document.
- *   •
- *   • lengthNorm - computed
->>>>>>>
-<<<<<<<
-=======
- *
- *
- *
->>>>>>>
-<<<<<<<
-=======
- *
- *
- *
- *
- *
->>>>>>>
-<<<<<<<
-=======
- * and larger values when state.getLength() is small.
- *
- *

    Note that the return values are computed under - * {@link org.apache.lucene.index.IndexWriter#addDocument(Iterable)} - * and then stored using - * {@link #encodeNormValue(float)}. - * Thus they have limited precision, and documents ->>>>>>> +} \ No newline at end of file Index: lucene/src/test-framework/org/apache/lucene/index/DocHelper.java =================================================================== --- lucene/src/test-framework/org/apache/lucene/index/DocHelper.java (revision 1158404) +++ lucene/src/test-framework/org/apache/lucene/index/DocHelper.java (working copy) @@ -29,13 +29,9 @@ import org.apache.lucene.document.BinaryField; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; -<<<<<<< -import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.FieldInfo.IndexOptions; -======= import org.apache.lucene.document.FieldType; import org.apache.lucene.document.TextField; ->>>>>>> import org.apache.lucene.search.SimilarityProvider; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; @@ -109,14 +105,10 @@ public static final String NO_TF_KEY = "omitTermFreqAndPositions"; public static Field noTFField; static { -<<<<<<< - noTFField.setIndexOptions(IndexOptions.DOCS_ONLY); -======= customType6 = new FieldType(TextField.TYPE_UNSTORED); - customType6.setOmitTermFreqAndPositions(true); + customType6.setIndexOptions(IndexOptions.DOCS_ONLY); customType6.setStored(true); noTFField = new Field(NO_TF_KEY, customType6, NO_TF_TEXT); ->>>>>>> } public static final FieldType customType7; @@ -228,15 +220,10 @@ if (f.indexed() && !f.storeTermVectors()) add(notermvector,f); if (f.stored()) add(stored,f); else add(unstored,f); -<<<<<<< - if (f.getOmitNorms()) add(noNorms,f); if (f.getIndexOptions() == IndexOptions.DOCS_ONLY) add(noTf,f); - if (f.isLazy()) add(lazy, f); -======= if (f.omitNorms()) add(noNorms,f); - if (f.omitTermFreqAndPositions()) add(noTf,f); + if (f.getIndexOptions() == IndexOptions.DOCS_ONLY) add(noTf,f); //if (f.isLazy()) add(lazy, f); ->>>>>>> } } Index: lucene/src/test-framework/org/apache/lucene/index/RandomIndexWriter.java =================================================================== --- lucene/src/test-framework/org/apache/lucene/index/RandomIndexWriter.java (revision 1158404) +++ lucene/src/test-framework/org/apache/lucene/index/RandomIndexWriter.java (working copy) @@ -24,11 +24,8 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; -<<<<<<< import org.apache.lucene.document.IndexDocValuesField; import org.apache.lucene.document.Document; -======= ->>>>>>> import org.apache.lucene.index.IndexWriter; // javadoc import org.apache.lucene.index.codecs.CodecProvider; import org.apache.lucene.index.values.ValueType; @@ -125,15 +122,10 @@ * Adds a Document. * @see IndexWriter#addDocument(Iterable) */ -<<<<<<< - public void addDocument(final Document doc) throws IOException { + public void addDocument(final Iterable doc) throws IOException { if (doDocValues) { - randomPerDocFieldValues(r, doc); + randomPerDocFieldValues(r, (Document) doc); } - -======= - public void addDocument(final Iterable doc) throws IOException { ->>>>>>> if (r.nextInt(5) == 3) { // TODO: maybe, we should simply buffer up added docs // (but we need to clone them), and only when @@ -141,15 +133,9 @@ // addDocuments? Would be better testing. 
w.addDocuments(new Iterable>() { -<<<<<<< - @Override - public Iterator iterator() { - return new Iterator() { -======= // @Override -- not until Java 1.6 public Iterator> iterator() { return new Iterator>() { ->>>>>>> boolean done; @Override @@ -162,13 +148,8 @@ throw new UnsupportedOperationException(); } -<<<<<<< - @Override - public Document next() { -======= // @Override -- not until Java 1.6 public Iterable next() { ->>>>>>> if (done) { throw new IllegalStateException(); } @@ -190,7 +171,7 @@ ValueType[] values = ValueType.values(); ValueType type = values[random.nextInt(values.length)]; String name = "random_" + type.name() + "" + docValuesFieldPrefix; - if ("PreFlex".equals(codecProvider.getFieldCodec(name)) || doc.getFieldable(name) != null) + if ("PreFlex".equals(codecProvider.getFieldCodec(name)) || doc.getField(name) != null) return; IndexDocValuesField docValuesField = new IndexDocValuesField(name); switch (type) { @@ -270,27 +251,16 @@ * Updates a document. * @see IndexWriter#updateDocument(Term, Iterable) */ -<<<<<<< - public void updateDocument(final Term t, final Document doc) throws IOException { + public void updateDocument(Term t, final Iterable doc) throws IOException { if (doDocValues) { - randomPerDocFieldValues(r, doc); + randomPerDocFieldValues(r, (Document) doc); } - -======= - public void updateDocument(Term t, final Iterable doc) throws IOException { ->>>>>>> if (r.nextInt(5) == 3) { w.updateDocuments(t, new Iterable>() { -<<<<<<< - @Override - public Iterator iterator() { - return new Iterator() { -======= // @Override -- not until Java 1.6 public Iterator> iterator() { return new Iterator>() { ->>>>>>> boolean done; @Override @@ -303,13 +273,8 @@ throw new UnsupportedOperationException(); } -<<<<<<< - @Override - public Document next() { -======= // @Override -- not until Java 1.6 public Iterable next() { ->>>>>>> if (done) { throw new IllegalStateException(); } Index: lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java =================================================================== --- lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java (revision 1158404) +++ lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java (working copy) @@ -36,6 +36,7 @@ import java.util.regex.Pattern; import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.index.*; import org.apache.lucene.index.codecs.Codec; @@ -1105,63 +1106,16 @@ return dir; } - public static org.apache.lucene.document.Field newField(String name, String value, FieldType type) { + public static Field newField(String name, String value, FieldType type) { return newField(random, name, value, type); } -<<<<<<< - /** Returns a new field instance. - * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */ - public static Field newField(String name, String value, Store store, Index index) { - return newField(random, name, value, store, index); - } - - /** - * Returns a new Field instance. Use this when the test does not - * care about some specific field settings (most tests) - *

      - *
- *   • If the store value is set to Store.NO, sometimes the field will be randomly stored.
- *   • More term vector data than you ask for might be indexed, for example if you choose YES
- *     it might index term vectors with offsets too.
    - */ - public static Field newField(String name, String value, Store store, Index index, TermVector tv) { - return newField(random, name, value, store, index, tv); - } - - /** Returns a new field instance, using the specified random. - * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */ - public static Field newField(Random random, String name, String value, Index index) { - return newField(random, name, value, Store.NO, index); - } - - /** Returns a new field instance, using the specified random. - * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */ - public static Field newField(Random random, String name, String value, Store store, Index index) { - return newField(random, name, value, store, index, TermVector.NO); - } - - /** Returns a new field instance, using the specified random. - * See {@link #newField(String, String, Field.Store, Field.Index, Field.TermVector)} for more information */ - public static Field newField(Random random, String name, String value, Store store, Index index, TermVector tv) { - -======= - public static org.apache.lucene.document.Field newField(Random random, String name, String value, FieldType type) { ->>>>>>> + public static Field newField(Random random, String name, String value, FieldType type) { if (usually(random)) { // most of the time, don't modify the params -<<<<<<< - return new Field(name, value, store, index, tv); + return new Field(name, type, value); } - if (random.nextBoolean()) { - // tickle any code still relying on field names being interned: - name = new String(name); -======= - return new org.apache.lucene.document.Field(name, type, value); ->>>>>>> - } - FieldType newType = new FieldType(type); if (!newType.stored() && random.nextBoolean()) { newType.setStored(true); // randomly store it @@ -1189,7 +1143,7 @@ } */ - return new org.apache.lucene.document.Field(name, newType, value); + return new Field(name, newType, value); } /** return a random Locale from the available locales on the system */ Index: lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java =================================================================== --- lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java (revision 1158404) +++ lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java (working copy) @@ -35,11 +35,8 @@ import java.util.zip.ZipFile; import org.apache.lucene.document.Document; -<<<<<<< import org.apache.lucene.document.Field; import org.apache.lucene.document.Fieldable; -======= ->>>>>>> import org.apache.lucene.index.CheckIndex; import org.apache.lucene.index.ConcurrentMergeScheduler; import org.apache.lucene.index.FieldInfos; @@ -428,16 +425,9 @@ /** Adds field info for a Document. 
*/ public static void add(Document doc, FieldInfos fieldInfos) { -<<<<<<< - List fields = doc.getFields(); - for (Fieldable field : fields) { - fieldInfos.addOrUpdate(field.name(), field.isIndexed(), field.isTermVectorStored(), field.isStorePositionWithTermVector(), - field.isStoreOffsetWithTermVector(), field.getOmitNorms(), false, field.getIndexOptions(), field.docValuesType()); -======= for (IndexableField field : doc) { fieldInfos.addOrUpdate(field.name(), field.indexed(), field.storeTermVectors(), field.storeTermVectorPositions(), - field.storeTermVectorOffsets(), field.omitNorms(), false, field.omitTermFreqAndPositions()); ->>>>>>> + field.storeTermVectorOffsets(), field.omitNorms(), false, field.getIndexOptions(), field.docValuesType()); } } @@ -514,15 +504,13 @@ // TODO: is there a pre-existing way to do this!!! public static Document cloneDocument(Document doc1) { final Document doc2 = new Document(); - for(Fieldable f : doc1.getFields()) { + for(IndexableField f : doc1) { Field field1 = (Field) f; Field field2 = new Field(field1.name(), - field1.stringValue(), - field1.isStored() ? Field.Store.YES : Field.Store.NO, - field1.isIndexed() ? (field1.isTokenized() ? Field.Index.ANALYZED : Field.Index.NOT_ANALYZED) : Field.Index.NO); - field2.setOmitNorms(field1.getOmitNorms()); - field2.setIndexOptions(field1.getIndexOptions()); + field1.getFieldType(), + field1.stringValue() + ); doc2.add(field2); } Index: lucene/src/test/org/apache/lucene/index/Test2BPostings.java =================================================================== --- lucene/src/test/org/apache/lucene/index/Test2BPostings.java (revision 1158404) +++ lucene/src/test/org/apache/lucene/index/Test2BPostings.java (working copy) @@ -24,6 +24,8 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.TextField; import org.apache.lucene.index.FieldInfo.IndexOptions; import org.apache.lucene.index.codecs.CodecProvider; import org.apache.lucene.store.MockDirectoryWrapper; @@ -62,9 +64,10 @@ } Document doc = new Document(); - Field field = new Field("field", new MyTokenStream()); - field.setIndexOptions(IndexOptions.DOCS_ONLY); - field.setOmitNorms(true); + FieldType ft = new FieldType(TextField.TYPE_UNSTORED); + ft.setOmitNorms(true); + ft.setIndexOptions(IndexOptions.DOCS_ONLY); + Field field = new Field("field", ft, new MyTokenStream()); doc.add(field); final int numDocs = (Integer.MAX_VALUE / 26) + 1; Index: lucene/src/test/org/apache/lucene/index/Test2BTerms.java =================================================================== --- lucene/src/test/org/apache/lucene/index/Test2BTerms.java (revision 1158404) +++ lucene/src/test/org/apache/lucene/index/Test2BTerms.java (working copy) @@ -177,18 +177,12 @@ Document doc = new Document(); final MyTokenStream ts = new MyTokenStream(random, TERMS_PER_DOC); -<<<<<<< - Field field = new Field("field", ts); - field.setIndexOptions(IndexOptions.DOCS_ONLY); - field.setOmitNorms(true); -======= FieldType customType = new FieldType(TextField.TYPE_UNSTORED); customType.setStored(true); - customType.setOmitTermFreqAndPositions(true); + customType.setIndexOptions(IndexOptions.DOCS_ONLY); customType.setOmitNorms(true); Field field = new Field("field", customType, ts); ->>>>>>> doc.add(field); //w.setInfoStream(System.out); final int numDocs = (int) (TERM_COUNT/TERMS_PER_DOC); Index: 
lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (revision 1158404) +++ lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (working copy) @@ -31,11 +31,8 @@ import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.document.NumericField; -<<<<<<< import org.apache.lucene.index.FieldInfo.IndexOptions; -======= import org.apache.lucene.document.TextField; ->>>>>>> import org.apache.lucene.index.IndexWriterConfig.OpenMode; import org.apache.lucene.search.DefaultSimilarity; import org.apache.lucene.search.DocIdSetIterator; @@ -620,25 +617,14 @@ private void addNoProxDoc(IndexWriter writer) throws IOException { Document doc = new Document(); -<<<<<<< - Field f = new Field("content3", "aaa", Field.Store.YES, Field.Index.ANALYZED); - f.setIndexOptions(IndexOptions.DOCS_ONLY); -======= - FieldType customType = new FieldType(TextField.TYPE_UNSTORED); - customType.setStored(true); - customType.setOmitTermFreqAndPositions(true); + FieldType customType = new FieldType(TextField.TYPE_STORED); + customType.setIndexOptions(IndexOptions.DOCS_ONLY); Field f = new Field("content3", customType, "aaa"); ->>>>>>> doc.add(f); -<<<<<<< - f = new Field("content4", "aaa", Field.Store.YES, Field.Index.NO); - f.setIndexOptions(IndexOptions.DOCS_ONLY); -======= FieldType customType2 = new FieldType(); customType2.setStored(true); - customType2.setOmitTermFreqAndPositions(true); + customType2.setIndexOptions(IndexOptions.DOCS_ONLY); f = new Field("content4", customType2, "aaa"); ->>>>>>> doc.add(f); writer.addDocument(doc); } Index: lucene/src/test/org/apache/lucene/index/TestCodecs.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestCodecs.java (revision 1158404) +++ lucene/src/test/org/apache/lucene/index/TestCodecs.java (working copy) @@ -24,13 +24,9 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; -<<<<<<< -import org.apache.lucene.document.Field.Store; import org.apache.lucene.index.FieldInfo.IndexOptions; -======= import org.apache.lucene.document.FieldType; import org.apache.lucene.document.TextField; ->>>>>>> import org.apache.lucene.index.codecs.CodecProvider; import org.apache.lucene.index.codecs.FieldsConsumer; import org.apache.lucene.index.codecs.FieldsProducer; Index: lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java =================================================================== --- lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java (revision 1158404) +++ lucene/src/test/org/apache/lucene/index/TestDocumentWriter.java (working copy) @@ -30,16 +30,9 @@ import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; -<<<<<<< -import org.apache.lucene.document.Field.Index; -import org.apache.lucene.document.Field.Store; -import org.apache.lucene.document.Field.TermVector; -import org.apache.lucene.document.Fieldable; import org.apache.lucene.index.FieldInfo.IndexOptions; -======= import org.apache.lucene.document.FieldType; import org.apache.lucene.document.TextField; ->>>>>>> import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import 
 org.apache.lucene.store.IOContext.Context;
@@ -325,15 +318,9 @@
     doc.add(newField("f1", "v1", customType));
     doc.add(newField("f1", "v2", customType2));
     // f2 has no TF
-<<<<<<<
-    Field f = newField("f2", "v1", Store.NO, Index.ANALYZED);
-    f.setIndexOptions(IndexOptions.DOCS_ONLY);
-=======
     FieldType customType3 = new FieldType(TextField.TYPE_UNSTORED);
-    customType3.setStored(true);
-    customType3.setOmitTermFreqAndPositions(true);
+    customType3.setIndexOptions(IndexOptions.DOCS_ONLY);
     Field f = newField("f2", "v1", customType3);
->>>>>>>
     doc.add(f);
     doc.add(newField("f2", "v2", customType2));
Index: lucene/src/test/org/apache/lucene/index/TestFieldsReader.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestFieldsReader.java (revision 1158404)
+++ lucene/src/test/org/apache/lucene/index/TestFieldsReader.java (working copy)
@@ -90,39 +90,33 @@
     assertTrue(field != null);
     assertTrue(field.storeTermVectors() == true);
-<<<<<<<
-    assertTrue(field.isStoreOffsetWithTermVector() == true);
-    assertTrue(field.isStorePositionWithTermVector() == true);
-    assertTrue(field.getOmitNorms() == false);
-    assertTrue(field.getIndexOptions() == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
-=======
     assertTrue(field.storeTermVectorOffsets() == true);
     assertTrue(field.storeTermVectorPositions() == true);
->>>>>>>
+    assertTrue(field.omitNorms() == false);
+    assertTrue(field.getIndexOptions() == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
     field = (Field) doc.getField(DocHelper.TEXT_FIELD_3_KEY);
     assertTrue(field != null);
-<<<<<<<
-    assertTrue(field.isTermVectorStored() == false);
-    assertTrue(field.isStoreOffsetWithTermVector() == false);
-    assertTrue(field.isStorePositionWithTermVector() == false);
-    assertTrue(field.getOmitNorms() == true);
-    assertTrue(field.getIndexOptions() == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
-=======
     assertTrue(field.storeTermVectors() == false);
     assertTrue(field.storeTermVectorOffsets() == false);
     assertTrue(field.storeTermVectorPositions() == false);
->>>>>>>
+    assertTrue(field.omitNorms() == true);
+    assertTrue(field.getIndexOptions() == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
     field = (Field) doc.getField(DocHelper.NO_TF_KEY);
     assertTrue(field != null);
-<<<<<<<
-    assertTrue(field.isTermVectorStored() == false);
-    assertTrue(field.isStoreOffsetWithTermVector() == false);
-    assertTrue(field.isStorePositionWithTermVector() == false);
-    assertTrue(field.getOmitNorms() == false);
+    assertTrue(field.storeTermVectors() == false);
+    assertTrue(field.storeTermVectorOffsets() == false);
+    assertTrue(field.storeTermVectorPositions() == false);
+    assertTrue(field.omitNorms() == false);
     assertTrue(field.getIndexOptions() == IndexOptions.DOCS_ONLY);
     reader.close();
+
+    DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(DocHelper.TEXT_FIELD_3_KEY);
+    reader.document(0, visitor);
+    final List fields = visitor.getDocument().getFields();
+    assertEquals(1, fields.size());
+    assertEquals(DocHelper.TEXT_FIELD_3_KEY, fields.get(0).name());
   }
@@ -174,18 +168,7 @@
     assertTrue("calling binaryValue() twice should give same reference", field.getBinaryValue() == field.getBinaryValue());
     for (int i = 0; i < bytes.length; i++) {
       assertTrue("byte[" + i + "] is mismatched", bytes[i] == DocHelper.LAZY_FIELD_BINARY_BYTES[i]);
-=======
-      assertTrue(field.storeTermVectors() == false);
-      assertTrue(field.storeTermVectorOffsets() == false);
-      assertTrue(field.storeTermVectorPositions() == false);
-      DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(DocHelper.TEXT_FIELD_3_KEY);
-      reader.document(0, visitor);
-      final List fields = visitor.getDocument().getFields();
-      assertEquals(1, fields.size());
-      assertEquals(DocHelper.TEXT_FIELD_3_KEY, fields.get(0).name());
->>>>>>>
-      reader.close();
     }
Index: lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java (revision 1158404)
+++ lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java (working copy)
@@ -31,13 +31,10 @@
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Document;
-<<<<<<<
 import org.apache.lucene.document.Field;
-=======
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.document.TextField;
->>>>>>>
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TermQuery;
@@ -971,7 +968,7 @@
       int count = 0;
       while(true) {
         Document doc = new Document();
-        doc.add(new Field("id", count+"", Field.Store.NO, Field.Index.NOT_ANALYZED));
+        doc.add(new StringField("id", count+""));
         final Term delTerm;
         if (count == 1010) {
           // This is the only delete that applies
@@ -1017,7 +1014,7 @@
       int count = 0;
       while(true) {
         Document doc = new Document();
-        doc.add(new Field("id", count+"", Field.Store.NO, Field.Index.NOT_ANALYZED));
+        doc.add(new StringField("id", count+""));
         final Term delTerm;
         if (count == 1010) {
           // This is the only delete that applies
@@ -1071,8 +1068,8 @@
         w.deleteDocuments(new Term("id", "0"));
       }
       Document doc = new Document();
-      doc.add(newField("id", ""+id, Field.Index.NOT_ANALYZED));
-      doc.add(newField("body", sb.toString(), Field.Index.ANALYZED));
+      doc.add(newField("id", ""+id, StringField.TYPE_UNSTORED));
+      doc.add(newField("body", sb.toString(), TextField.TYPE_UNSTORED));
       w.updateDocument(new Term("id", ""+id), doc);
       docsInSegment.incrementAndGet();
       if (dir.fileExists("_0_1.del")) {
Index: lucene/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java (revision 1158404)
+++ lucene/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java (working copy)
@@ -476,8 +476,7 @@
     public void run() {
       try {
         Document doc = new Document();
-        Field field = newField("field", "testData", Field.Store.YES,
-                               Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
+        Field field = newField("field", "testData", TextField.TYPE_STORED);
         doc.add(field);
         IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
             TEST_VERSION_CURRENT, new MockAnalyzer(random)));
Index: lucene/src/test/org/apache/lucene/index/TestLongPostings.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestLongPostings.java (revision 1158404)
+++ lucene/src/test/org/apache/lucene/index/TestLongPostings.java (working copy)
@@ -26,11 +26,9 @@
 import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-<<<<<<<
+import org.apache.lucene.document.FieldType;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
-=======
 import org.apache.lucene.document.TextField;
->>>>>>>
 import org.apache.lucene.index.codecs.CodecProvider;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
@@ -306,11 +304,12 @@
     iwc.setMaxBufferedDocs(-1);
     final RandomIndexWriter riw = new RandomIndexWriter(random, dir, iwc);
+    FieldType ft = new FieldType(TextField.TYPE_UNSTORED);
+    ft.setIndexOptions(options);
     for(int idx=0;idx>>>>>>
 import org.apache.lucene.index.codecs.CodecProvider;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.PhraseQuery;
@@ -75,27 +72,6 @@
   }
   @Test
-<<<<<<<
-=======
-  // TODO: is there a pre-existing way to do this!!!
-  private Document cloneDoc(Document doc1) {
-    final Document doc2 = new Document();
-    for(IndexableField field1 : doc1.getFields()) {
-
-      FieldType ft = new FieldType();
-      ft.setStored(field1.stored());
-      ft.setIndexed(field1.indexed());
-      ft.setTokenized(field1.tokenized());
-      ft.setOmitNorms(field1.omitNorms());
-      ft.setOmitTermFreqAndPositions(field1.omitTermFreqAndPositions());
-
-      Field field2 = new Field(field1.name(),
-                               ft,
-                               field1.stringValue());
-      doc2.add(field2);
-    }
-
->>>>>>>
   public void testNRTThreads() throws Exception {
     final long t0 = System.currentTimeMillis();
Index: lucene/src/test/org/apache/lucene/index/TestOmitPositions.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestOmitPositions.java (revision 1158404)
+++ lucene/src/test/org/apache/lucene/index/TestOmitPositions.java (working copy)
@@ -21,6 +21,8 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
@@ -37,8 +39,9 @@
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(random, dir);
     Document doc = new Document();
-    Field f = newField("foo", "this is a test test", Field.Index.ANALYZED);
-    f.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
+    FieldType ft = new FieldType(TextField.TYPE_UNSTORED);
+    ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
+    Field f = newField("foo", "this is a test test", ft);
     doc.add(f);
     for (int i = 0; i < 100; i++) {
       w.addDocument(doc);
@@ -67,42 +70,42 @@
     Document d = new Document();
     // f1,f2,f3: docs only
-    Field f1 = newField("f1", "This field has docs only", Field.Store.NO, Field.Index.ANALYZED);
-    f1.setIndexOptions(IndexOptions.DOCS_ONLY);
+    FieldType ft = new FieldType(TextField.TYPE_UNSTORED);
+    ft.setIndexOptions(IndexOptions.DOCS_ONLY);
+
+    Field f1 = newField("f1", "This field has docs only", ft);
     d.add(f1);
-    Field f2 = newField("f2", "This field has docs only", Field.Store.NO, Field.Index.ANALYZED);
-    f2.setIndexOptions(IndexOptions.DOCS_ONLY);
+    Field f2 = newField("f2", "This field has docs only", ft);
     d.add(f2);
-    Field f3 = newField("f3", "This field has docs only", Field.Store.NO, Field.Index.ANALYZED);
-    f3.setIndexOptions(IndexOptions.DOCS_ONLY);
+    Field f3 = newField("f3", "This field has docs only", ft);
     d.add(f3);
+
+    FieldType ft2 = new FieldType(TextField.TYPE_UNSTORED);
+    ft2.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
     // f4,f5,f6 docs and freqs
-    Field f4 = newField("f4", "This field has docs and freqs", Field.Store.NO, Field.Index.ANALYZED);
-    f4.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
+    Field f4 = newField("f4", "This field has docs and freqs", ft2);
     d.add(f4);
-    Field f5 = newField("f5", "This field has docs and freqs", Field.Store.NO, Field.Index.ANALYZED);
-    f5.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
+    Field f5 = newField("f5", "This field has docs and freqs", ft2);
     d.add(f5);
-    Field f6 = newField("f6", "This field has docs and freqs", Field.Store.NO, Field.Index.ANALYZED);
-    f6.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
+    Field f6 = newField("f6", "This field has docs and freqs", ft2);
     d.add(f6);
+    FieldType ft3 = new FieldType(TextField.TYPE_UNSTORED);
+    ft3.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+
     // f7,f8,f9 docs/freqs/positions
-    Field f7 = newField("f7", "This field has docs and freqs and positions", Field.Store.NO, Field.Index.ANALYZED);
-    f7.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+    Field f7 = newField("f7", "This field has docs and freqs and positions", ft3);
     d.add(f7);
-    Field f8 = newField("f8", "This field has docs and freqs and positions", Field.Store.NO, Field.Index.ANALYZED);
-    f8.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+    Field f8 = newField("f8", "This field has docs and freqs and positions", ft3);
     d.add(f8);
-    Field f9 = newField("f9", "This field has docs and freqs and positions", Field.Store.NO, Field.Index.ANALYZED);
-    f9.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+    Field f9 = newField("f9", "This field has docs and freqs and positions", ft3);
     d.add(f9);
     writer.addDocument(d);
@@ -113,42 +116,33 @@
     d = new Document();
     // f1,f4,f7: docs only
-    f1 = newField("f1", "This field has docs only", Field.Store.NO, Field.Index.ANALYZED);
-    f1.setIndexOptions(IndexOptions.DOCS_ONLY);
+    f1 = newField("f1", "This field has docs only", ft);
     d.add(f1);
-    f4 = newField("f4", "This field has docs only", Field.Store.NO, Field.Index.ANALYZED);
-    f4.setIndexOptions(IndexOptions.DOCS_ONLY);
+    f4 = newField("f4", "This field has docs only", ft);
     d.add(f4);
-    f7 = newField("f7", "This field has docs only", Field.Store.NO, Field.Index.ANALYZED);
-    f7.setIndexOptions(IndexOptions.DOCS_ONLY);
+    f7 = newField("f7", "This field has docs only", ft);
     d.add(f7);
     // f2, f5, f8: docs and freqs
-    f2 = newField("f2", "This field has docs and freqs", Field.Store.NO, Field.Index.ANALYZED);
-    f2.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
+    f2 = newField("f2", "This field has docs and freqs", ft2);
     d.add(f2);
-    f5 = newField("f5", "This field has docs and freqs", Field.Store.NO, Field.Index.ANALYZED);
-    f5.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
+    f5 = newField("f5", "This field has docs and freqs", ft2);
     d.add(f5);
-    f8 = newField("f8", "This field has docs and freqs", Field.Store.NO, Field.Index.ANALYZED);
-    f8.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
+    f8 = newField("f8", "This field has docs and freqs", ft2);
     d.add(f8);
     // f3, f6, f9: docs and freqs and positions
-    f3 = newField("f3", "This field has docs and freqs and positions", Field.Store.NO, Field.Index.ANALYZED);
-    f3.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+    f3 = newField("f3", "This field has docs and freqs and positions", ft3);
     d.add(f3);
-    f6 = newField("f6", "This field has docs and freqs and positions", Field.Store.NO, Field.Index.ANALYZED);
-    f6.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
+    f6 = newField("f6", "This field has docs and freqs and positions", ft3);
     d.add(f6);
-    f9 = newField("f9", "This field has docs and freqs and positions", Field.Store.NO, Field.Index.ANALYZED);
-    f9.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
+    f9 = newField("f9", "This field has docs and freqs and positions", ft3);
     d.add(f9);
     writer.addDocument(d);
@@ -201,9 +195,10 @@
     lmp.setMergeFactor(2);
     lmp.setUseCompoundFile(false);
     Document d = new Document();
-
-    Field f1 = newField("f1", "This field has term freqs", Field.Store.NO, Field.Index.ANALYZED);
-    f1.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
+
+    FieldType ft = new FieldType(TextField.TYPE_UNSTORED);
+    ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
+    Field f1 = newField("f1", "This field has term freqs", ft);
     d.add(f1);
     for(int i=0;i<30;i++)
@@ -215,7 +210,7 @@
     // now add some documents with positions, and check there is no prox after optimization
     d = new Document();
-    f1 = newField("f1", "This field has positions", Field.Store.NO, Field.Index.ANALYZED);
+    f1 = newField("f1", "This field has positions", TextField.TYPE_UNSTORED);
     d.add(f1);
     for(int i=0;i<30;i++)
Index: lucene/src/test/org/apache/lucene/index/TestOmitTf.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestOmitTf.java (revision 1158404)
+++ lucene/src/test/org/apache/lucene/index/TestOmitTf.java (working copy)
@@ -27,12 +27,9 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-<<<<<<<
 import org.apache.lucene.index.FieldInfo.IndexOptions;
-=======
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.TextField;
->>>>>>>
 import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.search.*;
 import org.apache.lucene.search.BooleanClause.Occur;
@@ -63,7 +60,7 @@
   private static final FieldType normalType = new FieldType(TextField.TYPE_UNSTORED);
   static {
-    omitType.setOmitTermFreqAndPositions(true);
+    omitType.setIndexOptions(IndexOptions.DOCS_ONLY);
   }
   // Tests whether the DocumentWriter correctly enable the
@@ -79,12 +76,7 @@
     d.add(f1);
     // this field will NOT have Tf
-<<<<<<<
-    Field f2 = newField("f2", "This field has NO Tf in all docs", Field.Store.NO, Field.Index.ANALYZED);
-    f2.setIndexOptions(IndexOptions.DOCS_ONLY);
-=======
     Field f2 = newField("f2", "This field has NO Tf in all docs", omitType);
->>>>>>>
     d.add(f2);
     writer.addDocument(d);
@@ -94,18 +86,10 @@
     d = new Document();
     // Reverse
-<<<<<<<
-    f1.setIndexOptions(IndexOptions.DOCS_ONLY);
-=======
     f1 = newField("f1", "This field has term freqs", omitType);
->>>>>>>
     d.add(f1);
-<<<<<<<
-    f2.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
-=======
     f2 = newField("f2", "This field has NO Tf in all docs", normalType);
->>>>>>>
     d.add(f2);
     writer.addDocument(d);
@@ -143,12 +127,7 @@
     d.add(f1);
     // this field will NOT have Tf
-<<<<<<<
-    Field f2 = newField("f2", "This field has NO Tf in all docs", Field.Store.NO, Field.Index.ANALYZED);
-    f2.setIndexOptions(IndexOptions.DOCS_ONLY);
-=======
     Field f2 = newField("f2", "This field has NO Tf in all docs", omitType);
->>>>>>>
     d.add(f2);
     for(int i=0;i<30;i++)
@@ -159,18 +138,10 @@
     d = new Document();
     // Reverese
-<<<<<<<
-    f1.setIndexOptions(IndexOptions.DOCS_ONLY);
-=======
     f1 = newField("f1", "This field has term freqs", omitType);
->>>>>>>
     d.add(f1);
-<<<<<<<
-    f2.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
-=======
     f2 = newField("f2", "This field has NO Tf in all docs", normalType);
->>>>>>>
     d.add(f2);
     for(int i=0;i<30;i++)
@@ -215,11 +186,6 @@
     for(int i=0;i<5;i++)
       writer.addDocument(d);
-<<<<<<<
-    f2.setIndexOptions(IndexOptions.DOCS_ONLY);
-
-=======
->>>>>>>
     for(int i=0;i<20;i++)
       writer.addDocument(d);
@@ -257,12 +223,7 @@
     lmp.setUseCompoundFile(false);
     Document d = new Document();
-<<<<<<<
-    Field f1 = newField("f1", "This field has no term freqs", Field.Store.NO, Field.Index.ANALYZED);
-    f1.setIndexOptions(IndexOptions.DOCS_ONLY);
-=======
     Field f1 = newField("f1", "This field has term freqs", omitType);
->>>>>>>
     d.add(f1);
     for(int i=0;i<30;i++)
@@ -274,7 +235,7 @@
     // now add some documents with positions, and check there is no prox after optimization
     d = new Document();
-    f1 = newField("f1", "This field has positions", Field.Store.NO, Field.Index.ANALYZED);
+    f1 = newField("f1", "This field has positions", TextField.TYPE_UNSTORED);
     d.add(f1);
     for(int i=0;i<30;i++)
@@ -308,12 +269,7 @@
       Document d = new Document();
       sb.append(term).append(" ");
       String content = sb.toString();
-<<<<<<<
-      Field noTf = newField("noTf", content + (i%2==0 ? "" : " notf"), Field.Store.NO, Field.Index.ANALYZED);
-      noTf.setIndexOptions(IndexOptions.DOCS_ONLY);
-=======
      Field noTf = newField("noTf", content + (i%2==0 ? "" : " notf"), omitType);
->>>>>>>
      d.add(noTf);
       Field tf = newField("tf", content + (i%2==0 ? " tf" : ""), normalType);
Index: lucene/src/test/org/apache/lucene/index/TestRollback.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestRollback.java (revision 1158404)
+++ lucene/src/test/org/apache/lucene/index/TestRollback.java (working copy)
@@ -46,15 +46,9 @@
     for (int i = 0; i < 3; i++) {
       Document doc = new Document();
      String value = Integer.toString(i);
-<<<<<<<
-      doc.add(newField("pk", value, Store.YES, Index.ANALYZED_NO_NORMS));
-      doc.add(newField("text", "foo", Store.YES, Index.ANALYZED_NO_NORMS));
-      w.updateDocument(new Term("pk", value), doc);
-=======
       doc.add(newField("pk", value, customType));
       doc.add(newField("text", "foo", customType));
-      w.updateDocument(pkTerm.createTerm(value), doc);
->>>>>>>
+      w.updateDocument(new Term("pk", value), doc);
     }
     w.rollback();
Index: lucene/src/test/org/apache/lucene/index/TestRollingUpdates.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestRollingUpdates.java (revision 1158404)
+++ lucene/src/test/org/apache/lucene/index/TestRollingUpdates.java (working copy)
@@ -19,11 +19,7 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.*;
-<<<<<<<
-import org.apache.lucene.document.Field.Index;
 import org.apache.lucene.index.codecs.CodecProvider;
-=======
->>>>>>>
 import org.apache.lucene.store.*;
 import org.apache.lucene.util.*;
 import org.junit.Test;
Index: lucene/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java (revision 1158404)
+++ lucene/src/test/org/apache/lucene/index/TestSameTokenSamePosition.java (working copy)
@@ -58,8 +58,7 @@
     Directory dir = newDirectory();
     RandomIndexWriter riw = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new BugReproAnalyzer()));
     Document doc = new Document();
-    doc.add(new Field("eng", "Six drunken" /*This shouldn't matter. */,
-                      Field.Store.YES, Field.Index.ANALYZED));
+    doc.add(new Field("eng", TextField.TYPE_STORED, "Six drunken" /*This shouldn't matter. */));
     for (int i = 0; i < 100; i++) {
       riw.addDocument(doc);
     }
Index: lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java (revision 1158404)
+++ lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java (working copy)
@@ -674,18 +674,13 @@
   public void indexDoc() throws IOException {
     Document d = new Document();
-    FieldType customType1 = new FieldType(TextField.TYPE_UNSTORED);
-    customType1.setStored(true);
+    FieldType customType1 = new FieldType(TextField.TYPE_STORED);
     customType1.setTokenized(false);
     customType1.setOmitNorms(true);
     ArrayList fields = new ArrayList();
     String idString = getIdString();
-<<<<<<<
-    Field idField = newField("id", idString, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
-=======
-    Field idField = newField(idTerm.field(), idString, customType1);
->>>>>>>
+    Field idField = newField("id", idString, customType1);
     fields.add(idField);
     int nFields = nextInt(maxFields);
Index: lucene/src/test/org/apache/lucene/index/TestStressNRT.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestStressNRT.java (revision 1158404)
+++ lucene/src/test/org/apache/lucene/index/TestStressNRT.java (working copy)
@@ -29,6 +29,8 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.StringField;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
@@ -228,8 +230,10 @@
                 // add tombstone first
                 if (tombstones) {
                   Document d = new Document();
-                  d.add(new Field("id","-"+Integer.toString(id), Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
-                  d.add(new Field(field, Long.toString(nextVal), Field.Store.YES, Field.Index.NO));
+                  d.add(new Field("id", StringField.TYPE_STORED, "-"+Integer.toString(id)));
+                  FieldType stored = new FieldType();
+                  stored.setStored(true);
+                  d.add(new Field(field, stored, Long.toString(nextVal)));
                   writer.updateDocument(new Term("id", "-"+Integer.toString(id)), d);
                 }
@@ -244,8 +248,10 @@
                 // add tombstone first
                 if (tombstones) {
                   Document d = new Document();
-                  d.add(new Field("id","-"+Integer.toString(id), Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
-                  d.add(new Field(field, Long.toString(nextVal), Field.Store.YES, Field.Index.NO));
+                  d.add(new Field("id", StringField.TYPE_STORED, "-"+Integer.toString(id)));
+                  FieldType stored = new FieldType();
+                  stored.setStored(true);
+                  d.add(new Field(field, stored, Long.toString(nextVal)));
                   writer.updateDocument(new Term("id", "-"+Integer.toString(id)), d);
                 }
@@ -257,8 +263,10 @@
               } else {
                 // assertU(adoc("id",Integer.toString(id), field, Long.toString(nextVal)));
                 Document d = new Document();
-                d.add(newField("id",Integer.toString(id), Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
-                d.add(newField(field, Long.toString(nextVal), Field.Store.YES, Field.Index.NO));
+                d.add(new Field("id", StringField.TYPE_STORED, Integer.toString(id)));
+                FieldType stored = new FieldType();
+                stored.setStored(true);
+                d.add(new Field(field, stored, Long.toString(nextVal)));
                 if (VERBOSE) {
                   System.out.println("TEST: " + Thread.currentThread().getName() + ": u id:" + id + " val=" + nextVal);
                 }
Index: lucene/src/test/org/apache/lucene/index/TestSumDocFreq.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestSumDocFreq.java (revision 1158404)
+++ lucene/src/test/org/apache/lucene/index/TestSumDocFreq.java (working copy)
@@ -20,6 +20,7 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util._TestUtil;
@@ -37,8 +38,8 @@
     RandomIndexWriter writer = new RandomIndexWriter(random, dir);
     Document doc = new Document();
-    Field field1 = newField("foo", "", Field.Index.ANALYZED);
-    Field field2 = newField("bar", "", Field.Index.ANALYZED);
+    Field field1 = newField("foo", "", TextField.TYPE_UNSTORED);
+    Field field2 = newField("bar", "", TextField.TYPE_UNSTORED);
     doc.add(field1);
     doc.add(field2);
     for (int i = 0; i < numDocs; i++) {
Index: modules/grouping/src/java/org/apache/lucene/search/grouping/package.html
===================================================================
--- modules/grouping/src/java/org/apache/lucene/search/grouping/package.html (revision 1158404)
+++ modules/grouping/src/java/org/apache/lucene/search/grouping/package.html (working copy)
@@ -130,7 +130,7 @@
   List<Document> oneGroup = ...;
   Field groupEndField = new Field("groupEnd", "x", Field.Store.NO, Field.Index.NOT_ANALYZED);
-  groupEndField.setOmitTermFreqAndPositions(true);
+  groupEndField.setIndexOptions(IndexOptions.DOCS_ONLY);
   groupEndField.setOmitNorms(true);
   oneGroup.get(oneGroup.size()-1).add(groupEndField);
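The package.html hunk above swaps setOmitTermFreqAndPositions(true) for setIndexOptions(IndexOptions.DOCS_ONLY) but still builds groupEndField through the legacy Store/Index constructor. For comparison, a minimal sketch of the same group-end marker written against the FieldType API used by the test changes in this patch; the StringField.TYPE_UNSTORED base type and the variable names are illustrative assumptions, not part of the hunk:

    // sketch: unanalyzed, unstored marker added to the last document of each group
    FieldType groupEndType = new FieldType(StringField.TYPE_UNSTORED); // assumed base type
    groupEndType.setIndexOptions(IndexOptions.DOCS_ONLY); // no freqs or positions needed
    groupEndType.setOmitNorms(true);
    Field groupEndField = new Field("groupEnd", groupEndType, "x");
    oneGroup.get(oneGroup.size()-1).add(groupEndField);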
-   * lengthNorm
-   *  ·
-   *
-   * {@link org.apache.lucene.index.IndexableField#boost() f.getBoost}()
-   *
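Taken together, these hunks apply one migration: Field.Store / Field.Index / Field.TermVector constructor arguments and per-field setOmitTermFreqAndPositions(true) calls become StringField / TextField construction plus a shared FieldType whose index options are set once, with omitted term frequencies expressed as IndexOptions.DOCS_ONLY. A minimal sketch of the resolved idiom, using illustrative field names and values rather than ones taken from any single test:

    // one FieldType, reused by every field that should index docs only (no freqs/positions)
    FieldType noTf = new FieldType(TextField.TYPE_UNSTORED);
    noTf.setIndexOptions(IndexOptions.DOCS_ONLY); // replaces setOmitTermFreqAndPositions(true)

    Document doc = new Document();
    doc.add(new Field("body", noTf, "some analyzed text")); // analyzed, unstored, docs-only
    doc.add(new StringField("id", "42")); // replaces Field.Index.NOT_ANALYZED id fields

Loading a single stored field back follows the visitor introduced in the TestFieldsReader hunk; the "id" field name and the reader variable are again illustrative:

    DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor("id");
    reader.document(0, visitor); // reader is an open IndexReader, as in the test
    List fields = visitor.getDocument().getFields();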