Index: src/test/org/apache/lucene/index/TestDocumentWriter.java
===================================================================
--- src/test/org/apache/lucene/index/TestDocumentWriter.java (revision 1068310)
+++ src/test/org/apache/lucene/index/TestDocumentWriter.java (working copy)
@@ -19,6 +19,7 @@
 
 import java.io.IOException;
 import java.io.Reader;
+import java.util.Iterator;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
@@ -98,8 +99,9 @@
 
     // test that the norms are not present in the segment if
     // omitNorms is true
-    for (int i = 0; i < reader.core.fieldInfos.size(); i++) {
-      FieldInfo fi = reader.core.fieldInfos.fieldInfo(i);
+    Iterator<FieldInfo> it = reader.core.fieldInfos.getFieldInfoIterator();
+    while(it.hasNext()) {
+      FieldInfo fi = it.next();
       if (fi.isIndexed) {
         assertTrue(fi.omitNorms == !reader.hasNorms(fi.name));
       }
Index: src/test/org/apache/lucene/index/TestIndexFileDeleter.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexFileDeleter.java (revision 1068310)
+++ src/test/org/apache/lucene/index/TestIndexFileDeleter.java (working copy)
@@ -92,10 +92,11 @@
     CompoundFileReader cfsReader = new CompoundFileReader(dir, "_2.cfs");
     FieldInfos fieldInfos = new FieldInfos(cfsReader, "_2.fnm");
     int contentFieldIndex = -1;
-    for(i=0;i<fieldInfos.size();i++) {
-      FieldInfo fi = fieldInfos.fieldInfo(i);
+    Iterator<FieldInfo> it = fieldInfos.getFieldInfoIterator();
+    while(it.hasNext()) {
+      FieldInfo fi = it.next();
       if (fi.name.equals("content")) {
-        contentFieldIndex = i;
+        contentFieldIndex = fi.number;
         break;
       }
     }
Index: src/test/org/apache/lucene/index/TestBackwardsCompatibility.java
===================================================================
--- src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (revision 1068310)
+++ src/test/org/apache/lucene/index/TestBackwardsCompatibility.java (working copy)
@@ -22,6 +22,7 @@
 import java.io.IOException;
 import java.io.PrintStream;
 import java.util.Arrays;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Random;
 
@@ -528,10 +529,11 @@
     CompoundFileReader cfsReader = new CompoundFileReader(dir, "_0.cfs");
     FieldInfos fieldInfos = new FieldInfos(cfsReader, "_0.fnm");
     int contentFieldIndex = -1;
-    for(int i=0;i<fieldInfos.size();i++) {
-      FieldInfo fi = fieldInfos.fieldInfo(i);
+    Iterator<FieldInfo> it = fieldInfos.getFieldInfoIterator();
+    while(it.hasNext()) {
+      FieldInfo fi = it.next();
       if (fi.name.equals("content")) {
-        contentFieldIndex = i;
+        contentFieldIndex = fi.number;
         break;
       }
     }
Index: src/java/org/apache/lucene/index/FieldInfos.java
===================================================================
--- src/java/org/apache/lucene/index/FieldInfos.java (revision 1068310)
+++ src/java/org/apache/lucene/index/FieldInfos.java (working copy)
@@ -35,7 +35,46 @@
  * @lucene.experimental
  */
 public final class FieldInfos {
+  private static final class FieldNumberBiMap {
+    private final static int FIELD_NOT_FOUND = -1;
+    private final ArrayList<String> numberToName;
+    private final Map<String,Integer> nameToNumber;
+
+    private FieldNumberBiMap() {
+      this.nameToNumber = new HashMap<String,Integer>();
+      this.numberToName = new ArrayList<String>();
+    }
+
+    synchronized int addOrGet(String fieldName) {
+      Integer number = nameToNumber.get(fieldName);
+      if (number == null) {
+        numberToName.add(fieldName);
+        number = numberToName.size();
+        nameToNumber.put(fieldName, number);
+      }
+
+      return number;
+    }
+
+    synchronized String getFieldName(int fieldNumber) {
+      if (fieldNumber < numberToName.size()) {
+        return numberToName.get(fieldNumber);
+      }
+
+      return null;
+    }
+
+    synchronized int getFieldNumber(String fieldName) {
+      Integer number = nameToNumber.get(fieldName);
+      if (number == null) {
+        return FIELD_NOT_FOUND;
+      }
+
+      return number;
+    }
+  }
+
   // First used in 2.9; prior to 2.9 there was no format header
   public static final int FORMAT_START = -2;
   public static final int FORMAT_PER_FIELD_CODEC = -3;
@@ -53,13 +92,19 @@
   static final byte STORE_PAYLOADS = 0x20;
   static final byte OMIT_TERM_FREQ_AND_POSITIONS = 0x40;
 
-  private final ArrayList<FieldInfo> byNumber = new ArrayList<FieldInfo>();
+  private final SortedMap<Integer,FieldInfo> byNumber = new TreeMap<Integer,FieldInfo>();
   private final HashMap<String,FieldInfo> byName = new HashMap<String,FieldInfo>();
+  private final FieldNumberBiMap globalFieldNumbers;
   private int format;
 
   public FieldInfos() {
+    this.globalFieldNumbers = new FieldNumberBiMap();
   }
+
+  private FieldInfos(FieldNumberBiMap globalFieldNumbers) {
+    this.globalFieldNumbers = globalFieldNumbers;
+  }
 
   /**
    * Construct a FieldInfos object using the directory and the name of the file
   * IndexInput
@@ -68,6 +113,7 @@
   * @throws IOException
   */
   public FieldInfos(Directory d, String name) throws IOException {
+    this.globalFieldNumbers = new FieldNumberBiMap();
     IndexInput input = d.openInput(name);
     try {
       read(input, name);
@@ -76,16 +122,20 @@
     }
   }
 
+  public FieldInfos newFieldInfosWithGlobalFieldNumberMap() {
+    return new FieldInfos(this.globalFieldNumbers);
+  }
+
   /**
   * Returns a deep clone of this FieldInfos instance.
   */
  @Override
  synchronized public Object clone() {
-    FieldInfos fis = new FieldInfos();
-    final int numField = byNumber.size();
-    for(int i=0;i<numField;i++) {
-      FieldInfo fi = (FieldInfo) (byNumber.get(i)).clone();
-      fis.byNumber.add(fi);
+    FieldInfos fis = new FieldInfos(globalFieldNumbers);
+    Iterator<FieldInfo> it = getFieldInfoIterator();
+    while (it.hasNext()) {
+      FieldInfo fi = (FieldInfo) (it.next()).clone();
+      fis.byNumber.put(fi.number, fi);
       fis.byName.put(fi.name, fi);
     }
     return fis;
@@ -102,9 +152,9 @@
 
   /** Returns true if any fields do not omitTermFreqAndPositions */
   public boolean hasProx() {
-    final int numFields = byNumber.size();
-    for(int i=0;i<numFields;i++) {
-      final FieldInfo fi = fieldInfo(i);
+    Iterator<FieldInfo> it = getFieldInfoIterator();
+    while (it.hasNext()) {
+      final FieldInfo fi = it.next();
       if (fi.isIndexed && !fi.omitTermFreqAndPositions) {
         return true;
       }
@@ -217,7 +267,9 @@
                             boolean omitNorms, boolean storePayloads, boolean omitTermFreqAndPositions) {
     FieldInfo fi = fieldInfo(name);
     if (fi == null) {
-      return addInternal(name, isIndexed, storeTermVector, storePositionWithTermVector, storeOffsetWithTermVector, omitNorms, storePayloads, omitTermFreqAndPositions);
+      // get a global number for this field
+      int fieldNumber = globalFieldNumbers.addOrGet(name);
+      return addInternal(name, fieldNumber, isIndexed, storeTermVector, storePositionWithTermVector, storeOffsetWithTermVector, omitNorms, storePayloads, omitTermFreqAndPositions);
     } else {
       fi.update(isIndexed, storeTermVector, storePositionWithTermVector, storeOffsetWithTermVector, omitNorms, storePayloads, omitTermFreqAndPositions);
     }
@@ -231,13 +283,13 @@
                        fi.omitTermFreqAndPositions);
   }
 
-  private FieldInfo addInternal(String name, boolean isIndexed,
+  private FieldInfo addInternal(String name, int fieldNumber, boolean isIndexed,
                                 boolean storeTermVector, boolean storePositionWithTermVector,
                                 boolean storeOffsetWithTermVector, boolean omitNorms, boolean storePayloads, boolean omitTermFreqAndPositions) {
     name = StringHelper.intern(name);
-    FieldInfo fi = new FieldInfo(name, isIndexed, byNumber.size(), storeTermVector, storePositionWithTermVector,
+    FieldInfo fi = new FieldInfo(name, isIndexed, fieldNumber, storeTermVector, storePositionWithTermVector,
                                  storeOffsetWithTermVector, omitNorms, storePayloads, omitTermFreqAndPositions);
-    byNumber.add(fi);
+    byNumber.put(fi.number, fi);
     byName.put(name, fi);
     return fi;
   }
@@ -273,13 +325,18 @@
     return (fieldNumber >= 0) ? byNumber.get(fieldNumber) : null;
   }
 
+  public Iterator<FieldInfo> getFieldInfoIterator() {
+    return byNumber.values().iterator();
+  }
+
   public int size() {
     return byNumber.size();
   }
 
   public boolean hasVectors() {
-    for (int i = 0; i < size(); i++) {
-      if (fieldInfo(i).storeTermVector) {
+    Iterator<FieldInfo> it = getFieldInfoIterator();
+    while (it.hasNext()) {
+      if (it.next().storeTermVector) {
         return true;
       }
     }
@@ -287,8 +344,9 @@
   }
 
   public boolean hasNorms() {
-    for (int i = 0; i < size(); i++) {
-      if (!fieldInfo(i).omitNorms) {
+    Iterator<FieldInfo> it = getFieldInfoIterator();
+    while (it.hasNext()) {
+      if (!it.next().omitNorms) {
         return true;
       }
     }
@@ -307,8 +365,9 @@
   public void write(IndexOutput output) throws IOException {
     output.writeVInt(FORMAT_CURRENT);
     output.writeVInt(size());
-    for (int i = 0; i < size(); i++) {
-      FieldInfo fi = fieldInfo(i);
+    Iterator<FieldInfo> it = getFieldInfoIterator();
+    while (it.hasNext()) {
+      FieldInfo fi = it.next();
       byte bits = 0x0;
       if (fi.isIndexed) bits |= IS_INDEXED;
       if (fi.storeTermVector) bits |= STORE_TERMVECTOR;
@@ -318,6 +377,7 @@
       if (fi.storePayloads) bits |= STORE_PAYLOADS;
       if (fi.omitTermFreqAndPositions) bits |= OMIT_TERM_FREQ_AND_POSITIONS;
       output.writeString(fi.name);
+      output.writeInt(fi.number);
       output.writeInt(fi.codecId);
       output.writeByte(bits);
     }
@@ -338,6 +398,7 @@
     for (int i = 0; i < size; i++) {
       String name = StringHelper.intern(input.readString());
       // if this is a previous format codec 0 will be preflex!
+      final int fieldNumber = format <= FORMAT_PER_FIELD_CODEC? input.readInt():i;
       final int codecId = format <= FORMAT_PER_FIELD_CODEC? input.readInt():0;
       byte bits = input.readByte();
       boolean isIndexed = (bits & IS_INDEXED) != 0;
@@ -347,7 +408,7 @@
       boolean omitNorms = (bits & OMIT_NORMS) != 0;
       boolean storePayloads = (bits & STORE_PAYLOADS) != 0;
       boolean omitTermFreqAndPositions = (bits & OMIT_TERM_FREQ_AND_POSITIONS) != 0;
-      final FieldInfo addInternal = addInternal(name, isIndexed, storeTermVector, storePositionsWithTermVector, storeOffsetWithTermVector, omitNorms, storePayloads, omitTermFreqAndPositions);
+      final FieldInfo addInternal = addInternal(name, fieldNumber, isIndexed, storeTermVector, storePositionsWithTermVector, storeOffsetWithTermVector, omitNorms, storePayloads, omitTermFreqAndPositions);
       addInternal.codecId = codecId;
     }
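The FieldNumberBiMap above is the core of the change: one synchronized name/number registry, shared by every FieldInfos created through newFieldInfosWithGlobalFieldNumberMap(), so a field name resolves to the same number in every segment. A minimal standalone sketch of that contract (hypothetical class name, not part of the patch; note that addOrGet as written hands out numbers starting at 1, the list size after the add, while getFieldName indexes the list from 0):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Hypothetical, simplified model of the FieldNumberBiMap contract.
    final class GlobalFieldNumbers {
      private final List<String> numberToName = new ArrayList<String>();
      private final Map<String, Integer> nameToNumber = new HashMap<String, Integer>();

      // Same name always yields the same number, regardless of which segment asks first.
      synchronized int addOrGet(String fieldName) {
        Integer number = nameToNumber.get(fieldName);
        if (number == null) {
          numberToName.add(fieldName);
          number = numberToName.size(); // numbers start at 1, as in the patch's addOrGet
          nameToNumber.put(fieldName, number);
        }
        return number;
      }

      public static void main(String[] args) {
        GlobalFieldNumbers global = new GlobalFieldNumbers();
        int title = global.addOrGet("title");  // 1
        int body = global.addOrGet("body");    // 2
        // a later segment that sees "body" first still gets 2, not 1:
        System.out.println(global.addOrGet("body") == body);   // true
        System.out.println(global.addOrGet("title") == title); // true
      }
    }

Two segments that meet "title" and "body" in different orders still agree on both numbers. That is why byNumber can become a SortedMap keyed by a stable global number, and why every dense for (int i = 0; i < size(); i++) loop in the rest of the patch turns into an iterator: a single segment's field numbers are no longer guaranteed to be contiguous.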
Index: src/java/org/apache/lucene/index/FieldsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/FieldsWriter.java (revision 1068310)
+++ src/java/org/apache/lucene/index/FieldsWriter.java (working copy)
@@ -45,14 +45,12 @@
   // If null - we were supplied with streams, if notnull - we manage them ourselves
   private Directory directory;
   private String segment;
-  private FieldInfos fieldInfos;
   private IndexOutput fieldsStream;
   private IndexOutput indexStream;
 
-  FieldsWriter(Directory directory, String segment, FieldInfos fn) throws IOException {
+  FieldsWriter(Directory directory, String segment) throws IOException {
    this.directory = directory;
    this.segment = segment;
-    fieldInfos = fn;
 
    boolean success = false;
    try {
@@ -70,10 +68,9 @@
     }
   }
 
-  FieldsWriter(IndexOutput fdx, IndexOutput fdt, FieldInfos fn) {
+  FieldsWriter(IndexOutput fdx, IndexOutput fdt) {
     directory = null;
     segment = null;
-    fieldInfos = fn;
     fieldsStream = fdt;
     indexStream = fdx;
   }
@@ -166,7 +163,7 @@
     assert fieldsStream.getFilePointer() == position;
   }
 
-  final void addDocument(Document doc) throws IOException {
+  final void addDocument(Document doc, FieldInfos fieldInfos) throws IOException {
     indexStream.writeLong(fieldsStream.getFilePointer());
 
     int storedCount = 0;
Index: src/java/org/apache/lucene/index/SegmentInfo.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentInfo.java (revision 1068310)
+++ src/java/org/apache/lucene/index/SegmentInfo.java (working copy)
@@ -17,22 +17,23 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.store.IndexInput;
-import org.apache.lucene.util.Constants;
-import org.apache.lucene.index.codecs.Codec;
-import org.apache.lucene.index.codecs.CodecProvider;
-import org.apache.lucene.index.codecs.DefaultSegmentInfosWriter;
 import java.io.IOException;
-import java.util.Arrays;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Set;
-import java.util.HashSet;
-import java.util.HashMap;
-import java.util.ArrayList;
 
+import org.apache.lucene.index.codecs.Codec;
+import org.apache.lucene.index.codecs.CodecProvider;
+import org.apache.lucene.index.codecs.DefaultSegmentInfosWriter;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.util.Constants;
+
 /**
  * Information about a segment such as it's name, directory, and files related
  * to the segment.
@@ -62,7 +63,7 @@
    *  - NO says this field has no separate norms
    *  >= YES says this field has separate norms with the specified generation
    */
-  private long[] normGen;
+  private Map<Integer,Long> normGen;
 
   private boolean isCompoundFile;
 
@@ -133,9 +134,11 @@
     if (src.normGen == null) {
       normGen = null;
     } else {
-      normGen = new long[src.normGen.length];
-      System.arraycopy(src.normGen, 0, normGen, 0, src.normGen.length);
+      normGen = new HashMap<Integer, Long>(src.normGen.size());
+      for (Entry<Integer,Long> entry : src.normGen.entrySet()) {
+        normGen.put(entry.getKey(), entry.getValue());
       }
+    }
     isCompoundFile = src.isCompoundFile;
     delCount = src.delCount;
     segmentCodecs = src.segmentCodecs;
@@ -184,11 +187,16 @@
       if (numNormGen == NO) {
         normGen = null;
       } else {
-        normGen = new long[numNormGen];
+        normGen = new HashMap<Integer, Long>();
         for(int j=0;j<numNormGen;j++) {
-          normGen[j] = input.readLong();
+          int fieldNumber = j;
+          if (format <= DefaultSegmentInfosWriter.FORMAT_4_0) {
+            fieldNumber = input.readInt();
+          }
+
+          normGen.put(fieldNumber, input.readLong());
         }
       }
@@ -300,7 +308,10 @@
     si.diagnostics = new HashMap<String, String>(diagnostics);
     if (normGen != null) {
-      si.normGen = normGen.clone();
+      si.normGen = new HashMap<Integer, Long>();
+      for (Entry<Integer,Long> entry : normGen.entrySet()) {
+        si.normGen.put(entry.getKey(), entry.getValue());
       }
+    }
     si.hasVectors = hasVectors;
     si.version = version;
     return si;
@@ -339,9 +350,14 @@
    * @param fieldNumber the field index to check
    */
   public boolean hasSeparateNorms(int fieldNumber) {
-    return normGen != null && normGen[fieldNumber] != NO;
+    if (normGen == null) {
+      return false;
     }
+
+    Long gen = normGen.get(fieldNumber);
+    return gen != null && gen.longValue() != NO;
+  }
+
   /**
    * Returns true if any fields in this segment have separate norms.
    */
@@ -349,7 +365,7 @@
     if (normGen == null) {
       return false;
     } else {
-      for (long fieldNormGen : normGen) {
+      for (long fieldNormGen : normGen.values()) {
         if (fieldNormGen >= YES) {
           return true;
         }
@@ -359,10 +375,9 @@
     return false;
   }
 
-  void initNormGen(int numFields) {
+  void initNormGen() {
     if (normGen == null) { // normGen is null if this segments file hasn't had any norms set against it yet
-      normGen = new long[numFields];
-      Arrays.fill(normGen, NO);
+      normGen = new HashMap<Integer, Long>();
     }
   }
 
@@ -373,10 +388,11 @@
   * @param fieldIndex field whose norm file will be rewritten
   */
   void advanceNormGen(int fieldIndex) {
-    if (normGen[fieldIndex] == NO) {
-      normGen[fieldIndex] = YES;
+    Long gen = normGen.get(fieldIndex);
+    if (gen == null || gen.longValue() == NO) {
+      normGen.put(fieldIndex, new Long(YES));
     } else {
-      normGen[fieldIndex]++;
+      normGen.put(fieldIndex, gen+1);
     }
     clearFiles();
   }
@@ -388,7 +404,7 @@
   */
   public String getNormFileName(int number) {
     if (hasSeparateNorms(number)) {
-      return IndexFileNames.fileNameFromGeneration(name, "s" + number, normGen[number]);
+      return IndexFileNames.fileNameFromGeneration(name, "s" + number, normGen.get(number));
     } else {
       // single file for all norms
       return IndexFileNames.fileNameFromGeneration(name, IndexFileNames.NORMS_EXTENSION, WITHOUT_GEN);
@@ -474,9 +490,10 @@
     if (normGen == null) {
       output.writeInt(NO);
     } else {
-      output.writeInt(normGen.length);
-      for (long fieldNormGen : normGen) {
-        output.writeLong(fieldNormGen);
+      output.writeInt(normGen.size());
+      for (Entry<Integer,Long> entry : normGen.entrySet()) {
+        output.writeInt(entry.getKey());
+        output.writeLong(entry.getValue());
       }
     }
 
@@ -572,11 +589,11 @@
     }
 
     if (normGen != null) {
-      for (int i = 0; i < normGen.length; i++) {
-        long gen = normGen[i];
+      for (Entry<Integer,Long> entry : normGen.entrySet()) {
+        long gen = entry.getValue();
         if (gen >= YES) {
           // Definitely a separate norm file, with generation:
-          fileSet.add(IndexFileNames.fileNameFromGeneration(name, IndexFileNames.SEPARATE_NORMS_EXTENSION + i, gen));
+          fileSet.add(IndexFileNames.fileNameFromGeneration(name, IndexFileNames.SEPARATE_NORMS_EXTENSION + entry.getKey(), gen));
         }
       }
     }
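SegmentInfo makes the matching storage change: once field numbers can be sparse inside a segment, a long[] indexed by field number no longer fits, so norm generations move into a map and the segments file stores an explicit (fieldNumber, generation) pair per entry, as the writeInt(entry.getKey()) in write() shows. A small sketch of the lookup semantics, assuming the existing SegmentInfo sentinels NO = -1 and YES = 1 (assumed values; illustrative code only):

    import java.util.HashMap;
    import java.util.Map;

    // Sketch of the sparse norm-generation bookkeeping, assuming NO = -1 and YES = 1.
    class SparseNormGen {
      static final long NO = -1;
      static final long YES = 1;

      private final Map<Integer, Long> normGen = new HashMap<Integer, Long>();

      // A missing entry behaves exactly like the old array slot holding NO.
      boolean hasSeparateNorms(int fieldNumber) {
        Long gen = normGen.get(fieldNumber);
        return gen != null && gen.longValue() != NO;
      }

      void advanceNormGen(int fieldNumber) {
        Long gen = normGen.get(fieldNumber);
        normGen.put(fieldNumber, (gen == null || gen.longValue() == NO) ? YES : gen + 1);
      }
    }

Sizing an array by size() would break indexing, and sizing it by the largest global number would waste space; the map only pays for fields the segment actually touched.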
Index: src/java/org/apache/lucene/index/InvertedDocConsumer.java
===================================================================
--- src/java/org/apache/lucene/index/InvertedDocConsumer.java (revision 1068310)
+++ src/java/org/apache/lucene/index/InvertedDocConsumer.java (working copy)
@@ -35,10 +35,4 @@
   /** Attempt to free RAM, returning true if any RAM was
    *  freed */
   abstract boolean freeRAM();
-
-  FieldInfos fieldInfos;
-
-  void setFieldInfos(FieldInfos fieldInfos) {
-    this.fieldInfos = fieldInfos;
   }
-}
Index: src/java/org/apache/lucene/index/DocFieldConsumer.java
===================================================================
--- src/java/org/apache/lucene/index/DocFieldConsumer.java (revision 1068310)
+++ src/java/org/apache/lucene/index/DocFieldConsumer.java (working copy)
@@ -22,9 +22,6 @@
 import java.util.Map;
 
 abstract class DocFieldConsumer {
-
-  FieldInfos fieldInfos;
-
   /** Called when DocumentsWriter decides to create a new
    *  segment */
   abstract void flush(Map<DocFieldConsumerPerThread,Collection<DocFieldConsumerPerField>> threadsAndFields, SegmentWriteState state) throws IOException;
@@ -39,8 +36,4 @@
    *  The consumer should free RAM, if possible, returning
    *  true if any RAM was in fact freed. */
   abstract boolean freeRAM();
-
-  void setFieldInfos(FieldInfos fieldInfos) {
-    this.fieldInfos = fieldInfos;
   }
-}
Index: src/java/org/apache/lucene/index/SegmentReader.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentReader.java (revision 1068310)
+++ src/java/org/apache/lucene/index/SegmentReader.java (working copy)
@@ -22,6 +22,7 @@
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
@@ -598,12 +599,13 @@
                                  && (!si.hasDeletions() || this.si.getDelFileName().equals(si.getDelFileName()));
 
     boolean normsUpToDate = true;
-    boolean[] fieldNormsChanged = new boolean[core.fieldInfos.size()];
-    final int fieldCount = core.fieldInfos.size();
-    for (int i = 0; i < fieldCount; i++) {
-      if (!this.si.getNormFileName(i).equals(si.getNormFileName(i))) {
+    Set<Integer> fieldNormsChanged = new HashSet<Integer>();
+    Iterator<FieldInfo> it = core.fieldInfos.getFieldInfoIterator();
+    while(it.hasNext()) {
+      int fieldNumber = it.next().number;
+      if (!this.si.getNormFileName(fieldNumber).equals(si.getNormFileName(fieldNumber))) {
         normsUpToDate = false;
-        fieldNormsChanged[i] = true;
+        fieldNormsChanged.add(fieldNumber);
       }
     }
@@ -659,11 +661,12 @@
         clone.norms = new HashMap<String,Norm>();
 
         // Clone norms
-        for (int i = 0; i < fieldNormsChanged.length; i++) {
-
+        it = core.fieldInfos.getFieldInfoIterator();
+        while(it.hasNext()) {
+          FieldInfo info = it.next();
           // Clone unchanged norms to the cloned reader
-          if (doClone || !fieldNormsChanged[i]) {
-            final String curField = core.fieldInfos.fieldInfo(i).name;
+          if (doClone || !fieldNormsChanged.contains(info.number)) {
+            final String curField = info.name;
             Norm norm = this.norms.get(curField);
             if (norm != null)
               clone.norms.put(curField, (Norm) norm.clone());
@@ -735,7 +738,7 @@
       }
 
       if (normsDirty) {               // re-write norms
-        si.initNormGen(core.fieldInfos.size());
+        si.initNormGen();
         for (final Norm norm : norms.values()) {
           if (norm.dirty) {
             norm.reWrite(si);
@@ -880,8 +883,9 @@
     ensureOpen();
 
     Set<String> fieldSet = new HashSet<String>();
-    for (int i = 0; i < core.fieldInfos.size(); i++) {
-      FieldInfo fi = core.fieldInfos.fieldInfo(i);
+    Iterator<FieldInfo> it = core.fieldInfos.getFieldInfoIterator();
+    while(it.hasNext()) {
+      FieldInfo fi = it.next();
       if (fieldOption == IndexReader.FieldOption.ALL) {
         fieldSet.add(fi.name);
       }
@@ -959,8 +963,10 @@
   private void openNorms(Directory cfsDir, int readBufferSize) throws IOException {
     long nextNormSeek = SegmentMerger.NORMS_HEADER.length; //skip header (header unused for now)
     int maxDoc = maxDoc();
-    for (int i = 0; i < core.fieldInfos.size(); i++) {
-      FieldInfo fi = core.fieldInfos.fieldInfo(i);
+    Iterator<FieldInfo> it = core.fieldInfos.getFieldInfoIterator();
+    while(it.hasNext()) {
+
+      FieldInfo fi = it.next();
      if (norms.containsKey(fi.name)) {
        // in case this SegmentReader is being re-opened, we might be able to
        // reuse some norm instances and skip loading them here
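SegmentReader's reopen path repeats the pattern in miniature: a boolean[] sized by fieldInfos.size() and indexed by field number can under-allocate once numbers are sparse, so the set of fields with changed norms becomes a HashSet of field numbers. A sketch with hypothetical names, illustrative only:

    import java.util.HashSet;
    import java.util.Set;

    // Why the boolean[] became a Set<Integer>.
    class ChangedNorms {
      // before: boolean[] changed = new boolean[fieldInfos.size()];
      //         changed[fieldNumber] = true; // out of bounds once numbers are sparse, e.g. {1, 7, 42}
      private final Set<Integer> changed = new HashSet<Integer>();

      void mark(int fieldNumber) {
        changed.add(fieldNumber);
      }

      boolean isChanged(int fieldNumber) {
        return changed.contains(fieldNumber);
      }
    }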
Index: src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java
===================================================================
--- src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java (revision 1068310)
+++ src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java (working copy)
@@ -41,14 +41,13 @@
   float docBoost;
   int fieldGen;
   final DocFieldProcessor docFieldProcessor;
-  final FieldInfos fieldInfos;
   final DocFieldConsumerPerThread consumer;
 
   // Holds all fields seen in current doc
   DocFieldProcessorPerField[] fields = new DocFieldProcessorPerField[1];
   int fieldCount;
 
-  // Hash table for all fields ever seen
+  // Hash table for all fields seen in current segment
   DocFieldProcessorPerField[] fieldHash = new DocFieldProcessorPerField[2];
   int hashMask = 1;
   int totalFieldCount;
@@ -60,7 +59,6 @@
   public DocFieldProcessorPerThread(DocumentsWriterThreadState threadState, DocFieldProcessor docFieldProcessor) throws IOException {
     this.docState = threadState.docState;
     this.docFieldProcessor = docFieldProcessor;
-    this.fieldInfos = docFieldProcessor.fieldInfos;
     this.consumer = docFieldProcessor.consumer.addThread(this);
     fieldsWriter = docFieldProcessor.fieldsWriter.addThread(docState);
   }
@@ -75,6 +73,7 @@
         field = next;
       }
     }
+    doAfterFlush();
     fieldsWriter.abort();
     consumer.abort();
   }
@@ -92,45 +91,15 @@
     return fields;
   }
 
-  /** If there are fields we've seen but did not see again
-   *  in the last run, then free them up. */
-
-  void trimFields(SegmentWriteState state) {
-
-    for(int i=0;i<fieldHash.length;i++) {
-      DocFieldProcessorPerField perField = fieldHash[i];
-      DocFieldProcessorPerField lastPerField = null;
-
-      while (perField != null) {
-
-        if (perField.lastGen == -1) {
-
-          // This field was not seen since the previous
-          // flush, so, free up its resources now
-
-          // Unhash
-          if (lastPerField == null)
-            fieldHash[i] = perField.next;
-          else
-            lastPerField.next = perField.next;
-
-          totalFieldCount--;
-
-        } else {
-          // Reset
-          perField.lastGen = -1;
-          lastPerField = perField;
-        }
-
-        perField = perField.next;
-      }
-    }
-  }
+  void doAfterFlush() {
+    fieldHash = new DocFieldProcessorPerField[2];
+    hashMask = 1;
+    totalFieldCount = 0;
+  }
 
   private void rehash() {
Index: src/java/org/apache/lucene/index/SegmentMerger.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentMerger.java (revision 1068310)
+++ src/java/org/apache/lucene/index/SegmentMerger.java (working copy)
@@ -633,9 +633,10 @@
     IndexOutput output = null;
     try {
-      for (int i = 0; i < fieldInfos.size(); i++) {
-        final FieldInfo fi = fieldInfos.fieldInfo(i);
+      Iterator<FieldInfo> it = fieldInfos.getFieldInfoIterator();
+      while(it.hasNext()) {
+        final FieldInfo fi = it.next();
         if (fi.isIndexed && !fi.omitNorms) {
           if (output == null) {
             output = directory.createOutput(IndexFileNames.segmentFileName(segment, "", IndexFileNames.NORMS_EXTENSION));
Index: src/java/org/apache/lucene/index/DocFieldProcessor.java
===================================================================
--- src/java/org/apache/lucene/index/DocFieldProcessor.java (revision 1068310)
+++ src/java/org/apache/lucene/index/DocFieldProcessor.java (working copy)
@@ -34,16 +34,13 @@
 final class DocFieldProcessor extends DocConsumer {
 
   final DocumentsWriter docWriter;
-  final FieldInfos fieldInfos;
   final DocFieldConsumer consumer;
   final StoredFieldsWriter fieldsWriter;
 
   public DocFieldProcessor(DocumentsWriter docWriter, DocFieldConsumer consumer) {
     this.docWriter = docWriter;
     this.consumer = consumer;
-    fieldInfos = docWriter.getFieldInfos();
-    consumer.setFieldInfos(fieldInfos);
-    fieldsWriter = new StoredFieldsWriter(docWriter, fieldInfos);
+    fieldsWriter = new StoredFieldsWriter(docWriter);
   }
 
   @Override
@@ -53,7 +50,6 @@
     for ( DocConsumerPerThread thread : threads) {
       DocFieldProcessorPerThread perThread = (DocFieldProcessorPerThread) thread;
       childThreadsAndFields.put(perThread.consumer, perThread.fields());
-      perThread.trimFields(state);
     }
     fieldsWriter.flush(state);
     consumer.flush(childThreadsAndFields, state);
@@ -63,7 +59,7 @@
     // FreqProxTermsWriter does this with
     // FieldInfo.storePayload.
     final String fileName = IndexFileNames.segmentFileName(state.segmentName, "", IndexFileNames.FIELD_INFOS_EXTENSION);
-    fieldInfos.write(state.directory, fileName);
+    state.fieldInfos.write(state.directory, fileName);
   }
 
   @Override
Index: src/java/org/apache/lucene/index/SegmentCodecs.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentCodecs.java (revision 1068310)
+++ src/java/org/apache/lucene/index/SegmentCodecs.java (working copy)
@@ -20,6 +20,7 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.IdentityHashMap;
+import java.util.Iterator;
 import java.util.Map;
 import java.util.Set;
 
@@ -74,12 +75,12 @@
   }
 
   static SegmentCodecs build(FieldInfos infos, CodecProvider provider) {
-    final int size = infos.size();
     final Map<Codec, Integer> codecRegistry = new IdentityHashMap<Codec, Integer>();
     final ArrayList<Codec> codecs = new ArrayList<Codec>();
 
-    for (int i = 0; i < size; i++) {
-      final FieldInfo info = infos.fieldInfo(i);
+    Iterator<FieldInfo> it = infos.getFieldInfoIterator();
+    while(it.hasNext()) {
+      final FieldInfo info = it.next();
       if (info.isIndexed) {
         final Codec fieldCodec = provider.lookup(provider
             .getFieldCodec(info.name));
Index: src/java/org/apache/lucene/index/PerFieldCodecWrapper.java
===================================================================
--- src/java/org/apache/lucene/index/PerFieldCodecWrapper.java (revision 1068310)
+++ src/java/org/apache/lucene/index/PerFieldCodecWrapper.java (working copy)
@@ -100,12 +100,12 @@
 
     public FieldsReader(Directory dir, FieldInfos fieldInfos, SegmentInfo si,
         int readBufferSize, int indexDivisor) throws IOException {
-      final int fieldCount = fieldInfos.size();
       final Map<String, FieldsProducer> producers = new HashMap<String, FieldsProducer>();
       boolean success = false;
       try {
-        for (int i = 0; i < fieldCount; i++) {
-          FieldInfo fi = fieldInfos.fieldInfo(i);
+        Iterator<FieldInfo> it = fieldInfos.getFieldInfoIterator();
+        while(it.hasNext()) {
+          FieldInfo fi = it.next();
           if (fi.isIndexed) { // TODO this does not work for non-indexed fields
             fields.add(fi.name);
             Codec codec = segmentCodecs.codecs[fi.codecId];
Index: src/java/org/apache/lucene/index/DocumentsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/DocumentsWriter.java (revision 1068310)
+++ src/java/org/apache/lucene/index/DocumentsWriter.java (working copy)
@@ -279,12 +279,13 @@
   private int maxBufferedDocs = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS;
 
   private boolean closed;
-  private final FieldInfos fieldInfos;
+  private FieldInfos fieldInfos;
 
   private final BufferedDeletesStream bufferedDeletesStream;
   private final IndexWriter.FlushControl flushControl;
 
-  DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain indexingChain, int maxThreadStates, FieldInfos fieldInfos, BufferedDeletesStream bufferedDeletesStream) throws IOException {
+  DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain indexingChain, int maxThreadStates, FieldInfos fieldInfos,
+      BufferedDeletesStream bufferedDeletesStream) throws IOException {
     this.directory = directory;
     this.writer = writer;
     this.similarityProvider = writer.getConfig().getSimilarityProvider();
@@ -350,10 +351,6 @@
     return doFlush;
   }
 
-  public FieldInfos getFieldInfos() {
-    return fieldInfos;
-  }
-
   /** If non-null, various details of indexing are printed
    *  here. */
   synchronized void setInfoStream(PrintStream infoStream) {
@@ -482,9 +479,14 @@
   private void doAfterFlush() throws IOException {
     // All ThreadStates should be idle when we are called
     assert allThreadsIdle();
+    for (DocumentsWriterThreadState threadState : threadStates) {
+      threadState.consumer.doAfterFlush();
+    }
+
     threadBindings.clear();
     waitQueue.reset();
     segment = null;
+    fieldInfos = fieldInfos.newFieldInfosWithGlobalFieldNumberMap();
     numDocs = 0;
     nextDocID = 0;
     bufferIsFull = false;
@@ -788,7 +790,7 @@
       // work
       final DocWriter perDoc;
       try {
-        perDoc = state.consumer.processDocument();
+        perDoc = state.consumer.processDocument(fieldInfos);
       } finally {
        docState.clear();
       }
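DocumentsWriter.doAfterFlush() is where the two lifetimes meet: every flush discards the per-segment field state (each consumer's doAfterFlush() clears its field hash) and swaps in a fresh FieldInfos, while newFieldInfosWithGlobalFieldNumberMap() carries the shared numbering into the next segment. A hypothetical model of that lifecycle, with simplified types:

    import java.util.HashMap;
    import java.util.Map;

    // Hypothetical model: per-segment field metadata resets after every flush,
    // while the global name -> number assignments survive.
    final class PerFlushFieldState {
      private final Map<String, Integer> global = new HashMap<String, Integer>(); // lives as long as the writer
      private Map<String, Integer> segment = new HashMap<String, Integer>();      // one segment's worth

      int add(String name) {
        Integer number = global.get(name);
        if (number == null) {
          number = global.size() + 1;
          global.put(name, number);
        }
        segment.put(name, number); // the segment records the global number
        return number;
      }

      // Analogue of fieldInfos = fieldInfos.newFieldInfosWithGlobalFieldNumberMap()
      void doAfterFlush() {
        segment = new HashMap<String, Integer>();
      }
    }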
Index: src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexWriter.java (revision 1068310)
+++ src/java/org/apache/lucene/index/IndexWriter.java (working copy)
@@ -221,6 +221,7 @@
   volatile long pendingCommitChangeCount;
 
   final SegmentInfos segmentInfos;       // the segments
+  final FieldInfos fieldInfos;
 
   private DocumentsWriter docWriter;
   private IndexFileDeleter deleter;
@@ -777,7 +778,10 @@
 
       setRollbackSegmentInfos(segmentInfos);
 
-      docWriter = new DocumentsWriter(directory, this, conf.getIndexingChain(), conf.getMaxThreadStates(), getCurrentFieldInfos(), bufferedDeletesStream);
+      // start with previous field numbers, but new FieldInfos
+      fieldInfos = getCurrentFieldInfos();
+      docWriter = new DocumentsWriter(directory, this, conf.getIndexingChain(), conf.getMaxThreadStates(),
+                                      fieldInfos.newFieldInfosWithGlobalFieldNumberMap(), bufferedDeletesStream);
       docWriter.setInfoStream(infoStream);
 
       // Default deleter (for backwards compatibility) is
@@ -846,9 +850,9 @@
         fieldInfos = new FieldInfos();
         for(SegmentInfo info : segmentInfos) {
           final FieldInfos segFieldInfos = getFieldInfos(info);
-          final int fieldCount = segFieldInfos.size();
-          for(int fieldNumber=0;fieldNumber<fieldCount;fieldNumber++) {
-            fieldInfos.add(segFieldInfos.fieldInfo(fieldNumber));
+          Iterator<FieldInfo> it = segFieldInfos.getFieldInfoIterator();
+          while(it.hasNext()) {
+            fieldInfos.add(it.next());
           }
         }
       } else {
@@ -2258,7 +2262,7 @@
 
     String mergedName = newSegmentName();
     SegmentMerger merger = new SegmentMerger(directory, termIndexInterval, mergedName, null, codecs, payloadProcessorProvider,
-                                             ((FieldInfos) docWriter.getFieldInfos().clone()));
+                                             fieldInfos.newFieldInfosWithGlobalFieldNumberMap());
 
     for (IndexReader reader : readers)      // add new indexes
       merger.add(reader);
@@ -3112,13 +3116,12 @@
 
     SegmentMerger merger = new SegmentMerger(directory, termIndexInterval, mergedName, merge, codecs, payloadProcessorProvider,
-                                             ((FieldInfos) docWriter.getFieldInfos().clone()));
+                                             fieldInfos.newFieldInfosWithGlobalFieldNumberMap());
 
     if (infoStream != null) {
       message("merging " + merge.segString(directory) + " mergeVectors=" + merger.fieldInfos().hasVectors());
     }
 
-    merge.info.setHasVectors(merger.fieldInfos().hasVectors());
     merge.readers = new SegmentReader[numSegments];
     merge.readersClone = new SegmentReader[numSegments];
@@ -3171,6 +3174,7 @@
         // this segment:
         //System.out.println("merger set hasProx=" + merger.hasProx() + " seg=" + merge.info.name);
         merge.info.setHasProx(merger.fieldInfos().hasProx());
+        merge.info.setHasVectors(merger.fieldInfos().hasVectors());
 
         boolean useCompoundFile;
         synchronized (this) { // Guard segmentInfos
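At the IndexWriter level the same registry serves merges: on open, getCurrentFieldInfos() folds every existing segment's FieldInfos into one instance, and each SegmentMerger now receives a FieldInfos derived from it instead of a clone of the DocumentsWriter's, so merged segments and freshly flushed segments cannot disagree about a field's number. Continuing the GlobalFieldNumbers sketch from after the FieldInfos section (illustrative only):

    // One registry, seeded on open, shared by flushes and merges.
    final class WriterFieldNumbersDemo {
      public static void main(String[] args) {
        GlobalFieldNumbers registry = new GlobalFieldNumbers(); // cf. getCurrentFieldInfos()
        int content = registry.addOrGet("content"); // number fixed by an existing segment
        // a merge and a new flush both resolve "content" to the same number:
        System.out.println(registry.addOrGet("content") == content); // prints true
      }
    }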
src/java/org/apache/lucene/index/codecs/preflex/PreFlexFields.java (working copy)
@@ -94,9 +94,9 @@
       // so that if an index update removes them we'll still have them
       freqStream = dir.openInput(info.name + ".frq", readBufferSize);
       boolean anyProx = false;
-      final int numFields = fieldInfos.size();
-      for(int i=0;i<numFields;i++) {
-        final FieldInfo fieldInfo = fieldInfos.fieldInfo(i);
+      Iterator<FieldInfo> it = fieldInfos.getFieldInfoIterator();
+      while(it.hasNext()) {
+        final FieldInfo fieldInfo = it.next();
         if (fieldInfo.isIndexed) {
           fields.put(fieldInfo.name, fieldInfo);
           preTerms.put(fieldInfo.name, new PreTerms(fieldInfo));
Index: src/java/org/apache/lucene/index/DocInverter.java
===================================================================
--- src/java/org/apache/lucene/index/DocInverter.java (revision 1068310)
+++ src/java/org/apache/lucene/index/DocInverter.java (working copy)
@@ -40,13 +40,6 @@
   }
 
   @Override
-  void setFieldInfos(FieldInfos fieldInfos) {
-    super.setFieldInfos(fieldInfos);
-    consumer.setFieldInfos(fieldInfos);
-    endConsumer.setFieldInfos(fieldInfos);
-  }
-
-  @Override
   void flush(Map<DocFieldConsumerPerThread, Collection<DocFieldConsumerPerField>> threadsAndFields, SegmentWriteState state) throws IOException {
 
     Map<InvertedDocConsumerPerThread, Collection<InvertedDocConsumerPerField>> childThreadsAndFields = new HashMap<InvertedDocConsumerPerThread, Collection<InvertedDocConsumerPerField>>();
Index: src/java/org/apache/lucene/index/NormsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/NormsWriter.java (revision 1068310)
+++ src/java/org/apache/lucene/index/NormsWriter.java (working copy)
@@ -36,7 +36,6 @@
 
 final class NormsWriter extends InvertedDocEndConsumer {
 
-  private FieldInfos fieldInfos;
   @Override
   public InvertedDocEndConsumerPerThread addThread(DocInverterPerThread docInverterPerThread) {
     return new NormsWriterPerThread(docInverterPerThread, this);
@@ -48,11 +47,6 @@
   // We only write the _X.nrm file at flush
   void files(Collection<String> files) {}
 
-  @Override
-  void setFieldInfos(FieldInfos fieldInfos) {
-    this.fieldInfos = fieldInfos;
-  }
-
   /** Produce _X.nrm if any document had a field with norms
    *  not disabled */
   @Override
@@ -60,7 +54,7 @@
 
     final Map<FieldInfo,List<NormsWriterPerField>> byField = new HashMap<FieldInfo,List<NormsWriterPerField>>();
 
-    if (!fieldInfos.hasNorms()) {
+    if (!state.fieldInfos.hasNorms()) {
       return;
     }
 
@@ -96,13 +90,12 @@
     try {
       normsOut.writeBytes(SegmentMerger.NORMS_HEADER, 0, SegmentMerger.NORMS_HEADER.length);
 
-      final int numField = fieldInfos.size();
-
       int normCount = 0;
 
-      for(int fieldNumber=0;fieldNumber<numField;fieldNumber++) {
-        final FieldInfo fieldInfo = fieldInfos.fieldInfo(fieldNumber);
+      Iterator<FieldInfo> it = state.fieldInfos.getFieldInfoIterator();
+      while(it.hasNext()) {
+        final FieldInfo fieldInfo = it.next();
 
         List<NormsWriterPerField> toMerge = byField.get(fieldInfo);
         int upto = 0;