Index: lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/CompressingCodec.java
===================================================================
--- lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/CompressingCodec.java	(révision 1423902)
+++ lucene/test-framework/src/java/org/apache/lucene/codecs/compressing/CompressingCodec.java	(copie de travail)
@@ -21,6 +21,7 @@
 
 import org.apache.lucene.codecs.FilterCodec;
 import org.apache.lucene.codecs.StoredFieldsFormat;
+import org.apache.lucene.codecs.TermVectorsFormat;
 import org.apache.lucene.codecs.lucene41.Lucene41Codec;
 
 import com.carrotsearch.randomizedtesting.generators.RandomInts;
@@ -65,6 +66,7 @@
   }
 
   private final CompressingStoredFieldsFormat storedFieldsFormat;
+  private final CompressingTermVectorsFormat termVectorsFormat;
 
   /**
    * Creates a compressing codec with a given segment suffix
@@ -72,6 +74,7 @@
   public CompressingCodec(String name, String segmentSuffix, CompressionMode compressionMode, int chunkSize) {
     super(name, new Lucene41Codec());
     this.storedFieldsFormat = new CompressingStoredFieldsFormat(name, segmentSuffix, compressionMode, chunkSize);
+    this.termVectorsFormat = new CompressingTermVectorsFormat(name, compressionMode, chunkSize);
   }
   
   /**
@@ -87,7 +90,12 @@
   }
 
   @Override
+  public TermVectorsFormat termVectorsFormat() {
+    return termVectorsFormat;
+  }
+
+  @Override
   public String toString() {
-    return getName() + "(storedFieldsFormat=" + storedFieldsFormat + ")";
+    return getName() + "(storedFieldsFormat=" + storedFieldsFormat + ", termVectorsFormat=" + termVectorsFormat + ")";
   }
 }
Index: lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsWriter.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsWriter.java	(révision 0)
+++ lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsWriter.java	(copie de travail)
@@ -0,0 +1,766 @@
+package org.apache.lucene.codecs.compressing;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.List;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.lucene.codecs.CodecUtil;
+import org.apache.lucene.codecs.TermVectorsWriter;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldInfos;
+import org.apache.lucene.index.IndexFileNames;
+import org.apache.lucene.index.SegmentInfo;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.packed.PackedInts;
+
+final class CompressingTermVectorsWriter extends TermVectorsWriter {
+
+  static final String VECTORS_EXTENSION = "tvd";
+  static final String VECTORS_INDEX_EXTENSION = "tvx";
+
+  static final String CODEC_SFX_IDX = "Index";
+  static final String CODEC_SFX_DAT = "Data";
+
+  static final int VERSION_START = 0;
+  static final int VERSION_CURRENT = VERSION_START;
+
+  static final int POSITIONS = 0x01;
+  static final int   OFFSETS = 0x02;
+  static final int  PAYLOADS = 0x04;
+  static final int FLAGS_MASK = POSITIONS | OFFSETS | PAYLOADS;
+  static final int FLAGS_BITS = PackedInts.bitsRequired(FLAGS_MASK);
+
+  static long zigZagEncode(long l) { // maps signed to unsigned so small-magnitude values stay small (vLong-friendly)
+    return (l >> 63) ^ (l << 1); // arithmetic >> smears the sign bit: equals (l < 0 ? -2*l - 1 : 2*l)
+  }
+  
+  private final Directory directory;
+  private final String segment;
+  private CompressingStoredFieldsIndexWriter indexWriter;
+  private IndexOutput vectorsStream;
+
+  private final CompressionMode compressionMode;
+  private final Compressor compressor;
+  private final int chunkSize;
+
+  private static class DocData {
+    final int numFields;
+    final List<FieldData> fields;
+    DocData(int numFields) {
+      this.numFields = numFields;
+      this.fields = new ArrayList<FieldData>(numFields);
+    }
+    FieldData addField(int fieldNum, int flags, int numTerms) {
+      FieldData field = new FieldData(fieldNum, flags, numTerms);
+      fields.add(field);
+      return field;
+    }
+  }
+
+  private static class FieldData {
+    final int fieldNum, flags, numTerms;
+    final List<TermData> terms;
+    FieldData(int fieldNum, int flags, int numTerms) {
+      this.fieldNum = fieldNum;
+      this.flags = flags;
+      this.numTerms = numTerms;
+      terms = new ArrayList<TermData>(numTerms);
+    }
+    TermData addTerm(int length, int freq) {
+      TermData term = new TermData(length, freq, flags != 0);
+      terms.add(term);
+      return term;
+    }
+  }
+
+  private static class TermData {
+    final int length, freq;
+    final List<PositionData> positions;
+    TermData(int length, int freq, boolean hasPositions) {
+      this.length = length;
+      this.freq = freq;
+      this.positions = hasPositions ? new ArrayList<PositionData>(freq) : null;
+    }
+    PositionData addPosition(int position, int startOffset, int endOffset, int payloadLength) {
+      PositionData pd = new PositionData(position, startOffset, endOffset, payloadLength);
+      positions.add(pd);
+      return pd;
+    }
+  }
+
+  private static class PositionData {
+    final int position, startOffset, endOffset, payloadLength;
+    PositionData(int position, int startOffset, int endOffset, int payloadLength) {
+      super();
+      this.position = position;
+      this.startOffset = startOffset;
+      this.endOffset = endOffset;
+      this.payloadLength = payloadLength;
+    }
+  }
+
+  private int numDocs; // total number of docs seen
+  private final List<DocData> pendingDocs; // pending docs
+  private DocData curDoc; // current document
+  private FieldData curField; // current field
+  private TermData curTerm; // current term
+  private final GrowableByteArrayDataOutput bufferedTerms; // buffered terms and payloads to compress
+
+  CompressingTermVectorsWriter(Directory directory, SegmentInfo si, IOContext context,
+      String formatName, CompressionMode compressionMode, int chunkSize) throws IOException {
+    assert directory != null;
+    this.directory = directory;
+    this.segment = si.name;
+    this.compressionMode = compressionMode;
+    this.compressor = compressionMode.newCompressor();
+    this.chunkSize = chunkSize;
+
+    numDocs = 0;
+    pendingDocs = new ArrayList<DocData>();
+    bufferedTerms = new GrowableByteArrayDataOutput(ArrayUtil.oversize(chunkSize, 1));
+
+    boolean success = false;
+    IndexOutput indexStream = directory.createOutput(IndexFileNames.segmentFileName(segment, "", VECTORS_INDEX_EXTENSION), context);
+    try {
+      vectorsStream = directory.createOutput(IndexFileNames.segmentFileName(segment, "", VECTORS_EXTENSION), context);
+
+      final String codecNameIdx = formatName + CODEC_SFX_IDX;
+      final String codecNameDat = formatName + CODEC_SFX_DAT;
+      CodecUtil.writeHeader(indexStream, codecNameIdx, VERSION_CURRENT);
+      CodecUtil.writeHeader(vectorsStream, codecNameDat, VERSION_CURRENT);
+      assert CodecUtil.headerLength(codecNameDat) == vectorsStream.getFilePointer();
+      assert CodecUtil.headerLength(codecNameIdx) == indexStream.getFilePointer();
+
+      indexWriter = new CompressingStoredFieldsIndexWriter(indexStream);
+      indexStream = null;
+
+      vectorsStream.writeVInt(PackedInts.VERSION_CURRENT);
+
+      success = true;
+    } finally {
+      if (!success) {
+        IOUtils.closeWhileHandlingException(indexStream);
+        abort();
+      }
+    }
+  }
+
+  @Override
+  public void close() throws IOException {
+    try {
+      IOUtils.close(vectorsStream, indexWriter);
+    } finally {
+      vectorsStream = null;
+      indexWriter = null;
+    }
+  }
+
+  @Override
+  public void abort() {
+    IOUtils.closeWhileHandlingException(this);
+    IOUtils.deleteFilesIgnoringExceptions(directory,
+        IndexFileNames.segmentFileName(segment, "", VECTORS_EXTENSION),
+        IndexFileNames.segmentFileName(segment, "", VECTORS_INDEX_EXTENSION));
+  }
+
+  @Override
+  public void startDocument(int numVectorFields) throws IOException {
+    assert curField == null || curDoc.numFields == curDoc.fields.size();
+    assert curTerm == null || curField.flags == 0 || curTerm.positions.size() == curTerm.freq;
+
+    if (shouldFlush()) {
+      flush();
+    }
+    ++numDocs;
+    curDoc = new DocData(numVectorFields);
+    pendingDocs.add(curDoc);
+    curField = null;
+    curTerm = null;
+  }
+
+  @Override
+  public void startField(FieldInfo info, int numTerms, boolean positions,
+      boolean offsets, boolean payloads) throws IOException {
+    assert curField == null || curField.numTerms == curField.terms.size();
+    assert curTerm == null || curField.flags == 0 || curTerm.positions.size() == curTerm.freq;
+    curField = curDoc.addField(info.number, (positions ? POSITIONS : 0) | (offsets ? OFFSETS : 0) | (payloads ? PAYLOADS : 0), numTerms);
+    curTerm = null;
+  }
+
+  @Override
+  public void startTerm(BytesRef term, int freq) throws IOException {
+    assert freq >= 1;
+    assert curTerm == null || curField.flags == 0 || curTerm.positions.size() == curTerm.freq;
+    curTerm = curField.addTerm(term.length, freq);
+    bufferedTerms.writeBytes(term.bytes, term.offset, term.length);
+  }
+
+  @Override
+  public void addPosition(int position, int startOffset, int endOffset,
+      BytesRef payload) throws IOException {
+    assert curField.flags != 0;
+    curTerm.addPosition(position, startOffset, endOffset, payload == null ? 0 : payload.length);
+    if (payload != null) {
+      bufferedTerms.writeBytes(payload.bytes, payload.offset, payload.length);
+    }
+  }
+
+  private boolean shouldFlush() { // flush once the term/payload buffer fills up, or enough docs are pending
+    return bufferedTerms.length >= chunkSize || pendingDocs.size() >= chunkSize; // NOTE(review): chunkSize is a byte threshold; reusing it as a doc-count cap looks accidental -- confirm intended
+  }
+
+  private void flush() throws IOException {
+    final int chunkDocs = pendingDocs.size();
+    assert chunkDocs > 0 : chunkDocs;
+    assert expectedBufferLength() == bufferedTerms.length;
+
+    // write the index file
+    indexWriter.writeIndex(chunkDocs, vectorsStream.getFilePointer());
+
+    final int docBase = numDocs - chunkDocs;
+    vectorsStream.writeVInt(docBase);
+    vectorsStream.writeVInt(chunkDocs);
+    
+    int totalFields = flushNumFields(chunkDocs);
+
+    if (totalFields > 0) {
+      // metadata
+      final int[] fieldNums = flushFieldNums();
+      flushFields(totalFields, fieldNums);
+      flushFlags(totalFields, fieldNums);
+      final int totalTerms = flushNumTerms(totalFields);
+      flushTermLengths(totalTerms);
+      flushTermFreqs(totalTerms);
+      flushPositions();
+      flushOffsets(fieldNums);
+      flushPayloadLengths();
+
+      // compress terms and payloads and write them to the output
+      compressor.compress(bufferedTerms.bytes, 0, bufferedTerms.length, vectorsStream);
+    }
+
+    // reset
+    pendingDocs.clear();
+    curDoc = null;
+    curField = null;
+    curTerm = null;
+    bufferedTerms.length = 0;
+  }
+
+  private int flushNumFields(int chunkDocs) throws IOException { // writes per-doc field counts for the chunk; returns the total field count
+    if (chunkDocs == 1) {
+      final int numFields = pendingDocs.get(0).numFields;
+      vectorsStream.writeVInt(numFields);
+      return numFields;
+    } else {
+      boolean allEqual = true;
+      int maxNumField = 0;
+      int totalFields = 0;
+      for (DocData dd : pendingDocs) {
+        if (dd.numFields != pendingDocs.get(0).numFields) {
+          allEqual = false;
+        }
+        maxNumField |= dd.numFields; // OR is enough here: bitsRequired(a|b) == bitsRequired(max(a,b))
+        totalFields += dd.numFields;
+      }
+      if (allEqual) {
+        vectorsStream.writeVInt(0); // 0 token means "same numFields for every doc"
+        vectorsStream.writeVInt(maxNumField); // all counts equal, so the OR collapses to that shared value
+      } else {
+        final int bitsRequired = PackedInts.bitsRequired(maxNumField);
+        vectorsStream.writeVInt(bitsRequired);
+        final PackedInts.Writer writer = PackedInts.getWriterNoHeader(
+            vectorsStream, PackedInts.Format.PACKED, chunkDocs, bitsRequired, 1);
+        for (DocData dd : pendingDocs) {
+          writer.add(dd.numFields);
+        }
+        assert writer.ord() == chunkDocs - 1;
+        writer.finish();
+      }
+      return totalFields;
+    }
+  }
+
+  private int[] flushFieldNums() throws IOException {
+    SortedSet<Integer> fieldNums = new TreeSet<Integer>();
+    for (DocData dd : pendingDocs) {
+      for (FieldData fd : dd.fields) {
+        fieldNums.add(fd.fieldNum);
+      }
+    }
+
+    final int numDistinctFields = fieldNums.size();
+    if (numDistinctFields == 0) {
+      vectorsStream.writeVInt(0);
+    } else {
+      final int bitsRequired = PackedInts.bitsRequired(fieldNums.last());
+      final int token = (Math.min(numDistinctFields, 0x07) << 5) | bitsRequired;
+      vectorsStream.writeByte((byte) token);
+      if (numDistinctFields >= 0x07) {
+        vectorsStream.writeVInt(numDistinctFields - 0x07);
+      }
+      final PackedInts.Writer writer = PackedInts.getWriterNoHeader(vectorsStream, PackedInts.Format.PACKED, fieldNums.size(), bitsRequired, 1);
+      for (Integer fieldNum : fieldNums) {
+        writer.add(fieldNum);
+      }
+      writer.finish();
+    }
+
+    int[] fns = new int[fieldNums.size()];
+    int i = 0;
+    for (Integer key : fieldNums) {
+      fns[i++] = key;
+    }
+    return fns;
+  }
+
+  private void flushFields(int totalFields, int[] fieldNums) throws IOException {
+    final PackedInts.Writer writer = PackedInts.getWriterNoHeader(vectorsStream, PackedInts.Format.PACKED, totalFields, PackedInts.bitsRequired(fieldNums.length - 1), 1);
+    for (DocData dd : pendingDocs) {
+      for (FieldData fd : dd.fields) {
+        final int fieldNumIndex = Arrays.binarySearch(fieldNums, fd.fieldNum);
+        assert fieldNumIndex >= 0;
+        writer.add(fieldNumIndex);
+      }
+    }
+    writer.finish();
+  }
+
+  private void flushFlags(int totalFields, int[] fieldNums) throws IOException {
+    // check if fields always have the same flags
+    boolean nonChangingFlags = true;
+    int[] fieldFlags = new int[fieldNums.length];
+    Arrays.fill(fieldFlags, -1);
+    outer:
+    for (DocData dd : pendingDocs) {
+      for (FieldData fd : dd.fields) {
+        final int fieldNumOff = Arrays.binarySearch(fieldNums, fd.fieldNum);
+        assert fieldNumOff >= 0;
+        if (fieldFlags[fieldNumOff] == -1) {
+          fieldFlags[fieldNumOff] = fd.flags;
+        } else if (fieldFlags[fieldNumOff] != fd.flags) {
+          nonChangingFlags = false;
+          break outer;
+        }
+      }
+    }
+    
+    if (nonChangingFlags) {
+      // write one flag per field num
+      vectorsStream.writeVInt(0);
+      final PackedInts.Writer writer = PackedInts.getWriterNoHeader(vectorsStream, PackedInts.Format.PACKED, fieldFlags.length, FLAGS_BITS, 1);
+      for (int flags : fieldFlags) {
+        assert flags >= 0;
+        writer.add(flags);
+      }
+      assert writer.ord() == fieldFlags.length - 1;
+      writer.finish();
+    } else {
+      // write one flag for every field instance
+      vectorsStream.writeVInt(1);
+      final PackedInts.Writer writer = PackedInts.getWriterNoHeader(vectorsStream, PackedInts.Format.PACKED, totalFields, FLAGS_BITS, 1);
+      for (DocData dd : pendingDocs) {
+        for (FieldData fd : dd.fields) {
+          writer.add(fd.flags);
+        }
+      }
+      assert writer.ord() == totalFields - 1;
+      writer.finish();
+    }
+  }
+
+  private int flushNumTerms(int totalFields) throws IOException {
+    int maxNumTerms = 0;
+    int totalNumTerms = 0;
+    for (DocData dd : pendingDocs) {
+      for (FieldData fd : dd.fields) {
+        maxNumTerms |= fd.numTerms;
+        totalNumTerms += fd.numTerms;
+      }
+    }
+    final int bitsRequired = PackedInts.bitsRequired(maxNumTerms);
+    vectorsStream.writeVInt(bitsRequired);
+    final PackedInts.Writer writer = PackedInts.getWriterNoHeader(
+        vectorsStream, PackedInts.Format.PACKED, totalFields, bitsRequired, 1);
+    for (DocData dd : pendingDocs) {
+      for (FieldData fd : dd.fields) {
+        writer.add(fd.numTerms);
+      }
+    }
+    assert writer.ord() == totalFields - 1;
+    writer.finish();
+    return totalNumTerms;
+  }
+
+  private int flushTermLengths(int totalTerms) throws IOException {
+    int maxTermLength = 0;
+    int totalTermLength = 0;
+    for (DocData dd : pendingDocs) {
+      for (FieldData fd : dd.fields) {
+        for (TermData td : fd.terms) {
+          maxTermLength |= td.length;
+          totalTermLength += td.length;
+        }
+      }
+    }
+    final int bitsRequired = PackedInts.bitsRequired(maxTermLength);
+    vectorsStream.writeVInt(bitsRequired);
+    final PackedInts.Writer writer = PackedInts.getWriterNoHeader(vectorsStream, PackedInts.Format.PACKED, totalTerms, bitsRequired, 1);
+    for (DocData dd : pendingDocs) {
+      for (FieldData fd : dd.fields) {
+        for (TermData td : fd.terms) {
+          writer.add(td.length);
+        }
+      }
+    }
+    assert writer.ord() == totalTerms - 1;
+    writer.finish();
+    return totalTermLength;
+  }
+
+  private long estimateByteCountTermFreqs(int totalTerms, int bpv) {
+    // estimate the space required to flush term freqs using only
+    // bpv bits per value
+    assert bpv >= 1 && bpv < 32;
+    final int maxValue = (int) PackedInts.maxValue(bpv);
+
+    int patchSize = 0;
+    int maxDiff = 0;
+
+    for (DocData dd : pendingDocs) {
+      for (FieldData fd : dd.fields) {
+        for (TermData td : fd.terms) {
+          if (td.freq - 1 >= maxValue) {
+            patchSize += 1;
+            maxDiff = Math.max(maxDiff, td.freq - 1 - maxValue);
+          }
+        }
+      }
+    }
+
+    long result = PackedInts.Format.PACKED.byteCount(PackedInts.VERSION_CURRENT, totalTerms, bpv);
+    if (maxDiff > 0) {
+      result += 1 + PackedInts.Format.PACKED.byteCount(PackedInts.VERSION_CURRENT, patchSize, PackedInts.bitsRequired(maxDiff));
+    }
+    return result;
+  }
+
+  private void flushTermFreqs(int totalTerms) throws IOException { // writes freq-1 per term; may saturate rare large freqs and patch the excess separately
+    int maxFreq = 0;
+    for (DocData dd : pendingDocs) {
+      for (FieldData fd : dd.fields) {
+        for (TermData td : fd.terms) {
+          maxFreq |= (td.freq - 1); // OR-accumulate: only bitsRequired(max) matters, and OR preserves it
+        }
+      }
+    }
+    final int maxBitsRequired = PackedInts.bitsRequired(maxFreq);
+    int bitsRequired = maxBitsRequired;
+    long expectedSize = PackedInts.Format.PACKED.byteCount(PackedInts.VERSION_CURRENT, totalTerms, bitsRequired);
+    // can we use less space by only patching extreme values?
+    for (int i = 1; i < maxBitsRequired; ++i) {
+      final long size = estimateByteCountTermFreqs(totalTerms, i);
+      if (size < expectedSize) {
+        bitsRequired = i;
+        expectedSize = size;
+      }
+    }
+    vectorsStream.writeVInt(bitsRequired);
+    final int maxValue = (int) PackedInts.maxValue(bitsRequired);
+    int patchSize = 0;
+    int maxDelta = 0;
+    PackedInts.Writer writer = PackedInts.getWriterNoHeader(vectorsStream, PackedInts.Format.PACKED, totalTerms, bitsRequired, 1);
+    for (DocData dd : pendingDocs) {
+      for (FieldData fd : dd.fields) {
+        for (TermData td : fd.terms) {
+          writer.add(Math.min(maxValue, td.freq - 1)); // saturate; oversized freqs get their excess in the patch list below
+          if (td.freq - 1 >= maxValue) {
+            ++patchSize;
+            maxDelta = Math.max(maxDelta, td.freq - 1 - maxValue);
+          }
+        }
+      }
+    }
+    assert writer.ord() == totalTerms - 1;
+    writer.finish();
+
+    // write patch
+    if (patchSize > 0) {
+      final int patchBitsRequired = PackedInts.bitsRequired(maxDelta);
+      if (patchBitsRequired > 0) { // NOTE(review): if this can be 0 while patchSize > 0, no patch header is written although stored values saturate -- confirm the reader handles that case
+        vectorsStream.writeVInt(patchBitsRequired);
+        writer = PackedInts.getWriterNoHeader(vectorsStream, PackedInts.Format.PACKED, patchSize, patchBitsRequired, 1);
+        for (DocData dd : pendingDocs) {
+          for (FieldData fd : dd.fields) {
+            for (TermData td : fd.terms) {
+              if (td.freq - 1 >= maxValue) {
+                writer.add(td.freq - 1 - maxValue);
+              }
+            }
+          }
+        }
+        writer.finish();
+      }
+    }
+  }
+
+  private void flushPositions() throws IOException {
+    int totalPositions = 0;
+    long minPositionDelta = Long.MAX_VALUE;
+    long maxPositionDelta = Long.MIN_VALUE;
+    for (DocData dd : pendingDocs) {
+      for (FieldData fd : dd.fields) {
+        if ((fd.flags & POSITIONS) != 0) {
+          for (TermData td : fd.terms) {
+            totalPositions += td.freq;
+            int previousPosition = 0;
+            for (PositionData pd : td.positions) {
+              final long positionDelta = pd.position - previousPosition;
+              minPositionDelta = Math.min(minPositionDelta, positionDelta);
+              maxPositionDelta = Math.max(maxPositionDelta, positionDelta);
+              previousPosition = pd.position;
+            }
+          }
+        }
+      }
+    }
+    if (totalPositions == 0) {
+      // nothing to do
+      return;
+    }
+    final int posBitsRequired = PackedInts.bitsRequired(maxPositionDelta - minPositionDelta);
+    vectorsStream.writeVLong((zigZagEncode(minPositionDelta)));
+    vectorsStream.writeVInt(posBitsRequired);
+
+    final PackedInts.Writer writer = PackedInts.getWriterNoHeader(vectorsStream, PackedInts.Format.PACKED, totalPositions, posBitsRequired, 1);
+    for (DocData dd : pendingDocs) {
+      for (FieldData fd : dd.fields) {
+        if ((fd.flags & POSITIONS) != 0) {
+          for (TermData td : fd.terms) {
+            int previousPosition = 0;
+            for (PositionData pd : td.positions) {
+              writer.add(pd.position - previousPosition - minPositionDelta);
+              previousPosition = pd.position;
+            }
+          }
+        }
+      }
+    }
+    assert writer.ord() == totalPositions - 1;
+    writer.finish();
+  }
+
+  private void flushOffsets(int[] fieldNums) throws IOException {
+    int totalOffsets = 0;
+    long[] sumPos = new long[fieldNums.length];
+    long[] sumOffsets = new long[fieldNums.length];
+    for (DocData dd : pendingDocs) {
+      for (FieldData fd : dd.fields) {
+        final int fieldNumOff = Arrays.binarySearch(fieldNums, fd.fieldNum);
+        if ((fd.flags & OFFSETS) != 0) {
+          for (TermData td : fd.terms) {
+            totalOffsets += td.freq;
+            if ((fd.flags & POSITIONS) != 0) {
+              int previousPos = 0;
+              int previousOff = 0;
+              for (PositionData pd : td.positions) {
+                sumPos[fieldNumOff] += pd.position - previousPos;
+                sumOffsets[fieldNumOff] += pd.startOffset - previousOff;
+                previousPos = pd.position;
+                previousOff = pd.startOffset;
+              }
+            }
+          }
+        }
+      }
+    }
+    
+    if (totalOffsets == 0) {
+      // nothing to do
+      return;
+    }
+
+    final float[] charsPerTerm = new float[fieldNums.length];
+    for (int i = 0; i < fieldNums.length; ++i) {
+      charsPerTerm[i] = (sumPos[i] <= 0 || sumOffsets[i] <= 0) ? 0 : (float) ((double) sumOffsets[i] / sumPos[i]);
+    }
+
+    long minStartOffsetDelta = Long.MAX_VALUE;
+    long maxStartOffsetDelta = Long.MIN_VALUE;
+    long minLengthDelta = Long.MAX_VALUE;
+    long maxLengthDelta = Long.MIN_VALUE;
+    for (DocData dd : pendingDocs) {
+      for (FieldData fd : dd.fields) {
+        if ((fd.flags & OFFSETS) != 0) {
+          final int fieldNumOff = Arrays.binarySearch(fieldNums, fd.fieldNum);
+          final float cpt = charsPerTerm[fieldNumOff];
+          for (TermData td : fd.terms) {
+            int previousPosition = 0;
+            int previousStartOffset = 0;
+            for (PositionData pd : td.positions) {
+              final long startOffsetDelta = pd.startOffset - previousStartOffset - (int) (cpt * (pd.position - previousPosition));
+              final long lengthDelta = pd.endOffset - pd.startOffset - td.length;
+              minStartOffsetDelta = Math.min(minStartOffsetDelta, startOffsetDelta);
+              maxStartOffsetDelta = Math.max(maxStartOffsetDelta, startOffsetDelta);
+              minLengthDelta = Math.min(minLengthDelta, lengthDelta);
+              maxLengthDelta = Math.max(maxLengthDelta, lengthDelta);
+              previousPosition = pd.position;
+              previousStartOffset = pd.startOffset;
+            }
+          }
+        }
+      }
+    }
+
+    // start offsets
+    for (int i = 0; i < fieldNums.length; ++i) {
+      vectorsStream.writeInt(Float.floatToRawIntBits(charsPerTerm[i]));
+    }
+    final int startOffBitsRequired = PackedInts.bitsRequired(maxStartOffsetDelta - minStartOffsetDelta);
+    vectorsStream.writeVLong(zigZagEncode(minStartOffsetDelta));
+    vectorsStream.writeVInt(startOffBitsRequired);
+
+    PackedInts.Writer writer = PackedInts.getWriterNoHeader(vectorsStream, PackedInts.Format.PACKED, totalOffsets, startOffBitsRequired, 1);
+    for (DocData dd : pendingDocs) {
+      for (FieldData fd : dd.fields) {
+        if ((fd.flags & OFFSETS) != 0) {
+          final int fieldNumOff = Arrays.binarySearch(fieldNums, fd.fieldNum);
+          final float cpt = charsPerTerm[fieldNumOff];
+          for (TermData td : fd.terms) {
+            int previousPosition = 0;
+            int previousStartOffset = 0;
+            for (PositionData pd : td.positions) {
+              writer.add(pd.startOffset - previousStartOffset - (int) (cpt * (pd.position - previousPosition)) - minStartOffsetDelta);
+              previousPosition = pd.position;
+              previousStartOffset = pd.startOffset;
+            }
+          }
+        }
+      }
+    }
+    assert writer.ord() == totalOffsets - 1;
+    writer.finish();
+    
+    // lengths
+    final int lenBitsRequired = maxLengthDelta == minLengthDelta ? 0 : PackedInts.bitsRequired(maxLengthDelta - minLengthDelta);
+    vectorsStream.writeVLong(zigZagEncode(minLengthDelta));
+    vectorsStream.writeVInt(lenBitsRequired);
+    if (lenBitsRequired > 0) {
+      writer = PackedInts.getWriterNoHeader(vectorsStream, PackedInts.Format.PACKED, totalOffsets, lenBitsRequired, 1);
+      for (DocData dd : pendingDocs) {
+        for (FieldData fd : dd.fields) {
+          if ((fd.flags & OFFSETS) != 0) {
+            for (TermData td : fd.terms) {
+              for (PositionData pd : td.positions) {
+                writer.add(pd.endOffset - pd.startOffset - td.length - minLengthDelta);
+              }
+            }
+          }
+        }
+      }
+      assert writer.ord() == totalOffsets - 1;
+      writer.finish();
+    }
+  }
+
+  private void flushPayloadLengths() throws IOException {
+    int totalPayloads = 0;
+    int maxLength = 0;
+    for (DocData dd : pendingDocs) {
+      for (FieldData fd : dd.fields) {
+        if ((fd.flags & PAYLOADS) != 0) {
+          for (TermData td : fd.terms) {
+            totalPayloads += td.freq;
+            for (PositionData pd : td.positions) {
+              maxLength |= pd.payloadLength;
+            }
+          }
+        }
+      }
+    }
+    if (totalPayloads == 0) {
+      // nothing to do
+      return;
+    }
+    final int bitsRequired = PackedInts.bitsRequired(maxLength);
+    vectorsStream.writeVInt(bitsRequired);
+    final PackedInts.Writer writer = PackedInts.getWriterNoHeader(vectorsStream, PackedInts.Format.PACKED, totalPayloads, bitsRequired, 1);
+    for (DocData dd : pendingDocs) {
+      for (FieldData fd : dd.fields) {
+        if ((fd.flags & PAYLOADS) != 0) {
+          for (TermData td : fd.terms) {
+            for (PositionData pd : td.positions) {
+              writer.add(pd.payloadLength);
+            }
+          }
+        }
+      }
+    }
+    assert writer.ord() == totalPayloads - 1;
+    writer.finish();
+  }
+
+  private int expectedBufferLength() {
+    int expectedLength = 0;
+    for (DocData dd : pendingDocs) {
+      for (FieldData fd : dd.fields) {
+        for (TermData td : fd.terms) {
+          expectedLength += td.length;
+          if ((fd.flags & PAYLOADS) != 0) {
+            for (PositionData pd : td.positions) {
+              expectedLength += pd.payloadLength;
+            }
+          }
+        }
+      }
+    }
+    return expectedLength;
+  }
+
+  @Override
+  public void finish(FieldInfos fis, int numDocs) throws IOException {
+    assert curField == null || curDoc.numFields == curDoc.fields.size();
+    assert curTerm == null || curField.flags == 0 || curTerm.positions.size() == curTerm.freq;
+
+    if (!pendingDocs.isEmpty()) {
+      assert curDoc.numFields == curDoc.fields.size();
+      flush();
+    }
+    if (numDocs != this.numDocs) {
+      throw new RuntimeException("Wrote " + this.numDocs + " docs, finish called with numDocs=" + numDocs);
+    }
+    indexWriter.finish(numDocs);
+  }
+
+  @Override
+  public Comparator<BytesRef> getComparator() throws IOException {
+    return BytesRef.getUTF8SortedAsUnicodeComparator();
+  }
+  
+}

Modification de propriétés sur lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsWriter.java
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Index: lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsFormat.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsFormat.java	(révision 0)
+++ lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsFormat.java	(copie de travail)
@@ -0,0 +1,97 @@
+package org.apache.lucene.codecs.compressing;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.codecs.CodecUtil;
+import org.apache.lucene.codecs.StoredFieldsFormat;
+import org.apache.lucene.codecs.TermVectorsFormat;
+import org.apache.lucene.codecs.TermVectorsReader;
+import org.apache.lucene.codecs.TermVectorsWriter;
+import org.apache.lucene.index.FieldInfos;
+import org.apache.lucene.index.SegmentInfo;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IOContext;
+
+/**
+ * A {@link TermVectorsFormat} that compresses chunks of documents together in
+ * order to improve the compression ratio.
+ */
+public class CompressingTermVectorsFormat extends TermVectorsFormat {
+
+  private final String formatName;
+  private final CompressionMode compressionMode;
+  private final int chunkSize;
+
+  /**
+   * Create a new {@link CompressingTermVectorsFormat}.
+   * <p>
+   * <code>formatName</code> is the name of the format. This name will be used
+   * in the file formats to perform
+   * {@link CodecUtil#checkHeader(org.apache.lucene.store.DataInput, String, int, int) codec header checks}.
+   * <p>
+   * The <code>compressionMode</code> parameter allows you to choose between
+   * compression algorithms that have various compression and decompression
+   * speeds so that you can pick the one that best fits your indexing and
+   * searching throughput. You should never instantiate two
+   * {@link CompressingTermVectorsFormat}s that have the same name but
+   * different {@link CompressionMode}s.
+   * <p>
+   * <code>chunkSize</code> is the minimum byte size of a chunk of documents.
+   * Higher values of <code>chunkSize</code> should improve the compression
+   * ratio but will require more memory at indexing time and might make document
+   * loading a little slower (depending on the size of your OS cache compared
+   * to the size of your index).
+   *
+   * @param formatName the name of the {@link TermVectorsFormat}
+   * @param compressionMode the {@link CompressionMode} to use
+   * @param chunkSize the minimum number of bytes of a single chunk of term vectors
+   * @see CompressionMode
+   */
+  public CompressingTermVectorsFormat(String formatName, CompressionMode compressionMode, int chunkSize) {
+    // Validate before assigning any state so that a bad argument cannot leave
+    // a partially-configured instance behind.
+    if (chunkSize < 1) {
+      throw new IllegalArgumentException("chunkSize must be >= 1");
+    }
+    this.formatName = formatName;
+    this.compressionMode = compressionMode;
+    this.chunkSize = chunkSize;
+  }
+
+  /** Returns a reader that decodes term vectors written by this format. */
+  @Override
+  public TermVectorsReader vectorsReader(Directory directory,
+      SegmentInfo segmentInfo, FieldInfos fieldInfos, IOContext context)
+      throws IOException {
+    return new CompressingTermVectorsReader(directory, segmentInfo,
+        fieldInfos, context, formatName, compressionMode);
+  }
+
+  /** Returns a writer that buffers documents and compresses their term vectors
+   *  in chunks of at least {@code chunkSize} bytes. */
+  @Override
+  public TermVectorsWriter vectorsWriter(Directory directory,
+      SegmentInfo segmentInfo, IOContext context) throws IOException {
+    return new CompressingTermVectorsWriter(directory, segmentInfo, context,
+        formatName, compressionMode, chunkSize);
+  }
+
+  @Override
+  public String toString() {
+    return getClass().getSimpleName() + "(compressionMode=" + compressionMode
+        + ", chunkSize=" + chunkSize + ")";
+  }
+
+}

Modification de propriétés sur lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsFormat.java
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Index: lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java	(révision 0)
+++ lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java	(copie de travail)
@@ -0,0 +1,1043 @@
+package org.apache.lucene.codecs.compressing;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.CODEC_SFX_DAT;
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.CODEC_SFX_IDX;
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.FLAGS_BITS;
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.OFFSETS;
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.PAYLOADS;
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.POSITIONS;
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.VECTORS_EXTENSION;
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.VECTORS_INDEX_EXTENSION;
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.VERSION_CURRENT;
+import static org.apache.lucene.codecs.compressing.CompressingTermVectorsWriter.VERSION_START;
+
+import java.io.IOException;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+import org.apache.lucene.codecs.CodecUtil;
+import org.apache.lucene.codecs.TermVectorsReader;
+import org.apache.lucene.index.CorruptIndexException;
+import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.index.DocsEnum;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldInfos;
+import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.IndexFileNames;
+import org.apache.lucene.index.SegmentInfo;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.lucene.store.ByteArrayDataInput;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.LongsRef;
+import org.apache.lucene.util.packed.PackedInts;
+
+
+final class CompressingTermVectorsReader extends TermVectorsReader {
+
+  static long zigZagDecode(long n) {
+    return ((n >>> 1) ^ -(n & 1));
+  }
+  
+  private final FieldInfos fieldInfos;                          // field metadata for the segment
+  private final CompressingStoredFieldsIndexReader indexReader; // maps doc id -> chunk start pointer
+  private final IndexInput vectorsStream;                       // compressed vectors data stream
+  private final int packedIntsVersion;                          // packed ints format version read from the stream
+  private final CompressionMode compressionMode;
+  private final Decompressor decompressor;
+  private final BytesRef bytes;                                 // reusable buffer for decompressed chunk data
+  private final int numDocs;                                    // doc count of the segment
+  private boolean closed;
+
+  /**
+   * Cloning constructor (used by {@code clone()}): shares the immutable state
+   * of {@code other}, but clones the stream, index reader and decompressor so
+   * the new reader can be used independently, and allocates its own scratch
+   * buffer sized like the source reader's.
+   */
+  private CompressingTermVectorsReader(CompressingTermVectorsReader other) {
+    this.fieldInfos = other.fieldInfos;
+    this.numDocs = other.numDocs;
+    this.packedIntsVersion = other.packedIntsVersion;
+    this.compressionMode = other.compressionMode;
+    // stateful components must be cloned, not shared
+    this.vectorsStream = other.vectorsStream.clone();
+    this.indexReader = other.indexReader.clone();
+    this.decompressor = other.decompressor.clone();
+    this.bytes = new BytesRef(other.bytes.bytes.length);
+    this.closed = false;
+  }
+
+  /**
+   * Sole constructor: opens the vectors data and index files of the given
+   * segment, checks both codec headers, and loads the chunk index. If anything
+   * fails, everything opened so far is closed before the exception propagates.
+   */
+  public CompressingTermVectorsReader( Directory d, SegmentInfo si, FieldInfos fn,
+      IOContext context, String formatName, CompressionMode compressionMode) throws IOException {
+    this.compressionMode = compressionMode;
+    final String segment = si.name;
+    boolean success = false;
+    fieldInfos = fn;
+    numDocs = si.getDocCount();
+    IndexInput indexStream = null;
+    try {
+      vectorsStream = d.openInput(IndexFileNames.segmentFileName(segment, "", VECTORS_EXTENSION), context);
+      final String indexStreamFN = IndexFileNames.segmentFileName(segment, "", VECTORS_INDEX_EXTENSION);
+      indexStream = d.openInput(indexStreamFN, context);
+
+      final String codecNameIdx = formatName + CODEC_SFX_IDX;
+      final String codecNameDat = formatName + CODEC_SFX_DAT;
+      CodecUtil.checkHeader(indexStream, codecNameIdx, VERSION_START, VERSION_CURRENT);
+      CodecUtil.checkHeader(vectorsStream, codecNameDat, VERSION_START, VERSION_CURRENT);
+      assert CodecUtil.headerLength(codecNameDat) == vectorsStream.getFilePointer();
+      assert CodecUtil.headerLength(codecNameIdx) == indexStream.getFilePointer();
+
+      // ownership of indexStream transfers to indexReader (close() closes the
+      // indexReader, not the local stream), so release the local reference
+      indexReader = new CompressingStoredFieldsIndexReader(indexStream, si);
+      indexStream = null;
+
+      packedIntsVersion = vectorsStream.readVInt();
+      decompressor = compressionMode.newDecompressor();
+      this.bytes = new BytesRef();
+
+      success = true;
+    } finally {
+      if (!success) {
+        // closing 'this' releases vectorsStream/indexReader if already opened
+        IOUtils.closeWhileHandlingException(this, indexStream);
+      }
+    }
+  }
+
+  /**
+   * @throws AlreadyClosedException if this FieldsReader is closed
+   */
+  private void ensureOpen() throws AlreadyClosedException {
+    if (closed == false) {
+      return;
+    }
+    throw new AlreadyClosedException("this FieldsReader is closed");
+  }
+
+  /** Releases the vectors stream and index reader; safe to call multiple times. */
+  @Override
+  public void close() throws IOException {
+    if (closed) {
+      return; // already closed, nothing to do
+    }
+    IOUtils.close(vectorsStream, indexReader);
+    closed = true;
+  }
+
+  /** Returns an independent reader over the same data via the private cloning constructor. */
+  @Override
+  public TermVectorsReader clone() {
+    final CompressingTermVectorsReader cloned = new CompressingTermVectorsReader(this);
+    return cloned;
+  }
+
+  /**
+   * Returns the term vectors for {@code doc}, or {@code null} if the document
+   * has none. Decodes the per-chunk metadata (field counts, flags, term counts
+   * and lengths, freqs, then positions/offsets/payload lengths), then
+   * decompresses only the slice of the chunk that belongs to this document.
+   */
+  @Override
+  public Fields get(int doc) throws IOException {
+    ensureOpen();
+
+    // seek to the right place
+    {
+      final long startPointer = indexReader.getStartPointer(doc);
+      vectorsStream.seek(startPointer);
+    }
+
+    // decode metadata
+    final int docBase = vectorsStream.readVInt();
+    final int chunkDocs = vectorsStream.readVInt();
+    if (doc < docBase || doc >= docBase + chunkDocs || docBase + chunkDocs > numDocs) {
+      throw new CorruptIndexException("docBase=" + docBase + ",chunkDocs=" + chunkDocs + ",doc=" + doc);
+    }
+
+    // how many fields should we skip and how many fields does the doc have?
+    final int skip, numFields, totalFields;
+    if (chunkDocs == 1) {
+      skip = 0;
+      numFields = totalFields = vectorsStream.readVInt();
+    } else {
+      final int bitsPerValue = vectorsStream.readVInt();
+      if (bitsPerValue > 32) {
+        throw new CorruptIndexException(bitsPerValue + " > 32");
+      }
+      // bitsPerValue == 0 means every doc of the chunk has the same field count
+      if (bitsPerValue == 0) {
+        numFields = vectorsStream.readVInt();
+        skip = (doc - docBase) * numFields;
+        totalFields = chunkDocs * numFields;
+      } else {
+        int sum = 0;
+        final PackedInts.ReaderIterator it = PackedInts.getReaderIteratorNoHeader(vectorsStream, PackedInts.Format.PACKED, packedIntsVersion, chunkDocs, bitsPerValue, 1);
+        for (int i = docBase; i < doc; ++i) {
+          sum += it.next();
+        }
+        skip = sum;
+        numFields = (int) it.next();
+        sum += numFields;
+        for (int i = doc + 1; i < docBase + chunkDocs; ++i) {
+          sum += it.next();
+        }
+        totalFields = sum;
+      }
+    }
+
+    if (numFields == 0) {
+      return null;
+    }
+
+    // read field numbers that have term vectors
+    // token byte layout: low 5 bits = bits per field number,
+    // high 3 bits = distinct field count (0x07 means an extra vInt follows)
+    final int[] fieldNums;
+    {
+      final int token = vectorsStream.readByte() & 0xFF;
+      assert token != 0; // means no term vectors, cannot happen since we checked for numFields == 0
+      final int bitsPerFieldNum = token & 0x1F;
+      int totalDistinctFields = token >>> 5;
+      if (totalDistinctFields == 0x07) {
+        totalDistinctFields += vectorsStream.readVInt();
+      }
+      final PackedInts.ReaderIterator it = PackedInts.getReaderIteratorNoHeader(vectorsStream, PackedInts.Format.PACKED, packedIntsVersion, totalDistinctFields, bitsPerFieldNum, 1);
+      fieldNums = new int[totalDistinctFields];
+      for (int i = 0; i < totalDistinctFields; ++i) {
+        fieldNums[i] = (int) it.next();
+      }
+    }
+
+    // read field numbers and flags
+    final int[] fieldNumOffs = new int[numFields];
+    final PackedInts.Reader flags;
+    {
+      final int bitsPerOff = PackedInts.bitsRequired(fieldNums.length - 1);
+      final PackedInts.Reader allFieldNumOffs = PackedInts.getReaderNoHeader(vectorsStream, PackedInts.Format.PACKED, packedIntsVersion, totalFields, bitsPerOff);
+      // case 0: flags stored once per distinct field, expanded per occurrence;
+      // case 1: flags stored per field occurrence
+      switch (vectorsStream.readVInt()) {
+        case 0:
+          final PackedInts.Reader fieldFlags = PackedInts.getReaderNoHeader(vectorsStream, PackedInts.Format.PACKED, packedIntsVersion, fieldNums.length, FLAGS_BITS);
+          PackedInts.Mutable f = PackedInts.getMutable(totalFields, FLAGS_BITS, PackedInts.COMPACT);
+          for (int i = 0; i < totalFields; ++i) {
+            final int fieldNumOff = (int) allFieldNumOffs.get(i);
+            assert fieldNumOff >= 0 && fieldNumOff < fieldNums.length;
+            final int fgs = (int) fieldFlags.get(fieldNumOff);
+            f.set(i, fgs);
+          }
+          flags = f;
+          break;
+        case 1:
+          flags = PackedInts.getReaderNoHeader(vectorsStream, PackedInts.Format.PACKED, packedIntsVersion, totalFields, FLAGS_BITS);
+          break;
+        default:
+          throw new AssertionError();
+      }
+      for (int i = 0; i < numFields; ++i) {
+        fieldNumOffs[i] = (int) allFieldNumOffs.get(skip + i);
+      }
+    }
+    
+    // number of terms per field for all fields
+    final PackedInts.Reader numTerms;
+    final int totalTerms;
+    {
+      final int bitsRequired = vectorsStream.readVInt();
+      numTerms = PackedInts.getReaderNoHeader(vectorsStream, PackedInts.Format.PACKED, packedIntsVersion, totalFields, bitsRequired);
+      int sum = 0;
+      for (int i = 0; i < totalFields; ++i) {
+        sum += numTerms.get(i);
+      }
+      totalTerms = sum;
+    }
+
+    // term lengths
+    // docOff = bytes that precede this doc in the decompressed chunk,
+    // docLen = bytes belonging to this doc, totalLen = whole chunk
+    int docOff = 0, docLen = 0, totalLen;
+    final int[] fieldLengths = new int[numFields];
+    final int[][] termLengths = new int[numFields][];
+    {
+      final int bitsRequired = vectorsStream.readVInt();
+      final PackedInts.ReaderIterator it = PackedInts.getReaderIteratorNoHeader(vectorsStream, PackedInts.Format.PACKED, packedIntsVersion, totalTerms, bitsRequired, 1);
+      // skip
+      for (int i = 0; i < skip; ++i) {
+        for (int j = 0; j < numTerms.get(i); ++j) {
+          docOff += it.next();
+        }
+      }
+      for (int i = 0; i < numFields; ++i) {
+        final int termCount = (int) numTerms.get(skip + i);
+        termLengths[i] = new int[termCount];
+        for (int j = 0; j < termCount; ++j) {
+          termLengths[i][j] = (int) it.next();
+          fieldLengths[i] += termLengths[i][j];
+          docLen += termLengths[i][j];
+        }
+      }
+      totalLen = docOff + docLen;
+      for (int i = skip + numFields; i < totalFields; ++i) {
+        for (int j = 0; j < numTerms.get(i); ++j) {
+          totalLen += it.next();
+        }
+      }
+    }
+
+    // term freqs
+    // freqs are stored as (freq - 1); values that saturate at maxValue get an
+    // extra delta from the patch list that follows
+    final int[] termFreqs = new int[totalTerms];
+    {
+      final int bitsRequired = vectorsStream.readVInt();
+      final int maxValue = 1 + (int) PackedInts.maxValue(bitsRequired);
+      int patchSize = 0;
+      PackedInts.ReaderIterator it = PackedInts.getReaderIteratorNoHeader(vectorsStream, PackedInts.Format.PACKED, packedIntsVersion, totalTerms, bitsRequired, 1);
+      for (int i = 0; i < totalTerms; ++i) {
+        termFreqs[i] = 1 + (int) it.next();
+        if (termFreqs[i] == maxValue) {
+          ++patchSize;
+        }
+      }
+      assert it.ord() == totalTerms - 1;
+      if (patchSize > 0) {
+        final int patchBitsRequired = vectorsStream.readVInt();
+        it = PackedInts.getReaderIteratorNoHeader(vectorsStream, PackedInts.Format.PACKED, packedIntsVersion, patchSize, patchBitsRequired, 1);
+        for (int i = 0; i < totalTerms; ++i) {
+          if (termFreqs[i] == maxValue) {
+            termFreqs[i] += it.next();
+          }
+        }
+        assert it.ord() == patchSize - 1;
+      }
+    }
+
+    // total number of positions, offsets and payloads
+    int totalPositions = 0, totalOffsets = 0, totalPayloads = 0;
+    for (int i = 0, termIndex = 0; i < totalFields; ++i) {
+      final int f = (int) flags.get(i);
+      final int termCount = (int) numTerms.get(i);
+      for (int j = 0; j < termCount; ++j) {
+        final int freq = termFreqs[termIndex++];
+        if ((f & POSITIONS) != 0) {
+          totalPositions += freq;
+        }
+        if ((f & OFFSETS) != 0) {
+          totalOffsets += freq;
+        }
+        if ((f & PAYLOADS) != 0) {
+          totalPayloads += freq;
+        }
+      }
+      assert i != totalFields - 1 || termIndex == totalTerms : termIndex + " " + totalTerms;
+    }
+    
+
+    final int[][][] positions, startOffsets, lengths;
+    if (totalPositions > 0) {
+      positions = readPositions(skip, numFields, totalFields, totalTerms, flags, numTerms, termFreqs, POSITIONS, totalPositions);
+    } else {
+      positions = new int[numFields][][];
+    }
+
+    if (totalOffsets > 0) {
+      // average number of chars per term, used to guess start offsets from positions
+      final float[] charsPerTerm = new float[fieldNums.length];
+      for (int i = 0; i < charsPerTerm.length; ++i) {
+        charsPerTerm[i] = Float.intBitsToFloat(vectorsStream.readInt());
+      }
+      startOffsets = readPositions(skip, numFields, totalFields, totalTerms, flags, numTerms, termFreqs, OFFSETS, totalOffsets);
+      lengths = readPositions(skip, numFields, totalFields, totalTerms, flags, numTerms, termFreqs, OFFSETS, totalOffsets);
+
+      for (int i = 0; i < numFields; ++i) {
+        // patch offsets from positions
+        if (startOffsets[i] != null && positions[i] != null) {
+          for (int j = 0; j < startOffsets[i].length; ++j) {
+            for (int k = 0; k < startOffsets[i][j].length; ++k) {
+              startOffsets[i][j][k] += (int) (charsPerTerm[fieldNumOffs[i]] * positions[i][j][k]);
+            }
+          }
+        }
+        if (startOffsets[i] != null) {
+          for (int j = 0; j < startOffsets[i].length; ++j) {
+            // delta-decode start offsets
+            for (int k = 1; k < startOffsets[i][j].length; ++k) {
+              startOffsets[i][j][k] += startOffsets[i][j][k - 1];
+            }
+            // patch lengths using term lengths
+            for (int k = 0; k < lengths[i][j].length; ++k) {
+              lengths[i][j][k] += termLengths[i][j];
+            }
+          }
+        }
+      }
+    } else {
+      startOffsets = lengths = new int[numFields][][];
+    }
+    if (totalPositions > 0) {
+      // delta-decode positions
+      for (int i = 0; i < numFields; ++i) {
+        if (positions[i] != null) {
+          for (int j = 0; j < positions[i].length; ++j) {
+            for (int k = 1; k < positions[i][j].length; ++k) {
+              positions[i][j][k] += positions[i][j][k - 1];
+            }
+          }
+        }
+      }
+    }
+
+    // payload lengths
+    // payload bytes are stored alongside term bytes, so docOff/docLen/totalLen
+    // must also account for them
+    final int[][][] payloadLengths = new int[numFields][][];
+    if (totalPayloads > 0) {
+      final int bitsRequired = vectorsStream.readVInt();
+      assert bitsRequired > 0 && bitsRequired < 32;
+      final PackedInts.ReaderIterator it = PackedInts.getReaderIteratorNoHeader(vectorsStream, PackedInts.Format.PACKED, packedIntsVersion, totalPayloads, bitsRequired, 1);
+
+      // skip
+      int termIndex = 0;
+      for (int i = 0; i < skip; ++i) {
+        final int f = (int) flags.get(i);
+        final int termCount = (int) numTerms.get(i);
+        if ((f & PAYLOADS) != 0) {
+          for (int j = 0; j < termCount; ++j) {
+            final int freq = termFreqs[termIndex + j];
+            for (int k = 0; k < freq; ++k) {
+              final int l = (int) it.next();
+              docOff += l;
+              totalLen += l;
+            }
+          }
+        }
+        termIndex += termCount;
+      }
+      // read doc payload lengths
+      for (int i = 0; i < numFields; ++i) {
+        final int f = (int) flags.get(skip + i);
+        final int termCount = (int) numTerms.get(skip + i);
+        if ((f & PAYLOADS) != 0) {
+          payloadLengths[i] = new int[termCount][];
+          for (int j = 0; j < termCount; ++j) {
+            final int freq = termFreqs[termIndex + j];
+            payloadLengths[i][j] = new int[freq];
+            for (int k = 0; k < freq; ++k) {
+              payloadLengths[i][j][k] = (int) it.next();
+              fieldLengths[i] += payloadLengths[i][j][k];
+              docLen += payloadLengths[i][j][k];
+              totalLen += payloadLengths[i][j][k];
+            }
+          }
+        }
+        termIndex += termCount;
+      }
+      for (int i = skip + numFields; i < totalFields; ++i) {
+        final int f = (int) flags.get(i);
+        final int termCount = (int) numTerms.get(i);
+        if ((f & PAYLOADS) != 0) {
+          for (int j = 0; j < termCount; ++j) {
+            final int freq = termFreqs[termIndex + j];
+            for (int k = 0; k < freq; ++k) {
+              totalLen += it.next();
+            }
+          }
+        }
+        termIndex += termCount;
+      }
+      assert termIndex == totalTerms;
+    }
+
+    // decompress data
+    // only the [docOff, docOff + docLen) window of the chunk is materialized
+    decompressor.decompress(vectorsStream, totalLen, docOff, docLen, bytes);
+    assert bytes.length == docLen;
+
+    final int[] fieldFlags = new int[numFields];
+    for (int i = 0; i < numFields; ++i) {
+      fieldFlags[i] = (int) flags.get(skip + i);
+    }
+
+    final int[] fieldNumTerms = new int[numFields];
+    for (int i = 0; i < numFields; ++i) {
+      fieldNumTerms[i] = (int) numTerms.get(skip + i);
+    }
+    
+    final int[][] fieldTermFreqs = new int[numFields][];
+    {
+      int termIdx = 0;
+      for (int i = 0; i < skip; ++i) {
+        termIdx += numTerms.get(i);
+      }
+      for (int i = 0; i < numFields; ++i) {
+        final int termCount = (int) numTerms.get(skip + i);
+        fieldTermFreqs[i] = new int[termCount];
+        for (int j = 0; j < termCount; ++j) {
+          fieldTermFreqs[i][j] = termFreqs[termIdx++];
+        }
+      }
+    }
+
+    // copy bytes so that two TVFields instances can be used at the same time
+    final BytesRef copy = new BytesRef(bytes.length);
+    copy.copyBytes(bytes);
+
+    assert sum(fieldLengths) == docLen : sum(fieldLengths) + " != " + docLen;
+
+    return new TVFields(fieldNums, fieldFlags, fieldNumOffs, fieldNumTerms, fieldLengths,
+        termLengths, fieldTermFreqs,
+        positions, startOffsets, lengths, payloadLengths,
+        copy);
+  }
+
+  /**
+   * Reads one packed stream of per-occurrence values (positions or offset
+   * data, selected by {@code flag}) covering the whole chunk, but only
+   * materializes arrays for the {@code numFields} fields of the requested
+   * document; occurrences of the other documents are decoded and discarded.
+   * Each value is stored relative to {@code min} (a zig-zag encoded vLong).
+   * NOTE(review): position values are returned still delta-encoded per term;
+   * the caller is expected to delta-decode them.
+   */
+  private int[][][] readPositions(int skip, int numFields, int totalFields, int totalTerms, PackedInts.Reader flags, PackedInts.Reader numTerms, int[] termFreqs, int flag, final int totalPositions) throws IOException {
+    final int[][][] positions = new int[numFields][][];
+    final int min = (int) zigZagDecode(vectorsStream.readVLong());
+    final int bitsRequired = vectorsStream.readVInt();
+    assert bitsRequired >= 0 && bitsRequired <= 32;
+    final PackedInts.ReaderIterator it;
+    if (bitsRequired == 0) {
+      // means all zeros
+      it = new PackedInts.ReaderIterator() {
+        @Override
+        public int size() {
+          return totalPositions;
+        }
+        @Override
+        public int ord() {
+          throw new UnsupportedOperationException();
+        }
+        @Override
+        public LongsRef next(int count) throws IOException {
+          throw new UnsupportedOperationException();
+        }
+        @Override
+        public long next() throws IOException {
+          return 0L;
+        }
+        @Override
+        public int getBitsPerValue() {
+          return 0;
+        }
+      };
+    } else {
+      it = PackedInts.getReaderIteratorNoHeader(vectorsStream, PackedInts.Format.PACKED, packedIntsVersion, totalPositions, bitsRequired, 1);
+    }
+
+    // skip
+    int termIndex = 0;
+    for (int i = 0; i < skip; ++i) {
+      final int f = (int) flags.get(i);
+      final int termCount = (int) numTerms.get(i);
+      if ((f & flag) != 0) {
+        for (int j = 0; j < termCount; ++j) {
+          final int freq = termFreqs[termIndex+j];
+          for (int k = 0; k < freq; ++k) {
+            it.next();
+          }
+        }
+      }
+      termIndex += termCount;
+    }
+    // read doc positions
+    for (int i = 0; i < numFields; ++i) {
+      final int f = (int) flags.get(skip + i);
+      final int termCount = (int) numTerms.get(skip + i);
+      if ((f & flag) != 0) {
+        positions[i] = new int[termCount][];
+        for (int j = 0; j < termCount; ++j) {
+          final int freq = termFreqs[termIndex+j];
+          positions[i][j] = new int[freq];
+          for (int k = 0; k < freq; ++k) {
+            positions[i][j][k] = min + (int) it.next();
+          }
+        }
+      }
+      termIndex += termCount;
+    }
+    // skip the remaining documents of the chunk so the stream stays aligned
+    for (int i = skip + numFields; i < totalFields; ++i) {
+      final int f = (int) flags.get(i);
+      final int termCount = (int) numTerms.get(i);
+      if ((f & flag) != 0) {
+        for (int j = 0; j < termCount; ++j) {
+          final int freq = termFreqs[termIndex+j];
+          for (int k = 0; k < freq; ++k) {
+            it.next();
+          }
+        }
+      }
+      termIndex += termCount;
+    }
+    assert termIndex == totalTerms;
+    return positions;
+  }
+
+  /**
+   * {@link Fields} view over the term vectors of a single document, backed by
+   * the arrays decoded in {@code get(int)}.
+   */
+  private class TVFields extends Fields {
+
+    // TODO encode diff length, term length
+    private final int[] fieldNums, fieldFlags, fieldNumOffs, numTerms, fieldLengths;
+    private final int[][] termLengths, termFreqs;
+    private final int[][][] positions, startOffsets, lengths, payloadLengths;
+    private final BytesRef termsAndPayloads;
+
+    public TVFields(int[] fieldNums, int[] fieldFlags, int[] fieldNumOffs, int[] numTerms, int[] fieldLengths,
+        int[][] termLengths, int[][] termFreqs,
+        int[][][] positions, int[][][] startOffsets, int[][][] lengths, int[][][] payloadLengths,
+        BytesRef termsAndPayloads) {
+      this.fieldNums = fieldNums;
+      this.fieldFlags = fieldFlags;
+      this.fieldNumOffs = fieldNumOffs;
+      this.numTerms = numTerms;
+      this.fieldLengths = fieldLengths;
+      this.termLengths = termLengths;
+      this.termFreqs = termFreqs;
+      this.positions = positions;
+      this.startOffsets = startOffsets;
+      this.lengths = lengths;
+      this.payloadLengths = payloadLengths;
+      this.termsAndPayloads = termsAndPayloads;
+    }
+
+    @Override
+    public Iterator<String> iterator() {
+      return new Iterator<String>() {
+        private int upto = 0;
+        @Override
+        public boolean hasNext() {
+          return upto < fieldNumOffs.length;
+        }
+        @Override
+        public String next() {
+          if (upto == fieldNumOffs.length) {
+            throw new NoSuchElementException();
+          }
+          final int fieldNum = fieldNums[fieldNumOffs[upto]];
+          ++upto;
+          return fieldInfos.fieldInfo(fieldNum).name;
+        }
+        @Override
+        public void remove() {
+          throw new UnsupportedOperationException();
+        }
+      };
+    }
+
+    @Override
+    public Terms terms(String field) throws IOException {
+      final FieldInfo fieldInfo = fieldInfos.fieldInfo(field);
+      if (fieldInfo == null) {
+        return null; // unknown field
+      }
+      // locate the field among this document's fields
+      int idx = -1;
+      for (int i = 0; i < fieldNumOffs.length; ++i) {
+        if (fieldNums[fieldNumOffs[i]] == fieldInfo.number) {
+          idx = i;
+          break;
+        }
+      }
+      if (idx == -1 || numTerms[idx] == 0) {
+        return null; // no term vectors for this field
+      }
+      // the field's data starts after the data of all preceding fields
+      int fieldOff = 0;
+      for (int i = 0; i < idx; ++i) {
+        fieldOff += fieldLengths[i];
+      }
+      final int fieldLen = fieldLengths[idx];
+      assert fieldLen >= 0;
+      return new TVTerms(numTerms[idx], fieldFlags[idx], termLengths[idx], termFreqs[idx],
+          positions[idx], startOffsets[idx], lengths[idx], payloadLengths[idx],
+          new BytesRef(termsAndPayloads.bytes, fieldOff, fieldLen));
+    }
+
+    @Override
+    public int size() {
+      return fieldNumOffs.length;
+    }
+
+  }
+
+  /**
+   * {@link Terms} implementation over the term vector of a single field of a
+   * single document. Doc-level statistics are trivial: every term occurs in
+   * exactly one document (this one), so sumDocFreq equals the term count and
+   * docCount is 1; the sum of total term freqs is not tracked and reported as -1.
+   */
+  private class TVTerms extends Terms {
+
+    private final int numTerms, flags;
+    private final int[] termLengths, termFreqs;
+    private final int[][] positions, startOffsets, lengths, payloadLengths;
+    private final BytesRef termsAndPayloads;
+
+    TVTerms(int numTerms, int flags, int[] termLengths, int[] termFreqs,
+        int[][] positions, int[][] startOffsets, int[][] lengths, int[][] payloadLengths,
+        BytesRef termsAndPayloads) {
+      this.numTerms = numTerms;
+      this.flags = flags;
+      this.termLengths = termLengths;
+      this.termFreqs = termFreqs;
+      this.positions = positions;
+      this.startOffsets = startOffsets;
+      this.lengths = lengths;
+      this.payloadLengths = payloadLengths;
+      this.termsAndPayloads = termsAndPayloads;
+    }
+
+    @Override
+    public TermsEnum iterator(TermsEnum reuse) throws IOException {
+      final TVTermsEnum termsEnum;
+      // instanceof already yields false for null, so no explicit null check is needed
+      if (reuse instanceof TVTermsEnum) {
+        termsEnum = (TVTermsEnum) reuse;
+      } else {
+        termsEnum = new TVTermsEnum();
+      }
+      termsEnum.reset(numTerms, flags, termLengths, termFreqs, positions, startOffsets, lengths, payloadLengths,
+          new ByteArrayDataInput(termsAndPayloads.bytes, termsAndPayloads.offset, termsAndPayloads.length));
+      return termsEnum;
+    }
+
+    @Override
+    public Comparator<BytesRef> getComparator() throws IOException {
+      return BytesRef.getUTF8SortedAsUnicodeComparator();
+    }
+
+    @Override
+    public long size() throws IOException {
+      return numTerms;
+    }
+
+    @Override
+    public long getSumTotalTermFreq() throws IOException {
+      return -1L; // not stored in term vectors
+    }
+
+    @Override
+    public long getSumDocFreq() throws IOException {
+      return numTerms; // one doc, so docFreq == 1 for each of the numTerms terms
+    }
+
+    @Override
+    public int getDocCount() throws IOException {
+      return 1;
+    }
+
+    @Override
+    public boolean hasOffsets() {
+      return (flags & OFFSETS) != 0;
+    }
+
+    @Override
+    public boolean hasPositions() {
+      return (flags & POSITIONS) != 0;
+    }
+
+    @Override
+    public boolean hasPayloads() {
+      return (flags & PAYLOADS) != 0;
+    }
+
+  }
+
+  private static class TVTermsEnum extends TermsEnum {
+
+    private int numTerms, flags, startPos, ord;
+    private int[] termLengths, termFreqs;
+    private int[][] positions, startOffsets, lengths, payloadLengths;
+    private ByteArrayDataInput in;
+    private final BytesRef term;
+    private byte[] payloadBytes;
+
+    private TVTermsEnum() {
+      term = new BytesRef(16);
+      payloadBytes = new byte[0];
+    }
+
+    private boolean assertLengths() {
+      for (int i = 0; i < numTerms; ++i) {
+        assert positions == null || positions[i].length == termFreqs[i] : positions[i].length + " != " + termFreqs[i] + " at " + i;
+        assert startOffsets == null || startOffsets[i].length == termFreqs[i];
+        assert lengths == null || lengths[i].length == termFreqs[i];
+        assert payloadLengths == null || payloadLengths[i].length == termFreqs[i];
+      }
+      return true;
+    }
+
+    void reset(int numTerms, int flags, int[] termLengths, int[] termFreqs, int[][] positions, int[][] startOffsets, int[][] lengths, int[][] payloadLengths, ByteArrayDataInput in) {
+      this.numTerms = numTerms;
+      this.flags = flags;
+      this.termLengths = termLengths;
+      this.termFreqs = termFreqs;
+      this.positions = positions;
+      this.startOffsets = startOffsets;
+      this.lengths = lengths;
+      this.payloadLengths = payloadLengths;
+      this.in = in;
+      startPos = in.getPosition();
+      reset();
+      assert assertLengths();
+    }
+
+    void reset() {
+      term.length = 0;
+      in.setPosition(startPos);
+      ord = -1;
+    }
+
+    @Override
+    public BytesRef next() throws IOException {
+      if (ord == numTerms - 1) {
+        return null;
+      } else {
+        assert ord < numTerms;
+        ++ord;
+      }
+
+      // read term
+      term.offset = 0;
+      term.length = termLengths[ord];
+      if (term.length > term.bytes.length) {
+        term.bytes = new byte[ArrayUtil.oversize(term.length, 1)];
+      }
+      in.readBytes(term.bytes, 0, term.length);
+
+      // read payloads (if any)
+      if ((flags & PAYLOADS) != 0) {
+        int totalPayloadsLength = 0;
+        for (int i = 0; i < payloadLengths[ord].length; ++i) {
+          totalPayloadsLength += payloadLengths[ord][i];
+        }
+        if (totalPayloadsLength > payloadBytes.length) {
+          payloadBytes = new byte[ArrayUtil.oversize(totalPayloadsLength, 1)];
+        }
+        in.readBytes(payloadBytes, 0, totalPayloadsLength);
+      }
+
+      return term;
+    }
+
+    @Override
+    public Comparator<BytesRef> getComparator() {
+      return BytesRef.getUTF8SortedAsUnicodeComparator();
+    }
+
+    @Override
+    public SeekStatus seekCeil(BytesRef text, boolean useCache)
+        throws IOException {
+      if (ord < numTerms && ord >= 0) {
+        final int cmp = term().compareTo(text);
+        if (cmp == 0) {
+          return SeekStatus.FOUND;
+        } else if (cmp > 0) {
+          reset();
+        }
+      }
+      // linear scan
+      do {
+        next();
+      } while (ord < numTerms - 1 && term().compareTo(text) < 0);
+      return term().equals(text) ? SeekStatus.FOUND : SeekStatus.END;
+    }
+
+    @Override
+    public void seekExact(long ord) throws IOException {
+      if (ord < -1 || ord >= numTerms) {
+        throw new IOException("ord is out of range: ord=" + ord + ", numTerms=" + numTerms);
+      }
+      if (ord < this.ord) {
+        reset();
+      }
+      for (int i = this.ord; i < ord; ++i) {
+        next();
+      }
+      assert ord == this.ord();
+    }
+
+    @Override
+    public BytesRef term() throws IOException {
+      return term;
+    }
+
+    @Override
+    public long ord() throws IOException {
+      return ord;
+    }
+
+    @Override
+    public int docFreq() throws IOException {
+      return 1;
+    }
+
+    @Override
+    public long totalTermFreq() throws IOException {
+      return termFreqs[ord];
+    }
+
+    @Override
+    public DocsEnum docs(Bits liveDocs, DocsEnum reuse, int flags)
+        throws IOException {
+      return docsAndPositions(liveDocs, reuse, flags);
+    }
+
+    @Override
+    public DocsAndPositionsEnum docsAndPositions(Bits liveDocs,
+        DocsAndPositionsEnum reuse, int flags) throws IOException {
+      if ((flags & POSITIONS) == 0 && (flags & OFFSETS) == 0) {
+        return null;
+      }
+      return docsAndPositions(liveDocs, (DocsEnum) reuse, flags);
+    }
+
+    private DocsAndPositionsEnum docsAndPositions(Bits liveDocs,
+        DocsEnum reuse, int ignoredFlags) throws IOException {
+      final TVDocsEnum docsEnum;
+      if (reuse != null && reuse instanceof TVDocsEnum) {
+        docsEnum = (TVDocsEnum) reuse;
+      } else {
+        docsEnum = new TVDocsEnum();
+      }
+      final int[]      positions = ((flags & POSITIONS) == 0) ? null : this.positions[ord];
+      final int[]   startOffsets =   ((flags & OFFSETS) == 0) ? null : this.startOffsets[ord];
+      final int[]        lengths =   ((flags & OFFSETS) == 0) ? null : this.lengths[ord];
+      final int[] payloadLengths =  ((flags & PAYLOADS) == 0) ? null : this.payloadLengths[ord];
+      
+      assert positions == null || positions.length == termFreqs[ord] : positions.length + " != " + termFreqs[ord];
+      assert startOffsets == null || startOffsets.length == termFreqs[ord];
+      
+      docsEnum.reset(liveDocs, termFreqs[ord], positions, startOffsets, lengths, payloadBytes, payloadLengths);
+      return docsEnum;
+    }
+
+  }
+
+  /**
+   * DocsAndPositionsEnum over one term of a single-document term vector: it
+   * matches at most doc 0 and serves positions, offsets and payloads out of
+   * the parallel arrays installed by {@link #reset}.
+   */
+  private static class TVDocsEnum extends DocsAndPositionsEnum {
+
+    private Bits liveDocs;
+    private int doc = -1;            // -1: not started, 0: on the doc, NO_MORE_DOCS: exhausted
+    private int termFreq;
+    private int[] positions;         // null when positions were not recorded
+    private int[] startOffsets;      // null when offsets were not recorded
+    private int[] lengths;           // end offset = startOffsets[i] + lengths[i]
+    private final BytesRef payload;  // sliding window over the concatenated payload bytes
+    private int[] payloadLengths;    // null when payloads were not recorded
+    private int i;                   // current position index, -1 before nextPosition()
+
+    TVDocsEnum() {
+      payload = new BytesRef();
+    }
+
+    /** (Re)initializes this enum for a new term; any array argument may be null. */
+    public void reset(Bits liveDocs, int freq, int[] positions,
+        int[] startOffsets, int[] lengths, byte[] payloadBytes,
+        int[] payloadLengths) {
+      this.liveDocs = liveDocs;
+      this.termFreq = freq;
+      this.positions = positions;
+      this.startOffsets = startOffsets;
+      this.lengths = lengths;
+      payload.bytes = payloadBytes;
+      payload.offset = payload.length = 0;
+      this.payloadLengths = payloadLengths;
+
+      doc = i = -1;
+    }
+
+    /** Fails fast when the enum is not positioned on the (single) document. */
+    private void checkDoc() {
+      if (doc == NO_MORE_DOCS) {
+        throw new IllegalStateException("DocsEnum exhausted");
+      } else if (doc == -1) {
+        throw new IllegalStateException("DocsEnum not started");
+      }
+    }
+
+    /** Fails fast when no position has been read yet, or all have been consumed. */
+    private void checkPosition() {
+      checkDoc();
+      if (i < 0) {
+        throw new IllegalStateException("Position enum not started");
+      } else if (i >= termFreq) {
+        throw new IllegalStateException("Read past last position");
+      }
+    }
+
+    @Override
+    public int nextPosition() throws IOException {
+      checkDoc(); // same state checks as the other accessors
+      if (i >= termFreq - 1) {
+        throw new IllegalStateException("Read past last position");
+      }
+
+      ++i;
+
+      // slide the payload window to this position's payload
+      if (payloadLengths != null) {
+        payload.offset += payload.length;
+        payload.length = payloadLengths[i];
+      }
+
+      if (positions == null) {
+        return -1;
+      } else {
+        return positions[i];
+      }
+    }
+
+    @Override
+    public int startOffset() throws IOException {
+      checkPosition();
+      if (startOffsets == null) {
+        return -1;
+      } else {
+        return startOffsets[i];
+      }
+    }
+
+    @Override
+    public int endOffset() throws IOException {
+      checkPosition();
+      if (startOffsets == null) {
+        return -1;
+      } else {
+        return startOffsets[i] + lengths[i];
+      }
+    }
+
+    @Override
+    public BytesRef getPayload() throws IOException {
+      checkPosition();
+      // empty payloads are reported as null
+      if (payloadLengths == null || payload.length == 0) {
+        return null;
+      } else {
+        return payload;
+      }
+    }
+
+    @Override
+    public int freq() throws IOException {
+      checkDoc();
+      return termFreq;
+    }
+
+    @Override
+    public int docID() {
+      return doc;
+    }
+
+    @Override
+    public int nextDoc() throws IOException {
+      // only doc 0 can match, and only if it is live
+      if (doc == -1 && (liveDocs == null || liveDocs.get(0))) {
+        return (doc = 0);
+      } else {
+        return (doc = NO_MORE_DOCS);
+      }
+    }
+
+    @Override
+    public int advance(int target) throws IOException {
+      // any target past 0 exhausts this single-document enum
+      if (doc == -1 && target == 0 && (liveDocs == null || liveDocs.get(0))) {
+        return (doc = 0);
+      } else {
+        return (doc = NO_MORE_DOCS);
+      }
+    }
+
+  }
+
+  /** Returns the sum of all values in {@code arr}. */
+  private static int sum(int[] arr) {
+    int total = 0;
+    for (int i = 0; i < arr.length; ++i) {
+      total += arr[i];
+    }
+    return total;
+  }
+
+}

Modification de propriétés sur lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingTermVectorsReader.java
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
