Index: lucene/core/src/java/org/apache/lucene/codecs/compressing/GrowableByteArrayDataOutput.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/codecs/compressing/GrowableByteArrayDataOutput.java	(révision 0)
+++ lucene/core/src/java/org/apache/lucene/codecs/compressing/GrowableByteArrayDataOutput.java	(révision 0)
@@ -0,0 +1,50 @@
+package org.apache.lucene.codecs.compressing;
+
+import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.util.ArrayUtil;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class GrowableByteArrayDataOutput extends DataOutput {
+
+  byte[] bytes;
+  int off;
+
+  public GrowableByteArrayDataOutput(int sz) {
+    bytes = new byte[sz];
+    off = 0;
+  }
+
+  @Override
+  public void writeByte(byte b) {
+    if (off == bytes.length) {
+      bytes = ArrayUtil.grow(bytes);
+    }
+    bytes[off++] = b;
+  }
+
+  @Override
+  public void writeBytes(byte[] b, int offset, int length) {
+    if (off + length > bytes.length) {
+      bytes = ArrayUtil.grow(bytes, off + length);
+    }
+    System.arraycopy(b, offset, bytes, off, length);
+    off += length;
+  }
+
+}
Index: lucene/core/src/java/org/apache/lucene/codecs/compressing/RawCompressionAlgorithm.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/codecs/compressing/RawCompressionAlgorithm.java	(révision 0)
+++ lucene/core/src/java/org/apache/lucene/codecs/compressing/RawCompressionAlgorithm.java	(révision 0)
@@ -0,0 +1,110 @@
+package org.apache.lucene.codecs.compressing;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.store.DataInput;
+import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
+
+/**
+ * A trivial {@link CompressionAlgorithm} that doesn't perform any compression.
+ */
+public class RawCompressionAlgorithm extends CompressionAlgorithm {
+
+  public static final RawCompressionAlgorithm INSTANCE = new RawCompressionAlgorithm();
+  public static final Format FORMAT_INSTANCE = new Format();
+  static final int VERSION_START = 0;
+  static final int VERSION_CURRENT = 0;
+
+  @Override
+  public String getName() {
+    return "RawCompressionAlgorithm";
+  }
+
+  @Override
+  public CompressionFormat getCompressionFormat(DataInput in) throws IOException {
+    final int version = in.readVInt();
+    if (version < VERSION_START || version > VERSION_CURRENT) {
+      throw new IllegalStateException("Unsupported version: " + version);
+    }
+    return new Format();
+  }
+
+  public static class Format extends CompressionFormat {
+
+    @Override
+    public CompressionAlgorithm getAlgorithm() {
+      return INSTANCE;
+    }
+
+    @Override
+    public void writeHeader(DataOutput out) throws IOException {
+      out.writeVInt(VERSION_CURRENT);
+    }
+
+    @Override
+    public Compressor newCompressor() {
+      return new RawCompressor();
+    }
+
+    @Override
+    public Decompressor newDecompressor() {
+      return new RawDecompressor();
+    }
+
+  }
+
+  private static class RawCompressor extends Compressor {
+
+    @Override
+    public void compress(BytesRef bytes, DataOutput out) throws IOException {
+      out.writeVInt(bytes.length);
+      out.writeBytes(bytes.bytes, bytes.offset, bytes.length);
+    }
+
+  }
+
+  private static class RawDecompressor extends Decompressor {
+
+    @Override
+    public void decompress(DataInput in, BytesRef bytes) throws IOException {
+      final int len = in.readVInt();
+      if (bytes.bytes.length <= len) {
+        bytes.bytes = new byte[Math.max(len, ArrayUtil.oversize(bytes.bytes.length + 1, 1))];
+      }
+      in.readBytes(bytes.bytes, 0, len);
+      bytes.offset = 0;
+      bytes.length = len;
+    }
+
+    @Override
+    public void skip(IndexInput in) throws IOException {
+      final int len = in.readVInt();
+      in.seek(in.getFilePointer() + len);
+    }
+
+    @Override
+    public Decompressor clone() {
+      return this; // stateless
+    }
+  }
+}
Index: lucene/core/src/java/org/apache/lucene/codecs/compressing/Decompressor.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/codecs/compressing/Decompressor.java	(révision 0)
+++ lucene/core/src/java/org/apache/lucene/codecs/compressing/Decompressor.java	(révision 0)
@@ -0,0 +1,45 @@
+package org.apache.lucene.codecs.compressing;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.store.DataInput;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.util.BytesRef;
+
/**
 * A decompressor: restores bytes that were written by a matching
 * {@link Compressor}. Implementations may keep internal state between calls,
 * hence {@link #clone()} for per-thread use.
 */
public abstract class Decompressor implements Cloneable {

  /**
   * Decompress bytes read from <code>in</code> into <code>bytes</code>.
   * This method is free to resize <code>bytes</code> in case
   * it is too small to hold all the decompressed data.
   */
  public abstract void decompress(DataInput in, BytesRef bytes) throws IOException;

  /**
   * Skip over the compressed data in <code>in</code> without decompressing it,
   * leaving the file pointer positioned right after the compressed block.
   */
  public abstract void skip(IndexInput in) throws IOException;

  /**
   * Return a clone of this decompressor. Stateless implementations may simply
   * return <code>this</code>.
   */
  @Override
  public abstract Decompressor clone();

}
Index: lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressedStoredFieldsFormat.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressedStoredFieldsFormat.java	(révision 0)
+++ lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressedStoredFieldsFormat.java	(révision 0)
@@ -0,0 +1,115 @@
+package org.apache.lucene.codecs.compressing;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.codecs.CodecUtil;
+import org.apache.lucene.codecs.StoredFieldsFormat;
+import org.apache.lucene.codecs.StoredFieldsReader;
+import org.apache.lucene.codecs.StoredFieldsWriter;
+import org.apache.lucene.codecs.lucene40.Lucene40StoredFieldsFormat;
+import org.apache.lucene.index.FieldInfos;
+import org.apache.lucene.index.SegmentInfo;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IOContext;
+
+/**
+ * A {@link StoredFieldsFormat} that compresses data.
+ * </p><p>
+ * On the contrary to user-managed per-document compression, this format allows
+ * the compression algorithm to do a better job by compressing the values of one
+ * field of several documents together.
+ * </p><p>
+ * Compression is performed on a per-chunk basis. The size of a chunk depends
+ * on two parameters <code>chunkDocs</code> which is the maximum number of
+ * documents that can go into a single chunk, and <code>chunkSize</code>, which
+ * controls the maximum size (in bytes) of a chunk before compression. Increasing
+ * <code>chunkDocs</code> and/or <code>chunkSize</code> will improve compression,
+ * but will increase memory requirements at indexing time. It will however not
+ * necessarily increase the time to retrieve a document from disk depending on
+ * the size of your documents and how much of your index fits in your OS cache.
+ * </p><p>
+ * The compression algorithm to use is defined on a per-field basis. A special
+ * {@link RawCompressionAlgorithm} leaves data uncompressed. This compression
+ * algorithm should be used for fields that are unlikely to be efficiently
+ * compressed.
+ * </p><p>
+ * This format uses three files:<ul>
+ *   <li>.fdt</li>: the field data,</li>
+ *   <li>.fdx</li>: the field index, used to locate records in the field data file,</li>
+ *   <li>.fdc</li>: the descriptor of the compression algorithms that have been used to compress fields.</li>
+ * </ul>
+ * </p><p>Formats:<ul>
+ *   <li>Field data (.fdt): &lt;Header&gt; &lt;FieldValuesPosition&gt;<sup>SegSize</sup></li>
+ *   <li>Header: {@link CodecUtil#writeHeader CodecHeader}</li>
+ *   <li>FieldValuesPosition: fixed-length UInt (on an arbitrary number of bits that allows the index file to remain compact) pointing the the start of a chunk in the field data file</li>
+ * </ul><ul>
+ *   <li>Field index (.fdx): &lt;Header&gt;, &lt;ChunkData&gt<sup>NumChunks</sup></li>
+ *   <li>Header: {@link CodecUtil#writeHeader CodecHeader}</li>
+ *   <li>ChunkData: DocBase, FieldCount, &ltFieldNum, NumFields, &lt;CompressedData&gt;<sup>NumFields</sup>&gt;<sup>FieldCount</sup></li>
+ *   <li>DocBase: VInt, the lowest docID of the documents stored in the chunk</li>
+ *   <li>FieldCount: VInt, the count of distinct field names for documents stored in the chunk</li>
+ *   <li>FieldNum: VInt, the ID of the field</li>
+ *   <li>NumFields: VInt, the number of instances for field <code>FieldNum</code></li>
+ *   <li>CompressedData: DocRelative, Bits, Value</li>
+ *   <li>DocRelative: VInt representing <code>docID - DocBase</code></li>
+ *   <li>Bits: same as in {@link Lucene40StoredFieldsFormat}</li>
+ *   <li>Value: same as in {@link Lucene40StoredFieldsFormat}</li>
+ * </ul><ul>
+ *   <li>Field compression (.fdc): &lt;Header&gt; FieldCount, &lt;FieldNum, AlgorithmName, AlgorithmData&gt;<sup>FieldCount</sup></li>
+ *   <li>Header: {@link CodecUtil#writeHeader CodecHeader}</li>
+ *   <li>FieldCount: VInt, the total number of distinct field names</li>
+ *   <li>FieldNum: VInt, the field ID</li>
+ *   <li>AlgorithmName: String, the name of the compression algorithm used to compress the field</li>
+ *   <li>AlgorithmData: algorithm-specific data used by the reader to restore state</li>
+ * </ul></p>
+ */
+public class CompressedStoredFieldsFormat extends StoredFieldsFormat {
+
+  final CompressionFormatSelector selector;
+  final int chunkDocs, chunkSize;
+
+  /**
+   * Create a new CompressedStoredFieldsFormat.
+   *
+   * @param selector describes what compression algorithm to use depending on the field
+   * @param chunkDocs the maximum number of documents in a chunk
+   * @param chunkSize the maximum size (in bytes) of an uncompressed chunk
+   */
+  public CompressedStoredFieldsFormat(CompressionFormatSelector selector,
+      int chunkDocs, int chunkSize) {
+    this.selector = selector;
+    this.chunkDocs = chunkDocs;
+    this.chunkSize = chunkSize;
+  }
+
+  @Override
+  public StoredFieldsReader fieldsReader(Directory directory, SegmentInfo si,
+      FieldInfos fn, IOContext context) throws IOException {
+    return new CompressedStoredFieldsReader(directory, si, fn, context);
+  }
+
+  @Override
+  public StoredFieldsWriter fieldsWriter(Directory directory, SegmentInfo si,
+      IOContext context) throws IOException {
+    return new CompressedStoredFieldsWriter(directory, si.name, context,
+        selector, chunkDocs, chunkSize);
+  }
+
+}
Index: lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingCodec.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingCodec.java	(révision 0)
+++ lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressingCodec.java	(révision 0)
@@ -0,0 +1,108 @@
+package org.apache.lucene.codecs.compressing;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.zip.Deflater;
+
+import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.codecs.DocValuesFormat;
+import org.apache.lucene.codecs.FieldInfosFormat;
+import org.apache.lucene.codecs.LiveDocsFormat;
+import org.apache.lucene.codecs.NormsFormat;
+import org.apache.lucene.codecs.PostingsFormat;
+import org.apache.lucene.codecs.SegmentInfoFormat;
+import org.apache.lucene.codecs.StoredFieldsFormat;
+import org.apache.lucene.codecs.TermVectorsFormat;
+import org.apache.lucene.codecs.lucene40.Lucene40Codec;
+import org.apache.lucene.index.FieldInfo;
+
+/**
+ * A {@link Codec} similar to {@link Lucene40Codec} but which uses
+ * {@link CompressedStoredFieldsFormat} for its stored fields.
+ */
+public class CompressingCodec extends Codec {
+
+  static final int DEFAULT_CHUNK_DOCS = 16;
+  static final int DEFAULT_CHUNK_SIZE = 32 * 1024; // 32K
+  static final CompressionFormatSelector DEFAULT_SELECTOR = new CompressionFormatSelector() {
+    @Override
+    public CompressionFormat getCompressionFormat(FieldInfo info) {
+      return new DeflateCompressionAlgorithm.Format(Deflater.BEST_SPEED, true);
+    }
+  };
+
+  final Codec wrappedCodec;
+  final StoredFieldsFormat fields;
+
+  public CompressingCodec(Lucene40Codec wrappedCodec, CompressionFormatSelector selector, int chunkDocs, int chunkSize) {
+    super("Compressing");
+    this.wrappedCodec = wrappedCodec;
+    if (chunkDocs < 1) {
+      throw new IllegalArgumentException("chunkDocs should be >= 1");
+    }
+    if (chunkDocs <= 0) {
+      throw new IllegalArgumentException("chunkSize should be >= 0");
+    }
+    fields = new CompressedStoredFieldsFormat(selector, chunkDocs, chunkSize);
+  }
+
+  public CompressingCodec() {
+    this(new Lucene40Codec(), DEFAULT_SELECTOR, DEFAULT_CHUNK_DOCS, DEFAULT_CHUNK_SIZE);
+  }
+
+  @Override
+  public PostingsFormat postingsFormat() {
+    return wrappedCodec.postingsFormat();
+  }
+
+  @Override
+  public DocValuesFormat docValuesFormat() {
+    return wrappedCodec.docValuesFormat();
+  }
+
+  @Override
+  public StoredFieldsFormat storedFieldsFormat() {
+    return fields;
+  }
+
+  @Override
+  public TermVectorsFormat termVectorsFormat() {
+    return wrappedCodec.termVectorsFormat();
+  }
+
+  @Override
+  public FieldInfosFormat fieldInfosFormat() {
+    return wrappedCodec.fieldInfosFormat();
+  }
+
+  @Override
+  public SegmentInfoFormat segmentInfoFormat() {
+    return wrappedCodec.segmentInfoFormat();
+  }
+
+  @Override
+  public NormsFormat normsFormat() {
+    return wrappedCodec.normsFormat();
+  }
+
+  @Override
+  public LiveDocsFormat liveDocsFormat() {
+    return wrappedCodec.liveDocsFormat();
+  }
+
+}
Index: lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressionAlgorithm.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressionAlgorithm.java	(révision 0)
+++ lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressionAlgorithm.java	(révision 0)
@@ -0,0 +1,50 @@
+package org.apache.lucene.codecs.compressing;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Set;
+
+import org.apache.lucene.store.DataInput;
+import org.apache.lucene.util.NamedSPILoader;
+
/**
 * A compression algorithm. Concrete algorithms are registered through Java's
 * SPI mechanism (see {@link NamedSPILoader}) and resolved by name.
 */
public abstract class CompressionAlgorithm implements NamedSPILoader.NamedSPI {

  // SPI loader that maps algorithm names to registered implementations
  private static final NamedSPILoader<CompressionAlgorithm> LOADER =
      new NamedSPILoader<CompressionAlgorithm>(CompressionAlgorithm.class);

  /** looks up a compression algorithm by name (as returned by {@code getName()}) */
  public static CompressionAlgorithm forName(String name) {
    return LOADER.lookup(name);
  }
  
  /** returns a list of all available compression algorithm names */
  public static Set<String> availableCodecs() {
    return LOADER.availableServices();
  }

  /**
   * Create a {@link CompressionFormat} instance based on data that can be read
   * from the provided {@link DataInput}. This restores a format previously
   * persisted via {@link CompressionFormat#writeHeader}.
   */
  public abstract CompressionFormat getCompressionFormat(DataInput in) throws IOException;

}
Index: lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressedStoredFieldsWriter.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressedStoredFieldsWriter.java	(révision 0)
+++ lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressedStoredFieldsWriter.java	(révision 0)
@@ -0,0 +1,312 @@
+package org.apache.lucene.codecs.compressing;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import org.apache.lucene.codecs.CodecUtil;
+import org.apache.lucene.codecs.StoredFieldsWriter;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldInfos;
+import org.apache.lucene.index.IndexFileNames;
+import org.apache.lucene.index.IndexableField;
+import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
+
+class CompressedStoredFieldsWriter extends StoredFieldsWriter {
+
+  // TODO: avoid code duplication with Lucene40
+  static final int FIELD_IS_BINARY = 1 << 1;
+  static final int _NUMERIC_BIT_SHIFT = 3;
+  static final int FIELD_IS_NUMERIC_MASK = 0x07 << _NUMERIC_BIT_SHIFT;
+  static final int FIELD_IS_NUMERIC_INT = 1 << _NUMERIC_BIT_SHIFT;
+  static final int FIELD_IS_NUMERIC_LONG = 2 << _NUMERIC_BIT_SHIFT;
+  static final int FIELD_IS_NUMERIC_FLOAT = 3 << _NUMERIC_BIT_SHIFT;
+  static final int FIELD_IS_NUMERIC_DOUBLE = 4 << _NUMERIC_BIT_SHIFT;
+
+  static final String CODEC_NAME_IDX = "CompressedStoredFieldsIndex";
+  static final String CODEC_NAME_DAT = "CompressedStoredFieldsData";
+  static final String CODEC_NAME_COMP = "CompressedStoredFieldsCompression";
+  static final int VERSION_START = 0;
+  static final int VERSION_CURRENT = VERSION_START;
+  static final long HEADER_LENGTH_IDX = CodecUtil.headerLength(CODEC_NAME_IDX);
+  static final long HEADER_LENGTH_DAT = CodecUtil.headerLength(CODEC_NAME_DAT);
+  static final long HEADER_LENGTH_COMP = CodecUtil.headerLength(CODEC_NAME_COMP);
+
+  /** Extension of stored fields file */
+  public static final String FIELDS_EXTENSION = "fdt";
+
+  /** Extension of stored fields index file */
+  public static final String FIELDS_INDEX_EXTENSION = "fdx";
+
+  /** Extension of stored fields compression file */
+  public static final String FIELDS_COMPRESSION_EXTENSION = "fdc";
+
+  final Directory directory;
+  final String segment;
+  IndexOutput fieldsStream,indexStream, compressionStream;
+  final int chunkDocs, chunkSize;
+
+  final CompressionFormatSelector compressionFormatSelector;
+  int[] numFields;
+  CompressionFormat[] formats;
+  GrowableByteArrayDataOutput[] rawOutputs;
+  int docBase, docID;
+
+  public CompressedStoredFieldsWriter(Directory directory, String segment, IOContext context, CompressionFormatSelector compressionFormatSelector, int chunkDocs, int chunkSize) throws IOException {
+    assert directory != null;
+    this.directory = directory;
+    this.segment = segment;
+    this.compressionFormatSelector = compressionFormatSelector;
+    this.numFields = new int[1];
+    this.formats = new CompressionFormat[1];
+    this.rawOutputs = new GrowableByteArrayDataOutput[1];
+    this.chunkDocs = chunkDocs;
+    this.chunkSize = chunkSize;
+
+    boolean success = false;
+    try {
+      fieldsStream = directory.createOutput(IndexFileNames.segmentFileName(segment, "", FIELDS_EXTENSION), context);
+      indexStream = directory.createOutput(IndexFileNames.segmentFileName(segment, "", FIELDS_INDEX_EXTENSION), context);
+      compressionStream = directory.createOutput(IndexFileNames.segmentFileName(segment, "", FIELDS_COMPRESSION_EXTENSION), context);
+
+      CodecUtil.writeHeader(fieldsStream, CODEC_NAME_DAT, VERSION_CURRENT);
+      CodecUtil.writeHeader(indexStream, CODEC_NAME_IDX, VERSION_CURRENT);
+      CodecUtil.writeHeader(compressionStream, CODEC_NAME_COMP, VERSION_CURRENT);
+
+      assert HEADER_LENGTH_DAT == fieldsStream.getFilePointer();
+      assert HEADER_LENGTH_IDX == indexStream.getFilePointer();
+      assert HEADER_LENGTH_COMP == compressionStream.getFilePointer();
+
+      docID = -1;
+      docBase = 0;
+
+      success = true;
+    } finally {
+      if (!success) {
+        abort();
+      }
+    }
+  }
+
+  @Override
+  public void close() throws IOException {
+    try {
+      IOUtils.close(fieldsStream, indexStream, compressionStream);
+    } finally {
+      fieldsStream = indexStream = compressionStream = null;
+    }
+  }
+
+  @Override
+  public void startDocument(int numStoredFields) throws IOException {
+    if (pendingFieldsSize() >= chunkSize || (docID - docBase + 1 >= chunkDocs)) {
+      flush();
+    }
+
+    ++docID;
+    assert docID >= docBase;
+
+    indexStream.writeLong(fieldsStream.getFilePointer());
+  }
+
+  private void rangeCheck(int n) {
+    if (numFields.length <= n) {
+      numFields = ArrayUtil.grow(numFields, ArrayUtil.oversize(n + 1, 8));
+    }
+    if (formats.length <= n) {
+      formats = Arrays.copyOf(formats, ArrayUtil.oversize(n + 1, 8));
+    }
+    if (rawOutputs.length <= n) {
+      rawOutputs = Arrays.copyOf(rawOutputs, ArrayUtil.oversize(n + 1, 8));
+    }
+  }
+
+  @Override
+  public void writeField(FieldInfo info, IndexableField field)
+      throws IOException {
+    rangeCheck(info.number);
+
+    ++numFields[info.number];
+    if (formats[info.number] == null) {
+      formats[info.number] = compressionFormatSelector.getCompressionFormat(info);
+    }
+    if (rawOutputs[info.number] == null) {
+      rawOutputs[info.number] = new GrowableByteArrayDataOutput(64);
+    }
+
+    final DataOutput output = rawOutputs[info.number];
+    output.writeVInt(docID - docBase);
+
+    int bits = 0;
+    final BytesRef bytes;
+    final String string;
+
+    Number number = field.numericValue();
+    if (number != null) {
+      if (number instanceof Byte || number instanceof Short || number instanceof Integer) {
+        bits |= FIELD_IS_NUMERIC_INT;
+      } else if (number instanceof Long) {
+        bits |= FIELD_IS_NUMERIC_LONG;
+      } else if (number instanceof Float) {
+        bits |= FIELD_IS_NUMERIC_FLOAT;
+      } else if (number instanceof Double) {
+        bits |= FIELD_IS_NUMERIC_DOUBLE;
+      } else {
+        throw new IllegalArgumentException("cannot store numeric type " + number.getClass());
+      }
+      string = null;
+      bytes = null;
+    } else {
+      bytes = field.binaryValue();
+      if (bytes != null) {
+        bits |= FIELD_IS_BINARY;
+        string = null;
+      } else {
+        string = field.stringValue();
+        if (string == null) {
+          throw new IllegalArgumentException("field " + field.name() + " is stored but does not have binaryValue, stringValue nor numericValue");
+        }
+      }
+    }
+
+    output.writeByte((byte) bits);
+
+    if (bytes != null) {
+      output.writeVInt(bytes.length);
+      output.writeBytes(bytes.bytes, bytes.offset, bytes.length);
+    } else if (string != null) {
+      output.writeString(field.stringValue());
+    } else {
+      if (number instanceof Byte || number instanceof Short || number instanceof Integer) {
+        output.writeInt(number.intValue());
+      } else if (number instanceof Long) {
+        output.writeLong(number.longValue());
+      } else if (number instanceof Float) {
+        output.writeInt(Float.floatToIntBits(number.floatValue()));
+      } else if (number instanceof Double) {
+        output.writeLong(Double.doubleToLongBits(number.doubleValue()));
+      } else {
+        assert false;
+      }
+    }
+  }
+
+  public void abort() {
+    try {
+      close();
+    } catch (IOException ignored) {}
+    IOUtils.deleteFilesIgnoringExceptions(directory,
+        IndexFileNames.segmentFileName(segment, "", FIELDS_EXTENSION),
+        IndexFileNames.segmentFileName(segment, "", FIELDS_INDEX_EXTENSION),
+        IndexFileNames.segmentFileName(segment, "", FIELDS_COMPRESSION_EXTENSION));
+  }
+
+  @Override
+  public void finish(FieldInfos fis, int numDocs) throws IOException {
+    flush();
+
+    // write the field compression stream
+    int fieldCount = 0;
+    for (CompressionFormat format : formats) {
+      if (format != null) {
+        ++fieldCount;
+      }
+    }
+    compressionStream.writeVInt(fieldCount);
+
+    int count = 0;
+    for (int fieldNum = 0; fieldNum < formats.length; ++fieldNum) {
+      final CompressionFormat format = formats[fieldNum];
+      if (format != null) {
+        compressionStream.writeVInt(fieldNum);
+        compressionStream.writeString(format.getAlgorithm().getName());
+        format.writeHeader(compressionStream);
+        ++count;
+      }
+    }
+    assert count == fieldCount;
+  }
+
+  private void flush() throws IOException {
+    fieldsStream.writeVInt(docBase);
+    final int fieldCount = fieldCount();
+    fieldsStream.writeVInt(fieldCount);
+
+    int count = 0;
+    for (int fieldNum = 0; fieldNum < rawOutputs.length; ++ fieldNum) {
+      final GrowableByteArrayDataOutput rawOutput = rawOutputs[fieldNum];
+      if (rawOutput == null || rawOutput.off == 0) {
+        rawOutputs[fieldNum] = null;
+        continue;
+      }
+
+      fieldsStream.writeVInt(fieldNum);
+      fieldsStream.writeVInt(numFields[fieldNum]);
+      // now compress
+      final CompressionFormat format = formats[fieldNum];
+      assert format != null;
+      Compressor compressor = format.newCompressor();
+      compressor.compress(new BytesRef(rawOutput.bytes, 0, rawOutput.off), fieldsStream);
+
+      ++count;
+    }
+    assert count == fieldCount;
+    reset();
+  }
+
+  private int fieldCount() {
+    int count = 0;
+    for (GrowableByteArrayDataOutput output : rawOutputs) {
+      if (output != null && output.off > 0) {
+        ++count;
+      }
+    }
+    return count;
+  }
+
+  private long pendingFieldsSize() {
+    long size = 0;
+    for (GrowableByteArrayDataOutput output : rawOutputs) {
+      if (output != null) {
+        size += output.off;
+      }
+    }
+    return size;
+  }
+
+  private void reset() {
+    docBase = docID + 1;
+
+    for (GrowableByteArrayDataOutput output : rawOutputs) {
+      if (output != null) {
+        output.off = 0;
+      }
+    }
+    for (int i = 0; i < numFields.length; ++i) {
+      numFields[i] = 0;
+    }
+  }
+
+}
Index: lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressionFormat.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressionFormat.java	(révision 0)
+++ lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressionFormat.java	(révision 0)
@@ -0,0 +1,52 @@
+package org.apache.lucene.codecs.compressing;
+
+import java.io.IOException;
+
+import org.apache.lucene.store.DataOutput;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * A compression format. This class couples a {@link CompressionAlgorithm} with
+ * data that tells the algorithm how to decompress streams.
+ * @lucene.internal
+ */
+public abstract class CompressionFormat {
+
+  /**
+   * Return the algorithm used by this format.
+   */
+  public abstract CompressionAlgorithm getAlgorithm();
+
+  /**
+   * Write all necessary information to <code>out</code> to be able to restore a
+   * similar {@link CompressionFormat} in the future.
+   */
+  public abstract void writeHeader(DataOutput out) throws IOException;
+
+  /**
+   * Create a new compressor instance for this format.
+   */
+  public abstract Compressor newCompressor();
+
+  /**
+   * Create a new decompressor instance for this format.
+   */
+  public abstract Decompressor newDecompressor();
+
+}
Index: lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressionFormatSelector.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressionFormatSelector.java	(révision 0)
+++ lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressionFormatSelector.java	(révision 0)
@@ -0,0 +1,30 @@
+package org.apache.lucene.codecs.compressing;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.FieldInfo;
+
+/**
+ * Utility class to know what {@link CompressionFormat} to use depending on
+ * the field to compress.
+ */
+public interface CompressionFormatSelector {
+
+  CompressionFormat getCompressionFormat(FieldInfo info);
+
+}
Index: lucene/core/src/java/org/apache/lucene/codecs/compressing/Compressor.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/codecs/compressing/Compressor.java	(révision 0)
+++ lucene/core/src/java/org/apache/lucene/codecs/compressing/Compressor.java	(révision 0)
@@ -0,0 +1,37 @@
+package org.apache.lucene.codecs.compressing;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.util.BytesRef;
+
+/**
+ * A compressor.
+ */
+public abstract class Compressor {
+
+  /**
+   * Compress bytes into out. It is the responsibility of the compressor to add
+   * all necessary information so that a {@link Decompressor} will know when to
+   * stop decompressing bytes from the stream.
+   */
+  public abstract void compress(BytesRef bytes, DataOutput out) throws IOException;
+
+}
Index: lucene/core/src/java/org/apache/lucene/codecs/compressing/ByteArrayDataInput.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/codecs/compressing/ByteArrayDataInput.java	(révision 0)
+++ lucene/core/src/java/org/apache/lucene/codecs/compressing/ByteArrayDataInput.java	(révision 0)
@@ -0,0 +1,54 @@
+package org.apache.lucene.codecs.compressing;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.store.DataInput;
+
+/**
+ * Utility class that wraps a <code>byte[]</code> and gives access to its
+ * internal <code>offset</code>.
+ */
+class ByteArrayDataInput extends DataInput {
+
+  final byte[] bytes;
+  int offset;
+  final int length;
+
+  public ByteArrayDataInput(byte[] bytes, int off, int len) {
+    this.bytes = bytes;
+    this.offset = off;
+    this.length = len;
+  }
+
+  @Override
+  public byte readByte() {
+    return bytes[offset++];
+  }
+
+  @Override
+  public void readBytes(byte[] b, int offset, int len) {
+    System.arraycopy(bytes, this.offset, b, offset, len);
+    this.offset += len;
+  }
+
+  // skip over n bytes
+  public void skip(int n) {
+    offset += n;
+  }
+
+}
Index: lucene/core/src/java/org/apache/lucene/codecs/compressing/DeflateCompressionAlgorithm.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/codecs/compressing/DeflateCompressionAlgorithm.java	(révision 0)
+++ lucene/core/src/java/org/apache/lucene/codecs/compressing/DeflateCompressionAlgorithm.java	(révision 0)
@@ -0,0 +1,192 @@
+package org.apache.lucene.codecs.compressing;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.zip.DataFormatException;
+import java.util.zip.Deflater;
+import java.util.zip.Inflater;
+
+import org.apache.lucene.store.DataInput;
+import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
+
+/**
+ * Implementation of the {@link Deflater} compression algorithm.
+ */
+public class DeflateCompressionAlgorithm extends CompressionAlgorithm {
+
+  public static final DeflateCompressionAlgorithm INSTANCE = new DeflateCompressionAlgorithm();
+
+  static final int VERSION_START = 0;
+  static final int VERSION_CURRENT = 0;
+
+  @Override
+  public String getName() {
+    return "DeflateCompressionAlgorithm";
+  }
+
+  @Override
+  public Format getCompressionFormat(DataInput in)
+      throws IOException {
+    final int version = in.readVInt();
+    if (version < VERSION_START || version > VERSION_CURRENT) {
+      throw new IllegalStateException("Unsupported version: " + version);
+    }
+    final int level = in.readVInt();
+    final byte nowrap = in.readByte();
+    if (nowrap < 0 || nowrap > 1) {
+      throw new IllegalStateException("Malformed stream");
+    }
+    return new Format(level, nowrap != 0);
+  }
+
+  public static class Format extends CompressionFormat {
+
+    private final int level;
+    private final boolean nowrap;
+
+    public Format(int level, boolean nowrap) {
+      this.level = level;
+      this.nowrap = nowrap;
+    }
+
+    @Override
+    public CompressionAlgorithm getAlgorithm() {
+      return INSTANCE;
+    }
+
+    @Override
+    public void writeHeader(DataOutput out) throws IOException {
+      out.writeVInt(VERSION_CURRENT);
+      out.writeVInt(level);
+      out.writeByte((byte) (nowrap ? 1 : 0));
+    }
+
+    @Override
+    public Compressor newCompressor() {
+      return new Compressor() {
+
+        final Deflater compressor = new Deflater(level, nowrap);
+        byte[] compressed;
+
+        @Override
+        public void compress(BytesRef bytes, DataOutput out) throws IOException {
+          compressor.reset();
+          compressor.setInput(bytes.bytes, bytes.offset, bytes.length);
+          compressor.finish();
+
+          if (compressor.needsInput()) {
+            // no output
+            out.writeVInt(0);
+            return;
+          }
+
+          if (compressed == null) {
+            compressed = new byte[5];
+          }
+          int totalCount = 0;
+          while (true) {
+            final int count = compressor.deflate(compressed, totalCount, compressed.length - totalCount);
+            totalCount += count;
+            assert totalCount <= compressed.length;
+            if (compressor.finished()) {
+              break;
+            } else {
+              compressed = ArrayUtil.grow(compressed);
+            }
+          }
+
+          out.writeVInt(totalCount);
+          out.writeBytes(compressed, totalCount);
+        }
+
+      };
+    }
+
+    @Override
+    public Decompressor newDecompressor() {
+      return new DeflateDecompressor(0, nowrap);
+    }
+
+  }
+
+  private static class DeflateDecompressor extends Decompressor {
+
+    final Inflater decompressor;
+    byte[] buffer;
+    final boolean nowrap;
+
+    private DeflateDecompressor(int bufferSize, boolean nowrap) {
+      this.nowrap = nowrap;
+      decompressor = new Inflater(nowrap);
+      buffer = new byte[bufferSize];
+    }
+
+    @Override
+    public void skip(IndexInput in) throws IOException {
+      final int length = in.readVInt();
+      in.seek(in.getFilePointer() + length);
+    }
+    
+    @Override
+    public void decompress(DataInput in, BytesRef bytes) throws IOException {
+      bytes.offset = bytes.length = 0;
+
+      final int length = in.readVInt();
+      if (buffer.length <= length) {
+        buffer = ArrayUtil.grow(buffer, length);
+      }
+      in.readBytes(buffer, 0, length);
+
+      decompressor.reset();
+      decompressor.setInput(buffer, 0, length);
+      if (decompressor.needsInput()) {
+        return;
+      }
+
+      while (true) {
+        final int count;
+        try {
+          count = decompressor.inflate(bytes.bytes, bytes.length, bytes.bytes.length - bytes.length);
+        } catch (DataFormatException e) {
+          throw new IOException("Malformed input", e);
+        }
+        bytes.length += count;
+        if (decompressor.finished()) {
+          break;
+        } else {
+          try {
+            bytes.bytes = ArrayUtil.grow(bytes.bytes);
+          } catch (OutOfMemoryError e) {
+            // NOTE(review): leftover debug println removed — library code must not write to stdout
+            throw e;
+          }
+        }
+      }
+    }
+
+    @Override
+    public Decompressor clone() {
+      return new DeflateDecompressor(buffer.length, nowrap);
+    }
+  }
+
+}
Index: lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressedStoredFieldsReader.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressedStoredFieldsReader.java	(révision 0)
+++ lucene/core/src/java/org/apache/lucene/codecs/compressing/CompressedStoredFieldsReader.java	(révision 0)
@@ -0,0 +1,241 @@
+package org.apache.lucene.codecs.compressing;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import static org.apache.lucene.codecs.compressing.CompressedStoredFieldsWriter.CODEC_NAME_COMP;
+import static org.apache.lucene.codecs.compressing.CompressedStoredFieldsWriter.CODEC_NAME_DAT;
+import static org.apache.lucene.codecs.compressing.CompressedStoredFieldsWriter.CODEC_NAME_IDX;
+import static org.apache.lucene.codecs.compressing.CompressedStoredFieldsWriter.FIELDS_COMPRESSION_EXTENSION;
+import static org.apache.lucene.codecs.compressing.CompressedStoredFieldsWriter.FIELDS_EXTENSION;
+import static org.apache.lucene.codecs.compressing.CompressedStoredFieldsWriter.FIELDS_INDEX_EXTENSION;
+import static org.apache.lucene.codecs.compressing.CompressedStoredFieldsWriter.FIELD_IS_BINARY;
+import static org.apache.lucene.codecs.compressing.CompressedStoredFieldsWriter.FIELD_IS_NUMERIC_DOUBLE;
+import static org.apache.lucene.codecs.compressing.CompressedStoredFieldsWriter.FIELD_IS_NUMERIC_FLOAT;
+import static org.apache.lucene.codecs.compressing.CompressedStoredFieldsWriter.FIELD_IS_NUMERIC_INT;
+import static org.apache.lucene.codecs.compressing.CompressedStoredFieldsWriter.FIELD_IS_NUMERIC_LONG;
+import static org.apache.lucene.codecs.compressing.CompressedStoredFieldsWriter.FIELD_IS_NUMERIC_MASK;
+import static org.apache.lucene.codecs.compressing.CompressedStoredFieldsWriter.HEADER_LENGTH_DAT;
+import static org.apache.lucene.codecs.compressing.CompressedStoredFieldsWriter.HEADER_LENGTH_IDX;
+import static org.apache.lucene.codecs.compressing.CompressedStoredFieldsWriter.VERSION_CURRENT;
+import static org.apache.lucene.codecs.compressing.CompressedStoredFieldsWriter.VERSION_START;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import org.apache.lucene.codecs.CodecUtil;
+import org.apache.lucene.codecs.StoredFieldsReader;
+import org.apache.lucene.index.CorruptIndexException;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldInfos;
+import org.apache.lucene.index.IndexFileNames;
+import org.apache.lucene.index.SegmentInfo;
+import org.apache.lucene.index.StoredFieldVisitor;
+import org.apache.lucene.store.DataInput;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.IOUtils;
+
+class CompressedStoredFieldsReader extends StoredFieldsReader {
+
+  // read the .fdc file to know how to decompress fields
+  private static Decompressor[] readDecompressors(Directory d, String segment, IOContext context) throws IOException {
+    IndexInput compressionStream = d.openInput(IndexFileNames.segmentFileName(segment, "", FIELDS_COMPRESSION_EXTENSION), context);
+    try {
+      CodecUtil.checkHeader(compressionStream, CODEC_NAME_COMP, VERSION_START, VERSION_CURRENT);
+      final int fieldCount = compressionStream.readVInt();
+      Decompressor[] decompressors = new Decompressor[fieldCount];
+      for (int i = 0; i < fieldCount; ++i) {
+        final int fieldNum = compressionStream.readVInt();
+        if (fieldNum >= decompressors.length) {
+          decompressors = Arrays.copyOf(decompressors, ArrayUtil.oversize(fieldNum + 1, 8));
+        }
+        final String formatName = compressionStream.readString();
+        final CompressionAlgorithm algorithm = CompressionAlgorithm.forName(formatName);
+        final CompressionFormat format = algorithm.getCompressionFormat(compressionStream);
+        decompressors[i] = format.newDecompressor();
+      }
+      assert compressionStream.getFilePointer() == d.fileLength(IndexFileNames.segmentFileName(segment, "", FIELDS_COMPRESSION_EXTENSION));
+      return decompressors;
+    } finally {
+      compressionStream.close();
+    }
+  }
+
+  final Decompressor[] decompressors; // fieldNum -> Decompressor
+  final BytesRef bytesRef;
+  final FieldInfos fieldInfos;
+  final IndexInput indexStream;
+  final IndexInput fieldsStream;
+  boolean closed;
+
+  CompressedStoredFieldsReader(Directory d, SegmentInfo si,
+      FieldInfos fn, IOContext context) throws IOException {
+    final String segment = si.name;
+    boolean success = false;
+    fieldInfos = fn;
+    try {
+      decompressors = readDecompressors(d, segment, context);
+
+      fieldsStream = d.openInput(IndexFileNames.segmentFileName(segment, "", FIELDS_EXTENSION), context);
+      final String indexStreamFN = IndexFileNames.segmentFileName(segment, "", FIELDS_INDEX_EXTENSION);
+      indexStream = d.openInput(indexStreamFN, context);
+
+      CodecUtil.checkHeader(indexStream, CODEC_NAME_IDX, VERSION_START, VERSION_CURRENT);
+      CodecUtil.checkHeader(fieldsStream, CODEC_NAME_DAT, VERSION_START, VERSION_CURRENT);
+      assert HEADER_LENGTH_DAT == fieldsStream.getFilePointer();
+      assert HEADER_LENGTH_IDX == indexStream.getFilePointer();
+
+      this.bytesRef = new BytesRef(new byte[0]);
+      success = true;
+    } finally {
+      if (!success) {
+        close();
+      }
+    }
+  }
+
+  // used by clone
+  private CompressedStoredFieldsReader(
+      CompressedStoredFieldsReader other) {
+    this.decompressors = new Decompressor[other.decompressors.length];
+    for (int i = 0; i < decompressors.length; ++i) {
+      final Decompressor decompressor = other.decompressors[i];
+      decompressors[i] = decompressor == null ? null : decompressor.clone();
+    }
+    this.bytesRef = new BytesRef(new byte[other.bytesRef.bytes.length]);
+    this.fieldInfos = other.fieldInfos;
+    this.fieldsStream = (IndexInput) other.fieldsStream.clone();
+    this.indexStream = (IndexInput) other.indexStream.clone();
+    this.closed = other.closed;
+  }
+
+  @Override
+  public void close() throws IOException {
+    if (!closed) {
+      IOUtils.close(fieldsStream, indexStream);
+      closed = true;
+    }
+  }
+
+  public final void visitDocument(int n, StoredFieldVisitor visitor) throws IOException {
+    indexStream.seek(HEADER_LENGTH_IDX + 8 * n);
+    final long offset = indexStream.readLong();
+    fieldsStream.seek(offset);
+
+    final int docBase = fieldsStream.readVInt();
+    assert docBase <= n;
+    final int fieldCount = fieldsStream.readVInt();
+    for (int i = 0; i < fieldCount; ++i) {
+      final int fieldNum = fieldsStream.readVInt();
+      assert fieldNum < decompressors.length;
+      final FieldInfo fieldInfo = fieldInfos.fieldInfo(fieldNum);
+      final int numFields = fieldsStream.readVInt();
+
+      switch(visitor.needsField(fieldInfo)) {
+        case YES:
+          readFields(visitor, fieldInfo, n - docBase, numFields);
+          break;
+        case NO:
+          decompressors[fieldInfo.number].skip(fieldsStream);
+          break;
+        case STOP:
+          return;
+      }
+    }
+  }
+
+  private void readFields(StoredFieldVisitor visitor, FieldInfo info, int docID, int numFields) throws IOException {
+    final Decompressor decompressor = decompressors[info.number];
+    decompressor.decompress(fieldsStream, bytesRef);
+    ByteArrayDataInput in = new ByteArrayDataInput(bytesRef.bytes, bytesRef.offset, bytesRef.length);
+    for (int i = 0; i < numFields; ++i) {
+      final int doc = in.readVInt();
+      if (doc < docID) {
+        skipField(in);
+      } else if (doc == docID) {
+        readField(visitor, info, in);
+      } else {
+        // everything has been read
+        return;
+      }
+    }
+  }
+
+  private void readField(StoredFieldVisitor visitor, FieldInfo info, DataInput in) throws IOException {
+    final int bits = in.readByte() & 0xFF;
+    final int numeric = bits & FIELD_IS_NUMERIC_MASK;
+    if (numeric != 0) {
+      switch(numeric) {
+        case FIELD_IS_NUMERIC_INT:
+          visitor.intField(info, in.readInt());
+          return;
+        case FIELD_IS_NUMERIC_LONG:
+          visitor.longField(info, in.readLong());
+          return;
+        case FIELD_IS_NUMERIC_FLOAT:
+          visitor.floatField(info, Float.intBitsToFloat(in.readInt()));
+          return;
+        case FIELD_IS_NUMERIC_DOUBLE:
+          visitor.doubleField(info, Double.longBitsToDouble(in.readLong()));
+          return;
+        default:
+          throw new CorruptIndexException("Invalid numeric type: " + Integer.toHexString(numeric));
+      }
+    } else {
+      final int length = in.readVInt();
+      byte bytes[] = new byte[length];
+      in.readBytes(bytes, 0, length);
+      if ((bits & FIELD_IS_BINARY) != 0) {
+        visitor.binaryField(info, bytes, 0, bytes.length);
+      } else {
+        visitor.stringField(info, new String(bytes, 0, bytes.length, IOUtils.CHARSET_UTF_8));
+      }
+    }
+  }
+
+  private void skipField(ByteArrayDataInput in) throws IOException {
+    final int bits = in.readByte() & 0xFF;
+    final int numeric = bits & FIELD_IS_NUMERIC_MASK;
+    if (numeric != 0) {
+      switch(numeric) {
+        case FIELD_IS_NUMERIC_INT:
+        case FIELD_IS_NUMERIC_FLOAT:
+          in.readInt();
+          return;
+        case FIELD_IS_NUMERIC_LONG:
+        case FIELD_IS_NUMERIC_DOUBLE:
+          in.readLong();
+          return;
+        default:
+          throw new CorruptIndexException("Invalid numeric type: " + Integer.toHexString(numeric));
+      }
+    } else {
+      final int length = in.readVInt();
+      in.skip(length);
+    }
+  }
+
+  @Override
+  public StoredFieldsReader clone() {
+    return new CompressedStoredFieldsReader(this);
+  }
+
+}
Index: lucene/core/src/resources/META-INF/services/org.apache.lucene.codecs.Codec
===================================================================
--- lucene/core/src/resources/META-INF/services/org.apache.lucene.codecs.Codec	(révision 1362078)
+++ lucene/core/src/resources/META-INF/services/org.apache.lucene.codecs.Codec	(copie de travail)
@@ -16,3 +16,4 @@
 org.apache.lucene.codecs.lucene40.Lucene40Codec
 org.apache.lucene.codecs.simpletext.SimpleTextCodec
 org.apache.lucene.codecs.appending.AppendingCodec
+org.apache.lucene.codecs.compressing.CompressingCodec
Index: lucene/core/src/resources/META-INF/services/org.apache.lucene.codecs.compressing.CompressionAlgorithm
===================================================================
--- lucene/core/src/resources/META-INF/services/org.apache.lucene.codecs.compressing.CompressionAlgorithm	(révision 0)
+++ lucene/core/src/resources/META-INF/services/org.apache.lucene.codecs.compressing.CompressionAlgorithm	(révision 0)
@@ -0,0 +1,2 @@
+org.apache.lucene.codecs.compressing.RawCompressionAlgorithm
+org.apache.lucene.codecs.compressing.DeflateCompressionAlgorithm
