Index: lucene/core/src/java/org/apache/lucene/codecs/PerDocConsumer.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/codecs/PerDocConsumer.java	(révision 1354577)
+++ lucene/core/src/java/org/apache/lucene/codecs/PerDocConsumer.java	(copie de travail)
@@ -50,7 +50,7 @@
       mergeState.fieldInfo = fieldInfo; // set the field we are merging
       if (canMerge(fieldInfo)) {
         for (int i = 0; i < docValues.length; i++) {
-          docValues[i] = getDocValuesForMerge(mergeState.readers.get(i).reader, fieldInfo);
+          docValues[i] = getDocValuesForMerge(mergeState.readers.get(i), fieldInfo);
         }
         Type docValuesType = getDocValuesType(fieldInfo);
         assert docValuesType != null;
Index: lucene/core/src/java/org/apache/lucene/codecs/TermVectorsWriter.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/codecs/TermVectorsWriter.java	(révision 1354577)
+++ lucene/core/src/java/org/apache/lucene/codecs/TermVectorsWriter.java	(copie de travail)
@@ -21,6 +21,7 @@
 import java.io.IOException;
 import java.util.Comparator;
 
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
@@ -143,9 +144,9 @@
    *  merging (bulk-byte copying, etc). */
   public int merge(MergeState mergeState) throws IOException {
     int docCount = 0;
-    for (MergeState.IndexReaderAndLiveDocs reader : mergeState.readers) {
-      final int maxDoc = reader.reader.maxDoc();
-      final Bits liveDocs = reader.liveDocs;
+    for (AtomicReader reader : mergeState.readers) {
+      final int maxDoc = reader.maxDoc();
+      final Bits liveDocs = reader.getLiveDocs();
       for (int docID = 0; docID < maxDoc; docID++) {
         if (liveDocs != null && !liveDocs.get(docID)) {
           // skip deleted docs
@@ -153,7 +154,7 @@
         }
         // NOTE: it's very important to first assign to vectors then pass it to
         // termVectorsWriter.addAllDocVectors; see LUCENE-1282
-        Fields vectors = reader.reader.getTermVectors(docID);
+        Fields vectors = reader.getTermVectors(docID);
         addAllDocVectors(vectors, mergeState.fieldInfos);
         docCount++;
         mergeState.checkAbort.work(300);
Index: lucene/core/src/java/org/apache/lucene/codecs/StoredFieldsWriter.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/codecs/StoredFieldsWriter.java	(révision 1354577)
+++ lucene/core/src/java/org/apache/lucene/codecs/StoredFieldsWriter.java	(copie de travail)
@@ -20,6 +20,7 @@
 import java.io.IOException;
 
 import org.apache.lucene.document.Document;
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.IndexableField;
@@ -74,9 +75,9 @@
    *  merging (bulk-byte copying, etc). */
   public int merge(MergeState mergeState) throws IOException {
     int docCount = 0;
-    for (MergeState.IndexReaderAndLiveDocs reader : mergeState.readers) {
-      final int maxDoc = reader.reader.maxDoc();
-      final Bits liveDocs = reader.liveDocs;
+    for (AtomicReader reader : mergeState.readers) {
+      final int maxDoc = reader.maxDoc();
+      final Bits liveDocs = reader.getLiveDocs();
       for (int i = 0; i < maxDoc; i++) {
         if (liveDocs != null && !liveDocs.get(i)) {
           // skip deleted docs
@@ -88,7 +89,7 @@
         // on the fly?
         // NOTE: it's very important to first assign to doc then pass it to
         // fieldsWriter.addDocument; see LUCENE-1282
-        Document doc = reader.reader.document(i);
+        Document doc = reader.document(i);
         addDocument(doc, mergeState.fieldInfos);
         docCount++;
         mergeState.checkAbort.work(300);
Index: lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40StoredFieldsWriter.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40StoredFieldsWriter.java	(révision 1354577)
+++ lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40StoredFieldsWriter.java	(copie de travail)
@@ -22,6 +22,7 @@
 import org.apache.lucene.codecs.StoredFieldsReader;
 import org.apache.lucene.codecs.StoredFieldsWriter;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
@@ -226,7 +227,7 @@
     int rawDocLengths[] = new int[MAX_RAW_MERGE_DOCS];
     int idx = 0;
     
-    for (MergeState.IndexReaderAndLiveDocs reader : mergeState.readers) {
+    for (AtomicReader reader : mergeState.readers) {
       final SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
       Lucene40StoredFieldsReader matchingFieldsReader = null;
       if (matchingSegmentReader != null) {
@@ -237,7 +238,7 @@
         }
       }
     
-      if (reader.liveDocs != null) {
+      if (reader.getLiveDocs() != null) {
         docCount += copyFieldsWithDeletions(mergeState,
                                             reader, matchingFieldsReader, rawDocLengths);
       } else {
@@ -253,12 +254,12 @@
       when merging stored fields */
   private final static int MAX_RAW_MERGE_DOCS = 4192;
 
-  private int copyFieldsWithDeletions(MergeState mergeState, final MergeState.IndexReaderAndLiveDocs reader,
+  private int copyFieldsWithDeletions(MergeState mergeState, final AtomicReader reader,
                                       final Lucene40StoredFieldsReader matchingFieldsReader, int rawDocLengths[])
     throws IOException, MergeAbortedException, CorruptIndexException {
     int docCount = 0;
-    final int maxDoc = reader.reader.maxDoc();
-    final Bits liveDocs = reader.liveDocs;
+    final int maxDoc = reader.maxDoc();
+    final Bits liveDocs = reader.getLiveDocs();
     assert liveDocs != null;
     if (matchingFieldsReader != null) {
       // We can bulk-copy because the fieldInfos are "congruent"
@@ -298,7 +299,7 @@
         // on the fly?
         // NOTE: it's very important to first assign to doc then pass it to
         // fieldsWriter.addDocument; see LUCENE-1282
-        Document doc = reader.reader.document(j);
+        Document doc = reader.document(j);
         addDocument(doc, mergeState.fieldInfos);
         docCount++;
         mergeState.checkAbort.work(300);
@@ -307,10 +308,10 @@
     return docCount;
   }
 
-  private int copyFieldsNoDeletions(MergeState mergeState, final MergeState.IndexReaderAndLiveDocs reader,
+  private int copyFieldsNoDeletions(MergeState mergeState, final AtomicReader reader,
                                     final Lucene40StoredFieldsReader matchingFieldsReader, int rawDocLengths[])
     throws IOException, MergeAbortedException, CorruptIndexException {
-    final int maxDoc = reader.reader.maxDoc();
+    final int maxDoc = reader.maxDoc();
     int docCount = 0;
     if (matchingFieldsReader != null) {
       // We can bulk-copy because the fieldInfos are "congruent"
@@ -325,7 +326,7 @@
       for (; docCount < maxDoc; docCount++) {
         // NOTE: it's very important to first assign to doc then pass it to
         // fieldsWriter.addDocument; see LUCENE-1282
-        Document doc = reader.reader.document(docCount);
+        Document doc = reader.document(docCount);
         addDocument(doc, mergeState.fieldInfos);
         mergeState.checkAbort.work(300);
       }
Index: lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40TermVectorsWriter.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40TermVectorsWriter.java	(révision 1354577)
+++ lucene/core/src/java/org/apache/lucene/codecs/lucene40/Lucene40TermVectorsWriter.java	(copie de travail)
@@ -23,6 +23,7 @@
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.codecs.TermVectorsReader;
 import org.apache.lucene.codecs.TermVectorsWriter;
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.Fields;
@@ -255,7 +256,7 @@
 
     int idx = 0;
     int numDocs = 0;
-    for (final MergeState.IndexReaderAndLiveDocs reader : mergeState.readers) {
+    for (final AtomicReader reader : mergeState.readers) {
       final SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
       Lucene40TermVectorsReader matchingVectorsReader = null;
       if (matchingSegmentReader != null) {
@@ -265,7 +266,7 @@
             matchingVectorsReader = (Lucene40TermVectorsReader) vectorsReader;
         }
       }
-      if (reader.liveDocs != null) {
+      if (reader.getLiveDocs() != null) {
         numDocs += copyVectorsWithDeletions(mergeState, matchingVectorsReader, reader, rawDocLengths, rawDocLengths2);
       } else {
         numDocs += copyVectorsNoDeletions(mergeState, matchingVectorsReader, reader, rawDocLengths, rawDocLengths2);
@@ -281,12 +282,12 @@
 
   private int copyVectorsWithDeletions(MergeState mergeState,
                                         final Lucene40TermVectorsReader matchingVectorsReader,
-                                        final MergeState.IndexReaderAndLiveDocs reader,
+                                        final AtomicReader reader,
                                         int rawDocLengths[],
                                         int rawDocLengths2[])
           throws IOException, MergeAbortedException {
-    final int maxDoc = reader.reader.maxDoc();
-    final Bits liveDocs = reader.liveDocs;
+    final int maxDoc = reader.maxDoc();
+    final Bits liveDocs = reader.getLiveDocs();
     int totalNumDocs = 0;
     if (matchingVectorsReader != null) {
       // We can bulk-copy because the fieldInfos are "congruent"
@@ -323,7 +324,7 @@
         
         // NOTE: it's very important to first assign to vectors then pass it to
         // termVectorsWriter.addAllDocVectors; see LUCENE-1282
-        Fields vectors = reader.reader.getTermVectors(docNum);
+        Fields vectors = reader.getTermVectors(docNum);
         addAllDocVectors(vectors, mergeState.fieldInfos);
         totalNumDocs++;
         mergeState.checkAbort.work(300);
@@ -334,11 +335,11 @@
   
   private int copyVectorsNoDeletions(MergeState mergeState,
                                       final Lucene40TermVectorsReader matchingVectorsReader,
-                                      final MergeState.IndexReaderAndLiveDocs reader,
+                                      final AtomicReader reader,
                                       int rawDocLengths[],
                                       int rawDocLengths2[])
           throws IOException, MergeAbortedException {
-    final int maxDoc = reader.reader.maxDoc();
+    final int maxDoc = reader.maxDoc();
     if (matchingVectorsReader != null) {
       // We can bulk-copy because the fieldInfos are "congruent"
       int docCount = 0;
@@ -353,7 +354,7 @@
       for (int docNum = 0; docNum < maxDoc; docNum++) {
         // NOTE: it's very important to first assign to vectors then pass it to
         // termVectorsWriter.addAllDocVectors; see LUCENE-1282
-        Fields vectors = reader.reader.getTermVectors(docNum);
+        Fields vectors = reader.getTermVectors(docNum);
         addAllDocVectors(vectors, mergeState.fieldInfos);
         mergeState.checkAbort.work(300);
       }
Index: lucene/core/src/java/org/apache/lucene/codecs/DocValuesConsumer.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/codecs/DocValuesConsumer.java	(révision 1354577)
+++ lucene/core/src/java/org/apache/lucene/codecs/DocValuesConsumer.java	(copie de travail)
@@ -29,6 +29,7 @@
 import org.apache.lucene.document.ShortDocValuesField;
 import org.apache.lucene.document.SortedBytesDocValuesField;
 import org.apache.lucene.document.StraightBytesDocValuesField;
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.DocValues.Source;
 import org.apache.lucene.index.DocValues.Type;
 import org.apache.lucene.index.DocValues;
@@ -106,12 +107,12 @@
     assert mergeState != null;
     boolean hasMerged = false;
     for(int readerIDX=0;readerIDX<mergeState.readers.size();readerIDX++) {
-      final org.apache.lucene.index.MergeState.IndexReaderAndLiveDocs reader = mergeState.readers.get(readerIDX);
+      final AtomicReader reader = mergeState.readers.get(readerIDX);
       if (docValues[readerIDX] != null) {
         hasMerged = true;
         merge(docValues[readerIDX], mergeState.docBase[readerIDX],
-              reader.reader.maxDoc(), reader.liveDocs);
-        mergeState.checkAbort.work(reader.reader.maxDoc());
+              reader.maxDoc(), reader.getLiveDocs());
+        mergeState.checkAbort.work(reader.maxDoc());
       }
     }
     // only finish if no exception is thrown!
Index: lucene/core/src/java/org/apache/lucene/index/ReadersAndLiveDocs.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/index/ReadersAndLiveDocs.java	(révision 1354577)
+++ lucene/core/src/java/org/apache/lucene/index/ReadersAndLiveDocs.java	(copie de travail)
@@ -130,34 +130,51 @@
     return reader;
   }
 
-  // Get reader for merging (does not load the terms
-  // index):
+  /** Get reader for merging (does not load the terms index). */
   public synchronized SegmentReader getMergeReader(IOContext context) throws IOException {
-    //System.out.println("  livedocs=" + rld.liveDocs);
+    assert Thread.holdsLock(writer);
+    shared = true; // future attempts to retrieve live docs will use copy-on-write
 
+    final boolean hasDeletions = info.hasDeletions() || pendingDeleteCount > 0;
+    // the actual number of docs
+    final int numDocs = info.info.getDocCount() - info.getDelCount() - pendingDeleteCount;
+
     if (mergeReader == null) {
 
       if (reader != null) {
         // Just use the already opened non-merge reader
         // for merging.  In the NRT case this saves us
         // pointless double-open:
-        //System.out.println("PROMOTE non-merge reader seg=" + rld.info);
-        // Ref for us:
-        reader.incRef();
-        mergeReader = reader;
-        //System.out.println(Thread.currentThread().getName() + ": getMergeReader share seg=" + info.name);
+        if (hasDeletions) {
+          mergeReader = new SegmentReader(reader.getSegmentInfo(), reader.core, liveDocs, numDocs);
+        } else {
+          // Ref for us:
+          reader.incRef();
+          mergeReader = reader;
+        }
       } else {
-        //System.out.println(Thread.currentThread().getName() + ": getMergeReader seg=" + info.name);
-        // We steal returned ref:
-        mergeReader = new SegmentReader(info, -1, context);
-        if (liveDocs == null) {
-          liveDocs = mergeReader.getLiveDocs();
-        }
+        mergeReader = new SegmentReader(info, -1, context, liveDocs, numDocs);
+        liveDocs = mergeReader.getLiveDocs();
       }
+
+    } else if (mergeReader.numDocs() != numDocs) {
+      // take the pending deletes into account
+      assert mergeReader.getSegmentInfo() == info;
+      final SegmentReader formerMergeReader = mergeReader;
+      mergeReader = new SegmentReader(info, formerMergeReader.core, liveDocs, numDocs);
+      liveDocs = mergeReader.getLiveDocs();
+      release(formerMergeReader);
     }
 
+    assert hasDeletions == mergeReader.hasDeletions();
+    assert hasDeletions == (mergeReader.numDeletedDocs() > 0);
+    assert hasDeletions == (mergeReader.getLiveDocs() != null);
+    assert numDocs == mergeReader.numDocs();
+    assert verifyDocCounts();
+
     // Ref for caller
     mergeReader.incRef();
+
     return mergeReader;
   }
 
Index: lucene/core/src/java/org/apache/lucene/index/MergePolicy.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/index/MergePolicy.java	(révision 1354577)
+++ lucene/core/src/java/org/apache/lucene/index/MergePolicy.java	(copie de travail)
@@ -74,7 +74,6 @@
     int maxNumSegments = -1;        // used by IndexWriter
     public long estimatedMergeBytes;       // used by IndexWriter
     List<SegmentReader> readers;        // used by IndexWriter
-    List<Bits> readerLiveDocs;      // used by IndexWriter
     public final List<SegmentInfoPerCommit> segments;
     public final int totalDocCount;
     boolean aborted;
Index: lucene/core/src/java/org/apache/lucene/index/SegmentReader.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/index/SegmentReader.java	(révision 1354577)
+++ lucene/core/src/java/org/apache/lucene/index/SegmentReader.java	(copie de travail)
@@ -51,18 +51,29 @@
    * @throws IOException if there is a low-level IO error
    */
   public SegmentReader(SegmentInfoPerCommit si, int termInfosIndexDivisor, IOContext context) throws IOException {
+    this(si, termInfosIndexDivisor, context, null, -1);
+  }
+
+  // Used by ReadersAndLiveDocs to have segment readers with configurable live docs
+  SegmentReader(SegmentInfoPerCommit si, int termInfosIndexDivisor, IOContext context, Bits liveDocs, int numDocs) throws IOException {
     this.si = si;
     core = new SegmentCoreReaders(this, si.info.dir, si, context, termInfosIndexDivisor);
     boolean success = false;
     try {
-      if (si.hasDeletions()) {
+      if (liveDocs != null) {
+        this.liveDocs = liveDocs;
+      } else if (si.hasDeletions()) {
         // NOTE: the bitvector is stored using the regular directory, not cfs
-        liveDocs = si.info.getCodec().liveDocsFormat().readLiveDocs(directory(), si, new IOContext(IOContext.READ, true));
+        this.liveDocs = si.info.getCodec().liveDocsFormat().readLiveDocs(directory(), si, new IOContext(IOContext.READ, true));
       } else {
         assert si.getDelCount() == 0;
-        liveDocs = null;
+        this.liveDocs = null;
       }
-      numDocs = si.info.getDocCount() - si.getDelCount();
+      if (numDocs >= 0) {
+        this.numDocs = numDocs;
+      } else {
+        this.numDocs = si.info.getDocCount() - si.getDelCount();
+      }
       success = true;
     } finally {
       // With lock-less commits, it's entirely possible (and
Index: lucene/core/src/java/org/apache/lucene/index/MergeState.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/index/MergeState.java	(révision 1354577)
+++ lucene/core/src/java/org/apache/lucene/index/MergeState.java	(copie de travail)
@@ -31,18 +31,6 @@
  * @lucene.experimental */
 public class MergeState {
 
-  public static class IndexReaderAndLiveDocs {
-    public final AtomicReader reader;
-    public final Bits liveDocs;
-    public final int numDeletedDocs;
-
-    public IndexReaderAndLiveDocs(AtomicReader reader, Bits liveDocs, int numDeletedDocs) {
-      this.reader = reader;
-      this.liveDocs = liveDocs;
-      this.numDeletedDocs = numDeletedDocs;
-    }
-  }
-
   public static abstract class DocMap {
     private final Bits liveDocs;
 
@@ -50,17 +38,16 @@
       this.liveDocs = liveDocs;
     }
 
-    public static DocMap build(IndexReaderAndLiveDocs reader) {
-      final int maxDoc = reader.reader.maxDoc();
-      final int numDeletes = reader.numDeletedDocs;
+    public static DocMap build(AtomicReader reader) {
+      final int maxDoc = reader.maxDoc();
+      final int numDeletes = reader.numDeletedDocs();
       final int numDocs = maxDoc - numDeletes;
-      assert reader.liveDocs != null || numDeletes == 0;
       if (numDeletes == 0) {
         return new NoDelDocMap(maxDoc);
       } else if (numDeletes < numDocs) {
-        return buildDelCountDocmap(maxDoc, numDeletes, reader.liveDocs, PackedInts.COMPACT);
+        return buildDelCountDocmap(maxDoc, numDeletes, reader.getLiveDocs(), PackedInts.COMPACT);
       } else {
-        return buildDirectDocMap(maxDoc, numDocs, reader.liveDocs, PackedInts.COMPACT);
+        return buildDirectDocMap(maxDoc, numDocs, reader.getLiveDocs(), PackedInts.COMPACT);
       }
     }
 
@@ -197,7 +184,7 @@
 
   public SegmentInfo segmentInfo;
   public FieldInfos fieldInfos;
-  public List<IndexReaderAndLiveDocs> readers;        // Readers & liveDocs being merged
+  public List<AtomicReader> readers;                  // Readers being merged
   public DocMap[] docMaps;                            // Maps docIDs around deletions
   public int[] docBase;                               // New docID base per reader
   public CheckAbort checkAbort;
Index: lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java	(révision 1354577)
+++ lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java	(copie de travail)
@@ -31,7 +31,6 @@
 import org.apache.lucene.codecs.TermVectorsWriter;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
-import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.InfoStream;
 
@@ -60,7 +59,7 @@
                 FieldInfos.FieldNumbers fieldNumbers, IOContext context) {
     mergeState.segmentInfo = segmentInfo;
     mergeState.infoStream = infoStream;
-    mergeState.readers = new ArrayList<MergeState.IndexReaderAndLiveDocs>();
+    mergeState.readers = new ArrayList<AtomicReader>();
     mergeState.checkAbort = checkAbort;
     mergeState.payloadProcessorProvider = payloadProcessorProvider;
     directory = dir;
@@ -77,12 +76,12 @@
   final void add(IndexReader reader) {
     for (final AtomicReaderContext ctx : reader.getTopReaderContext().leaves()) {
       final AtomicReader r = ctx.reader();
-      mergeState.readers.add(new MergeState.IndexReaderAndLiveDocs(r, r.getLiveDocs(), r.numDeletedDocs()));
+      mergeState.readers.add(r);
     }
   }
 
-  final void add(SegmentReader reader, Bits liveDocs, int delCount) {
-    mergeState.readers.add(new MergeState.IndexReaderAndLiveDocs(reader, liveDocs, delCount));
+  final void add(AtomicReader reader) {
+    mergeState.readers.add(reader);
   }
 
   /**
@@ -138,14 +137,14 @@
     // FieldInfos, then we can do a bulk copy of the
     // stored fields:
     for (int i = 0; i < numReaders; i++) {
-      MergeState.IndexReaderAndLiveDocs reader = mergeState.readers.get(i);
+      AtomicReader reader = mergeState.readers.get(i);
       // TODO: we may be able to broaden this to
       // non-SegmentReaders, since FieldInfos is now
       // required?  But... this'd also require exposing
       // bulk-copy (TVs and stored fields) API in foreign
       // readers..
-      if (reader.reader instanceof SegmentReader) {
-        SegmentReader segmentReader = (SegmentReader) reader.reader;
+      if (reader instanceof SegmentReader) {
+        SegmentReader segmentReader = (SegmentReader) reader;
         boolean same = true;
         FieldInfos segmentFieldInfos = segmentReader.getFieldInfos();
         for (FieldInfo fi : segmentFieldInfos) {
@@ -188,8 +187,7 @@
     Map<FieldInfo,TypePromoter> docValuesTypes = new HashMap<FieldInfo,TypePromoter>();
     Map<FieldInfo,TypePromoter> normValuesTypes = new HashMap<FieldInfo,TypePromoter>();
 
-    for (MergeState.IndexReaderAndLiveDocs readerAndLiveDocs : mergeState.readers) {
-      final AtomicReader reader = readerAndLiveDocs.reader;
+    for (AtomicReader reader : mergeState.readers) {
       FieldInfos readerFieldInfos = reader.getFieldInfos();
       for (FieldInfo fi : readerFieldInfos) {
         FieldInfo merged = fieldInfosBuilder.add(fi);
@@ -283,7 +281,7 @@
     int i = 0;
     while(i < mergeState.readers.size()) {
 
-      final MergeState.IndexReaderAndLiveDocs reader = mergeState.readers.get(i);
+      final AtomicReader reader = mergeState.readers.get(i);
 
       mergeState.docBase[i] = docBase;
       final MergeState.DocMap docMap = MergeState.DocMap.build(reader);
@@ -291,7 +289,7 @@
       docBase += docMap.numDocs();
 
       if (mergeState.payloadProcessorProvider != null) {
-        mergeState.readerPayloadProcessor[i] = mergeState.payloadProcessorProvider.getReaderProcessor(reader.reader);
+        mergeState.readerPayloadProcessor[i] = mergeState.payloadProcessorProvider.getReaderProcessor(reader);
       }
 
       i++;
@@ -308,9 +306,9 @@
     int docBase = 0;
 
     for(int readerIndex=0;readerIndex<mergeState.readers.size();readerIndex++) {
-      final MergeState.IndexReaderAndLiveDocs r = mergeState.readers.get(readerIndex);
-      final Fields f = r.reader.fields();
-      final int maxDoc = r.reader.maxDoc();
+      final AtomicReader reader = mergeState.readers.get(readerIndex);
+      final Fields f = reader.fields();
+      final int maxDoc = reader.maxDoc();
       if (f != null) {
         slices.add(new ReaderSlice(docBase, maxDoc, readerIndex));
         fields.add(f);
Index: lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/index/IndexWriter.java	(révision 1354577)
+++ lucene/core/src/java/org/apache/lucene/index/IndexWriter.java	(copie de travail)
@@ -2889,7 +2889,7 @@
       SegmentInfoPerCommit info = sourceSegments.get(i);
       minGen = Math.min(info.getBufferedDeletesGen(), minGen);
       final int docCount = info.info.getDocCount();
-      final Bits prevLiveDocs = merge.readerLiveDocs.get(i);
+      final Bits prevLiveDocs = merge.readers.get(i).getLiveDocs();
       final Bits currentLiveDocs;
       final ReadersAndLiveDocs rld = readerPool.get(info, false);
       // We hold a ref so it should still be in the pool:
@@ -3439,7 +3439,6 @@
     }
 
     merge.readers = new ArrayList<SegmentReader>();
-    merge.readerLiveDocs = new ArrayList<Bits>();
 
     // This is try/finally to make sure merger's readers are
     // closed:
@@ -3453,22 +3452,14 @@
         // Hold onto the "live" reader; we will use this to
         // commit merged deletes
         final ReadersAndLiveDocs rld = readerPool.get(info, true);
-        final SegmentReader reader = rld.getMergeReader(context);
-        assert reader != null;
-
-        // Carefully pull the most recent live docs:
-        final Bits liveDocs;
-        final int delCount;
-
+        final SegmentReader reader;
         synchronized(this) {
           // Must sync to ensure BufferedDeletesStream
           // cannot change liveDocs/pendingDeleteCount while
           // we pull a copy:
-          liveDocs = rld.getReadOnlyLiveDocs();
-          delCount = rld.getPendingDeleteCount() + info.getDelCount();
+          reader = rld.getMergeReader(context);
+          assert reader != null;
 
-          assert rld.verifyDocCounts();
-
           if (infoStream.isEnabled("IW")) {
             if (rld.getPendingDeleteCount() != 0) {
               infoStream.message("IW", "seg=" + segString(info) + " delCount=" + info.getDelCount() + " pendingDelCount=" + rld.getPendingDeleteCount());
@@ -3479,11 +3470,10 @@
             }
           }
         }
-        merge.readerLiveDocs.add(liveDocs);
         merge.readers.add(reader);
-        assert delCount <= info.info.getDocCount(): "delCount=" + delCount + " info.docCount=" + info.info.getDocCount() + " rld.pendingDeleteCount=" + rld.getPendingDeleteCount() + " info.getDelCount()=" + info.getDelCount();
-        if (delCount < info.info.getDocCount()) {
-          merger.add(reader, liveDocs, delCount);
+        assert reader.numDeletedDocs() <= info.info.getDocCount(): "delCount=" + reader.numDeletedDocs() + " info.docCount=" + info.info.getDocCount() + " rld.pendingDeleteCount=" + rld.getPendingDeleteCount() + " info.getDelCount()=" + info.getDelCount();
+        if (reader.numDeletedDocs() < info.info.getDocCount()) {
+          merger.add(reader);
         }
         segUpto++;
       }
