Index: lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java
===================================================================
--- lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java	(révision 1456616)
+++ lucene/core/src/test/org/apache/lucene/index/Test2BTerms.java	(copie de travail)
@@ -163,7 +163,8 @@
                                       .setRAMBufferSizeMB(256.0)
                                       .setMergeScheduler(new ConcurrentMergeScheduler())
                                       .setMergePolicy(newLogMergePolicy(false, 10))
-                                      .setOpenMode(IndexWriterConfig.OpenMode.CREATE));
+                                      .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
+                                      .setSegmentMerger(new SegmentMerger()));
 
       MergePolicy mp = w.getConfig().getMergePolicy();
       if (mp instanceof LogByteSizeMergePolicy) {
Index: lucene/core/src/test/org/apache/lucene/index/Test2BNumericDocValues.java
===================================================================
--- lucene/core/src/test/org/apache/lucene/index/Test2BNumericDocValues.java	(révision 1456616)
+++ lucene/core/src/test/org/apache/lucene/index/Test2BNumericDocValues.java	(copie de travail)
@@ -46,7 +46,8 @@
         .setRAMBufferSizeMB(256.0)
         .setMergeScheduler(new ConcurrentMergeScheduler())
         .setMergePolicy(newLogMergePolicy(false, 10))
-        .setOpenMode(IndexWriterConfig.OpenMode.CREATE));
+        .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
+        .setSegmentMerger(new SegmentMerger()));
 
     Document doc = new Document();
     NumericDocValuesField dvField = new NumericDocValuesField("dv", 0);
Index: lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValues.java
===================================================================
--- lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValues.java	(révision 1456616)
+++ lucene/core/src/test/org/apache/lucene/index/Test2BSortedDocValues.java	(copie de travail)
@@ -49,7 +49,8 @@
         .setRAMBufferSizeMB(256.0)
         .setMergeScheduler(new ConcurrentMergeScheduler())
         .setMergePolicy(newLogMergePolicy(false, 10))
-        .setOpenMode(IndexWriterConfig.OpenMode.CREATE));
+        .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
+        .setSegmentMerger(new SegmentMerger()));
 
     Document doc = new Document();
     byte bytes[] = new byte[2];
@@ -106,7 +107,8 @@
         .setRAMBufferSizeMB(256.0)
         .setMergeScheduler(new ConcurrentMergeScheduler())
         .setMergePolicy(newLogMergePolicy(false, 10))
-        .setOpenMode(IndexWriterConfig.OpenMode.CREATE));
+        .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
+        .setSegmentMerger(new SegmentMerger()));
 
     Document doc = new Document();
     byte bytes[] = new byte[4];
Index: lucene/core/src/test/org/apache/lucene/index/Test4GBStoredFields.java
===================================================================
--- lucene/core/src/test/org/apache/lucene/index/Test4GBStoredFields.java	(révision 1456616)
+++ lucene/core/src/test/org/apache/lucene/index/Test4GBStoredFields.java	(copie de travail)
@@ -50,7 +50,8 @@
         .setRAMBufferSizeMB(256.0)
         .setMergeScheduler(new ConcurrentMergeScheduler())
         .setMergePolicy(newLogMergePolicy(false, 10))
-        .setOpenMode(IndexWriterConfig.OpenMode.CREATE));
+        .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
+        .setSegmentMerger(new SegmentMerger()));
 
     MergePolicy mp = w.getConfig().getMergePolicy();
     if (mp instanceof LogByteSizeMergePolicy) {
Index: lucene/core/src/test/org/apache/lucene/index/Test2BPostingsBytes.java
===================================================================
--- lucene/core/src/test/org/apache/lucene/index/Test2BPostingsBytes.java	(révision 1456616)
+++ lucene/core/src/test/org/apache/lucene/index/Test2BPostingsBytes.java	(copie de travail)
@@ -61,7 +61,8 @@
         .setRAMBufferSizeMB(256.0)
         .setMergeScheduler(new ConcurrentMergeScheduler())
         .setMergePolicy(newLogMergePolicy(false, 10))
-        .setOpenMode(IndexWriterConfig.OpenMode.CREATE));
+        .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
+        .setSegmentMerger(new SegmentMerger()));
 
     MergePolicy mp = w.getConfig().getMergePolicy();
     if (mp instanceof LogByteSizeMergePolicy) {
Index: lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java
===================================================================
--- lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java	(révision 1456616)
+++ lucene/core/src/test/org/apache/lucene/index/TestIndexWriterConfig.java	(copie de travail)
@@ -73,6 +73,7 @@
     assertNull(conf.getMergedSegmentWarmer());
     assertEquals(IndexWriterConfig.DEFAULT_READER_TERMS_INDEX_DIVISOR, conf.getReaderTermsIndexDivisor());
     assertEquals(TieredMergePolicy.class, conf.getMergePolicy().getClass());
+    assertEquals(SegmentMerger.class, conf.getSegmentMerger().getClass());
     assertEquals(ThreadAffinityDocumentsWriterThreadPool.class, conf.getIndexerThreadPool().getClass());
     assertEquals(FlushByRamOrCountsPolicy.class, conf.getFlushPolicy().getClass());
     assertEquals(IndexWriterConfig.DEFAULT_RAM_PER_THREAD_HARD_LIMIT_MB, conf.getRAMPerThreadHardLimitMB());
@@ -96,6 +97,7 @@
     getters.add("getIndexingChain");
     getters.add("getMergedSegmentWarmer");
     getters.add("getMergePolicy");
+    getters.add("getSegmentMerger");
     getters.add("getMaxThreadStates");
     getters.add("getReaderPooling");
     getters.add("getIndexerThreadPool");
Index: lucene/core/src/test/org/apache/lucene/index/Test2BBinaryDocValues.java
===================================================================
--- lucene/core/src/test/org/apache/lucene/index/Test2BBinaryDocValues.java	(révision 1456616)
+++ lucene/core/src/test/org/apache/lucene/index/Test2BBinaryDocValues.java	(copie de travail)
@@ -49,7 +49,8 @@
         .setRAMBufferSizeMB(256.0)
         .setMergeScheduler(new ConcurrentMergeScheduler())
         .setMergePolicy(newLogMergePolicy(false, 10))
-        .setOpenMode(IndexWriterConfig.OpenMode.CREATE));
+        .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
+        .setSegmentMerger(new SegmentMerger()));
 
     Document doc = new Document();
     byte bytes[] = new byte[4];
@@ -109,7 +110,8 @@
         .setRAMBufferSizeMB(256.0)
         .setMergeScheduler(new ConcurrentMergeScheduler())
         .setMergePolicy(newLogMergePolicy(false, 10))
-        .setOpenMode(IndexWriterConfig.OpenMode.CREATE));
+        .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
+        .setSegmentMerger(new SegmentMerger()));
 
     Document doc = new Document();
     byte bytes[] = new byte[4];
Index: lucene/core/src/test/org/apache/lucene/index/TestDoc.java
===================================================================
--- lucene/core/src/test/org/apache/lucene/index/TestDoc.java	(révision 1456616)
+++ lucene/core/src/test/org/apache/lucene/index/TestDoc.java	(copie de travail)
@@ -25,6 +25,7 @@
 import java.io.PrintWriter;
 import java.io.StringWriter;
 import java.io.Writer;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.LinkedList;
@@ -218,12 +219,10 @@
       TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(si1.info.dir);
       final SegmentInfo si = new SegmentInfo(si1.info.dir, Constants.LUCENE_MAIN_VERSION, merged, -1, false, codec, null, null);
 
-      SegmentMerger merger = new SegmentMerger(si, InfoStream.getDefault(), trackingDir, IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL,
-                                               MergeState.CheckAbort.NONE, new FieldInfos.FieldNumbers(), context);
-
-      merger.add(r1);
-      merger.add(r2);
-      MergeState mergeState = merger.merge();
+      SegmentWriteState segmentWriteState = new SegmentWriteState(InfoStream.getDefault(), trackingDir, si, null, IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL, null, context);
+      SegmentMerger merger = new SegmentMerger();
+
+      merger.merge(Arrays.<AtomicReader>asList(r1, r2), segmentWriteState, MergeState.CheckAbort.NONE, new FieldInfos.FieldNumbers());
       r1.close();
       r2.close();
       final SegmentInfo info = new SegmentInfo(si1.info.dir, Constants.LUCENE_MAIN_VERSION, merged,
Index: lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java
===================================================================
--- lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java	(révision 1456616)
+++ lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java	(copie de travail)
@@ -18,6 +18,7 @@
  */
 
 import java.io.IOException;
+import java.util.Arrays;
 
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.document.Document;
@@ -29,7 +30,6 @@
 import org.apache.lucene.util.InfoStream;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util._TestUtil;
-import org.apache.lucene.util.packed.PackedInts;
 
 public class TestSegmentMerger extends LuceneTestCase {
   //The variables for the new merged segment
@@ -80,11 +80,10 @@
     final Codec codec = Codec.getDefault();
     final SegmentInfo si = new SegmentInfo(mergedDir, Constants.LUCENE_MAIN_VERSION, mergedSegment, -1, false, codec, null, null);
 
-    SegmentMerger merger = new SegmentMerger(si, InfoStream.getDefault(), mergedDir, IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL,
-                                             MergeState.CheckAbort.NONE, new FieldInfos.FieldNumbers(), newIOContext(random()));
-    merger.add(reader1);
-    merger.add(reader2);
-    MergeState mergeState = merger.merge();
+    SegmentWriteState segmentWriteState = new SegmentWriteState(InfoStream.getDefault(), mergedDir, si, null, IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL, null, newIOContext(random()));
+    SegmentMerger merger = new SegmentMerger();
+
+    final MergeState mergeState = merger.merge(Arrays.<AtomicReader>asList(reader1, reader2), segmentWriteState, MergeState.CheckAbort.NONE, new FieldInfos.FieldNumbers());
     int docsMerged = mergeState.segmentInfo.getDocCount();
     assertTrue(docsMerged == 2);
     //Should be able to open a new SegmentReader against the new directory
@@ -141,18 +140,6 @@
     mergedReader.close();
   }
 
-  private static boolean equals(MergeState.DocMap map1, MergeState.DocMap map2) {
-    if (map1.maxDoc() != map2.maxDoc()) {
-      return false;
-    }
-    for (int i = 0; i < map1.maxDoc(); ++i) {
-      if (map1.get(i) != map2.get(i)) {
-        return false;
-      }
-    }
-    return true;
-  }
-
   public void testBuildDocMap() {
     final int maxDoc = _TestUtil.nextInt(random(), 1, 128);
     final int numDocs = _TestUtil.nextInt(random(), 0, maxDoc);
Index: lucene/core/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java
===================================================================
--- lucene/core/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java	(révision 1456616)
+++ lucene/core/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java	(copie de travail)
@@ -228,9 +228,14 @@
         writer.close();
       }
 
-      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+      IndexWriterConfig iwConf = newIndexWriterConfig(
           TEST_VERSION_CURRENT, new MockAnalyzer(random())).setMergePolicy(
-          new LogByteSizeMergePolicy()).setInfoStream(new FailOnNonBulkMergesInfoStream()));
+          new LogByteSizeMergePolicy());
+      if (SegmentMerger.class.equals(iwConf.getSegmentMerger().getClass())) {
+        // regular segment merger -> bulk merge
+        iwConf.setInfoStream(new FailOnNonBulkMergesInfoStream());
+      }
+      IndexWriter writer = new IndexWriter(dir, iwConf);
       writer.forceMerge(1);
       writer.close();
 
Index: lucene/core/src/test/org/apache/lucene/index/Test2BPostings.java
===================================================================
--- lucene/core/src/test/org/apache/lucene/index/Test2BPostings.java	(révision 1456616)
+++ lucene/core/src/test/org/apache/lucene/index/Test2BPostings.java	(copie de travail)
@@ -54,7 +54,8 @@
         .setRAMBufferSizeMB(256.0)
         .setMergeScheduler(new ConcurrentMergeScheduler())
         .setMergePolicy(newLogMergePolicy(false, 10))
-        .setOpenMode(IndexWriterConfig.OpenMode.CREATE);
+        .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
+        .setSegmentMerger(new SegmentMerger());
     
     IndexWriter w = new IndexWriter(dir, iwc);
 
Index: lucene/core/src/test/org/apache/lucene/index/Test2BPositions.java
===================================================================
--- lucene/core/src/test/org/apache/lucene/index/Test2BPositions.java	(révision 1456616)
+++ lucene/core/src/test/org/apache/lucene/index/Test2BPositions.java	(copie de travail)
@@ -57,7 +57,8 @@
         .setRAMBufferSizeMB(256.0)
         .setMergeScheduler(new ConcurrentMergeScheduler())
         .setMergePolicy(newLogMergePolicy(false, 10))
-        .setOpenMode(IndexWriterConfig.OpenMode.CREATE));
+        .setOpenMode(IndexWriterConfig.OpenMode.CREATE)
+        .setSegmentMerger(new SegmentMerger()));
 
     MergePolicy mp = w.getConfig().getMergePolicy();
     if (mp instanceof LogByteSizeMergePolicy) {
Index: lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java
===================================================================
--- lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java	(révision 1456616)
+++ lucene/core/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java	(copie de travail)
@@ -351,6 +351,7 @@
     IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
     iwc.setMaxBufferedDocs(5);
     iwc.setMergeScheduler(new TrackingCMS());
+    iwc.setSegmentMerger(new SegmentMerger());
     if (_TestUtil.getPostingsFormat("id").equals("SimpleText")) {
       // no
       iwc.setCodec(_TestUtil.alwaysPostingsFormat(new Lucene41PostingsFormat()));
Index: lucene/core/src/test/org/apache/lucene/codecs/compressing/TestCompressingStoredFieldsFormat.java
===================================================================
--- lucene/core/src/test/org/apache/lucene/codecs/compressing/TestCompressingStoredFieldsFormat.java	(révision 1456616)
+++ lucene/core/src/test/org/apache/lucene/codecs/compressing/TestCompressingStoredFieldsFormat.java	(copie de travail)
@@ -39,6 +39,7 @@
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.SegmentMerger;
 import org.apache.lucene.index.StorableField;
 import org.apache.lucene.index.StoredDocument;
 import org.apache.lucene.index.Term;
@@ -306,6 +307,7 @@
     iw.close();
     dir.close();
     dir = newFSDirectory(_TestUtil.getTempDir(getClass().getSimpleName()));
+    iwConf.setSegmentMerger(new SegmentMerger());
     iw = new RandomIndexWriter(random(), dir, iwConf);
 
     if (dir instanceof MockDirectoryWrapper) {
Index: lucene/core/src/java/org/apache/lucene/index/SegmentWriteState.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/index/SegmentWriteState.java	(révision 1456616)
+++ lucene/core/src/java/org/apache/lucene/index/SegmentWriteState.java	(copie de travail)
@@ -42,7 +42,7 @@
 
   /** {@link FieldInfos} describing all fields in this
    *  segment. */
-  public final FieldInfos fieldInfos;
+  public FieldInfos fieldInfos;
 
   /** Number of deleted documents set while flushing the
    *  segment. */
Index: lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java	(révision 1456616)
+++ lucene/core/src/java/org/apache/lucene/index/IndexWriterConfig.java	(copie de travail)
@@ -291,6 +291,15 @@
     return this;
   }
 
+  /** Expert: set the {@link SegmentMerger} impl to use. */
+  public IndexWriterConfig setSegmentMerger(SegmentMerger segmentMerger) {
+    if (segmentMerger == null) {
+      throw new IllegalArgumentException("segmentMerger must not be null");
+    }
+    this.segmentMerger = segmentMerger;
+    return this;
+  }
+
   /**
    * Set the {@link Codec}.
    * 
@@ -316,6 +325,11 @@
     return mergePolicy;
   }
 
+  @Override
+  public SegmentMerger getSegmentMerger() {
+    return segmentMerger;
+  }
+
   /** Expert: Sets the {@link DocumentsWriterPerThreadPool} instance used by the
    * IndexWriter to assign thread-states to incoming indexing threads. If no
    * {@link DocumentsWriterPerThreadPool} is set {@link IndexWriter} will use
Index: lucene/core/src/java/org/apache/lucene/index/FieldInfos.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/index/FieldInfos.java	(révision 1456616)
+++ lucene/core/src/java/org/apache/lucene/index/FieldInfos.java	(copie de travail)
@@ -158,8 +158,10 @@
   public FieldInfo fieldInfo(int fieldNumber) {
     return (fieldNumber >= 0) ? byNumber.get(fieldNumber) : null;
   }
-  
-  static final class FieldNumbers {
+
+  /** A map of field names/numbers.
+   *  @lucene.internal */
+  public static final class FieldNumbers {
     
     private final Map<Integer,String> numberToName;
     private final Map<String,Integer> nameToNumber;
Index: lucene/core/src/java/org/apache/lucene/index/LiveIndexWriterConfig.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/index/LiveIndexWriterConfig.java	(révision 1456616)
+++ lucene/core/src/java/org/apache/lucene/index/LiveIndexWriterConfig.java	(copie de travail)
@@ -80,6 +80,9 @@
   /** {@link MergePolicy} for selecting merges. */
   protected volatile MergePolicy mergePolicy;
 
+  /** {@link SegmentMerger} for running merges. */
+  protected volatile SegmentMerger segmentMerger;
+
   /** {@code DocumentsWriterPerThreadPool} to control how
    *  threads are allocated to {@code DocumentsWriterPerThread}. */
   protected volatile DocumentsWriterPerThreadPool indexerThreadPool;
@@ -121,6 +124,7 @@
     }
     infoStream = InfoStream.getDefault();
     mergePolicy = new TieredMergePolicy();
+    segmentMerger = new SegmentMerger();
     flushPolicy = new FlushByRamOrCountsPolicy();
     readerPooling = IndexWriterConfig.DEFAULT_READER_POOLING;
     indexerThreadPool = new ThreadAffinityDocumentsWriterThreadPool(IndexWriterConfig.DEFAULT_MAX_THREAD_STATES);
@@ -150,6 +154,7 @@
     codec = config.getCodec();
     infoStream = config.getInfoStream();
     mergePolicy = config.getMergePolicy();
+    segmentMerger = config.getSegmentMerger();
     indexerThreadPool = config.getIndexerThreadPool();
     readerPooling = config.getReaderPooling();
     flushPolicy = config.getFlushPolicy();
@@ -478,8 +483,16 @@
   public MergePolicy getMergePolicy() {
     return mergePolicy;
   }
-  
+
   /**
+   * Returns the current {@link SegmentMerger} in use by this writer.
+   * @see IndexWriterConfig#setSegmentMerger(SegmentMerger)
+   */
+  public SegmentMerger getSegmentMerger() {
+    return segmentMerger;
+  }
+
+  /**
    * Returns the configured {@link DocumentsWriterPerThreadPool} instance.
    * 
    * @see IndexWriterConfig#setIndexerThreadPool(DocumentsWriterPerThreadPool)
Index: lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/index/IndexWriter.java	(révision 1456616)
+++ lucene/core/src/java/org/apache/lucene/index/IndexWriter.java	(copie de travail)
@@ -234,6 +234,7 @@
   // merges
   private HashSet<SegmentInfoPerCommit> mergingSegments = new HashSet<SegmentInfoPerCommit>();
 
+  private SegmentMerger segmentMerger;
   private MergePolicy mergePolicy;
   private final MergeScheduler mergeScheduler;
   private LinkedList<MergePolicy.OneMerge> pendingMerges = new LinkedList<MergePolicy.OneMerge>();
@@ -623,6 +624,7 @@
     infoStream = config.getInfoStream();
     mergePolicy = config.getMergePolicy();
     mergePolicy.setIndexWriter(this);
+    segmentMerger = config.getSegmentMerger();
     mergeScheduler = config.getMergeScheduler();
     codec = config.getCodec();
 
@@ -2407,17 +2409,18 @@
       SegmentInfo info = new SegmentInfo(directory, Constants.LUCENE_MAIN_VERSION, mergedName, -1,
                                          false, codec, null, null);
 
-      SegmentMerger merger = new SegmentMerger(info, infoStream, trackingDir, config.getTermIndexInterval(),
-                                               MergeState.CheckAbort.NONE, globalFieldNumberMap, context);
-
+      final List<AtomicReader> readersToMerge = new ArrayList<AtomicReader>();
       for (IndexReader reader : readers) {    // add new indexes
-        merger.add(reader);
+        for (AtomicReaderContext ctx : reader.leaves()) {
+          readersToMerge.add(ctx.reader());
+        }
       }
 
       MergeState mergeState;
       boolean success = false;
       try {
-        mergeState = merger.merge();                // merge 'em
+        final SegmentWriteState segmentWriteState = new SegmentWriteState(infoStream, trackingDir, info, null, config.getTermIndexInterval(), null, context);
+        mergeState = segmentMerger.merge(readersToMerge, segmentWriteState, MergeState.CheckAbort.NONE, globalFieldNumberMap); // merge 'em
         success = true;
       } finally {
         if (!success) { 
@@ -3561,13 +3564,11 @@
     final MergeState.CheckAbort checkAbort = new MergeState.CheckAbort(merge, directory);
     final TrackingDirectoryWrapper dirWrapper = new TrackingDirectoryWrapper(directory);
 
-    SegmentMerger merger = new SegmentMerger(merge.info.info, infoStream, dirWrapper, config.getTermIndexInterval(), checkAbort,
-                                             globalFieldNumberMap, context);
-
     if (infoStream.isEnabled("IW")) {
       infoStream.message("IW", "merging " + segString(merge.segments));
     }
 
+    final List<AtomicReader> readersToMerge = new ArrayList<AtomicReader>();
     merge.readers = new ArrayList<SegmentReader>();
 
     // This is try/finally to make sure merger's readers are
@@ -3634,7 +3635,7 @@
         merge.readers.add(reader);
         assert delCount <= info.info.getDocCount(): "delCount=" + delCount + " info.docCount=" + info.info.getDocCount() + " rld.pendingDeleteCount=" + rld.getPendingDeleteCount() + " info.getDelCount()=" + info.getDelCount();
         if (delCount < info.info.getDocCount()) {
-          merger.add(reader);
+          readersToMerge.add(reader);
         }
         segUpto++;
       }
@@ -3645,7 +3646,8 @@
       MergeState mergeState;
       boolean success3 = false;
       try {
-        mergeState = merger.merge();
+        final SegmentWriteState segmentWriteState = new SegmentWriteState(infoStream, dirWrapper, merge.info.info, null, config.getTermIndexInterval(), null, context);
+        mergeState = segmentMerger.merge(readersToMerge, segmentWriteState, checkAbort, globalFieldNumberMap);
         success3 = true;
       } finally {
         if (!success3) {
Index: lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java	(révision 1456616)
+++ lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java	(copie de travail)
@@ -21,99 +21,69 @@
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.lucene.codecs.Codec;
+import org.apache.lucene.codecs.DocValuesConsumer;
 import org.apache.lucene.codecs.FieldInfosWriter;
 import org.apache.lucene.codecs.FieldsConsumer;
-import org.apache.lucene.codecs.DocValuesConsumer;
 import org.apache.lucene.codecs.StoredFieldsWriter;
 import org.apache.lucene.codecs.TermVectorsWriter;
 import org.apache.lucene.index.FieldInfo.DocValuesType;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IOContext;
 import org.apache.lucene.util.IOUtils;
-import org.apache.lucene.util.InfoStream;
 
 /**
- * The SegmentMerger class combines two or more Segments, represented by an IndexReader ({@link #add},
- * into a single Segment.  After adding the appropriate readers, call the merge method to combine the
- * segments.
+ * The SegmentMerger class combines two or more Segments, represented by a list
+ * of {@link AtomicReader}s.
  *
  * @see #merge
- * @see #add
+ * @lucene.experimental
  */
-final class SegmentMerger {
-  private final Directory directory;
-  private final int termIndexInterval;
+public class SegmentMerger {
 
-  private final Codec codec;
-  
-  private final IOContext context;
-  
-  private final MergeState mergeState = new MergeState();
-  private final FieldInfos.Builder fieldInfosBuilder;
+  /** Sole constructor. */
+  public SegmentMerger() {}
 
-  // note, just like in codec apis Directory 'dir' is NOT the same as segmentInfo.dir!!
-  SegmentMerger(SegmentInfo segmentInfo, InfoStream infoStream, Directory dir, int termIndexInterval,
-                MergeState.CheckAbort checkAbort, FieldInfos.FieldNumbers fieldNumbers, IOContext context) {
-    mergeState.segmentInfo = segmentInfo;
-    mergeState.infoStream = infoStream;
-    mergeState.readers = new ArrayList<AtomicReader>();
-    mergeState.checkAbort = checkAbort;
-    directory = dir;
-    this.termIndexInterval = termIndexInterval;
-    this.codec = segmentInfo.getCodec();
-    this.context = context;
-    this.fieldInfosBuilder = new FieldInfos.Builder(fieldNumbers);
-  }
-
   /**
-   * Add an IndexReader to the collection of readers that are to be merged
-   */
-  void add(IndexReader reader) {
-    for (final AtomicReaderContext ctx : reader.leaves()) {
-      final AtomicReader r = ctx.reader();
-      mergeState.readers.add(r);
-    }
-  }
-
-  void add(SegmentReader reader) {
-    mergeState.readers.add(reader);
-  }
-
-  /**
-   * Merges the readers specified by the {@link #add} method into the directory passed to the constructor
+   * Merges <code>readers</code> into the directory wrapped by <code>segmentWriteState</code>.
    * @return The number of documents that were merged
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error
    */
-  MergeState merge() throws IOException {
+  public MergeState merge(List<AtomicReader> readers, SegmentWriteState segmentWriteState, MergeState.CheckAbort checkAbort, FieldInfos.FieldNumbers writerFieldNumbers) throws IOException {
     // NOTE: it's important to add calls to
     // checkAbort.work(...) if you make any changes to this
     // method that will spend alot of time.  The frequency
     // of this check impacts how long
     // IndexWriter.close(false) takes to actually stop the
     // threads.
-    
-    mergeState.segmentInfo.setDocCount(setDocMaps());
-    mergeFieldInfos();
-    setMatchingSegmentReaders();
+
+    // note, just like in codec apis Directory 'directory' in
+    // SegmentWriteState is NOT the same as segmentInfo.dir!!
+
+    segmentWriteState.fieldInfos = mergeFieldInfos(readers, writerFieldNumbers);
+
+    MergeState mergeState = new MergeState();
+    mergeState.segmentInfo = segmentWriteState.segmentInfo;
+    mergeState.fieldInfos = segmentWriteState.fieldInfos;
+    mergeState.infoStream = segmentWriteState.infoStream;
+    mergeState.readers = readers;
+    mergeState.checkAbort = checkAbort;
+
+    segmentWriteState.segmentInfo.setDocCount(setDocMaps(mergeState));
+    setMatchingSegmentReaders(mergeState);
     long t0 = 0;
     if (mergeState.infoStream.isEnabled("SM")) {
       t0 = System.nanoTime();
     }
-    int numMerged = mergeFields();
+    int numMerged = mergeFields(segmentWriteState, mergeState);
     if (mergeState.infoStream.isEnabled("SM")) {
       long t1 = System.nanoTime();
       mergeState.infoStream.message("SM", ((t1-t0)/1000000) + " msec to merge stored fields [" + numMerged + " docs]");
     }
     assert numMerged == mergeState.segmentInfo.getDocCount();
 
-    final SegmentWriteState segmentWriteState = new SegmentWriteState(mergeState.infoStream, directory, mergeState.segmentInfo,
-                                                                      mergeState.fieldInfos, termIndexInterval, null, context);
     if (mergeState.infoStream.isEnabled("SM")) {
       t0 = System.nanoTime();
     }
-    mergeTerms(segmentWriteState);
+    mergeTerms(segmentWriteState, mergeState);
     if (mergeState.infoStream.isEnabled("SM")) {
       long t1 = System.nanoTime();
       mergeState.infoStream.message("SM", ((t1-t0)/1000000) + " msec to merge postings [" + numMerged + " docs]");
@@ -123,7 +93,7 @@
       t0 = System.nanoTime();
     }
     if (mergeState.fieldInfos.hasDocValues()) {
-      mergeDocValues(segmentWriteState);
+      mergeDocValues(segmentWriteState, mergeState);
     }
     if (mergeState.infoStream.isEnabled("SM")) {
       long t1 = System.nanoTime();
@@ -134,7 +104,7 @@
       if (mergeState.infoStream.isEnabled("SM")) {
         t0 = System.nanoTime();
       }
-      mergeNorms(segmentWriteState);
+      mergeNorms(segmentWriteState, mergeState);
       if (mergeState.infoStream.isEnabled("SM")) {
         long t1 = System.nanoTime();
         mergeState.infoStream.message("SM", ((t1-t0)/1000000) + " msec to merge norms [" + numMerged + " docs]");
@@ -145,7 +115,7 @@
       if (mergeState.infoStream.isEnabled("SM")) {
         t0 = System.nanoTime();
       }
-      numMerged = mergeVectors();
+      numMerged = mergeVectors(segmentWriteState, mergeState);
       if (mergeState.infoStream.isEnabled("SM")) {
         long t1 = System.nanoTime();
         mergeState.infoStream.message("SM", ((t1-t0)/1000000) + " msec to merge vectors [" + numMerged + " docs]");
@@ -154,14 +124,25 @@
     }
     
     // write the merged infos
-    FieldInfosWriter fieldInfosWriter = codec.fieldInfosFormat().getFieldInfosWriter();
-    fieldInfosWriter.write(directory, mergeState.segmentInfo.name, mergeState.fieldInfos, context);
+    FieldInfosWriter fieldInfosWriter = segmentWriteState.segmentInfo.getCodec().fieldInfosFormat().getFieldInfosWriter();
+    fieldInfosWriter.write(segmentWriteState.directory, mergeState.segmentInfo.name, mergeState.fieldInfos, segmentWriteState.context);
 
     return mergeState;
   }
 
-  private void mergeDocValues(SegmentWriteState segmentWriteState) throws IOException {
-    DocValuesConsumer consumer = codec.docValuesFormat().fieldsConsumer(segmentWriteState);
+  private FieldInfos mergeFieldInfos(List<AtomicReader> readers, FieldInfos.FieldNumbers fieldNumbers) {
+    final FieldInfos.Builder fieldInfosBuilder = new FieldInfos.Builder(fieldNumbers);
+    for (AtomicReader reader : readers) {
+      FieldInfos readerFieldInfos = reader.getFieldInfos();
+      for (FieldInfo fi : readerFieldInfos) {
+        fieldInfosBuilder.add(fi);
+      }
+    }
+    return fieldInfosBuilder.finish();
+  }
+
+  private void mergeDocValues(SegmentWriteState segmentWriteState, MergeState mergeState) throws IOException {
+    DocValuesConsumer consumer = segmentWriteState.segmentInfo.getCodec().docValuesFormat().fieldsConsumer(segmentWriteState);
     boolean success = false;
     try {
       for (FieldInfo field : mergeState.fieldInfos) {
@@ -222,8 +203,8 @@
     }
   }
 
-  private void mergeNorms(SegmentWriteState segmentWriteState) throws IOException {
-    DocValuesConsumer consumer = codec.normsFormat().normsConsumer(segmentWriteState);
+  private void mergeNorms(SegmentWriteState segmentWriteState, MergeState mergeState) throws IOException {
+    DocValuesConsumer consumer = segmentWriteState.segmentInfo.getCodec().normsFormat().normsConsumer(segmentWriteState);
     boolean success = false;
     try {
       for (FieldInfo field : mergeState.fieldInfos) {
@@ -249,7 +230,7 @@
     }
   }
 
-  private void setMatchingSegmentReaders() {
+  private void setMatchingSegmentReaders(MergeState mergeState) {
     // If the i'th reader is a SegmentReader and has
     // identical fieldName -> number mapping, then this
     // array will be non-null at position i:
@@ -292,16 +273,6 @@
       }
     }
   }
-  
-  public void mergeFieldInfos() throws IOException {
-    for (AtomicReader reader : mergeState.readers) {
-      FieldInfos readerFieldInfos = reader.getFieldInfos();
-      for (FieldInfo fi : readerFieldInfos) {
-        fieldInfosBuilder.add(fi);
-      }
-    }
-    mergeState.fieldInfos = fieldInfosBuilder.finish();
-  }
 
   /**
    *
@@ -309,8 +280,8 @@
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error
    */
-  private int mergeFields() throws IOException {
-    final StoredFieldsWriter fieldsWriter = codec.storedFieldsFormat().fieldsWriter(directory, mergeState.segmentInfo, context);
+  private int mergeFields(SegmentWriteState segmentWriteState, MergeState mergeState) throws IOException {
+    final StoredFieldsWriter fieldsWriter = segmentWriteState.segmentInfo.getCodec().storedFieldsFormat().fieldsWriter(segmentWriteState.directory, mergeState.segmentInfo, segmentWriteState.context);
     
     try {
       return fieldsWriter.merge(mergeState);
@@ -323,8 +294,8 @@
    * Merge the TermVectors from each of the segments into the new one.
    * @throws IOException if there is a low-level IO error
    */
-  private int mergeVectors() throws IOException {
-    final TermVectorsWriter termVectorsWriter = codec.termVectorsFormat().vectorsWriter(directory, mergeState.segmentInfo, context);
+  private int mergeVectors(SegmentWriteState segmentWriteState, MergeState mergeState) throws IOException {
+    final TermVectorsWriter termVectorsWriter = segmentWriteState.segmentInfo.getCodec().termVectorsFormat().vectorsWriter(segmentWriteState.directory, mergeState.segmentInfo, segmentWriteState.context);
     
     try {
       return termVectorsWriter.merge(mergeState);
@@ -334,7 +305,7 @@
   }
 
   // NOTE: removes any "all deleted" readers from mergeState.readers
-  private int setDocMaps() throws IOException {
+  private int setDocMaps(MergeState mergeState) throws IOException {
     final int numReaders = mergeState.readers.size();
 
     // Remap docIDs
@@ -359,7 +330,7 @@
     return docBase;
   }
 
-  private void mergeTerms(SegmentWriteState segmentWriteState) throws IOException {
+  private void mergeTerms(SegmentWriteState segmentWriteState, MergeState mergeState) throws IOException {
     
     final List<Fields> fields = new ArrayList<Fields>();
     final List<ReaderSlice> slices = new ArrayList<ReaderSlice>();
@@ -377,7 +348,7 @@
       docBase += maxDoc;
     }
 
-    final FieldsConsumer consumer = codec.postingsFormat().fieldsConsumer(segmentWriteState);
+    final FieldsConsumer consumer = segmentWriteState.segmentInfo.getCodec().postingsFormat().fieldsConsumer(segmentWriteState);
     boolean success = false;
     try {
       consumer.merge(mergeState,
Index: lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java
===================================================================
--- lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java	(révision 1456616)
+++ lucene/test-framework/src/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java	(copie de travail)
@@ -435,7 +435,8 @@
     dir = newMockFSDirectory(tempDir); // some subclasses rely on this being MDW
     dir.setCheckIndexOnClose(false); // don't double-checkIndex, we do it ourselves.
     final IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, 
-        new MockAnalyzer(random())).setInfoStream(new FailOnNonBulkMergesInfoStream());
+        new MockAnalyzer(random())).setInfoStream(new FailOnNonBulkMergesInfoStream())
+        .setSegmentMerger(new SegmentMerger());
 
     if (LuceneTestCase.TEST_NIGHTLY) {
       // newIWConfig makes smallish max seg size, which
Index: lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
===================================================================
--- lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java	(révision 1456616)
+++ lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java	(copie de travail)
@@ -34,6 +34,7 @@
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.*;
 import org.apache.lucene.index.IndexReader.ReaderClosedListener;
+import org.apache.lucene.index.MergeState.CheckAbort;
 import org.apache.lucene.search.*;
 import org.apache.lucene.search.FieldCache.CacheEntry;
 import org.apache.lucene.search.QueryUtils.FCInvisibleMultiReader;
@@ -769,6 +770,20 @@
     } else {
       c.setMergePolicy(newLogMergePolicy());
     }
+
+    if (rarely(r)) {
+      c.setSegmentMerger(new SegmentMerger() {
+        @Override
+        public MergeState merge(List<AtomicReader> readers,
+            SegmentWriteState segmentWriteState, CheckAbort checkAbort, FieldInfos.FieldNumbers writerFieldNumbers)
+            throws IOException {
+          final IndexReader reader = new MultiReader(readers.toArray(new IndexReader[readers.size()]));
+          final AtomicReader atomicReader = SlowCompositeReaderWrapper.wrap(reader);
+          return super.merge(Collections.singletonList(atomicReader), segmentWriteState, checkAbort, writerFieldNumbers);
+        }
+      });
+    }
+
     c.setReaderPooling(r.nextBoolean());
     c.setReaderTermsIndexDivisor(_TestUtil.nextInt(r, 1, 4));
     return c;
