Index: lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java
===================================================================
--- lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java	(revision 1457512)
+++ lucene/test-framework/src/java/org/apache/lucene/util/LuceneTestCase.java	(working copy)
@@ -34,6 +34,7 @@
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.*;
 import org.apache.lucene.index.IndexReader.ReaderClosedListener;
+import org.apache.lucene.index.TermsEnum.SeekStatus;
 import org.apache.lucene.search.*;
 import org.apache.lucene.search.FieldCache.CacheEntry;
 import org.apache.lucene.search.QueryUtils.FCInvisibleMultiReader;
@@ -41,6 +42,9 @@
 import org.apache.lucene.store.IOContext.Context;
 import org.apache.lucene.store.MockDirectoryWrapper.Throttling;
 import org.apache.lucene.util.FieldCacheSanityChecker.Insanity;
+import org.apache.lucene.util.automaton.AutomatonTestUtil;
+import org.apache.lucene.util.automaton.CompiledAutomaton;
+import org.apache.lucene.util.automaton.RegExp;
 import org.junit.*;
 import org.junit.rules.RuleChain;
 import org.junit.rules.TestRule;
@@ -1308,4 +1312,648 @@
     }
     return true;
   }
+
+  public void assertReaderEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException {
+    assertReaderStatisticsEquals(info, leftReader, rightReader);
+    assertFieldsEquals(info, leftReader, MultiFields.getFields(leftReader), MultiFields.getFields(rightReader), true);
+    assertNormsEquals(info, leftReader, rightReader);
+    assertStoredFieldsEquals(info, leftReader, rightReader);
+    assertTermVectorsEquals(info, leftReader, rightReader);
+    assertDocValuesEquals(info, leftReader, rightReader);
+    assertDeletedDocsEquals(info, leftReader, rightReader);
+    assertFieldInfosEquals(info, leftReader, rightReader);
+  }
+
+  /** 
+   * checks that reader-level statistics are the same 
+   */
+  public void assertReaderStatisticsEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException {
+    // Somewhat redundant: we never delete docs
+    assertEquals(info, leftReader.maxDoc(), rightReader.maxDoc());
+    assertEquals(info, leftReader.numDocs(), rightReader.numDocs());
+    assertEquals(info, leftReader.numDeletedDocs(), rightReader.numDeletedDocs());
+    assertEquals(info, leftReader.hasDeletions(), rightReader.hasDeletions());
+  }
+
+  /** 
+   * Fields api equivalency 
+   */
+  public void assertFieldsEquals(String info, IndexReader leftReader, Fields leftFields, Fields rightFields, boolean deep) throws IOException {
+    // Fields could be null if there are no postings,
+    // but then it must be null for both
+    if (leftFields == null || rightFields == null) {
+      assertNull(info, leftFields);
+      assertNull(info, rightFields);
+      return;
+    }
+    assertFieldStatisticsEquals(info, leftFields, rightFields);
+    
+    Iterator<String> leftEnum = leftFields.iterator();
+    Iterator<String> rightEnum = rightFields.iterator();
+    
+    while (leftEnum.hasNext()) {
+      String field = leftEnum.next();
+      assertEquals(info, field, rightEnum.next());
+      assertTermsEquals(info, leftReader, leftFields.terms(field), rightFields.terms(field), deep);
+    }
+    assertFalse(rightEnum.hasNext());
+  }
+
+  /** 
+   * checks that top-level statistics on Fields are the same 
+   */
+  public void assertFieldStatisticsEquals(String info, Fields leftFields, Fields rightFields) throws IOException {
+    if (leftFields.size() != -1 && rightFields.size() != -1) {
+      assertEquals(info, leftFields.size(), rightFields.size());
+    }
+  }
+
+  /** 
+   * Terms api equivalency 
+   */
+  public void assertTermsEquals(String info, IndexReader leftReader, Terms leftTerms, Terms rightTerms, boolean deep) throws IOException {
+    if (leftTerms == null || rightTerms == null) {
+      assertNull(info, leftTerms);
+      assertNull(info, rightTerms);
+      return;
+    }
+    assertTermsStatisticsEquals(info, leftTerms, rightTerms);
+    assertEquals(info, leftTerms.hasOffsets(), rightTerms.hasOffsets());
+    assertEquals(info, leftTerms.hasPositions(), rightTerms.hasPositions());
+    assertEquals(info, leftTerms.hasPayloads(), rightTerms.hasPayloads());
+
+    TermsEnum leftTermsEnum = leftTerms.iterator(null);
+    TermsEnum rightTermsEnum = rightTerms.iterator(null);
+    assertTermsEnumEquals(info, leftReader, leftTermsEnum, rightTermsEnum, true);
+    
+    assertTermsSeekingEquals(info, leftTerms, rightTerms);
+    
+    if (deep) {
+      int numIntersections = atLeast(3);
+      for (int i = 0; i < numIntersections; i++) {
+        String re = AutomatonTestUtil.randomRegexp(random());
+        CompiledAutomaton automaton = new CompiledAutomaton(new RegExp(re, RegExp.NONE).toAutomaton());
+        if (automaton.type == CompiledAutomaton.AUTOMATON_TYPE.NORMAL) {
+          // TODO: test start term too
+          TermsEnum leftIntersection = leftTerms.intersect(automaton, null);
+          TermsEnum rightIntersection = rightTerms.intersect(automaton, null);
+          assertTermsEnumEquals(info, leftReader, leftIntersection, rightIntersection, rarely());
+        }
+      }
+    }
+  }
+
+  /** 
+   * checks collection-level statistics on Terms 
+   */
+  public void assertTermsStatisticsEquals(String info, Terms leftTerms, Terms rightTerms) throws IOException {
+    assert leftTerms.getComparator() == rightTerms.getComparator();
+    if (leftTerms.getDocCount() != -1 && rightTerms.getDocCount() != -1) {
+      assertEquals(info, leftTerms.getDocCount(), rightTerms.getDocCount());
+    }
+    if (leftTerms.getSumDocFreq() != -1 && rightTerms.getSumDocFreq() != -1) {
+      assertEquals(info, leftTerms.getSumDocFreq(), rightTerms.getSumDocFreq());
+    }
+    if (leftTerms.getSumTotalTermFreq() != -1 && rightTerms.getSumTotalTermFreq() != -1) {
+      assertEquals(info, leftTerms.getSumTotalTermFreq(), rightTerms.getSumTotalTermFreq());
+    }
+    if (leftTerms.size() != -1 && rightTerms.size() != -1) {
+      assertEquals(info, leftTerms.size(), rightTerms.size());
+    }
+  }
+
+  private static class RandomBits implements Bits {
+    FixedBitSet bits;
+    
+    RandomBits(int maxDoc, double pctLive, Random random) {
+      bits = new FixedBitSet(maxDoc);
+      for (int i = 0; i < maxDoc; i++) {
+        if (random.nextDouble() <= pctLive) {        
+          bits.set(i);
+        }
+      }
+    }
+    
+    @Override
+    public boolean get(int index) {
+      return bits.get(index);
+    }
+
+    @Override
+    public int length() {
+      return bits.length();
+    }
+  }
+
+  /** 
+   * checks the terms enum sequentially.
+   * if deep is false, it does a 'shallow' test that doesn't go down to the DocsEnums
+   */
+  public void assertTermsEnumEquals(String info, IndexReader leftReader, TermsEnum leftTermsEnum, TermsEnum rightTermsEnum, boolean deep) throws IOException {
+    BytesRef term;
+    Bits randomBits = new RandomBits(leftReader.maxDoc(), random().nextDouble(), random());
+    DocsAndPositionsEnum leftPositions = null;
+    DocsAndPositionsEnum rightPositions = null;
+    DocsEnum leftDocs = null;
+    DocsEnum rightDocs = null;
+    
+    while ((term = leftTermsEnum.next()) != null) {
+      assertEquals(info, term, rightTermsEnum.next());
+      assertTermStatsEquals(info, leftTermsEnum, rightTermsEnum);
+      if (deep) {
+        assertDocsAndPositionsEnumEquals(info, leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions),
+                                   rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions));
+        assertDocsAndPositionsEnumEquals(info, leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions),
+                                   rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions));
+
+        assertPositionsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(), 
+                                leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions),
+                                rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions));
+        assertPositionsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(), 
+                                leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions),
+                                rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions));
+
+        // with freqs:
+        assertDocsEnumEquals(info, leftDocs = leftTermsEnum.docs(null, leftDocs),
+            rightDocs = rightTermsEnum.docs(null, rightDocs),
+            true);
+        assertDocsEnumEquals(info, leftDocs = leftTermsEnum.docs(randomBits, leftDocs),
+            rightDocs = rightTermsEnum.docs(randomBits, rightDocs),
+            true);
+
+        // w/o freqs:
+        assertDocsEnumEquals(info, leftDocs = leftTermsEnum.docs(null, leftDocs, DocsEnum.FLAG_NONE),
+            rightDocs = rightTermsEnum.docs(null, rightDocs, DocsEnum.FLAG_NONE),
+            false);
+        assertDocsEnumEquals(info, leftDocs = leftTermsEnum.docs(randomBits, leftDocs, DocsEnum.FLAG_NONE),
+            rightDocs = rightTermsEnum.docs(randomBits, rightDocs, DocsEnum.FLAG_NONE),
+            false);
+        
+        // with freqs:
+        assertDocsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(), 
+            leftDocs = leftTermsEnum.docs(null, leftDocs),
+            rightDocs = rightTermsEnum.docs(null, rightDocs),
+            true);
+        assertDocsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(), 
+            leftDocs = leftTermsEnum.docs(randomBits, leftDocs),
+            rightDocs = rightTermsEnum.docs(randomBits, rightDocs),
+            true);
+
+        // w/o freqs:
+        assertDocsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(), 
+            leftDocs = leftTermsEnum.docs(null, leftDocs, DocsEnum.FLAG_NONE),
+            rightDocs = rightTermsEnum.docs(null, rightDocs, DocsEnum.FLAG_NONE),
+            false);
+        assertDocsSkippingEquals(info, leftReader, leftTermsEnum.docFreq(), 
+            leftDocs = leftTermsEnum.docs(randomBits, leftDocs, DocsEnum.FLAG_NONE),
+            rightDocs = rightTermsEnum.docs(randomBits, rightDocs, DocsEnum.FLAG_NONE),
+            false);
+      }
+    }
+    assertNull(info, rightTermsEnum.next());
+  }
+
+
+  /**
+   * checks docs + freqs + positions + payloads, sequentially
+   */
+  public void assertDocsAndPositionsEnumEquals(String info, DocsAndPositionsEnum leftDocs, DocsAndPositionsEnum rightDocs) throws IOException {
+    if (leftDocs == null || rightDocs == null) {
+      assertNull(leftDocs);
+      assertNull(rightDocs);
+      return;
+    }
+    assertTrue(info, leftDocs.docID() == -1 || leftDocs.docID() == DocIdSetIterator.NO_MORE_DOCS);
+    assertTrue(info, rightDocs.docID() == -1 || rightDocs.docID() == DocIdSetIterator.NO_MORE_DOCS);
+    int docid;
+    while ((docid = leftDocs.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
+      assertEquals(info, docid, rightDocs.nextDoc());
+      int freq = leftDocs.freq();
+      assertEquals(info, freq, rightDocs.freq());
+      for (int i = 0; i < freq; i++) {
+        assertEquals(info, leftDocs.nextPosition(), rightDocs.nextPosition());
+        assertEquals(info, leftDocs.getPayload(), rightDocs.getPayload());
+        assertEquals(info, leftDocs.startOffset(), rightDocs.startOffset());
+        assertEquals(info, leftDocs.endOffset(), rightDocs.endOffset());
+      }
+    }
+    assertEquals(info, DocIdSetIterator.NO_MORE_DOCS, rightDocs.nextDoc());
+  }
+  
+  /**
+   * checks docs + freqs, sequentially
+   */
+  public void assertDocsEnumEquals(String info, DocsEnum leftDocs, DocsEnum rightDocs, boolean hasFreqs) throws IOException {
+    if (leftDocs == null) {
+      assertNull(rightDocs);
+      return;
+    }
+    assertTrue(info, leftDocs.docID() == -1 || leftDocs.docID() == DocIdSetIterator.NO_MORE_DOCS);
+    assertTrue(info, rightDocs.docID() == -1 || rightDocs.docID() == DocIdSetIterator.NO_MORE_DOCS);
+    int docid;
+    while ((docid = leftDocs.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
+      assertEquals(info, docid, rightDocs.nextDoc());
+      if (hasFreqs) {
+        assertEquals(info, leftDocs.freq(), rightDocs.freq());
+      }
+    }
+    assertEquals(info, DocIdSetIterator.NO_MORE_DOCS, rightDocs.nextDoc());
+  }
+  
+  /**
+   * checks advancing docs
+   */
+  public void assertDocsSkippingEquals(String info, IndexReader leftReader, int docFreq, DocsEnum leftDocs, DocsEnum rightDocs, boolean hasFreqs) throws IOException {
+    if (leftDocs == null) {
+      assertNull(rightDocs);
+      return;
+    }
+    int docid = -1;
+    int averageGap = leftReader.maxDoc() / (1+docFreq);
+    int skipInterval = 16;
+
+    while (true) {
+      if (random().nextBoolean()) {
+        // nextDoc()
+        docid = leftDocs.nextDoc();
+        assertEquals(info, docid, rightDocs.nextDoc());
+      } else {
+        // advance()
+        int skip = docid + (int) Math.ceil(Math.abs(skipInterval + random().nextGaussian() * averageGap));
+        docid = leftDocs.advance(skip);
+        assertEquals(info, docid, rightDocs.advance(skip));
+      }
+      
+      if (docid == DocIdSetIterator.NO_MORE_DOCS) {
+        return;
+      }
+      if (hasFreqs) {
+        assertEquals(info, leftDocs.freq(), rightDocs.freq());
+      }
+    }
+  }
+  
+  /**
+   * checks advancing docs + positions
+   */
+  public void assertPositionsSkippingEquals(String info, IndexReader leftReader, int docFreq, DocsAndPositionsEnum leftDocs, DocsAndPositionsEnum rightDocs) throws IOException {
+    if (leftDocs == null || rightDocs == null) {
+      assertNull(leftDocs);
+      assertNull(rightDocs);
+      return;
+    }
+    
+    int docid = -1;
+    int averageGap = leftReader.maxDoc() / (1+docFreq);
+    int skipInterval = 16;
+
+    while (true) {
+      if (random().nextBoolean()) {
+        // nextDoc()
+        docid = leftDocs.nextDoc();
+        assertEquals(info, docid, rightDocs.nextDoc());
+      } else {
+        // advance()
+        int skip = docid + (int) Math.ceil(Math.abs(skipInterval + random().nextGaussian() * averageGap));
+        docid = leftDocs.advance(skip);
+        assertEquals(info, docid, rightDocs.advance(skip));
+      }
+      
+      if (docid == DocIdSetIterator.NO_MORE_DOCS) {
+        return;
+      }
+      int freq = leftDocs.freq();
+      assertEquals(info, freq, rightDocs.freq());
+      for (int i = 0; i < freq; i++) {
+        assertEquals(info, leftDocs.nextPosition(), rightDocs.nextPosition());
+        assertEquals(info, leftDocs.getPayload(), rightDocs.getPayload());
+      }
+    }
+  }
+
+  
+  private void assertTermsSeekingEquals(String info, Terms leftTerms, Terms rightTerms) throws IOException {
+    TermsEnum leftEnum = null;
+    TermsEnum rightEnum = null;
+
+    // just an upper bound
+    int numTests = atLeast(20);
+    Random random = random();
+
+    // collect this number of terms from the left side
+    HashSet<BytesRef> tests = new HashSet<BytesRef>();
+    int numPasses = 0;
+    while (numPasses < 10 && tests.size() < numTests) {
+      leftEnum = leftTerms.iterator(leftEnum);
+      BytesRef term = null;
+      while ((term = leftEnum.next()) != null) {
+        int code = random.nextInt(10);
+        if (code == 0) {
+          // the term
+          tests.add(BytesRef.deepCopyOf(term));
+        } else if (code == 1) {
+          // truncated subsequence of term
+          term = BytesRef.deepCopyOf(term);
+          if (term.length > 0) {
+            // truncate it
+            term.length = random.nextInt(term.length);
+          }
+          tests.add(term);
+        } else if (code == 2) {
+          // term, but ensure a non-zero offset
+          byte newbytes[] = new byte[term.length+5];
+          System.arraycopy(term.bytes, term.offset, newbytes, 5, term.length);
+          tests.add(new BytesRef(newbytes, 5, term.length));
+        } else if (code == 3) {
+          switch (random().nextInt(3)) {
+            case 0:
+              tests.add(new BytesRef()); // before the first term
+              break;
+            case 1:
+              tests.add(new BytesRef(new byte[] {(byte) 0xFF, (byte) 0xFF})); // past the last term
+              break;
+            case 2:
+              tests.add(new BytesRef(_TestUtil.randomSimpleString(random()))); // random term
+              break;
+            default:
+              throw new AssertionError();
+          }
+        }
+      }
+      numPasses++;
+    }
+
+    rightEnum = rightTerms.iterator(rightEnum);
+
+    ArrayList<BytesRef> shuffledTests = new ArrayList<BytesRef>(tests);
+    Collections.shuffle(shuffledTests, random);
+
+    for (BytesRef b : shuffledTests) {
+      if (rarely()) {
+        // reuse the enums
+        leftEnum = leftTerms.iterator(leftEnum);
+        rightEnum = rightTerms.iterator(rightEnum);
+      }
+
+      final boolean useCache = random().nextBoolean();
+      final boolean seekExact = random().nextBoolean();
+
+      if (seekExact) {
+        assertEquals(info, leftEnum.seekExact(b, useCache), rightEnum.seekExact(b, useCache));
+      } else {
+        SeekStatus leftStatus = leftEnum.seekCeil(b, useCache);
+        SeekStatus rightStatus = rightEnum.seekCeil(b, useCache);
+        assertEquals(info, leftStatus, rightStatus);
+        if (leftStatus != SeekStatus.END) {
+          assertEquals(info, leftEnum.term(), rightEnum.term());
+          assertTermStatsEquals(info, leftEnum, rightEnum);
+        }
+      }
+    }
+  }
+  
+  /**
+   * checks term-level statistics
+   */
+  public void assertTermStatsEquals(String info, TermsEnum leftTermsEnum, TermsEnum rightTermsEnum) throws IOException {
+    assertEquals(info, leftTermsEnum.docFreq(), rightTermsEnum.docFreq());
+    if (leftTermsEnum.totalTermFreq() != -1 && rightTermsEnum.totalTermFreq() != -1) {
+      assertEquals(info, leftTermsEnum.totalTermFreq(), rightTermsEnum.totalTermFreq());
+    }
+  }
+  
+  /** 
+   * checks that norms are the same across all fields 
+   */
+  public void assertNormsEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException {
+    Fields leftFields = MultiFields.getFields(leftReader);
+    Fields rightFields = MultiFields.getFields(rightReader);
+    // Fields could be null if there are no postings,
+    // but then it must be null for both
+    if (leftFields == null || rightFields == null) {
+      assertNull(info, leftFields);
+      assertNull(info, rightFields);
+      return;
+    }
+    
+    for (String field : leftFields) {
+      NumericDocValues leftNorms = MultiDocValues.getNormValues(leftReader, field);
+      NumericDocValues rightNorms = MultiDocValues.getNormValues(rightReader, field);
+      if (leftNorms != null && rightNorms != null) {
+        assertDocValuesEquals(info, leftReader.maxDoc(), leftNorms, rightNorms);
+      } else {
+        assertNull(info, leftNorms);
+        assertNull(info, rightNorms);
+      }
+    }
+  }
+  
+  /** 
+   * checks that stored fields of all documents are the same 
+   */
+  public void assertStoredFieldsEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException {
+    assert leftReader.maxDoc() == rightReader.maxDoc();
+    for (int i = 0; i < leftReader.maxDoc(); i++) {
+      StoredDocument leftDoc = leftReader.document(i);
+      StoredDocument rightDoc = rightReader.document(i);
+      
+      // TODO: I think this is bogus because we don't document what the order should be
+      // from these iterators, etc. I think the codec/IndexReader should be free to order this stuff
+      // in whatever way it wants (e.g. maybe it packs related fields together or something)
+      // To fix this, we sort the fields in both documents by name, but
+      // we still assume that all instances with same name are in order:
+      Comparator<StorableField> comp = new Comparator<StorableField>() {
+        @Override
+        public int compare(StorableField arg0, StorableField arg1) {
+          return arg0.name().compareTo(arg1.name());
+        }        
+      };
+      Collections.sort(leftDoc.getFields(), comp);
+      Collections.sort(rightDoc.getFields(), comp);
+
+      Iterator<StorableField> leftIterator = leftDoc.iterator();
+      Iterator<StorableField> rightIterator = rightDoc.iterator();
+      while (leftIterator.hasNext()) {
+        assertTrue(info, rightIterator.hasNext());
+        assertStoredFieldEquals(info, leftIterator.next(), rightIterator.next());
+      }
+      assertFalse(info, rightIterator.hasNext());
+    }
+  }
+  
+  /** 
+   * checks that two stored fields are equivalent 
+   */
+  public void assertStoredFieldEquals(String info, StorableField leftField, StorableField rightField) {
+    assertEquals(info, leftField.name(), rightField.name());
+    assertEquals(info, leftField.binaryValue(), rightField.binaryValue());
+    assertEquals(info, leftField.stringValue(), rightField.stringValue());
+    assertEquals(info, leftField.numericValue(), rightField.numericValue());
+    // TODO: should we check the FT at all?
+  }
+  
+  /** 
+   * checks that term vectors across all fields are equivalent 
+   */
+  public void assertTermVectorsEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException {
+    assert leftReader.maxDoc() == rightReader.maxDoc();
+    for (int i = 0; i < leftReader.maxDoc(); i++) {
+      Fields leftFields = leftReader.getTermVectors(i);
+      Fields rightFields = rightReader.getTermVectors(i);
+      assertFieldsEquals(info, leftReader, leftFields, rightFields, rarely());
+    }
+  }
+
+  private static Set<String> getDVFields(IndexReader reader) {
+    Set<String> fields = new HashSet<String>();
+    for(FieldInfo fi : MultiFields.getMergedFieldInfos(reader)) {
+      if (fi.hasDocValues()) {
+        fields.add(fi.name);
+      }
+    }
+
+    return fields;
+  }
+  
+  /**
+   * checks that docvalues across all fields are equivalent
+   */
+  public void assertDocValuesEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException {
+    Set<String> leftFields = getDVFields(leftReader);
+    Set<String> rightFields = getDVFields(rightReader);
+    assertEquals(info, leftFields, rightFields);
+
+    for (String field : leftFields) {
+      // TODO: clean this up... very messy
+      {
+        NumericDocValues leftValues = MultiDocValues.getNumericValues(leftReader, field);
+        NumericDocValues rightValues = MultiDocValues.getNumericValues(rightReader, field);
+        if (leftValues != null && rightValues != null) {
+          assertDocValuesEquals(info, leftReader.maxDoc(), leftValues, rightValues);
+        } else {
+          assertNull(info, leftValues);
+          assertNull(info, rightValues);
+        }
+      }
+
+      {
+        BinaryDocValues leftValues = MultiDocValues.getBinaryValues(leftReader, field);
+        BinaryDocValues rightValues = MultiDocValues.getBinaryValues(rightReader, field);
+        if (leftValues != null && rightValues != null) {
+          BytesRef scratchLeft = new BytesRef();
+          BytesRef scratchRight = new BytesRef();
+          for(int docID=0;docID<leftReader.maxDoc();docID++) {
+            leftValues.get(docID, scratchLeft);
+            rightValues.get(docID, scratchRight);
+            assertEquals(info, scratchLeft, scratchRight);
+          }
+        } else {
+          assertNull(info, leftValues);
+          assertNull(info, rightValues);
+        }
+      }
+      
+      {
+        SortedDocValues leftValues = MultiDocValues.getSortedValues(leftReader, field);
+        SortedDocValues rightValues = MultiDocValues.getSortedValues(rightReader, field);
+        if (leftValues != null && rightValues != null) {
+          // numOrds
+          assertEquals(info, leftValues.getValueCount(), rightValues.getValueCount());
+          // ords
+          BytesRef scratchLeft = new BytesRef();
+          BytesRef scratchRight = new BytesRef();
+          for (int i = 0; i < leftValues.getValueCount(); i++) {
+            leftValues.lookupOrd(i, scratchLeft);
+            rightValues.lookupOrd(i, scratchRight);
+            assertEquals(info, scratchLeft, scratchRight);
+          }
+          // bytes
+          for(int docID=0;docID<leftReader.maxDoc();docID++) {
+            leftValues.get(docID, scratchLeft);
+            rightValues.get(docID, scratchRight);
+            assertEquals(info, scratchLeft, scratchRight);
+          }
+        } else {
+          assertNull(info, leftValues);
+          assertNull(info, rightValues);
+        }
+      }
+      
+      {
+        SortedSetDocValues leftValues = MultiDocValues.getSortedSetValues(leftReader, field);
+        SortedSetDocValues rightValues = MultiDocValues.getSortedSetValues(rightReader, field);
+        if (leftValues != null && rightValues != null) {
+          // numOrds
+          assertEquals(info, leftValues.getValueCount(), rightValues.getValueCount());
+          // ords
+          BytesRef scratchLeft = new BytesRef();
+          BytesRef scratchRight = new BytesRef();
+          for (int i = 0; i < leftValues.getValueCount(); i++) {
+            leftValues.lookupOrd(i, scratchLeft);
+            rightValues.lookupOrd(i, scratchRight);
+            assertEquals(info, scratchLeft, scratchRight);
+          }
+          // ord lists
+          for(int docID=0;docID<leftReader.maxDoc();docID++) {
+            leftValues.setDocument(docID);
+            rightValues.setDocument(docID);
+            long ord;
+            while ((ord = leftValues.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
+              assertEquals(info, ord, rightValues.nextOrd());
+            }
+            assertEquals(info, SortedSetDocValues.NO_MORE_ORDS, rightValues.nextOrd());
+          }
+        } else {
+          assertNull(info, leftValues);
+          assertNull(info, rightValues);
+        }
+      }
+    }
+  }
+  
+  public void assertDocValuesEquals(String info, int num, NumericDocValues leftDocValues, NumericDocValues rightDocValues) throws IOException {
+    assertNotNull(info, leftDocValues);
+    assertNotNull(info, rightDocValues);
+    for(int docID=0;docID<num;docID++) {
+      assertEquals(leftDocValues.get(docID),
+                   rightDocValues.get(docID));
+    }
+  }
+  
+  // TODO: this is kinda stupid, we don't delete documents in the test.
+  public void assertDeletedDocsEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException {
+    assert leftReader.numDeletedDocs() == rightReader.numDeletedDocs();
+    Bits leftBits = MultiFields.getLiveDocs(leftReader);
+    Bits rightBits = MultiFields.getLiveDocs(rightReader);
+    
+    if (leftBits == null || rightBits == null) {
+      assertNull(info, leftBits);
+      assertNull(info, rightBits);
+      return;
+    }
+    
+    assert leftReader.maxDoc() == rightReader.maxDoc();
+    assertEquals(info, leftBits.length(), rightBits.length());
+    for (int i = 0; i < leftReader.maxDoc(); i++) {
+      assertEquals(info, leftBits.get(i), rightBits.get(i));
+    }
+  }
+  
+  public void assertFieldInfosEquals(String info, IndexReader leftReader, IndexReader rightReader) throws IOException {
+    FieldInfos leftInfos = MultiFields.getMergedFieldInfos(leftReader);
+    FieldInfos rightInfos = MultiFields.getMergedFieldInfos(rightReader);
+    
+    // TODO: would be great to verify more than just the names of the fields!
+    TreeSet<String> left = new TreeSet<String>();
+    TreeSet<String> right = new TreeSet<String>();
+    
+    for (FieldInfo fi : leftInfos) {
+      left.add(fi.name);
+    }
+    
+    for (FieldInfo fi : rightInfos) {
+      right.add(fi.name);
+    }
+    
+    assertEquals(info, left, right);
+  }
 }
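
With these helpers promoted to LuceneTestCase, any test can compare two readers for logical equivalence, not just TestDuelingCodecs. A minimal usage sketch, assuming it lives in a test class extending LuceneTestCase with the usual test-framework imports (the field name, document count and info string are illustrative only, not part of this patch):

  public void testSameContentIsEquivalent() throws IOException {
    Directory leftDir = newDirectory();
    Directory rightDir = newDirectory();
    // use identically seeded analyzers so both indexes see the same tokens
    long seed = random().nextLong();
    RandomIndexWriter leftWriter = new RandomIndexWriter(random(), leftDir, new MockAnalyzer(new Random(seed)));
    RandomIndexWriter rightWriter = new RandomIndexWriter(random(), rightDir, new MockAnalyzer(new Random(seed)));
    for (int i = 0; i < 100; i++) {
      Document doc = new Document();
      doc.add(newTextField("body", "some test text " + i, Field.Store.YES));
      // the same document goes into both indexes
      leftWriter.addDocument(doc);
      rightWriter.addDocument(doc);
    }
    IndexReader leftReader = leftWriter.getReader();
    IndexReader rightReader = rightWriter.getReader();
    leftWriter.close();
    rightWriter.close();
    // walks fields, terms, postings, stored fields, term vectors, doc values and live docs
    assertReaderEquals("left vs right", leftReader, rightReader);
    IOUtils.close(leftReader, rightReader, leftDir, rightDir);
  }
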
Index: lucene/core/src/test/org/apache/lucene/index/TestDoc.java
===================================================================
--- lucene/core/src/test/org/apache/lucene/index/TestDoc.java	(revision 1457512)
+++ lucene/core/src/test/org/apache/lucene/index/TestDoc.java	(working copy)
@@ -25,6 +25,7 @@
 import java.io.PrintWriter;
 import java.io.StringWriter;
 import java.io.Writer;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.LinkedList;
@@ -218,11 +219,10 @@
       TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(si1.info.dir);
       final SegmentInfo si = new SegmentInfo(si1.info.dir, Constants.LUCENE_MAIN_VERSION, merged, -1, false, codec, null, null);
 
-      SegmentMerger merger = new SegmentMerger(si, InfoStream.getDefault(), trackingDir, IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL,
-                                               MergeState.CheckAbort.NONE, new FieldInfos.FieldNumbers(), context);
+      SegmentMerger merger = new SegmentMerger(Arrays.<AtomicReader>asList(r1, r2),
+          si, InfoStream.getDefault(), trackingDir, IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL,
+          MergeState.CheckAbort.NONE, new FieldInfos.FieldNumbers(), context);
 
-      merger.add(r1);
-      merger.add(r2);
       MergeState mergeState = merger.merge();
       r1.close();
       r2.close();
Index: lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java
===================================================================
--- lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java	(revision 1457512)
+++ lucene/core/src/test/org/apache/lucene/index/TestSegmentMerger.java	(working copy)
@@ -18,6 +18,7 @@
  */
 
 import java.io.IOException;
+import java.util.Arrays;
 
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.document.Document;
@@ -29,7 +30,6 @@
 import org.apache.lucene.util.InfoStream;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util._TestUtil;
-import org.apache.lucene.util.packed.PackedInts;
 
 public class TestSegmentMerger extends LuceneTestCase {
   //The variables for the new merged segment
@@ -80,10 +80,9 @@
     final Codec codec = Codec.getDefault();
     final SegmentInfo si = new SegmentInfo(mergedDir, Constants.LUCENE_MAIN_VERSION, mergedSegment, -1, false, codec, null, null);
 
-    SegmentMerger merger = new SegmentMerger(si, InfoStream.getDefault(), mergedDir, IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL,
-                                             MergeState.CheckAbort.NONE, new FieldInfos.FieldNumbers(), newIOContext(random()));
-    merger.add(reader1);
-    merger.add(reader2);
+    SegmentMerger merger = new SegmentMerger(Arrays.<AtomicReader>asList(reader1, reader2),
+        si, InfoStream.getDefault(), mergedDir, IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL,
+        MergeState.CheckAbort.NONE, new FieldInfos.FieldNumbers(), newIOContext(random()));
     MergeState mergeState = merger.merge();
     int docsMerged = mergeState.segmentInfo.getDocCount();
     assertTrue(docsMerged == 2);
Index: lucene/core/src/test/org/apache/lucene/index/TestDuelingCodecs.java
===================================================================
--- lucene/core/src/test/org/apache/lucene/index/TestDuelingCodecs.java	(revision 1457512)
+++ lucene/core/src/test/org/apache/lucene/index/TestDuelingCodecs.java	(working copy)
@@ -157,646 +157,8 @@
   /**
    * checks the two indexes are equivalent
    */
-  public void testEquals() throws Exception {
-    assertReaderStatistics(leftReader, rightReader);
-    assertFields(MultiFields.getFields(leftReader), MultiFields.getFields(rightReader), true);
-    assertNorms(leftReader, rightReader);
-    assertStoredFields(leftReader, rightReader);
-    assertTermVectors(leftReader, rightReader);
-    assertDocValues(leftReader, rightReader);
-    assertDeletedDocs(leftReader, rightReader);
-    assertFieldInfos(leftReader, rightReader);
+  public void testEquals() throws IOException {
+    assertReaderEquals(info, leftReader, rightReader);
   }
-  
-  /** 
-   * checks that reader-level statistics are the same 
-   */
-  public void assertReaderStatistics(IndexReader leftReader, IndexReader rightReader) throws Exception {
-    // Somewhat redundant: we never delete docs
-    assertEquals(info, leftReader.maxDoc(), rightReader.maxDoc());
-    assertEquals(info, leftReader.numDocs(), rightReader.numDocs());
-    assertEquals(info, leftReader.numDeletedDocs(), rightReader.numDeletedDocs());
-    assertEquals(info, leftReader.hasDeletions(), rightReader.hasDeletions());
-  }
-  
-  /** 
-   * Fields api equivalency 
-   */
-  public void assertFields(Fields leftFields, Fields rightFields, boolean deep) throws Exception {
-    // Fields could be null if there are no postings,
-    // but then it must be null for both
-    if (leftFields == null || rightFields == null) {
-      assertNull(info, leftFields);
-      assertNull(info, rightFields);
-      return;
-    }
-    assertFieldStatistics(leftFields, rightFields);
-    
-    Iterator<String> leftEnum = leftFields.iterator();
-    Iterator<String> rightEnum = rightFields.iterator();
-    
-    while (leftEnum.hasNext()) {
-      String field = leftEnum.next();
-      assertEquals(info, field, rightEnum.next());
-      assertTerms(leftFields.terms(field), rightFields.terms(field), deep);
-    }
-    assertFalse(rightEnum.hasNext());
-  }
-  
-  /** 
-   * checks that top-level statistics on Fields are the same 
-   */
-  public void assertFieldStatistics(Fields leftFields, Fields rightFields) throws Exception {
-    if (leftFields.size() != -1 && rightFields.size() != -1) {
-      assertEquals(info, leftFields.size(), rightFields.size());
-    }
-  }
-  
-  /** 
-   * Terms api equivalency 
-   */
-  public void assertTerms(Terms leftTerms, Terms rightTerms, boolean deep) throws Exception {
-    if (leftTerms == null || rightTerms == null) {
-      assertNull(info, leftTerms);
-      assertNull(info, rightTerms);
-      return;
-    }
-    assertTermsStatistics(leftTerms, rightTerms);
-    assertEquals(leftTerms.hasOffsets(), rightTerms.hasOffsets());
-    assertEquals(leftTerms.hasPositions(), rightTerms.hasPositions());
-    assertEquals(leftTerms.hasPayloads(), rightTerms.hasPayloads());
 
-    TermsEnum leftTermsEnum = leftTerms.iterator(null);
-    TermsEnum rightTermsEnum = rightTerms.iterator(null);
-    assertTermsEnum(leftTermsEnum, rightTermsEnum, true);
-    
-    assertTermsSeeking(leftTerms, rightTerms);
-    
-    if (deep) {
-      int numIntersections = atLeast(3);
-      for (int i = 0; i < numIntersections; i++) {
-        String re = AutomatonTestUtil.randomRegexp(random());
-        CompiledAutomaton automaton = new CompiledAutomaton(new RegExp(re, RegExp.NONE).toAutomaton());
-        if (automaton.type == CompiledAutomaton.AUTOMATON_TYPE.NORMAL) {
-          // TODO: test start term too
-          TermsEnum leftIntersection = leftTerms.intersect(automaton, null);
-          TermsEnum rightIntersection = rightTerms.intersect(automaton, null);
-          assertTermsEnum(leftIntersection, rightIntersection, rarely());
-        }
-      }
-    }
-  }
-  
-  private void assertTermsSeeking(Terms leftTerms, Terms rightTerms) throws Exception {
-    TermsEnum leftEnum = null;
-    TermsEnum rightEnum = null;
-
-    // just an upper bound
-    int numTests = atLeast(20);
-    Random random = random();
-
-    // collect this number of terms from the left side
-    HashSet<BytesRef> tests = new HashSet<BytesRef>();
-    int numPasses = 0;
-    while (numPasses < 10 && tests.size() < numTests) {
-      leftEnum = leftTerms.iterator(leftEnum);
-      BytesRef term = null;
-      while ((term = leftEnum.next()) != null) {
-        int code = random.nextInt(10);
-        if (code == 0) {
-          // the term
-          tests.add(BytesRef.deepCopyOf(term));
-        } else if (code == 1) {
-          // truncated subsequence of term
-          term = BytesRef.deepCopyOf(term);
-          if (term.length > 0) {
-            // truncate it
-            term.length = random.nextInt(term.length);
-          }
-        } else if (code == 2) {
-          // term, but ensure a non-zero offset
-          byte newbytes[] = new byte[term.length+5];
-          System.arraycopy(term.bytes, term.offset, newbytes, 5, term.length);
-          tests.add(new BytesRef(newbytes, 5, term.length));
-        } else if (code == 3) {
-          switch (random().nextInt(3)) {
-            case 0:
-              tests.add(new BytesRef()); // before the first term
-              break;
-            case 1:
-              tests.add(new BytesRef(new byte[] {(byte) 0xFF, (byte) 0xFF})); // past the last term
-              break;
-            case 2:
-              tests.add(new BytesRef(_TestUtil.randomSimpleString(random()))); // random term
-              break;
-            default:
-              throw new AssertionError();
-          }
-        }
-      }
-      numPasses++;
-    }
-
-    rightEnum = rightTerms.iterator(rightEnum);
-
-    ArrayList<BytesRef> shuffledTests = new ArrayList<BytesRef>(tests);
-    Collections.shuffle(shuffledTests, random);
-
-    for (BytesRef b : shuffledTests) {
-      if (rarely()) {
-        // reuse the enums
-        leftEnum = leftTerms.iterator(leftEnum);
-        rightEnum = rightTerms.iterator(rightEnum);
-      }
-
-      final boolean useCache = random().nextBoolean();
-      final boolean seekExact = random().nextBoolean();
-
-      if (seekExact) {
-        assertEquals(info, leftEnum.seekExact(b, useCache), rightEnum.seekExact(b, useCache));
-      } else {
-        SeekStatus leftStatus = leftEnum.seekCeil(b, useCache);
-        SeekStatus rightStatus = rightEnum.seekCeil(b, useCache);
-        assertEquals(info, leftStatus, rightStatus);
-        if (leftStatus != SeekStatus.END) {
-          assertEquals(info, leftEnum.term(), rightEnum.term());
-          assertTermStats(leftEnum, rightEnum);
-        }
-      }
-    }
-  }
-  
-  /** 
-   * checks collection-level statistics on Terms 
-   */
-  public void assertTermsStatistics(Terms leftTerms, Terms rightTerms) throws Exception {
-    assert leftTerms.getComparator() == rightTerms.getComparator();
-    if (leftTerms.getDocCount() != -1 && rightTerms.getDocCount() != -1) {
-      assertEquals(info, leftTerms.getDocCount(), rightTerms.getDocCount());
-    }
-    if (leftTerms.getSumDocFreq() != -1 && rightTerms.getSumDocFreq() != -1) {
-      assertEquals(info, leftTerms.getSumDocFreq(), rightTerms.getSumDocFreq());
-    }
-    if (leftTerms.getSumTotalTermFreq() != -1 && rightTerms.getSumTotalTermFreq() != -1) {
-      assertEquals(info, leftTerms.getSumTotalTermFreq(), rightTerms.getSumTotalTermFreq());
-    }
-    if (leftTerms.size() != -1 && rightTerms.size() != -1) {
-      assertEquals(info, leftTerms.size(), rightTerms.size());
-    }
-  }
-
-  /** 
-   * checks the terms enum sequentially
-   * if deep is false, it does a 'shallow' test that doesnt go down to the docsenums
-   */
-  public void assertTermsEnum(TermsEnum leftTermsEnum, TermsEnum rightTermsEnum, boolean deep) throws Exception {
-    BytesRef term;
-    Bits randomBits = new RandomBits(leftReader.maxDoc(), random().nextDouble(), random());
-    DocsAndPositionsEnum leftPositions = null;
-    DocsAndPositionsEnum rightPositions = null;
-    DocsEnum leftDocs = null;
-    DocsEnum rightDocs = null;
-    
-    while ((term = leftTermsEnum.next()) != null) {
-      assertEquals(info, term, rightTermsEnum.next());
-      assertTermStats(leftTermsEnum, rightTermsEnum);
-      if (deep) {
-        assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions),
-                                   rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions));
-        assertDocsAndPositionsEnum(leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions),
-                                   rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions));
-
-        assertPositionsSkipping(leftTermsEnum.docFreq(), 
-                                leftPositions = leftTermsEnum.docsAndPositions(null, leftPositions),
-                                rightPositions = rightTermsEnum.docsAndPositions(null, rightPositions));
-        assertPositionsSkipping(leftTermsEnum.docFreq(), 
-                                leftPositions = leftTermsEnum.docsAndPositions(randomBits, leftPositions),
-                                rightPositions = rightTermsEnum.docsAndPositions(randomBits, rightPositions));
-
-        // with freqs:
-        assertDocsEnum(leftDocs = leftTermsEnum.docs(null, leftDocs),
-            rightDocs = rightTermsEnum.docs(null, rightDocs),
-            true);
-        assertDocsEnum(leftDocs = leftTermsEnum.docs(randomBits, leftDocs),
-            rightDocs = rightTermsEnum.docs(randomBits, rightDocs),
-            true);
-
-        // w/o freqs:
-        assertDocsEnum(leftDocs = leftTermsEnum.docs(null, leftDocs, DocsEnum.FLAG_NONE),
-            rightDocs = rightTermsEnum.docs(null, rightDocs, DocsEnum.FLAG_NONE),
-            false);
-        assertDocsEnum(leftDocs = leftTermsEnum.docs(randomBits, leftDocs, DocsEnum.FLAG_NONE),
-            rightDocs = rightTermsEnum.docs(randomBits, rightDocs, DocsEnum.FLAG_NONE),
-            false);
-        
-        // with freqs:
-        assertDocsSkipping(leftTermsEnum.docFreq(), 
-            leftDocs = leftTermsEnum.docs(null, leftDocs),
-            rightDocs = rightTermsEnum.docs(null, rightDocs),
-            true);
-        assertDocsSkipping(leftTermsEnum.docFreq(), 
-            leftDocs = leftTermsEnum.docs(randomBits, leftDocs),
-            rightDocs = rightTermsEnum.docs(randomBits, rightDocs),
-            true);
-
-        // w/o freqs:
-        assertDocsSkipping(leftTermsEnum.docFreq(), 
-            leftDocs = leftTermsEnum.docs(null, leftDocs, DocsEnum.FLAG_NONE),
-            rightDocs = rightTermsEnum.docs(null, rightDocs, DocsEnum.FLAG_NONE),
-            false);
-        assertDocsSkipping(leftTermsEnum.docFreq(), 
-            leftDocs = leftTermsEnum.docs(randomBits, leftDocs, DocsEnum.FLAG_NONE),
-            rightDocs = rightTermsEnum.docs(randomBits, rightDocs, DocsEnum.FLAG_NONE),
-            false);
-      }
-    }
-    assertNull(info, rightTermsEnum.next());
-  }
-  
-  /**
-   * checks term-level statistics
-   */
-  public void assertTermStats(TermsEnum leftTermsEnum, TermsEnum rightTermsEnum) throws Exception {
-    assertEquals(info, leftTermsEnum.docFreq(), rightTermsEnum.docFreq());
-    if (leftTermsEnum.totalTermFreq() != -1 && rightTermsEnum.totalTermFreq() != -1) {
-      assertEquals(info, leftTermsEnum.totalTermFreq(), rightTermsEnum.totalTermFreq());
-    }
-  }
-  
-  /**
-   * checks docs + freqs + positions + payloads, sequentially
-   */
-  public void assertDocsAndPositionsEnum(DocsAndPositionsEnum leftDocs, DocsAndPositionsEnum rightDocs) throws Exception {
-    if (leftDocs == null || rightDocs == null) {
-      assertNull(leftDocs);
-      assertNull(rightDocs);
-      return;
-    }
-    assertTrue(info, leftDocs.docID() == -1 || leftDocs.docID() == DocIdSetIterator.NO_MORE_DOCS);
-    assertTrue(info, rightDocs.docID() == -1 || rightDocs.docID() == DocIdSetIterator.NO_MORE_DOCS);
-    int docid;
-    while ((docid = leftDocs.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
-      assertEquals(info, docid, rightDocs.nextDoc());
-      int freq = leftDocs.freq();
-      assertEquals(info, freq, rightDocs.freq());
-      for (int i = 0; i < freq; i++) {
-        assertEquals(info, leftDocs.nextPosition(), rightDocs.nextPosition());
-        assertEquals(info, leftDocs.getPayload(), rightDocs.getPayload());
-        assertEquals(info, leftDocs.startOffset(), rightDocs.startOffset());
-        assertEquals(info, leftDocs.endOffset(), rightDocs.endOffset());
-      }
-    }
-    assertEquals(info, DocIdSetIterator.NO_MORE_DOCS, rightDocs.nextDoc());
-  }
-  
-  /**
-   * checks docs + freqs, sequentially
-   */
-  public void assertDocsEnum(DocsEnum leftDocs, DocsEnum rightDocs, boolean hasFreqs) throws Exception {
-    if (leftDocs == null) {
-      assertNull(rightDocs);
-      return;
-    }
-    assertTrue(info, leftDocs.docID() == -1 || leftDocs.docID() == DocIdSetIterator.NO_MORE_DOCS);
-    assertTrue(info, rightDocs.docID() == -1 || rightDocs.docID() == DocIdSetIterator.NO_MORE_DOCS);
-    int docid;
-    while ((docid = leftDocs.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
-      assertEquals(info, docid, rightDocs.nextDoc());
-      if (hasFreqs) {
-        assertEquals(info, leftDocs.freq(), rightDocs.freq());
-      }
-    }
-    assertEquals(info, DocIdSetIterator.NO_MORE_DOCS, rightDocs.nextDoc());
-  }
-  
-  /**
-   * checks advancing docs
-   */
-  public void assertDocsSkipping(int docFreq, DocsEnum leftDocs, DocsEnum rightDocs, boolean hasFreqs) throws Exception {
-    if (leftDocs == null) {
-      assertNull(rightDocs);
-      return;
-    }
-    int docid = -1;
-    int averageGap = leftReader.maxDoc() / (1+docFreq);
-    int skipInterval = 16;
-
-    while (true) {
-      if (random().nextBoolean()) {
-        // nextDoc()
-        docid = leftDocs.nextDoc();
-        assertEquals(info, docid, rightDocs.nextDoc());
-      } else {
-        // advance()
-        int skip = docid + (int) Math.ceil(Math.abs(skipInterval + random().nextGaussian() * averageGap));
-        docid = leftDocs.advance(skip);
-        assertEquals(info, docid, rightDocs.advance(skip));
-      }
-      
-      if (docid == DocIdSetIterator.NO_MORE_DOCS) {
-        return;
-      }
-      if (hasFreqs) {
-        assertEquals(info, leftDocs.freq(), rightDocs.freq());
-      }
-    }
-  }
-  
-  /**
-   * checks advancing docs + positions
-   */
-  public void assertPositionsSkipping(int docFreq, DocsAndPositionsEnum leftDocs, DocsAndPositionsEnum rightDocs) throws Exception {
-    if (leftDocs == null || rightDocs == null) {
-      assertNull(leftDocs);
-      assertNull(rightDocs);
-      return;
-    }
-    
-    int docid = -1;
-    int averageGap = leftReader.maxDoc() / (1+docFreq);
-    int skipInterval = 16;
-
-    while (true) {
-      if (random().nextBoolean()) {
-        // nextDoc()
-        docid = leftDocs.nextDoc();
-        assertEquals(info, docid, rightDocs.nextDoc());
-      } else {
-        // advance()
-        int skip = docid + (int) Math.ceil(Math.abs(skipInterval + random().nextGaussian() * averageGap));
-        docid = leftDocs.advance(skip);
-        assertEquals(info, docid, rightDocs.advance(skip));
-      }
-      
-      if (docid == DocIdSetIterator.NO_MORE_DOCS) {
-        return;
-      }
-      int freq = leftDocs.freq();
-      assertEquals(info, freq, rightDocs.freq());
-      for (int i = 0; i < freq; i++) {
-        assertEquals(info, leftDocs.nextPosition(), rightDocs.nextPosition());
-        assertEquals(info, leftDocs.getPayload(), rightDocs.getPayload());
-      }
-    }
-  }
-  
-  /** 
-   * checks that norms are the same across all fields 
-   */
-  public void assertNorms(IndexReader leftReader, IndexReader rightReader) throws Exception {
-    Fields leftFields = MultiFields.getFields(leftReader);
-    Fields rightFields = MultiFields.getFields(rightReader);
-    // Fields could be null if there are no postings,
-    // but then it must be null for both
-    if (leftFields == null || rightFields == null) {
-      assertNull(info, leftFields);
-      assertNull(info, rightFields);
-      return;
-    }
-    
-    for (String field : leftFields) {
-      NumericDocValues leftNorms = MultiDocValues.getNormValues(leftReader, field);
-      NumericDocValues rightNorms = MultiDocValues.getNormValues(rightReader, field);
-      if (leftNorms != null && rightNorms != null) {
-        assertDocValues(leftReader.maxDoc(), leftNorms, rightNorms);
-      } else {
-        assertNull(info, leftNorms);
-        assertNull(info, rightNorms);
-      }
-    }
-  }
-  
-  /** 
-   * checks that stored fields of all documents are the same 
-   */
-  public void assertStoredFields(IndexReader leftReader, IndexReader rightReader) throws Exception {
-    assert leftReader.maxDoc() == rightReader.maxDoc();
-    for (int i = 0; i < leftReader.maxDoc(); i++) {
-      StoredDocument leftDoc = leftReader.document(i);
-      StoredDocument rightDoc = rightReader.document(i);
-      
-      // TODO: I think this is bogus because we don't document what the order should be
-      // from these iterators, etc. I think the codec/IndexReader should be free to order this stuff
-      // in whatever way it wants (e.g. maybe it packs related fields together or something)
-      // To fix this, we sort the fields in both documents by name, but
-      // we still assume that all instances with same name are in order:
-      Comparator<StorableField> comp = new Comparator<StorableField>() {
-        @Override
-        public int compare(StorableField arg0, StorableField arg1) {
-          return arg0.name().compareTo(arg1.name());
-        }        
-      };
-      Collections.sort(leftDoc.getFields(), comp);
-      Collections.sort(rightDoc.getFields(), comp);
-
-      Iterator<StorableField> leftIterator = leftDoc.iterator();
-      Iterator<StorableField> rightIterator = rightDoc.iterator();
-      while (leftIterator.hasNext()) {
-        assertTrue(info, rightIterator.hasNext());
-        assertStoredField(leftIterator.next(), rightIterator.next());
-      }
-      assertFalse(info, rightIterator.hasNext());
-    }
-  }
-  
-  /** 
-   * checks that two stored fields are equivalent 
-   */
-  public void assertStoredField(StorableField leftField, StorableField rightField) {
-    assertEquals(info, leftField.name(), rightField.name());
-    assertEquals(info, leftField.binaryValue(), rightField.binaryValue());
-    assertEquals(info, leftField.stringValue(), rightField.stringValue());
-    assertEquals(info, leftField.numericValue(), rightField.numericValue());
-    // TODO: should we check the FT at all?
-  }
-  
-  /** 
-   * checks that term vectors across all fields are equivalent 
-   */
-  public void assertTermVectors(IndexReader leftReader, IndexReader rightReader) throws Exception {
-    assert leftReader.maxDoc() == rightReader.maxDoc();
-    for (int i = 0; i < leftReader.maxDoc(); i++) {
-      Fields leftFields = leftReader.getTermVectors(i);
-      Fields rightFields = rightReader.getTermVectors(i);
-      assertFields(leftFields, rightFields, rarely());
-    }
-  }
-
-  private static Set<String> getDVFields(IndexReader reader) {
-    Set<String> fields = new HashSet<String>();
-    for(FieldInfo fi : MultiFields.getMergedFieldInfos(reader)) {
-      if (fi.hasDocValues()) {
-        fields.add(fi.name);
-      }
-    }
-
-    return fields;
-  }
-  
-  /**
-   * checks that docvalues across all fields are equivalent
-   */
-  public void assertDocValues(IndexReader leftReader, IndexReader rightReader) throws Exception {
-    Set<String> leftFields = getDVFields(leftReader);
-    Set<String> rightFields = getDVFields(rightReader);
-    assertEquals(info, leftFields, rightFields);
-
-    for (String field : leftFields) {
-      // TODO: clean this up... very messy
-      {
-        NumericDocValues leftValues = MultiDocValues.getNumericValues(leftReader, field);
-        NumericDocValues rightValues = MultiDocValues.getNumericValues(rightReader, field);
-        if (leftValues != null && rightValues != null) {
-          assertDocValues(leftReader.maxDoc(), leftValues, rightValues);
-        } else {
-          assertNull(info, leftValues);
-          assertNull(info, rightValues);
-        }
-      }
-
-      {
-        BinaryDocValues leftValues = MultiDocValues.getBinaryValues(leftReader, field);
-        BinaryDocValues rightValues = MultiDocValues.getBinaryValues(rightReader, field);
-        if (leftValues != null && rightValues != null) {
-          BytesRef scratchLeft = new BytesRef();
-          BytesRef scratchRight = new BytesRef();
-          for(int docID=0;docID<leftReader.maxDoc();docID++) {
-            leftValues.get(docID, scratchLeft);
-            rightValues.get(docID, scratchRight);
-            assertEquals(info, scratchLeft, scratchRight);
-          }
-        } else {
-          assertNull(info, leftValues);
-          assertNull(info, rightValues);
-        }
-      }
-      
-      {
-        SortedDocValues leftValues = MultiDocValues.getSortedValues(leftReader, field);
-        SortedDocValues rightValues = MultiDocValues.getSortedValues(rightReader, field);
-        if (leftValues != null && rightValues != null) {
-          // numOrds
-          assertEquals(info, leftValues.getValueCount(), rightValues.getValueCount());
-          // ords
-          BytesRef scratchLeft = new BytesRef();
-          BytesRef scratchRight = new BytesRef();
-          for (int i = 0; i < leftValues.getValueCount(); i++) {
-            leftValues.lookupOrd(i, scratchLeft);
-            rightValues.lookupOrd(i, scratchRight);
-            assertEquals(info, scratchLeft, scratchRight);
-          }
-          // bytes
-          for(int docID=0;docID<leftReader.maxDoc();docID++) {
-            leftValues.get(docID, scratchLeft);
-            rightValues.get(docID, scratchRight);
-            assertEquals(info, scratchLeft, scratchRight);
-          }
-        } else {
-          assertNull(info, leftValues);
-          assertNull(info, rightValues);
-        }
-      }
-      
-      {
-        SortedSetDocValues leftValues = MultiDocValues.getSortedSetValues(leftReader, field);
-        SortedSetDocValues rightValues = MultiDocValues.getSortedSetValues(rightReader, field);
-        if (leftValues != null && rightValues != null) {
-          // numOrds
-          assertEquals(info, leftValues.getValueCount(), rightValues.getValueCount());
-          // ords
-          BytesRef scratchLeft = new BytesRef();
-          BytesRef scratchRight = new BytesRef();
-          for (int i = 0; i < leftValues.getValueCount(); i++) {
-            leftValues.lookupOrd(i, scratchLeft);
-            rightValues.lookupOrd(i, scratchRight);
-            assertEquals(info, scratchLeft, scratchRight);
-          }
-          // ord lists
-          for(int docID=0;docID<leftReader.maxDoc();docID++) {
-            leftValues.setDocument(docID);
-            rightValues.setDocument(docID);
-            long ord;
-            while ((ord = leftValues.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
-              assertEquals(info, ord, rightValues.nextOrd());
-            }
-            assertEquals(info, SortedSetDocValues.NO_MORE_ORDS, rightValues.nextOrd());
-          }
-        } else {
-          assertNull(info, leftValues);
-          assertNull(info, rightValues);
-        }
-      }
-    }
-  }
-  
-  public void assertDocValues(int num, NumericDocValues leftDocValues, NumericDocValues rightDocValues) throws Exception {
-    assertNotNull(info, leftDocValues);
-    assertNotNull(info, rightDocValues);
-    for(int docID=0;docID<num;docID++) {
-      assertEquals(leftDocValues.get(docID),
-                   rightDocValues.get(docID));
-    }
-  }
-  
-  // TODO: this is kinda stupid, we don't delete documents in the test.
-  public void assertDeletedDocs(IndexReader leftReader, IndexReader rightReader) throws Exception {
-    assert leftReader.numDeletedDocs() == rightReader.numDeletedDocs();
-    Bits leftBits = MultiFields.getLiveDocs(leftReader);
-    Bits rightBits = MultiFields.getLiveDocs(rightReader);
-    
-    if (leftBits == null || rightBits == null) {
-      assertNull(info, leftBits);
-      assertNull(info, rightBits);
-      return;
-    }
-    
-    assert leftReader.maxDoc() == rightReader.maxDoc();
-    assertEquals(info, leftBits.length(), rightBits.length());
-    for (int i = 0; i < leftReader.maxDoc(); i++) {
-      assertEquals(info, leftBits.get(i), rightBits.get(i));
-    }
-  }
-  
-  public void assertFieldInfos(IndexReader leftReader, IndexReader rightReader) throws Exception {
-    FieldInfos leftInfos = MultiFields.getMergedFieldInfos(leftReader);
-    FieldInfos rightInfos = MultiFields.getMergedFieldInfos(rightReader);
-    
-    // TODO: would be great to verify more than just the names of the fields!
-    TreeSet<String> left = new TreeSet<String>();
-    TreeSet<String> right = new TreeSet<String>();
-    
-    for (FieldInfo fi : leftInfos) {
-      left.add(fi.name);
-    }
-    
-    for (FieldInfo fi : rightInfos) {
-      right.add(fi.name);
-    }
-    
-    assertEquals(info, left, right);
-  }
-  
-  
-  private static class RandomBits implements Bits {
-    FixedBitSet bits;
-    
-    RandomBits(int maxDoc, double pctLive, Random random) {
-      bits = new FixedBitSet(maxDoc);
-      for (int i = 0; i < maxDoc; i++) {
-        if (random.nextDouble() <= pctLive) {        
-          bits.set(i);
-        }
-      }
-    }
-    
-    @Override
-    public boolean get(int index) {
-      return bits.get(index);
-    }
-
-    @Override
-    public int length() {
-      return bits.length();
-    }
-  }
 }
Index: lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java	(revision 1457512)
+++ lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java	(working copy)
@@ -34,12 +34,11 @@
 import org.apache.lucene.util.InfoStream;
 
 /**
- * The SegmentMerger class combines two or more Segments, represented by an IndexReader ({@link #add},
- * into a single Segment.  After adding the appropriate readers, call the merge method to combine the
+ * The SegmentMerger class combines two or more Segments, represented by an
+ * IndexReader, into a single Segment.  Call the merge method to combine the
  * segments.
  *
  * @see #merge
- * @see #add
  */
 final class SegmentMerger {
   private final Directory directory;
@@ -49,16 +48,13 @@
   
   private final IOContext context;
   
-  private final MergeState mergeState = new MergeState();
+  private final MergeState mergeState;
   private final FieldInfos.Builder fieldInfosBuilder;
 
   // note, just like in codec apis Directory 'dir' is NOT the same as segmentInfo.dir!!
-  SegmentMerger(SegmentInfo segmentInfo, InfoStream infoStream, Directory dir, int termIndexInterval,
+  SegmentMerger(List<AtomicReader> readers, SegmentInfo segmentInfo, InfoStream infoStream, Directory dir, int termIndexInterval,
                 MergeState.CheckAbort checkAbort, FieldInfos.FieldNumbers fieldNumbers, IOContext context) {
-    mergeState.segmentInfo = segmentInfo;
-    mergeState.infoStream = infoStream;
-    mergeState.readers = new ArrayList<AtomicReader>();
-    mergeState.checkAbort = checkAbort;
+    mergeState = new MergeState(readers, segmentInfo, infoStream, checkAbort);
     directory = dir;
     this.termIndexInterval = termIndexInterval;
     this.codec = segmentInfo.getCodec();
@@ -67,21 +63,7 @@
   }
 
   /**
-   * Add an IndexReader to the collection of readers that are to be merged
-   */
-  void add(IndexReader reader) {
-    for (final AtomicReaderContext ctx : reader.leaves()) {
-      final AtomicReader r = ctx.reader();
-      mergeState.readers.add(r);
-    }
-  }
-
-  void add(SegmentReader reader) {
-    mergeState.readers.add(reader);
-  }
-
-  /**
-   * Merges the readers specified by the {@link #add} method into the directory passed to the constructor
+   * Merges the readers into the directory passed to the constructor
    * @return The number of documents that were merged
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error
Index: lucene/core/src/java/org/apache/lucene/index/MergePolicy.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/index/MergePolicy.java	(revision 1457512)
+++ lucene/core/src/java/org/apache/lucene/index/MergePolicy.java	(working copy)
@@ -19,6 +19,7 @@
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 
@@ -104,6 +105,22 @@
       totalDocCount = count;
     }
 
+    /** Get the list of readers to merge. Note that this list does not
+     *  necessarily match the list of segments to merge and should only be
+     *  used to feed a SegmentMerger when initializing a merge. */
+    public List<AtomicReader> getMergeReaders() throws IOException {
+      if (readers == null) {
+        throw new IllegalStateException("IndexWriter has not initialized readers from the segment infos yet");
+      }
+      final List<AtomicReader> readers = new ArrayList<AtomicReader>(this.readers.size());
+      for (AtomicReader reader : this.readers) {
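+        // skip fully deleted readers: IndexWriter used to perform this check
+        // itself before adding readers to the SegmentMerger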
+        if (reader.numDocs() > 0) {
+          readers.add(reader);
+        }
+      }
+      return Collections.unmodifiableList(readers);
+    }
+
     /** Record that an exception occurred while executing
      *  this merge */
     synchronized void setException(Throwable error) {
Index: lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/index/IndexWriter.java	(revision 1457512)
+++ lucene/core/src/java/org/apache/lucene/index/IndexWriter.java	(working copy)
@@ -2395,8 +2395,12 @@
       flush(false, true);
 
       String mergedName = newSegmentName();
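+      // collect the atomic leaves of the incoming readers up front, since
+      // SegmentMerger now takes its readers at construction time instead of
+      // through add()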
+      final List<AtomicReader> mergeReaders = new ArrayList<AtomicReader>();
       for (IndexReader indexReader : readers) {
         numDocs += indexReader.numDocs();
+        for (AtomicReaderContext ctx : indexReader.leaves()) {
+          mergeReaders.add(ctx.reader());
+        }
       }
       final IOContext context = new IOContext(new MergeInfo(numDocs, -1, true, -1));
 
@@ -2407,13 +2411,9 @@
       SegmentInfo info = new SegmentInfo(directory, Constants.LUCENE_MAIN_VERSION, mergedName, -1,
                                          false, codec, null, null);
 
-      SegmentMerger merger = new SegmentMerger(info, infoStream, trackingDir, config.getTermIndexInterval(),
+      SegmentMerger merger = new SegmentMerger(mergeReaders, info, infoStream, trackingDir, config.getTermIndexInterval(),
                                                MergeState.CheckAbort.NONE, globalFieldNumberMap, context);
 
-      for (IndexReader reader : readers) {    // add new indexes
-        merger.add(reader);
-      }
-
       MergeState mergeState;
       boolean success = false;
       try {
@@ -3561,9 +3561,6 @@
     final MergeState.CheckAbort checkAbort = new MergeState.CheckAbort(merge, directory);
     final TrackingDirectoryWrapper dirWrapper = new TrackingDirectoryWrapper(directory);
 
-    SegmentMerger merger = new SegmentMerger(merge.info.info, infoStream, dirWrapper, config.getTermIndexInterval(), checkAbort,
-                                             globalFieldNumberMap, context);
-
     if (infoStream.isEnabled("IW")) {
       infoStream.message("IW", "merging " + segString(merge.segments));
     }
@@ -3633,12 +3630,13 @@
 
         merge.readers.add(reader);
         assert delCount <= info.info.getDocCount(): "delCount=" + delCount + " info.docCount=" + info.info.getDocCount() + " rld.pendingDeleteCount=" + rld.getPendingDeleteCount() + " info.getDelCount()=" + info.getDelCount();
-        if (delCount < info.info.getDocCount()) {
-          merger.add(reader);
-        }
         segUpto++;
       }
 
+      final SegmentMerger merger = new SegmentMerger(merge.getMergeReaders(),
+          merge.info.info, infoStream, dirWrapper, config.getTermIndexInterval(),
+          checkAbort, globalFieldNumberMap, context);
+
       merge.checkAborted(directory);
 
       // This is where all the work happens:
Index: lucene/core/src/java/org/apache/lucene/index/MergeState.java
===================================================================
--- lucene/core/src/java/org/apache/lucene/index/MergeState.java	(revision 1457512)
+++ lucene/core/src/java/org/apache/lucene/index/MergeState.java	(working copy)
@@ -129,13 +129,13 @@
   }
 
   /** {@link SegmentInfo} of the newly merged segment. */
-  public SegmentInfo segmentInfo;
+  public final SegmentInfo segmentInfo;
 
   /** {@link FieldInfos} of the newly merged segment. */
   public FieldInfos fieldInfos;
 
   /** Readers being merged. */
-  public List<AtomicReader> readers;
+  public final List<AtomicReader> readers;
 
   /** Maps docIDs around deletions. */
   public DocMap[] docMaps;
@@ -145,10 +145,10 @@
 
   /** Holds the CheckAbort instance, which is invoked
    *  periodically to see if the merge has been aborted. */
-  public CheckAbort checkAbort;
+  public final CheckAbort checkAbort;
 
   /** InfoStream for debugging messages. */
-  public InfoStream infoStream;
+  public final InfoStream infoStream;
 
   // TODO: get rid of this? it tells you which segments are 'aligned' (e.g. for bulk merging)
   // but is this really so expensive to compute again in different components, versus once in SM?
@@ -162,7 +162,11 @@
   public int matchedCount;
 
   /** Sole constructor. */
-  MergeState() {
+  MergeState(List<AtomicReader> readers, SegmentInfo segmentInfo, InfoStream infoStream, CheckAbort checkAbort) {
+    this.readers = readers;
+    this.segmentInfo = segmentInfo;
+    this.infoStream = infoStream;
+    this.checkAbort = checkAbort;
   }
 
   /**
Index: lucene/misc/src/test/org/apache/lucene/index/sorter/TestSortingMergePolicy.java
===================================================================
--- lucene/misc/src/test/org/apache/lucene/index/sorter/TestSortingMergePolicy.java	(revision 0)
+++ lucene/misc/src/test/org/apache/lucene/index/sorter/TestSortingMergePolicy.java	(working copy)
@@ -0,0 +1,108 @@
+package org.apache.lucene.index.sorter;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.index.SlowCompositeReaderWrapper;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+public class TestSortingMergePolicy extends LuceneTestCase {
+
+  private Directory dir1, dir2;
+  private Sorter sorter;
+  private IndexReader reader;
+  private IndexReader sortedReader;
+
+  public void setUp() throws Exception {
+    super.setUp();
+    sorter = new NumericDocValuesSorter("ndv");
+    createRandomIndexes();
+  }
+
+  private Document randomDocument() {
+    final Document doc = new Document();
+    doc.add(new NumericDocValuesField("ndv", random().nextLong()));
+    doc.add(new StringField("s", _TestUtil.randomSimpleString(random(), 3), Store.YES));
+    return doc;
+  }
+
+  private void createRandomIndexes() throws IOException {
+    dir1 = newDirectory();
+    dir2 = newDirectory();
+    final IndexWriterConfig iwc1 = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+    final IndexWriterConfig iwc2 = iwc1.clone();
+    iwc2.setMergePolicy(new SortingMergePolicy(iwc2.getMergePolicy(), sorter));
+    final IndexWriter iw1 = new IndexWriter(dir1, iwc1);
+    final IndexWriter iw2 = new IndexWriter(dir2, iwc2);
+    final int numDocs = atLeast(100);
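+    // feed both writers the same documents and commit at the same points, so
+    // that the two indexes only differ by their merge policy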
+    for (int i = 0; i < numDocs; ++i) {
+      final Document doc = randomDocument();
+      iw1.addDocument(doc);
+      iw2.addDocument(doc);
+      if (i == numDocs / 2 || rarely()) {
+        iw1.commit();
+        iw2.commit();
+      }
+    }
+    iw1.forceMerge(1);
+    iw2.forceMerge(1);
+    iw1.close();
+    iw2.close();
+    reader = DirectoryReader.open(dir1);
+    sortedReader = DirectoryReader.open(dir2);
+  }
+
+  public void tearDown() throws Exception {
+    reader.close();
+    sortedReader.close();
+    dir1.close();
+    dir2.close();
+    super.tearDown();
+  }
+
+  private void assertSorted(AtomicReader reader) throws IOException {
+    final NumericDocValues ndv = reader.getNumericDocValues("ndv");
+    for (int i = 1; i < reader.maxDoc(); ++i) {
+      assertTrue(ndv.get(i-1) <= ndv.get(i));
+    }
+  }
+
+  public void testSortingMP() throws IOException {
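+    // an on-the-fly sorted view of the regular index must match the index
+    // that was written with the SortingMergePolicy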
+    final AtomicReader sortedReader1 = SortingAtomicReader.wrap(SlowCompositeReaderWrapper.wrap(reader), sorter);
+    final AtomicReader sortedReader2 = SlowCompositeReaderWrapper.wrap(sortedReader);
+
+    assertSorted(sortedReader1);
+    assertSorted(sortedReader2);
+    assertReaderEquals("", sortedReader1, sortedReader2);
+  }
+
+}

Property changes on: lucene/misc/src/test/org/apache/lucene/index/sorter/TestSortingMergePolicy.java
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Index: lucene/misc/src/test/org/apache/lucene/index/sorter/SortingAtomicReaderTest.java
===================================================================
--- lucene/misc/src/test/org/apache/lucene/index/sorter/SortingAtomicReaderTest.java	(revision 1457512)
+++ lucene/misc/src/test/org/apache/lucene/index/sorter/SortingAtomicReaderTest.java	(working copy)
@@ -56,7 +56,7 @@
       System.out.println("sortedValues: " + Arrays.toString(sortedValues));
     }
     
-    reader = SortingAtomicReader.sort(reader, new Sorter() {
+    reader = SortingAtomicReader.wrap(reader, new Sorter() {
       @Override
       public Sorter.DocMap sort(AtomicReader reader) throws IOException {
         return docMap;
@@ -76,5 +76,5 @@
     
     _TestUtil.checkReader(reader);
   }
-  
+
 }
Index: lucene/misc/src/test/org/apache/lucene/index/sorter/IndexSortingTest.java
===================================================================
--- lucene/misc/src/test/org/apache/lucene/index/sorter/IndexSortingTest.java	(revision 1457512)
+++ lucene/misc/src/test/org/apache/lucene/index/sorter/IndexSortingTest.java	(working copy)
@@ -61,7 +61,7 @@
 
     Directory target = newDirectory();
     IndexWriter writer = new IndexWriter(target, newIndexWriterConfig(TEST_VERSION_CURRENT, null));
-    reader = SortingAtomicReader.sort(reader, sorter);
+    reader = SortingAtomicReader.wrap(reader, sorter);
     writer.addIndexes(reader);
     writer.close();
     reader.close();
Index: lucene/misc/src/java/org/apache/lucene/index/sorter/SortingAtomicReader.java
===================================================================
--- lucene/misc/src/java/org/apache/lucene/index/sorter/SortingAtomicReader.java	(revision 1457512)
+++ lucene/misc/src/java/org/apache/lucene/index/sorter/SortingAtomicReader.java	(working copy)
@@ -362,7 +362,7 @@
     
     @Override
     public int docID() {
-      return docIt >= upto ? NO_MORE_DOCS : docs[docIt];
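+      // docIt is negative until nextDoc()/advance() has been called; report -1
+      // rather than indexing docs[] out of bounds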
+      return docIt < 0 ? -1 : docIt >= upto ? NO_MORE_DOCS : docs[docIt];
     }
     
     @Override
@@ -556,7 +556,7 @@
   /** Return a sorted view of <code>reader</code> according to the order
    *  defined by <code>sorter</code>. If the reader is already sorted, this
    *  method might return the reader as-is. */
-  public static AtomicReader sort(AtomicReader reader, Sorter sorter) throws IOException {
+  public static AtomicReader wrap(AtomicReader reader, Sorter sorter) throws IOException {
     final Sorter.DocMap docMap = sorter.sort(reader);
     if (docMap == null) {
      // the reader is already sorted
Index: lucene/misc/src/java/org/apache/lucene/index/sorter/SortingMergePolicy.java
===================================================================
--- lucene/misc/src/java/org/apache/lucene/index/sorter/SortingMergePolicy.java	(revision 0)
+++ lucene/misc/src/java/org/apache/lucene/index/sorter/SortingMergePolicy.java	(working copy)
@@ -0,0 +1,148 @@
+package org.apache.lucene.index.sorter;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.lucene.index.AtomicReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.MergePolicy;
+import org.apache.lucene.index.MultiReader;
+import org.apache.lucene.index.SegmentInfoPerCommit;
+import org.apache.lucene.index.SegmentInfos;
+import org.apache.lucene.index.SlowCompositeReaderWrapper;
+import org.apache.lucene.store.Directory;
+
+/** A {@link MergePolicy} that reorders documents according to a {@link Sorter}
+ *  before merging them. As a consequence, all segments resulting from a merge
+ *  will be sorted while segments resulting from a flush will be in the order
+ *  in which documents have been added.
+ *  <p>Never use this {@link MergePolicy} if you rely on
+ *  {@link IndexWriter#addDocuments(Iterable, org.apache.lucene.analysis.Analyzer)}
+ *  to have sequentially-assigned doc IDs, because this policy will scatter doc IDs.
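+ *  <p>A minimal configuration sketch, assuming documents carry a numeric
+ *  doc-values field named "ndv" to sort on:
+ *  <pre class="prettyprint">
+ *    IndexWriterConfig conf = new IndexWriterConfig(version, analyzer);
+ *    conf.setMergePolicy(new SortingMergePolicy(conf.getMergePolicy(),
+ *        new NumericDocValuesSorter("ndv")));
+ *  </pre>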
+ *  @lucene.experimental */
+public final class SortingMergePolicy extends MergePolicy {
+
+  private class SortingOneMerge extends OneMerge {
+
+    SortingOneMerge(List<SegmentInfoPerCommit> segments) {
+      super(segments);
+    }
+
+    @Override
+    public List<AtomicReader> getMergeReaders() throws IOException {
+      final List<AtomicReader> readers = super.getMergeReaders();
+      switch (readers.size()) {
+        case 0:
+          return readers;
+        case 1:
+          return Collections.singletonList(SortingAtomicReader.wrap(readers.get(0), sorter));
+        default:
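+          // expose the sub-readers as one composite reader so that the merged
+          // segment gets sorted globally rather than per incoming reader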
+          final IndexReader multiReader = new MultiReader(readers.toArray(new AtomicReader[readers.size()]));
+          final AtomicReader atomicReader = SlowCompositeReaderWrapper.wrap(multiReader);
+          final AtomicReader sortingReader = SortingAtomicReader.wrap(atomicReader, sorter);
+          if (sortingReader == atomicReader) {
+            // already sorted, return the original list of readers so that
+            // codec-specific bulk-merge methods can be used
+            return readers;
+          }
+          return Collections.singletonList(sortingReader);
+      }
+    }
+
+  }
+
+  private class SortingMergeSpecification extends MergeSpecification {
+
+    @Override
+    public void add(OneMerge merge) {
+      super.add(new SortingOneMerge(merge.segments));
+    }
+
+    @Override
+    public String segString(Directory dir) {
+      return "SortingMergeSpec(" + super.segString(dir) + ", sorter=" + sorter + ")";
+    }
+
+  }
+
+  private MergeSpecification sortedMergeSpecification(MergeSpecification specification) {
+    if (specification == null) {
+      return null;
+    }
+    MergeSpecification sortingSpec = new SortingMergeSpecification();
+    for (OneMerge merge : specification.merges) {
+      sortingSpec.add(merge);
+    }
+    return sortingSpec;
+  }
+
+  private final MergePolicy in;
+  private final Sorter sorter;
+
+  /** Create a new {@link MergePolicy} that sorts documents with <code>sorter</code>. */
+  public SortingMergePolicy(MergePolicy in, Sorter sorter) {
+    this.in = in;
+    this.sorter = sorter;
+  }
+
+  @Override
+  public MergeSpecification findMerges(MergeTrigger mergeTrigger,
+      SegmentInfos segmentInfos) throws IOException {
+    return sortedMergeSpecification(in.findMerges(mergeTrigger, segmentInfos));
+  }
+
+  @Override
+  public MergeSpecification findForcedMerges(SegmentInfos segmentInfos,
+      int maxSegmentCount, Map<SegmentInfoPerCommit,Boolean> segmentsToMerge)
+      throws IOException {
+    return sortedMergeSpecification(in.findForcedMerges(segmentInfos, maxSegmentCount, segmentsToMerge));
+  }
+
+  @Override
+  public MergeSpecification findForcedDeletesMerges(SegmentInfos segmentInfos)
+      throws IOException {
+    return sortedMergeSpecification(in.findForcedDeletesMerges(segmentInfos));
+  }
+
+  @Override
+  public void close() {
+    in.close();
+  }
+
+  @Override
+  public boolean useCompoundFile(SegmentInfos segments,
+      SegmentInfoPerCommit newSegment) throws IOException {
+    return in.useCompoundFile(segments, newSegment);
+  }
+
+  @Override
+  public void setIndexWriter(IndexWriter writer) {
+    in.setIndexWriter(writer);
+  }
+
+  @Override
+  public String toString() {
+    return "SortingMergePolicy(" + in + ", sorter=" + sorter + ")";
+  }
+
+}

Property changes on: lucene/misc/src/java/org/apache/lucene/index/sorter/SortingMergePolicy.java
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
