Index: src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java
===================================================================
--- src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java (revision 825030)
+++ src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java (working copy)
@@ -43,27 +43,27 @@
     writer = newWriter(dir, true);
     // add 100 documents
     addDocs(writer, 100);
-    assertEquals(100, writer.docCount());
+    assertEquals(100, writer.maxDoc());
     writer.close();

     writer = newWriter(aux, true);
     writer.setUseCompoundFile(false); // use one without a compound file
     // add 40 documents in separate files
     addDocs(writer, 40);
-    assertEquals(40, writer.docCount());
+    assertEquals(40, writer.maxDoc());
     writer.close();

     writer = newWriter(aux2, true);
     // add 40 documents in compound files
     addDocs2(writer, 50);
-    assertEquals(50, writer.docCount());
+    assertEquals(50, writer.maxDoc());
     writer.close();

     // test doc count before segments are merged
     writer = newWriter(dir, false);
-    assertEquals(100, writer.docCount());
+    assertEquals(100, writer.maxDoc());
     writer.addIndexesNoOptimize(new Directory[] { aux, aux2 });
-    assertEquals(190, writer.docCount());
+    assertEquals(190, writer.maxDoc());
     writer.close();

     // make sure the old index is correct
@@ -77,14 +77,14 @@
     writer = newWriter(aux3, true);
     // add 40 documents
     addDocs(writer, 40);
-    assertEquals(40, writer.docCount());
+    assertEquals(40, writer.maxDoc());
     writer.close();

     // test doc count before segments are merged/index is optimized
     writer = newWriter(dir, false);
-    assertEquals(190, writer.docCount());
+    assertEquals(190, writer.maxDoc());
     writer.addIndexesNoOptimize(new Directory[] { aux3 });
-    assertEquals(230, writer.docCount());
+    assertEquals(230, writer.maxDoc());
     writer.close();

     // make sure the new index is correct
@@ -113,9 +113,9 @@
     writer.close();

     writer = newWriter(dir, false);
-    assertEquals(230, writer.docCount());
+    assertEquals(230, writer.maxDoc());
     writer.addIndexesNoOptimize(new Directory[] { aux4 });
-    assertEquals(231, writer.docCount());
+    assertEquals(231, writer.maxDoc());
     writer.close();

     verifyNumDocs(dir, 231);
@@ -250,7 +250,7 @@
     writer = newWriter(dir, true);
     // add 100 documents
     addDocs(writer, 100);
-    assertEquals(100, writer.docCount());
+    assertEquals(100, writer.maxDoc());
     writer.close();

     writer = newWriter(aux, true);
@@ -272,7 +272,7 @@
       assertTrue(false);
     }
     catch (IllegalArgumentException e) {
-      assertEquals(100, writer.docCount());
+      assertEquals(100, writer.maxDoc());
     }
     writer.close();

@@ -297,7 +297,7 @@
     addDocs(writer, 10);

     writer.addIndexesNoOptimize(new Directory[] { aux });
-    assertEquals(1040, writer.docCount());
+    assertEquals(1040, writer.maxDoc());
     assertEquals(2, writer.getSegmentCount());
     assertEquals(1000, writer.getDocCount(0));
     writer.close();
@@ -321,7 +321,7 @@
     addDocs(writer, 2);

     writer.addIndexesNoOptimize(new Directory[] { aux });
-    assertEquals(1032, writer.docCount());
+    assertEquals(1032, writer.maxDoc());
     assertEquals(2, writer.getSegmentCount());
     assertEquals(1000, writer.getDocCount(0));
     writer.close();
@@ -344,7 +344,7 @@
     writer.setMergeFactor(4);

     writer.addIndexesNoOptimize(new Directory[] { aux, new RAMDirectory(aux) });
-    assertEquals(1060, writer.docCount());
+    assertEquals(1060, writer.maxDoc());
     assertEquals(1000, writer.getDocCount(0));
     writer.close();

@@ -373,7 +373,7 @@
     writer.setMergeFactor(4);

     writer.addIndexesNoOptimize(new Directory[] { aux, new RAMDirectory(aux) });
-    assertEquals(1020, writer.docCount());
+    assertEquals(1020, writer.maxDoc());
     assertEquals(1000, writer.getDocCount(0));
     writer.close();

@@ -395,7 +395,7 @@
     writer.setMaxBufferedDocs(100);
     writer.setMergeFactor(10);
     writer.addIndexesNoOptimize(new Directory[] { aux });
-    assertEquals(30, writer.docCount());
+    assertEquals(30, writer.maxDoc());
     assertEquals(3, writer.getSegmentCount());
     writer.close();

@@ -418,7 +418,7 @@
     writer.setMergeFactor(4);

     writer.addIndexesNoOptimize(new Directory[] { aux, aux2 });
-    assertEquals(1025, writer.docCount());
+    assertEquals(1025, writer.maxDoc());
     assertEquals(1000, writer.getDocCount(0));
     writer.close();

@@ -476,7 +476,7 @@
     writer.setMaxBufferedDocs(1000);
     // add 1000 documents in 1 segment
     addDocs(writer, 1000);
-    assertEquals(1000, writer.docCount());
+    assertEquals(1000, writer.maxDoc());
     assertEquals(1, writer.getSegmentCount());
     writer.close();

@@ -493,7 +493,7 @@
       writer.setMaxBufferedDocs(100);
       writer.setMergeFactor(10);
     }
-    assertEquals(30, writer.docCount());
+    assertEquals(30, writer.maxDoc());
     assertEquals(3, writer.getSegmentCount());
     writer.close();
   }
Index: src/test/org/apache/lucene/index/TestCrash.java
===================================================================
--- src/test/org/apache/lucene/index/TestCrash.java (revision 825030)
+++ src/test/org/apache/lucene/index/TestCrash.java (working copy)
@@ -82,7 +82,7 @@
     MockRAMDirectory dir = (MockRAMDirectory) writer.getDirectory();
     writer.close();
     writer = initIndex(dir);
-    assertEquals(314, writer.docCount());
+    assertEquals(314, writer.maxDoc());
     crash(writer);

     /*
Index: src/test/org/apache/lucene/index/TestIndexReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexReader.java (revision 825030)
+++ src/test/org/apache/lucene/index/TestIndexReader.java (working copy)
@@ -1762,7 +1762,7 @@
     }

     assertFalse(((SegmentReader) r.getSequentialSubReaders()[0]).termsIndexLoaded());
-    assertEquals(-1, r.getTermInfosIndexDivisor());
+    assertEquals(-1, ((SegmentReader) r.getSequentialSubReaders()[0]).getTermInfosIndexDivisor());
     writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
     writer.addDocument(doc);
     writer.close();
Index: src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java (revision 825030)
+++ src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java (working copy)
@@ -119,7 +119,8 @@
         IndexWriter.MaxFieldLength.LIMITED);
     iw.setMaxBufferedDocs(5);
     iw.setMergeFactor(3);
-    iw.addIndexes(new Directory[] { dir1, dir2 });
+    iw.addIndexesNoOptimize(new Directory[] { dir1, dir2 });
+    iw.optimize();
     iw.close();

     norms1.addAll(norms);
Index: src/test/org/apache/lucene/index/TestIndexWriter.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriter.java (revision 825030)
+++ src/test/org/apache/lucene/index/TestIndexWriter.java (working copy)
@@ -100,7 +100,7 @@
       for (i = 0; i < 100; i++) {
         addDoc(writer);
       }
-      assertEquals(100, writer.docCount());
+      assertEquals(100, writer.maxDoc());
       writer.close();

       // delete 40 documents
@@ -112,7 +112,7 @@

       // test doc count before segments are merged/index is optimized
       writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
-      assertEquals(100, writer.docCount());
+      assertEquals(100, writer.maxDoc());
       writer.close();

       reader = IndexReader.open(dir, true);
@@ -160,7 +160,7 @@

     /*
      Test: make sure when we run out of disk space or hit
-      random IOExceptions in any of the addIndexes(*) calls
+      random IOExceptions in any of the addIndexesNoOptimize(*) calls
      that 1) index is not corrupt (searcher can open/search
      it) and 2) transactional semantics are followed:
      either all or none of the incoming documents were in
@@ -175,7 +175,7 @@
     boolean debug = false;

     // Build up a bunch of dirs that have indexes which we
-    // will then merge together by calling addIndexes(*):
+    // will then merge together by calling addIndexesNoOptimize(*):
     Directory[] dirs = new Directory[NUM_DIR];
     long inputDiskUsage = 0;
     for(int i=0;i<NUM_DIR;i++) {
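
A note on the docCount() -> maxDoc() substitution above: IndexWriter.docCount() is deprecated in favor of maxDoc(), which returns the same value, namely the total number of documents in the index including buffered and deleted ones; IndexWriter.numDocs() instead subtracts deletions that have been flushed, so it is not a drop-in replacement and the asserted totals stay unchanged. Below is a minimal, self-contained sketch of the difference, not part of the patch; the class name MaxDocVsNumDocs and the tiny three-document index are invented for illustration, against the 2.9-era API this patch targets.

    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.store.RAMDirectory;

    // Hypothetical example class, not part of the patch.
    public class MaxDocVsNumDocs {
      public static void main(String[] args) throws Exception {
        RAMDirectory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
            IndexWriter.MaxFieldLength.LIMITED);

        // Index three trivial documents keyed by an "id" field.
        for (int i = 0; i < 3; i++) {
          Document doc = new Document();
          doc.add(new Field("id", Integer.toString(i),
              Field.Store.YES, Field.Index.NOT_ANALYZED));
          writer.addDocument(doc);
        }

        // Delete one document, then commit so the buffered
        // deletion is applied and visible to numDocs().
        writer.deleteDocuments(new Term("id", "1"));
        writer.commit();

        System.out.println("maxDoc = " + writer.maxDoc());   // 3: deleted doc still occupies a slot
        System.out.println("numDocs = " + writer.numDocs()); // 2: deletion subtracted

        writer.close();
      }
    }

The deleted document keeps its slot until a merge reclaims it, so maxDoc() reports 3 while numDocs() reports 2; the tests above assert totals of added documents without intervening deletions, which is why swapping docCount() for maxDoc() is behavior-preserving.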