Index: contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTermEnum.java
===================================================================
--- contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTermEnum.java	(revision 825052)
+++ contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedTermEnum.java	(working copy)
@@ -77,36 +77,6 @@
   public void close() {
   }
 
-
-  public boolean skipTo(Term target) throws IOException {
-
-    // this method is not known to be used by anything
-    // in lucene for many years now, so there is
-    // very to gain by optimizing this method more,
-
-    if (reader.getIndex().getOrderedTerms().length == 0) {
-      return false;
-    }
-
-    InstantiatedTerm term = reader.getIndex().findTerm(target);
-    if (term != null) {
-      this.term = term;
-      nextTermIndex = term.getTermIndex() + 1;
-      return true;
-    } else {
-      int pos = Arrays.binarySearch(reader.getIndex().getOrderedTerms(), target, InstantiatedTerm.termComparator);
-      if (pos < 0) {
-        pos = -1 - pos;
-      }
-
-      if (pos > reader.getIndex().getOrderedTerms().length) {
-        return false;
-      }
-      this.term = reader.getIndex().getOrderedTerms()[pos];
-      nextTermIndex = pos + 1;
-      return true;
-    }
-  }
 }
Index: contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestEmptyIndex.java
===================================================================
--- contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestEmptyIndex.java	(revision 825052)
+++ contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestEmptyIndex.java	(working copy)
@@ -105,7 +105,6 @@
 
     assertNull(terms.term());
     assertFalse(terms.next());
-    assertFalse(terms.skipTo(new Term("foo", "bar")));
 
   }
Index: contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java
===================================================================
--- contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java	(revision 825052)
+++ contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java	(working copy)
@@ -391,42 +391,6 @@
       }
     }
 
-    // compare term enumeration seeking
-
-    aprioriTermEnum = aprioriReader.terms();
-
-    TermEnum aprioriTermEnumSeeker = aprioriReader.terms();
-    TermEnum testTermEnumSeeker = testReader.terms();
-
-    while (aprioriTermEnum.next()) {
-      if (aprioriTermEnumSeeker.skipTo(aprioriTermEnum.term())) {
-        assertTrue(testTermEnumSeeker.skipTo(aprioriTermEnum.term()));
-        assertEquals(aprioriTermEnumSeeker.term(), testTermEnumSeeker.term());
-      } else {
-        assertFalse(testTermEnumSeeker.skipTo(aprioriTermEnum.term()));
-      }
-    }
-
-    aprioriTermEnum.close();
-    aprioriTermEnumSeeker.close();
-    testTermEnumSeeker.close();
-
-    // skip to non existing terms
-
-    aprioriTermEnumSeeker = aprioriReader.terms();
-    testTermEnumSeeker = testReader.terms();
-
-    aprioriTermEnum = aprioriReader.terms();
-    aprioriTermEnum.next();
-    Term nonExistingTerm = new Term(aprioriTermEnum.term().field(), "bzzzzoo993djdj380sdf");
-    aprioriTermEnum.close();
-
-    assertEquals(aprioriTermEnumSeeker.skipTo(nonExistingTerm), testTermEnumSeeker.skipTo(nonExistingTerm));
-    assertEquals(aprioriTermEnumSeeker.term(), testTermEnumSeeker.term());
-
-    aprioriTermEnumSeeker.close();
-    testTermEnumSeeker.close();
-
     // compare term vectors and position vectors
 
     for (int documentNumber = 0; documentNumber < aprioriReader.numDocs(); documentNumber++) {
Index: contrib/misc/src/java/org/apache/lucene/index/TermVectorAccessor.java
===================================================================
--- contrib/misc/src/java/org/apache/lucene/index/TermVectorAccessor.java	(revision 825052)
+++ contrib/misc/src/java/org/apache/lucene/index/TermVectorAccessor.java	(working copy)
@@ -99,32 +99,29 @@
       positions.clear();
     }
 
-    TermEnum termEnum = indexReader.terms();
-    if (termEnum.skipTo(new Term(field, ""))) {
+    TermEnum termEnum = indexReader.terms(new Term(field, ""));
+    while (termEnum.term() != null && termEnum.term().field() == field) {
+      TermPositions termPositions = indexReader.termPositions(termEnum.term());
+      if (termPositions.skipTo(documentNumber)) {
 
-      while (termEnum.term().field() == field) {
-        TermPositions termPositions = indexReader.termPositions(termEnum.term());
-        if (termPositions.skipTo(documentNumber)) {
+        frequencies.add(Integer.valueOf(termPositions.freq()));
+        tokens.add(termEnum.term().text());
 
-          frequencies.add(Integer.valueOf(termPositions.freq()));
-          tokens.add(termEnum.term().text());
-
-          if (!mapper.isIgnoringPositions()) {
-            int[] positions = new int[termPositions.freq()];
-            for (int i = 0; i < positions.length; i++) {
-              positions[i] = termPositions.nextPosition();
-            }
-            this.positions.add(positions);
-          } else {
-            positions.add(null);
+        if (!mapper.isIgnoringPositions()) {
+          int[] positions = new int[termPositions.freq()];
+          for (int i = 0; i < positions.length; i++) {
+            positions[i] = termPositions.nextPosition();
           }
+          this.positions.add(positions);
+        } else {
+          positions.add(null);
         }
-        termPositions.close();
-        if (!termEnum.next()) {
-          break;
-        }
       }
+      termPositions.close();
+      if (!termEnum.next()) {
+        break;
+      }
     }
 
     mapper.setDocumentNumber(documentNumber);
     mapper.setExpectations(field, tokens.size(), false, !mapper.isIgnoringPositions());
Index: contrib/misc/src/java/org/apache/lucene/misc/IndexMergeTool.java
===================================================================
--- contrib/misc/src/java/org/apache/lucene/misc/IndexMergeTool.java	(revision 825052)
+++ contrib/misc/src/java/org/apache/lucene/misc/IndexMergeTool.java	(working copy)
@@ -45,7 +45,7 @@
     }
 
     System.out.println("Merging...");
-    writer.addIndexes(indexes);
+    writer.addIndexesNoOptimize(indexes);
 
     System.out.println("Optimizing...");
     writer.optimize();
Index: src/java/org/apache/lucene/index/CheckIndex.java
===================================================================
--- src/java/org/apache/lucene/index/CheckIndex.java	(revision 825052)
+++ src/java/org/apache/lucene/index/CheckIndex.java	(working copy)
@@ -48,11 +48,6 @@
  */
 public class CheckIndex {
 
-  /** Default PrintStream for all CheckIndex instances.
-   *  @deprecated Use {@link #setInfoStream} per instance,
-   *  instead. */
-  public static PrintStream out = null;
-
   private PrintStream infoStream;
   private Directory dir;
 
@@ -257,7 +252,7 @@
   /** Create a new CheckIndex on the directory. */
   public CheckIndex(Directory dir) {
     this.dir = dir;
-    infoStream = out;
+    infoStream = null;
   }
 
   /** Set infoStream where messages should go.  If null, no
Index: src/java/org/apache/lucene/index/DocInverterPerField.java
===================================================================
--- src/java/org/apache/lucene/index/DocInverterPerField.java	(revision 825052)
+++ src/java/org/apache/lucene/index/DocInverterPerField.java	(working copy)
@@ -129,9 +129,6 @@
 
     final int startLength = fieldState.length;
 
-    // deprecated
-    final boolean allowMinus1Position = docState.allowMinus1Position;
-
     try {
       int offsetEnd = fieldState.offset-1;
 
@@ -157,7 +154,7 @@
           final int posIncr = posIncrAttribute.getPositionIncrement();
           fieldState.position += posIncr;
-          if (allowMinus1Position || fieldState.position > 0) {
+          if (fieldState.position > 0) {
             fieldState.position--;
           }
 
Index: src/java/org/apache/lucene/index/DocumentsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/DocumentsWriter.java	(revision 825052)
+++ src/java/org/apache/lucene/index/DocumentsWriter.java	(working copy)
@@ -150,9 +150,6 @@
     Document doc;
     String maxTermPrefix;
 
-    // deprecated
-    boolean allowMinus1Position;
-
     // Only called by asserts
     public boolean testPoint(String name) {
       return docWriter.writer.testPoint(name);
@@ -299,11 +296,6 @@
       threadStates[i].docState.similarity = similarity;
   }
 
-  synchronized void setAllowMinus1Position() {
-    for(int i=0;i<threadStates.length;i++)
-      threadStates[i].docState.allowMinus1Position = true;
-  }
-
Index: src/java/org/apache/lucene/index/IndexReader.java
===================================================================
--- src/java/org/apache/lucene/index/IndexReader.java	(revision 825052)
+++ src/java/org/apache/lucene/index/IndexReader.java	(working copy)
-  /** <p>For IndexReader implementations that use
-   * TermInfosReader to read terms, this sets the
-   * indexDivisor to subsample the number of indexed terms
-   * loaded into memory.  This has the same effect as {@link
-   * IndexWriter#setTermIndexInterval} except that setting
-   * must be done at indexing time while this setting can be
-   * set per reader.  When set to N, then one in every
-   * N*termIndexInterval terms in the index is loaded into
-   * memory.  By setting this to a value > 1 you can reduce
-   * memory usage, at the expense of higher latency when
-   * loading a TermInfo.  The default value is 1.<p>
-   *
-   * <b>NOTE:</b> you must call this before the term
-   * index is loaded.  If the index is already loaded,
-   * an IllegalStateException is thrown.
-   * @throws IllegalStateException if the term index has already been loaded into memory
-   * @deprecated Please use {@link IndexReader#open(Directory, IndexDeletionPolicy, boolean, int)} to specify the required TermInfos index divisor instead.
-   */
-  public void setTermInfosIndexDivisor(int indexDivisor) throws IllegalStateException {
-    throw new UnsupportedOperationException("Please pass termInfosIndexDivisor up-front when opening IndexReader");
-  }
-
-  /** <p>For IndexReader implementations that use
-   * TermInfosReader to read terms, this returns the
-   * current indexDivisor as specified when the reader was
-   * opened.
-   */
-  public int getTermInfosIndexDivisor() {
-    throw new UnsupportedOperationException("This reader does not support this method.");
-  }
-
   /**
    * Check whether this IndexReader is still using the
    * current (i.e., most recently committed) version of the
Index: src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexWriter.java	(revision 825052)
+++ src/java/org/apache/lucene/index/IndexWriter.java	(working copy)
@@ -181,12 +181,6 @@
   public static final String WRITE_LOCK_NAME = "write.lock";
 
   /**
-   * @deprecated
-   * @see LogMergePolicy#DEFAULT_MERGE_FACTOR
-   */
-  public final static int DEFAULT_MERGE_FACTOR = LogMergePolicy.DEFAULT_MERGE_FACTOR;
-
-  /**
    * Value to denote a flush trigger is disabled
    */
   public final static int DISABLE_AUTO_FLUSH = -1;
@@ -210,12 +204,6 @@
   public final static int DEFAULT_MAX_BUFFERED_DELETE_TERMS = DISABLE_AUTO_FLUSH;
 
   /**
-   * @deprecated
-   * @see LogDocMergePolicy#DEFAULT_MAX_MERGE_DOCS
-   */
-  public final static int DEFAULT_MAX_MERGE_DOCS = LogDocMergePolicy.DEFAULT_MAX_MERGE_DOCS;
-
-  /**
    * Default value is 10,000. Change using {@link #setMaxFieldLength(int)}.
    */
   public final static int DEFAULT_MAX_FIELD_LENGTH = 10000;
@@ -1790,16 +1778,6 @@
     return analyzer;
   }
 
-  /** Returns the number of documents currently in this
-   *  index, not counting deletions.
-   *  @deprecated Please use {@link #maxDoc()} (same as this
-   *  method) or {@link #numDocs()} (also takes deletions
-   *  into account), instead. */
-  public synchronized int docCount() {
-    ensureOpen();
-    return maxDoc();
-  }
-
   /** Returns total number of docs in this index, including
    *  docs not yet flushed (still in the RAM buffer),
    *  not counting deletions.
@@ -1994,14 +1972,14 @@
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error
    */
-  public void deleteDocuments(Term[] terms) throws CorruptIndexException, IOException {
+  public void deleteDocuments(Term... terms) throws CorruptIndexException, IOException {
     ensureOpen();
     try {
       boolean doFlush = docWriter.bufferDeleteTerms(terms);
       if (doFlush)
         flush(true, false, false);
     } catch (OutOfMemoryError oom) {
-      handleOOM(oom, "deleteDocuments(Term[])");
+      handleOOM(oom, "deleteDocuments(Term..)");
     }
   }
@@ -2036,7 +2014,7 @@
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error
    */
-  public void deleteDocuments(Query[] queries) throws CorruptIndexException, IOException {
+  public void deleteDocuments(Query... queries) throws CorruptIndexException, IOException {
     ensureOpen();
     boolean doFlush = docWriter.bufferDeleteQueries(queries);
     if (doFlush)
@@ -2693,13 +2671,6 @@
   }
 
   /**
-   * @deprecated Please use {@link #rollback} instead.
-   */
-  public void abort() throws IOException {
-    rollback();
-  }
-
-  /**
    * Close the IndexWriter without committing
    * any changes that have occurred since the last commit
    * (or since it was opened, if commit hasn't been called).
@@ -2946,84 +2917,12 @@
       releaseRead();
   }
 
-  /** Merges all segments from an array of indexes into this index.
-   *
-   * <p><b>NOTE:</b> if this method hits an OutOfMemoryError
-   * you should immediately close the writer.  See above for details.<p>
-   *
-   * @deprecated Use {@link #addIndexesNoOptimize} instead,
-   * then separately call {@link #optimize} afterwards if
-   * you need to.
-   *
-   * @throws CorruptIndexException if the index is corrupt
-   * @throws IOException if there is a low-level IO error
-   */
-  public void addIndexes(Directory[] dirs)
-    throws CorruptIndexException, IOException {
-
-    ensureOpen();
-
-    noDupDirs(dirs);
-
-    // Do not allow add docs or deletes while we are running:
-    docWriter.pauseAllThreads();
-
-    try {
-
-      if (infoStream != null)
-        message("flush at addIndexes");
-      flush(true, false, true);
-
-      boolean success = false;
-
-      startTransaction(false);
-
-      try {
-
-        int docCount = 0;
-        synchronized(this) {
-          ensureOpen();
-          for (int i = 0; i < dirs.length; i++) {
-            SegmentInfos sis = new SegmentInfos(); // read infos from dir
-            sis.read(dirs[i]);
-            for (int j = 0; j < sis.size(); j++) {
-              final SegmentInfo info = sis.info(j);
-              docCount += info.docCount;
-              assert !segmentInfos.contains(info);
-              segmentInfos.add(info); // add each info
-            }
-          }
-        }
-
-        // Notify DocumentsWriter that the flushed count just increased
-        docWriter.updateFlushedDocCount(docCount);
-
-        optimize();
-
-        success = true;
-      } finally {
-        if (success) {
-          commitTransaction();
-        } else {
-          rollbackTransaction();
-        }
-      }
-    } catch (OutOfMemoryError oom) {
-      handleOOM(oom, "addIndexes(Directory[])");
-    } finally {
-      if (docWriter != null) {
-        docWriter.resumeAllThreads();
-      }
-    }
-  }
-
   private synchronized void resetMergeExceptions() {
     mergeExceptions = new ArrayList();
     mergeGen++;
   }
 
-  private void noDupDirs(Directory[] dirs) {
+  private void noDupDirs(Directory... dirs) {
     HashSet dups = new HashSet();
     for(int i=0;i<dirs.length;i++) {
 
-   * <p>See {@link #addIndexesNoOptimize(Directory[])} for
+   * <p>See {@link #addIndexesNoOptimize} for
    * details on transactional semantics, temporary free
    * space required in the Directory, and non-CFS segments
    * on an Exception.<p>
 
@@ -3259,7 +3158,7 @@
    * @throws CorruptIndexException if the index is corrupt
    * @throws IOException if there is a low-level IO error
    */
-  public void addIndexes(IndexReader[] readers)
+  public void addIndexes(IndexReader... readers)
     throws CorruptIndexException, IOException {
 
     ensureOpen();
@@ -3326,7 +3225,7 @@
         segmentInfos.clear();                      // pop old infos & add new
         info = new SegmentInfo(mergedName, docCount, directory, false, true,
                                -1, null, false, merger.hasProx());
-        setDiagnostics(info, "addIndexes(IndexReader[])");
+        setDiagnostics(info, "addIndexes(IndexReader...)");
         segmentInfos.add(info);
       }
 
@@ -3395,7 +3294,7 @@
       }
     } catch (OutOfMemoryError oom) {
-      handleOOM(oom, "addIndexes(IndexReader[])");
+      handleOOM(oom, "addIndexes(IndexReader...)");
     } finally {
       if (docWriter != null) {
         docWriter.resumeAllThreads();
@@ -4930,22 +4829,6 @@
     throw oom;
   }
 
-  // deprecated
-  private boolean allowMinus1Position;
-
-  /** Deprecated: emulates IndexWriter's buggy behavior when
-   *  first token(s) have positionIncrement==0 (ie, prior to
-   *  fixing LUCENE-1542) */
-  public void setAllowMinus1Position() {
-    allowMinus1Position = true;
-    docWriter.setAllowMinus1Position();
-  }
-
-  // deprecated
-  boolean getAllowMinus1Position() {
-    return allowMinus1Position;
-  }
-
   // Used only by assert for testing.  Current points:
   //   startDoFlush
   //   startCommitMerge
Index: src/java/org/apache/lucene/index/MergePolicy.java
===================================================================
--- src/java/org/apache/lucene/index/MergePolicy.java	(revision 825052)
+++ src/java/org/apache/lucene/index/MergePolicy.java	(working copy)
@@ -172,20 +172,12 @@
    *  executing a merge. */
   public static class MergeException extends RuntimeException {
     private Directory dir;
-    /** @deprecated
-     *  Use {@link #MergePolicy.MergeException(String,Directory)} instead */
-    public MergeException(String message) {
-      super(message);
-    }
+
     public MergeException(String message, Directory dir) {
       super(message);
       this.dir = dir;
     }
-    /** @deprecated
-     *  Use {@link #MergePolicy.MergeException(Throwable,Directory)} instead */
-    public MergeException(Throwable exc) {
-      super(exc);
-    }
+
     public MergeException(Throwable exc, Directory dir) {
       super(exc);
       this.dir = dir;
Index: src/java/org/apache/lucene/index/MultiReader.java
===================================================================
--- src/java/org/apache/lucene/index/MultiReader.java	(revision 825052)
+++ src/java/org/apache/lucene/index/MultiReader.java	(working copy)
@@ -49,7 +49,7 @@
    * @param subReaders set of (sub)readers
    * @throws IOException
    */
-  public MultiReader(IndexReader[] subReaders) {
+  public MultiReader(IndexReader... subReaders) {
     initialize(subReaders, true);
   }
 
@@ -352,11 +352,6 @@
     return new MultiTermPositions(this, subReaders, starts);
   }
 
-  /** @deprecated */
-  protected void doCommit() throws IOException {
-    doCommit(null);
-  }
-
   protected void doCommit(Map commitUserData) throws IOException {
     for (int i = 0; i < subReaders.length; i++)
       subReaders[i].commit(commitUserData);
Index: src/java/org/apache/lucene/index/ParallelReader.java
===================================================================
--- src/java/org/apache/lucene/index/ParallelReader.java	(revision 825052)
+++ src/java/org/apache/lucene/index/ParallelReader.java	(working copy)
@@ -435,11 +435,6 @@
     return (IndexReader[]) readers.toArray(new IndexReader[readers.size()]);
   }
 
-  /** @deprecated */
-  protected void doCommit() throws IOException {
-    doCommit(null);
-  }
-
   protected void doCommit(Map commitUserData) throws IOException {
     for (int i = 0; i < readers.size(); i++)
       ((IndexReader)readers.get(i)).commit(commitUserData);
Index: src/java/org/apache/lucene/index/SegmentReader.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentReader.java	(revision 825052)
+++ src/java/org/apache/lucene/index/SegmentReader.java	(working copy)
@@ -792,11 +792,6 @@
     return clone;
   }
 
-  /** @deprecated */
-  protected void doCommit() throws IOException {
-    doCommit(null);
-  }
-
   protected void doCommit(Map commitUserData) throws IOException {
     if (hasChanges) {
       if (deletedDocsDirty) {               // re-write deleted
Index: src/java/org/apache/lucene/index/TermEnum.java
===================================================================
--- src/java/org/apache/lucene/index/TermEnum.java	(revision 825052)
+++ src/java/org/apache/lucene/index/TermEnum.java	(working copy)
@@ -36,29 +36,4 @@
 
   /** Closes the enumeration to further activity, freeing resources. */
   public abstract void close() throws IOException;
-
-  /** Skips terms to the first beyond the current whose value is
-   * greater or equal to <i>target</i>. <p>Returns true iff there is such
-   * an entry.  <p>Behaves as if written: <pre>
-   *   public boolean skipTo(Term target) {
-   *     do {
-   *       if (!next())
-   * 	     return false;
-   *     } while (target > term());
-   *     return true;
-   *   }
-   * </pre>
-   * Some implementations *could* be considerably more efficient than a linear scan.
-   * Check the implementation to be sure.
-   * @deprecated This method is not performant and will be removed in Lucene 3.0.
-   * Use {@link IndexReader#terms(Term)} to create a new TermEnum positioned at a
-   * given term.
-   */
-  public boolean skipTo(Term target) throws IOException {
-    do {
-      if (!next())
-        return false;
-    } while (target.compareTo(term()) > 0);
-    return true;
-  }
 }
Index: src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java
===================================================================
--- src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java	(revision 825052)
+++ src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java	(working copy)
@@ -43,27 +43,27 @@
     writer = newWriter(dir, true);
     // add 100 documents
     addDocs(writer, 100);
-    assertEquals(100, writer.docCount());
+    assertEquals(100, writer.maxDoc());
     writer.close();
 
     writer = newWriter(aux, true);
     writer.setUseCompoundFile(false); // use one without a compound file
     // add 40 documents in separate files
     addDocs(writer, 40);
-    assertEquals(40, writer.docCount());
+    assertEquals(40, writer.maxDoc());
     writer.close();
 
     writer = newWriter(aux2, true);
     // add 40 documents in compound files
     addDocs2(writer, 50);
-    assertEquals(50, writer.docCount());
+    assertEquals(50, writer.maxDoc());
     writer.close();
 
     // test doc count before segments are merged
     writer = newWriter(dir, false);
-    assertEquals(100, writer.docCount());
+    assertEquals(100, writer.maxDoc());
     writer.addIndexesNoOptimize(new Directory[] { aux, aux2 });
-    assertEquals(190, writer.docCount());
+    assertEquals(190, writer.maxDoc());
     writer.close();
 
     // make sure the old index is correct
@@ -77,14 +77,14 @@
     writer = newWriter(aux3, true);
     // add 40 documents
     addDocs(writer, 40);
-    assertEquals(40, writer.docCount());
+    assertEquals(40, writer.maxDoc());
     writer.close();
 
     // test doc count before segments are merged/index is optimized
     writer = newWriter(dir, false);
-    assertEquals(190, writer.docCount());
+    assertEquals(190, writer.maxDoc());
     writer.addIndexesNoOptimize(new Directory[] { aux3 });
-    assertEquals(230, writer.docCount());
+    assertEquals(230, writer.maxDoc());
     writer.close();
 
     // make sure the new index is correct
@@ -113,9 +113,9 @@
     writer.close();
 
     writer = newWriter(dir, false);
-    assertEquals(230, writer.docCount());
+    assertEquals(230, writer.maxDoc());
     writer.addIndexesNoOptimize(new Directory[] { aux4 });
-    assertEquals(231, writer.docCount());
+    assertEquals(231, writer.maxDoc());
     writer.close();
 
     verifyNumDocs(dir, 231);
@@ -250,7 +250,7 @@
     writer = newWriter(dir, true);
     // add 100 documents
     addDocs(writer, 100);
-    assertEquals(100, writer.docCount());
+    assertEquals(100, writer.maxDoc());
     writer.close();
 
     writer = newWriter(aux, true);
@@ -272,7 +272,7 @@
       assertTrue(false);
     }
     catch (IllegalArgumentException e) {
-      assertEquals(100, writer.docCount());
+      assertEquals(100, writer.maxDoc());
     }
     writer.close();
 
@@ -297,7 +297,7 @@
     addDocs(writer, 10);
 
     writer.addIndexesNoOptimize(new Directory[] { aux });
-    assertEquals(1040, writer.docCount());
+    assertEquals(1040, writer.maxDoc());
     assertEquals(2, writer.getSegmentCount());
     assertEquals(1000, writer.getDocCount(0));
     writer.close();
@@ -321,7 +321,7 @@
     addDocs(writer, 2);
 
     writer.addIndexesNoOptimize(new Directory[] { aux });
-    assertEquals(1032, writer.docCount());
+    assertEquals(1032, writer.maxDoc());
     assertEquals(2, writer.getSegmentCount());
     assertEquals(1000, writer.getDocCount(0));
     writer.close();
@@ -344,7 +344,7 @@
     writer.setMergeFactor(4);
 
     writer.addIndexesNoOptimize(new Directory[] { aux, new RAMDirectory(aux) });
-    assertEquals(1060, writer.docCount());
+    assertEquals(1060, writer.maxDoc());
     assertEquals(1000, writer.getDocCount(0));
     writer.close();
 
@@ -373,7 +373,7 @@
     writer.setMergeFactor(4);
 
     writer.addIndexesNoOptimize(new Directory[] { aux, new RAMDirectory(aux) });
-    assertEquals(1020, writer.docCount());
+    assertEquals(1020, writer.maxDoc());
     assertEquals(1000, writer.getDocCount(0));
     writer.close();
 
@@ -395,7 +395,7 @@
     writer.setMaxBufferedDocs(100);
     writer.setMergeFactor(10);
     writer.addIndexesNoOptimize(new Directory[] { aux });
-    assertEquals(30, writer.docCount());
+    assertEquals(30, writer.maxDoc());
     assertEquals(3, writer.getSegmentCount());
     writer.close();
 
@@ -418,7 +418,7 @@
     writer.setMergeFactor(4);
 
     writer.addIndexesNoOptimize(new Directory[] { aux, aux2 });
-    assertEquals(1025, writer.docCount());
+    assertEquals(1025, writer.maxDoc());
     assertEquals(1000, writer.getDocCount(0));
     writer.close();
 
@@ -476,7 +476,7 @@
     writer.setMaxBufferedDocs(1000);
     // add 1000 documents in 1 segment
    addDocs(writer, 1000);
-    assertEquals(1000, writer.docCount());
+    assertEquals(1000, writer.maxDoc());
     assertEquals(1, writer.getSegmentCount());
     writer.close();
 
@@ -493,7 +493,7 @@
       writer.setMaxBufferedDocs(100);
       writer.setMergeFactor(10);
     }
-    assertEquals(30, writer.docCount());
+    assertEquals(30, writer.maxDoc());
     assertEquals(3, writer.getSegmentCount());
     writer.close();
   }
Index: src/test/org/apache/lucene/index/TestCrash.java
===================================================================
--- src/test/org/apache/lucene/index/TestCrash.java	(revision 825052)
+++ src/test/org/apache/lucene/index/TestCrash.java	(working copy)
@@ -82,7 +82,7 @@
     MockRAMDirectory dir = (MockRAMDirectory) writer.getDirectory();
     writer.close();
     writer = initIndex(dir);
-    assertEquals(314, writer.docCount());
+    assertEquals(314, writer.maxDoc());
 
     crash(writer);
 
     /*
Index: src/test/org/apache/lucene/index/TestIndexReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexReader.java	(revision 825052)
+++ src/test/org/apache/lucene/index/TestIndexReader.java	(working copy)
@@ -1762,7 +1762,7 @@
     }
 
     assertFalse(((SegmentReader) r.getSequentialSubReaders()[0]).termsIndexLoaded());
-    assertEquals(-1, r.getTermInfosIndexDivisor());
+    assertEquals(-1, ((SegmentReader) r.getSequentialSubReaders()[0]).getTermInfosIndexDivisor());
     writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
     writer.addDocument(doc);
     writer.close();
Index: src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java	(revision 825052)
+++ src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java	(working copy)
@@ -119,7 +119,8 @@
         IndexWriter.MaxFieldLength.LIMITED);
     iw.setMaxBufferedDocs(5);
     iw.setMergeFactor(3);
-    iw.addIndexes(new Directory[] { dir1, dir2 });
+    iw.addIndexesNoOptimize(new Directory[] { dir1, dir2 });
+    iw.optimize();
     iw.close();
 
     norms1.addAll(norms);
Index: src/test/org/apache/lucene/index/TestIndexWriter.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriter.java	(revision 825052)
+++ src/test/org/apache/lucene/index/TestIndexWriter.java	(working copy)
@@ -96,7 +96,7 @@
         for (i = 0; i < 100; i++) {
           addDoc(writer);
         }
-        assertEquals(100, writer.docCount());
+        assertEquals(100, writer.maxDoc());
         writer.close();
 
         // delete 40 documents
@@ -108,7 +108,7 @@
 
         // test doc count before segments are merged/index is optimized
         writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
-        assertEquals(100, writer.docCount());
+        assertEquals(100, writer.maxDoc());
         writer.close();
 
         reader = IndexReader.open(dir, true);
@@ -156,7 +156,7 @@
 
     /*
       Test: make sure when we run out of disk space or hit
-      random IOExceptions in any of the addIndexes(*) calls
+      random IOExceptions in any of the addIndexesNoOptimize(*) calls
      that 1) index is not corrupt (searcher can open/search
      it) and 2) transactional semantics are followed:
      either all or none of the incoming documents were in
@@ -171,7 +171,7 @@
    boolean debug = false;
 
    // Build up a bunch of dirs that have indexes which we
-    // will then merge together by calling addIndexes(*):
+    // will then merge together by calling addIndexesNoOptimize(*):
    Directory[] dirs = new Directory[NUM_DIR];
    long inputDiskUsage = 0;
    for(int i=0;i<NUM_DIR;i++) {