Index: lucene/src/java/org/apache/lucene/index/BufferedDeletes.java
--- lucene/src/java/org/apache/lucene/index/BufferedDeletes.java	Sat Jan 29 14:51:53 2011 -0500
+++ lucene/src/java/org/apache/lucene/index/BufferedDeletes.java	Sat Jan 29 19:25:42 2011 -0500
@@ -18,21 +18,23 @@
  */

 import java.util.ArrayList;
+import java.util.Iterator;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.SortedMap;
 import java.util.TreeMap;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicInteger;

 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.RamUsageEstimator;
+import org.apache.lucene.index.BufferedDeletesStream.QueryAndLimit;

-/** Holds buffered deletes, by docID, term or query for a
- * single segment. This is used to hold buffered pending
- * deletes against the to-be-flushed segment as well as
- * per-segment deletes for each segment in the index. */
+/* Holds buffered deletes, by docID, term or query for a
+ * single segment. This is used to hold buffered pending
+ * deletes against the to-be-flushed segment. Once the
+ * deletes are pushed (on flush in DocumentsWriter), these
+ * deletes are converted to a FrozenBufferedDeletes instance. */

 // NOTE: we are sync'd by BufferedDeletes, ie, all access to
 // instances of this class is via sync'd methods on
@@ -63,13 +65,8 @@
      undercount (say 24 bytes).  Integer is OBJ_HEADER + INT. */
   final static int BYTES_PER_DEL_QUERY = 5*RamUsageEstimator.NUM_BYTES_OBJECT_REF + 2*RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + 2*RamUsageEstimator.NUM_BYTES_INT + 24;

-  // TODO: many of the deletes stored here will map to
-  // Integer.MAX_VALUE; we could be more efficient for this
-  // case ie use a SortedSet not a SortedMap.  But: Java's
-  // SortedSet impls are simply backed by a Map so we won't
-  // save anything unless we do something custom...
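
As a rough worked example of the accounting above: assuming a 64-bit JVM where NUM_BYTES_OBJECT_REF is 8, NUM_BYTES_OBJECT_HEADER is 16 and NUM_BYTES_INT is 4 (these RamUsageEstimator values vary by JVM and pointer-compression settings), BYTES_PER_DEL_QUERY works out to 5*8 + 2*16 + 2*4 + 24 = 104 bytes per buffered query delete.
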
   final AtomicInteger numTermDeletes = new AtomicInteger();
-  final SortedMap<Term,Integer> terms = new TreeMap<Term,Integer>();
+  final Map<Term,Integer> terms;
   final Map<Query,Integer> queries = new HashMap<Query,Integer>();
   final List<Integer> docIDs = new ArrayList<Integer>();
@@ -81,6 +78,14 @@

   long gen;

+  public BufferedDeletes(boolean sortTerms) {
+    if (sortTerms) {
+      terms = new TreeMap<Term,Integer>();
+    } else {
+      terms = new HashMap<Term,Integer>();
+    }
+  }
+
   @Override
   public String toString() {
     if (VERBOSE_DELETES) {
@@ -130,6 +135,26 @@
     // should already be cleared
   }

+  void update(FrozenBufferedDeletes in) {
+    numTermDeletes.addAndGet(in.numTermDeletes);
+    for(Term term : in.terms) {
+      if (!terms.containsKey(term)) {
+        // only incr bytesUsed if this term wasn't already buffered:
+        bytesUsed.addAndGet(BYTES_PER_DEL_TERM);
+      }
+      terms.put(term, MAX_INT);
+    }
+
+    for(int queryIdx=0;queryIdx<in.queries.length;queryIdx++) {
+      final Query query = in.queries[queryIdx];
+      if (!queries.containsKey(query)) {
+        // only incr bytesUsed if this query wasn't already buffered:
+        bytesUsed.addAndGet(BYTES_PER_DEL_QUERY);
+      }
+      queries.put(query, MAX_INT);
+    }
+  }
+
+  public Iterable<Term> termsIterable() {
+    return new Iterable<Term>() {
+      // @Override -- not until Java 1.6
+      public Iterator<Term> iterator() {
+        return terms.keySet().iterator();
+      }
+    };
+  }
+
+  public Iterable<QueryAndLimit> queriesIterable() {
+    return new Iterable<QueryAndLimit>() {
+
+      // @Override -- not until Java 1.6
+      public Iterator<QueryAndLimit> iterator() {
+        return new Iterator<QueryAndLimit>() {
+          private final Iterator<Map.Entry<Query,Integer>> iter = queries.entrySet().iterator();
+
+          // @Override -- not until Java 1.6
+          public boolean hasNext() {
+            return iter.hasNext();
+          }
+
+          // @Override -- not until Java 1.6
+          public QueryAndLimit next() {
+            final Map.Entry<Query,Integer> ent = iter.next();
+            return new QueryAndLimit(ent.getKey(), ent.getValue());
+          }
+
+          // @Override -- not until Java 1.6
+          public void remove() {
+            throw new UnsupportedOperationException();
+          }
+        };
+      }
+    };
+  }

   void clear() {
     terms.clear();
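
A minimal sketch of how these two views are meant to be consumed; termsIterable(), queriesIterable() and QueryAndLimit come from the patch, while the dump method itself is hypothetical:

    // Hypothetical consumer of the two iterable views added above.
    void dumpDeletes(BufferedDeletes deletes) {
      for (Term term : deletes.termsIterable()) {
        System.out.println("  buffered del term=" + term);
      }
      for (BufferedDeletesStream.QueryAndLimit ent : deletes.queriesIterable()) {
        System.out.println("  buffered del query=" + ent.query + " limit=" + ent.limit);
      }
    }
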
Index: lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java
--- lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java	Sat Jan 29 14:51:53 2011 -0500
+++ lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java	Sat Jan 29 19:25:42 2011 -0500
@@ -22,7 +22,6 @@
 import java.util.List;
 import java.util.ArrayList;
 import java.util.Date;
-import java.util.Map.Entry;
 import java.util.Comparator;
 import java.util.Collections;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -49,10 +48,11 @@
  * track which BufferedDeletes packets to apply to any given
  * segment. */

+// nocommit -- rename to deletes stream
 class BufferedDeletesStream {

   // TODO: maybe linked list?
-  private final List<BufferedDeletes> deletes = new ArrayList<BufferedDeletes>();
+  private final List<FrozenBufferedDeletes> deletes = new ArrayList<FrozenBufferedDeletes>();

   // Starts at 1 so that SegmentInfos that have never had
   // deletes applied (whose bufferedDelGen defaults to 0)
@@ -83,13 +83,13 @@

   // Appends a new packet of buffered deletes to the stream,
   // setting its generation:
-  public synchronized void push(BufferedDeletes packet) {
+  public synchronized void push(FrozenBufferedDeletes packet) {
     assert packet.any();
     assert checkDeleteStats();
-    packet.gen = nextGen++;
+    assert packet.gen < nextGen;
     deletes.add(packet);
-    numTerms.addAndGet(packet.numTermDeletes.get());
-    bytesUsed.addAndGet(packet.bytesUsed.get());
+    numTerms.addAndGet(packet.numTermDeletes);
+    bytesUsed.addAndGet(packet.bytesUsed);
     if (infoStream != null) {
       message("push deletes " + packet + " delGen=" + packet.gen + " packetCount=" + deletes.size());
     }
@@ -182,14 +182,14 @@
     while (infosIDX >= 0) {
       //System.out.println("BD: cycle delIDX=" + delIDX + " infoIDX=" + infosIDX);

-      final BufferedDeletes packet = delIDX >= 0 ? deletes.get(delIDX) : null;
+      final FrozenBufferedDeletes packet = delIDX >= 0 ? deletes.get(delIDX) : null;
       final SegmentInfo info = infos2.get(infosIDX);
       final long segGen = info.getBufferedDeletesGen();

       if (packet != null && segGen < packet.gen) {
         //System.out.println("  coalesce");
         if (coalescedDeletes == null) {
-          coalescedDeletes = new BufferedDeletes();
+          coalescedDeletes = new BufferedDeletes(true);
         }
         coalescedDeletes.update(packet);
         delIDX--;
@@ -202,25 +202,25 @@
         int delCount = 0;
         try {
           if (coalescedDeletes != null) {
-            delCount += applyDeletes(coalescedDeletes, reader);
+            //System.out.println("    del coalesced");
+            delCount += applyTermDeletes(coalescedDeletes.termsIterable(), reader);
+            delCount += applyQueryDeletes(coalescedDeletes.queriesIterable(), reader);
           }
-          delCount += applyDeletes(packet, reader);
+          //System.out.println("    del exact");
+          // Don't delete by Term here; DocumentsWriter
+          // already did that on flush:
+          delCount += applyQueryDeletes(packet.queriesIterable(), reader);
         } finally {
           readerPool.release(reader);
         }
         anyNewDeletes |= delCount > 0;

-        // We've applied doc ids, and they're only applied
-        // on the current segment
-        bytesUsed.addAndGet(-packet.docIDs.size() * BufferedDeletes.BYTES_PER_DEL_DOCID);
-        packet.clearDocIDs();
-
         if (infoStream != null) {
           message("seg=" + info + " segGen=" + segGen + " segDeletes=[" + packet + "]; coalesced deletes=[" + (coalescedDeletes == null ? "null" : coalescedDeletes) + "] delCount=" + delCount);
         }

         if (coalescedDeletes == null) {
-          coalescedDeletes = new BufferedDeletes();
+          coalescedDeletes = new BufferedDeletes(true);
         }
         coalescedDeletes.update(packet);
         delIDX--;
@@ -236,7 +236,8 @@
         SegmentReader reader = readerPool.get(info, false);
         int delCount = 0;
         try {
-          delCount += applyDeletes(coalescedDeletes, reader);
+          delCount += applyTermDeletes(coalescedDeletes.termsIterable(), reader);
+          delCount += applyQueryDeletes(coalescedDeletes.queriesIterable(), reader);
         } finally {
           readerPool.release(reader);
         }
@@ -301,121 +302,122 @@
       message("pruneDeletes: prune " + count + " packets; " + (deletes.size() - count) + " packets remain");
     }
     for(int delIDX=0;delIDX<count;delIDX++) {
-      final BufferedDeletes packet = deletes.get(delIDX);
+      final FrozenBufferedDeletes packet = deletes.get(delIDX);
-      numTerms.addAndGet(-packet.numTermDeletes.get());
+      numTerms.addAndGet(-packet.numTermDeletes);
       assert numTerms.get() >= 0;
-      bytesUsed.addAndGet(-packet.bytesUsed.get());
+      bytesUsed.addAndGet(-packet.bytesUsed);
       assert bytesUsed.get() >= 0;
     }
     deletes.subList(0, count).clear();
   }
 }
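
The backwards walk in applyDeletes above pairs a packet index (delIDX) and a segment index (infosIDX), both ordered by gen: a packet newer than the current segment is folded into the coalesced deletes and carried past it; otherwise the coalesced deletes are applied and the walk steps to an older segment. A self-contained, illustrative model of just that control flow (array names are stand-ins, not from the patch):

    class WalkModel {
      // packetGens and segGens are both ascending, as in applyDeletes.
      static void walk(long[] packetGens, long[] segGens) {
        int delIDX = packetGens.length - 1;
        int infosIDX = segGens.length - 1;
        while (infosIDX >= 0) {
          if (delIDX >= 0 && segGens[infosIDX] < packetGens[delIDX]) {
            delIDX--;      // newer packet: coalesce it past this segment
          } else {
            if (delIDX >= 0 && segGens[infosIDX] == packetGens[delIDX]) {
              delIDX--;    // packet belongs to this segment; fold it in too
            }
            infosIDX--;    // segment done; move to an older one
          }
        }
      }
    }
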
-  private synchronized long applyDeletes(BufferedDeletes deletes, SegmentReader reader) throws IOException {
+  // Delete by Term
+  private synchronized long applyTermDeletes(Iterable<Term> termsIter, SegmentReader reader) throws IOException {
+    long delCount = 0;
+    Fields fields = reader.fields();
+    if (fields == null) {
+      // This reader has no postings
+      return 0;
+    }

-    long delCount = 0;
+    TermsEnum termsEnum = null;
+
+    String currentField = null;
+    DocsEnum docs = null;
+
+    assert checkDeleteTerm(null);

-    assert checkDeleteTerm(null);
-
-    if (deletes.terms.size() > 0) {
-      Fields fields = reader.fields();
-      if (fields == null) {
-        // This reader has no postings
-        return 0;
+    for (Term term : termsIter) {
+      // Since we visit terms sorted, we gain performance
+      // by re-using the same TermsEnum and seeking only
+      // forwards
+      if (term.field() != currentField) {
+        assert currentField == null || currentField.compareTo(term.field()) < 0;
+        currentField = term.field();
+        Terms terms = fields.terms(currentField);
+        if (terms != null) {
+          termsEnum = terms.iterator();
+        } else {
+          termsEnum = null;
+        }
       }

-      TermsEnum termsEnum = null;
-
-      String currentField = null;
-      DocsEnum docs = null;
-
-      for (Entry<Term,Integer> entry: deletes.terms.entrySet()) {
-        Term term = entry.getKey();
-        // Since we visit terms sorted, we gain performance
-        // by re-using the same TermsEnum and seeking only
-        // forwards
-        if (term.field() != currentField) {
-          assert currentField == null || currentField.compareTo(term.field()) < 0;
-          currentField = term.field();
-          Terms terms = fields.terms(currentField);
-          if (terms != null) {
-            termsEnum = terms.iterator();
-          } else {
-            termsEnum = null;
-          }
-        }
+      if (termsEnum == null) {
+        continue;
+      }
+      assert checkDeleteTerm(term);
+
+      // System.out.println("  term=" + term);

-        if (termsEnum == null) {
-          continue;
-        }
-        assert checkDeleteTerm(term);
-
-        if (termsEnum.seek(term.bytes(), false) == TermsEnum.SeekStatus.FOUND) {
-          DocsEnum docsEnum = termsEnum.docs(reader.getDeletedDocs(), docs);
+      if (termsEnum.seek(term.bytes(), false) == TermsEnum.SeekStatus.FOUND) {
+        DocsEnum docsEnum = termsEnum.docs(reader.getDeletedDocs(), docs);

-          if (docsEnum != null) {
-            docs = docsEnum;
-            final int limit = entry.getValue();
-            while (true) {
-              final int docID = docs.nextDoc();
-              if (docID == DocsEnum.NO_MORE_DOCS || docID >= limit) {
-                break;
-              }
-              reader.deleteDocument(docID);
-              // TODO: we could/should change
-              // reader.deleteDocument to return boolean
-              // true if it did in fact delete, because here
-              // we could be deleting an already-deleted doc
-              // which makes this an upper bound:
-              delCount++;
+        if (docsEnum != null) {
+          while (true) {
+            final int docID = docsEnum.nextDoc();
+            if (docID == DocsEnum.NO_MORE_DOCS) {
+              break;
             }
+            reader.deleteDocument(docID);
+            // TODO: we could/should change
+            // reader.deleteDocument to return boolean
+            // true if it did in fact delete, because here
+            // we could be deleting an already-deleted doc
+            // which makes this an upper bound:
+            delCount++;
           }
         }
       }
     }

-    // Delete by docID
-    for (Integer docIdInt : deletes.docIDs) {
-      int docID = docIdInt.intValue();
-      reader.deleteDocument(docID);
-      delCount++;
+    return delCount;
+  }
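
Note the rewrite drops the per-term limit: once a packet is frozen and applied to other (older) segments, every doc matching the term goes. Queries keep their cutoff through the QueryAndLimit class that follows. A self-contained model of that cutoff semantics (names here are illustrative, not from the patch):

    import java.util.BitSet;

    class LimitModel {
      // A delete buffered when the segment held 'limit' docs must only
      // affect docIDs below that cutoff; docs indexed afterwards survive.
      static int applyWithLimit(int[] sortedMatches, int limit, BitSet deleted) {
        int delCount = 0;
        for (int docID : sortedMatches) {
          if (docID >= limit) {
            break;                 // added after the delete was buffered
          }
          if (!deleted.get(docID)) {
            deleted.set(docID);    // mirrors reader.deleteDocument(docID)
            delCount++;
          }
        }
        return delCount;
      }
    }
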
+  public static class QueryAndLimit {
+    public final Query query;
+    public final int limit;
+    public QueryAndLimit(Query query, int limit) {
+      this.query = query;
+      this.limit = limit;
+    }
+  }

-    // Delete by query
-    if (deletes.queries.size() > 0) {
-      IndexSearcher searcher = new IndexSearcher(reader);
-      assert searcher.getTopReaderContext().isAtomic;
-      final AtomicReaderContext readerContext = (AtomicReaderContext) searcher.getTopReaderContext();
-      try {
-        for (Entry<Query,Integer> entry : deletes.queries.entrySet()) {
-          Query query = entry.getKey();
-          int limit = entry.getValue().intValue();
-          Weight weight = query.weight(searcher);
-          Scorer scorer = weight.scorer(readerContext, Weight.ScorerContext.def());
-          if (scorer != null) {
-            while(true) {
-              int doc = scorer.nextDoc();
-              if (doc >= limit)
-                break;
+  // Delete by query
+  private synchronized long applyQueryDeletes(Iterable<QueryAndLimit> queriesIter, SegmentReader reader) throws IOException {
+    long delCount = 0;
+    IndexSearcher searcher = new IndexSearcher(reader);
+    assert searcher.getTopReaderContext().isAtomic;
+    final AtomicReaderContext readerContext = (AtomicReaderContext) searcher.getTopReaderContext();
+    try {
+      for (QueryAndLimit ent : queriesIter) {
+        Query query = ent.query;
+        int limit = ent.limit;
+        Weight weight = query.weight(searcher);
+        Scorer scorer = weight.scorer(readerContext, Weight.ScorerContext.def());
+        if (scorer != null) {
+          while(true) {
+            int doc = scorer.nextDoc();
+            if (doc >= limit)
+              break;

-              reader.deleteDocument(doc);
-              // TODO: we could/should change
-              // reader.deleteDocument to return boolean
-              // true if it did in fact delete, because here
-              // we could be deleting an already-deleted doc
-              // which makes this an upper bound:
-              delCount++;
-            }
+            reader.deleteDocument(doc);
+            // TODO: we could/should change
+            // reader.deleteDocument to return boolean
+            // true if it did in fact delete, because here
+            // we could be deleting an already-deleted doc
+            // which makes this an upper bound:
+            delCount++;
           }
         }
-      } finally {
-        searcher.close();
       }
+    } finally {
+      searcher.close();
     }

     return delCount;
   }
-
+
   // used only by assert
   private boolean checkDeleteTerm(Term term) {
     if (term != null) {
@@ -429,9 +431,9 @@
   private boolean checkDeleteStats() {
     int numTerms2 = 0;
     long bytesUsed2 = 0;
-    for(BufferedDeletes packet : deletes) {
-      numTerms2 += packet.numTermDeletes.get();
-      bytesUsed2 += packet.bytesUsed.get();
+    for(FrozenBufferedDeletes packet : deletes) {
+      numTerms2 += packet.numTermDeletes;
+      bytesUsed2 += packet.bytesUsed;
     }
     assert numTerms2 == numTerms.get(): "numTerms2=" + numTerms2 + " vs " + numTerms.get();
     assert bytesUsed2 == bytesUsed.get(): "bytesUsed2=" + bytesUsed2 + " vs " + bytesUsed;
Index: lucene/src/java/org/apache/lucene/index/DocumentsWriter.java
--- lucene/src/java/org/apache/lucene/index/DocumentsWriter.java	Sat Jan 29 14:51:53 2011 -0500
+++ lucene/src/java/org/apache/lucene/index/DocumentsWriter.java	Sat Jan 29 19:25:42 2011 -0500
@@ -35,9 +35,11 @@
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMFile;
 import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BitVector;
+import org.apache.lucene.util.RamUsageEstimator;
 import org.apache.lucene.util.RecyclingByteBlockAllocator;
 import org.apache.lucene.util.ThreadInterruptedException;
-import org.apache.lucene.util.RamUsageEstimator;
+

 import static org.apache.lucene.util.ByteBlockPool.BYTE_BLOCK_MASK;
 import static org.apache.lucene.util.ByteBlockPool.BYTE_BLOCK_SIZE;
@@ -133,8 +135,9 @@
   // this, they wait for others to finish first
   private final int maxThreadStates;

+  // TODO: cutover to BytesRefHash
   // Deletes for our still-in-RAM (to be flushed next) segment
-  private BufferedDeletes pendingDeletes = new BufferedDeletes();
+  private BufferedDeletes pendingDeletes = new BufferedDeletes(false);

   static class DocState {
     DocumentsWriter docWriter;
@@ -336,6 +339,9 @@
     return doFlush;
   }

+  // TODO: we could check w/ FreqProxTermsWriter: if the
+  // term doesn't exist, don't bother buffering into the
+  // per-DWPT map (but still must go into the global map)
   boolean deleteTerm(Term term, boolean skipWait) {
     final boolean doFlush = flushControl.waitUpdate(0, 1, skipWait);
     synchronized(this) {
@@ -507,17 +513,19 @@

   private void pushDeletes(SegmentInfo newSegment, SegmentInfos segmentInfos) {
     // Lock order: DW -> BD
+    final long delGen = bufferedDeletesStream.getNextGen();
     if (pendingDeletes.any()) {
       if (segmentInfos.size() > 0 || newSegment != null) {
+        final FrozenBufferedDeletes packet = new FrozenBufferedDeletes(pendingDeletes, delGen);
         if (infoStream != null) {
           message("flush: push buffered deletes");
         }
-        bufferedDeletesStream.push(pendingDeletes);
+        bufferedDeletesStream.push(packet);
         if (infoStream != null) {
-          message("flush: delGen=" + pendingDeletes.gen);
+          message("flush: delGen=" + packet.gen);
         }
         if (newSegment != null) {
-          newSegment.setBufferedDeletesGen(pendingDeletes.gen);
+          newSegment.setBufferedDeletesGen(packet.gen);
         }
       } else {
         if (infoStream != null) {
@@ -527,9 +535,9 @@
         // there are no segments, the deletions cannot
        // affect anything.
       }
-      pendingDeletes = new BufferedDeletes();
+      pendingDeletes.clear();
     } else if (newSegment != null) {
-      newSegment.setBufferedDeletesGen(bufferedDeletesStream.getNextGen());
+      newSegment.setBufferedDeletesGen(delGen);
     }
   }
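
Condensed, the flow above looks like the sketch below (all names from the patch; slightly simplified, e.g. it ignores the drop-deletes-when-no-segments logging). The gen is reserved before checking whether anything is buffered, so a freshly flushed segment records the correct delete-generation cutoff either way:

    // Condensed restatement of pushDeletes above, not new behavior:
    final long delGen = bufferedDeletesStream.getNextGen();
    if (pendingDeletes.any() && (segmentInfos.size() > 0 || newSegment != null)) {
      // freeze the RAM buffer into an immutable packet and push it
      bufferedDeletesStream.push(new FrozenBufferedDeletes(pendingDeletes, delGen));
      pendingDeletes.clear();
    }
    if (newSegment != null) {
      newSegment.setBufferedDeletesGen(delGen);
    }
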
@@ -580,7 +588,19 @@
     final SegmentWriteState flushState = new SegmentWriteState(infoStream, directory, segment, fieldInfos,
                                                                numDocs, writer.getConfig().getTermIndexInterval(),
-                                                               SegmentCodecs.build(fieldInfos, writer.codecs));
+                                                               SegmentCodecs.build(fieldInfos, writer.codecs),
+                                                               pendingDeletes);
+    // Apply delete-by-docID now (delete-by-docID only
+    // happens when an exception is hit processing that
+    // doc, eg if analyzer has some problem w/ the text):
+    if (pendingDeletes.docIDs.size() > 0) {
+      flushState.deletedDocs = new BitVector(numDocs);
+      for(int delDocID : pendingDeletes.docIDs) {
+        flushState.deletedDocs.set(delDocID);
+      }
+      pendingDeletes.bytesUsed.addAndGet(-pendingDeletes.docIDs.size() * BufferedDeletes.BYTES_PER_DEL_DOCID);
+      pendingDeletes.docIDs.clear();
+    }

     newSegment = new SegmentInfo(segment, numDocs, directory, false, fieldInfos.hasProx(), flushState.segmentCodecs, false);
@@ -592,10 +612,14 @@
     double startMBUsed = bytesUsed()/1024./1024.;

     consumer.flush(threads, flushState);
+
+    newSegment.setHasVectors(flushState.hasVectors);

     if (infoStream != null) {
       message("new segment has " + (flushState.hasVectors ? "vectors" : "no vectors"));
+      if (flushState.deletedDocs != null) {
+        message("new segment has " + flushState.deletedDocs.count() + " deleted docs");
+      }
       message("flushedFiles=" + newSegment.files());
       message("flushed codecs=" + newSegment.getSegmentCodecs());
     }
@@ -616,6 +640,30 @@
       newSegment.setUseCompoundFile(true);
     }

+    // Must write deleted docs after the CFS so we don't
+    // slurp the del file into CFS:
+    if (flushState.deletedDocs != null) {
+      final int delCount = flushState.deletedDocs.count();
+      assert delCount > 0;
+      newSegment.setDelCount(delCount);
+      newSegment.advanceDelGen();
+      final String delFileName = newSegment.getDelFileName();
+      boolean success2 = false;
+      try {
+        flushState.deletedDocs.write(directory, delFileName);
+        success2 = true;
+      } finally {
+        if (!success2) {
+          try {
+            directory.deleteFile(delFileName);
+          } catch (Throwable t) {
+            // suppress this so we keep throwing the
+            // original exception
+          }
+        }
+      }
+    }
+
     if (infoStream != null) {
       message("flush: segment=" + newSegment);
       final double newSegmentSizeNoStore = newSegment.sizeInBytes(false)/1024./1024.;
Index: lucene/src/java/org/apache/lucene/index/FreqProxTermsWriter.java
--- lucene/src/java/org/apache/lucene/index/FreqProxTermsWriter.java	Sat Jan 29 14:51:53 2011 -0500
+++ lucene/src/java/org/apache/lucene/index/FreqProxTermsWriter.java	Sat Jan 29 19:25:42 2011 -0500
@@ -26,8 +26,9 @@

 import org.apache.lucene.index.codecs.FieldsConsumer;
 import org.apache.lucene.index.codecs.PostingsConsumer;
+import org.apache.lucene.index.codecs.TermStats;
 import org.apache.lucene.index.codecs.TermsConsumer;
-import org.apache.lucene.index.codecs.TermStats;
+import org.apache.lucene.util.BitVector;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CollectionUtil;
@@ -108,7 +109,7 @@

     // If this field has postings then add them to the
     // segment
-    appendPostings(fields, consumer);
+    appendPostings(fieldName, state, fields, consumer);

     for(int i=0;i<fields.length;i++) {
Index: lucene/src/java/org/apache/lucene/index/FrozenBufferedDeletes.java
+  public FrozenBufferedDeletes(BufferedDeletes deletes, long gen) {
+    terms = deletes.terms.keySet().toArray(new Term[deletes.terms.size()]);
+    queries = new Query[deletes.queries.size()];
+    queryLimits = new int[queries.length];
+    int upto = 0;
+    for(Map.Entry<Query,Integer> ent : deletes.queries.entrySet()) {
+      queries[upto] = ent.getKey();
+      queryLimits[upto] = ent.getValue();
+      upto++;
+    }
+    bytesUsed = terms.length * BYTES_PER_DEL_TERM + queries.length * BYTES_PER_DEL_QUERY;
+    numTermDeletes = deletes.numTermDeletes.get();
+    this.gen = gen;
+  }
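
Once frozen, a packet is immutable: the maps become flat parallel arrays and the counters become plain ints, which is what lets BufferedDeletesStream read packets without touching the writer's live pending buffer. A hedged usage sketch; the freeze call and iterables are from the patch, while 'pending' and 'stream' are stand-ins:

    // 'pending' is a BufferedDeletes holding buffered terms/queries;
    // 'stream' is the writer's BufferedDeletesStream.
    FrozenBufferedDeletes packet = new FrozenBufferedDeletes(pending, stream.getNextGen());
    stream.push(packet);               // push asserts packet.gen < nextGen
    for (Term t : packet.termsIterable()) {
      System.out.println("frozen del term=" + t);
    }
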
+
+  public Iterable<Term> termsIterable() {
+    return new Iterable<Term>() {
+      // @Override -- not until Java 1.6
+      public Iterator<Term> iterator() {
+        return new Iterator<Term>() {
+          private int upto;
+
+          // @Override -- not until Java 1.6
+          public boolean hasNext() {
+            return upto < terms.length;
+          }
+
+          // @Override -- not until Java 1.6
+          public Term next() {
+            return terms[upto++];
+          }
+
+          // @Override -- not until Java 1.6
+          public void remove() {
+            throw new UnsupportedOperationException();
+          }
+        };
+      }
+    };
+  }
+
+  public Iterable<QueryAndLimit> queriesIterable() {
+    return new Iterable<QueryAndLimit>() {
+      // @Override -- not until Java 1.6
+      public Iterator<QueryAndLimit> iterator() {
+        return new Iterator<QueryAndLimit>() {
+          private int upto;
+
+          // @Override -- not until Java 1.6
+          public boolean hasNext() {
+            return upto < queries.length;
+          }
+
+          // @Override -- not until Java 1.6
+          public QueryAndLimit next() {
+            QueryAndLimit ret = new QueryAndLimit(queries[upto], queryLimits[upto]);
+            upto++;
+            return ret;
+          }
+
+          // @Override -- not until Java 1.6
+          public void remove() {
+            throw new UnsupportedOperationException();
+          }
+        };
+      }
+    };
+  }
+
+  @Override
+  public String toString() {
+    String s = "";
+    if (numTermDeletes != 0) {
+      s += " " + numTermDeletes + " deleted terms (unique count=" + terms.length + ")";
+    }
+    if (queries.length != 0) {
+      s += " " + queries.length + " deleted queries";
+    }
+    if (bytesUsed != 0) {
+      s += " bytesUsed=" + bytesUsed;
+    }
+
+    return s;
+  }
+
+  boolean any() {
+    return terms.length > 0 || queries.length > 0;
+  }
+}
Index: lucene/src/java/org/apache/lucene/index/SegmentMerger.java
--- lucene/src/java/org/apache/lucene/index/SegmentMerger.java	Sat Jan 29 14:51:53 2011 -0500
+++ lucene/src/java/org/apache/lucene/index/SegmentMerger.java	Sat Jan 29 19:25:42 2011 -0500
@@ -266,7 +266,7 @@
       // details.
       throw new RuntimeException("mergeFields produced an invalid result: docCount is " + docCount + " but fdx file size is " + fdxFileLength + " file=" + fileName + " file exists?=" + directory.fileExists(fileName) + "; now aborting this merge to prevent index corruption");

-    segmentWriteState = new SegmentWriteState(null, directory, segment, fieldInfos, docCount, termIndexInterval, codecInfo);
+    segmentWriteState = new SegmentWriteState(null, directory, segment, fieldInfos, docCount, termIndexInterval, codecInfo, null);
     return docCount;
   }
Index: lucene/src/java/org/apache/lucene/index/SegmentWriteState.java
--- lucene/src/java/org/apache/lucene/index/SegmentWriteState.java	Sat Jan 29 14:51:53 2011 -0500
+++ lucene/src/java/org/apache/lucene/index/SegmentWriteState.java	Sat Jan 29 19:25:42 2011 -0500
@@ -20,6 +20,7 @@
 import java.io.PrintStream;

 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BitVector;

 /**
  * @lucene.experimental
@@ -32,6 +33,16 @@
   public final int numDocs;
   public boolean hasVectors;

+  // Deletes to apply while we are flushing the segment.  A
+  // Term is enrolled in here if it was deleted at one
+  // point, and it's mapped to the docIDUpto, meaning any
+  // docID < docIDUpto containing this term should be
+  // deleted.
+  public final BufferedDeletes segDeletes;
+
+  // Lazily created:
+  public BitVector deletedDocs;
+
   final SegmentCodecs segmentCodecs;
   public final String codecId;
@@ -57,8 +68,9 @@

   public SegmentWriteState(PrintStream infoStream, Directory directory, String segmentName, FieldInfos fieldInfos,
-                           int numDocs, int termIndexInterval, SegmentCodecs segmentCodecs) {
+                           int numDocs, int termIndexInterval, SegmentCodecs segmentCodecs, BufferedDeletes segDeletes) {
     this.infoStream = infoStream;
+    this.segDeletes = segDeletes;
     this.directory = directory;
     this.segmentName = segmentName;
     this.fieldInfos = fieldInfos;
@@ -80,5 +92,6 @@
     termIndexInterval = state.termIndexInterval;
     segmentCodecs = state.segmentCodecs;
     this.codecId = codecId;
+    segDeletes = state.segDeletes;
   }
 }
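
The net effect on SegmentWriteState's two producers is easiest to see side by side; both constructor calls below appear in the hunks above (flush passes the live buffer, merge passes null since buffered deletes are never applied mid-merge):

    // flush (DocumentsWriter): buffered deletes ride along with the state
    new SegmentWriteState(infoStream, directory, segment, fieldInfos,
                          numDocs, writer.getConfig().getTermIndexInterval(),
                          SegmentCodecs.build(fieldInfos, writer.codecs),
                          pendingDeletes);

    // merge (SegmentMerger): no buffered deletes while merging
    new SegmentWriteState(null, directory, segment, fieldInfos,
                          docCount, termIndexInterval, codecInfo, null);
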
Index: lucene/src/test/org/apache/lucene/index/TestAddIndexes.java
--- lucene/src/test/org/apache/lucene/index/TestAddIndexes.java	Sat Jan 29 14:51:53 2011 -0500
+++ lucene/src/test/org/apache/lucene/index/TestAddIndexes.java	Sat Jan 29 19:25:42 2011 -0500
@@ -157,6 +157,7 @@
     setUpDirs(dir, aux);
     IndexWriter writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
+    writer.setInfoStream(VERBOSE ? System.out : null);
     writer.addIndexes(aux);

     // Adds 10 docs, then replaces them with another 10
Index: lucene/src/test/org/apache/lucene/index/TestCodecs.java
--- lucene/src/test/org/apache/lucene/index/TestCodecs.java	Sat Jan 29 14:51:53 2011 -0500
+++ lucene/src/test/org/apache/lucene/index/TestCodecs.java	Sat Jan 29 19:25:42 2011 -0500
@@ -589,7 +589,7 @@
     final int termIndexInterval = _TestUtil.nextInt(random, 13, 27);
     final SegmentCodecs codecInfo = SegmentCodecs.build(fieldInfos, CodecProvider.getDefault());
-    final SegmentWriteState state = new SegmentWriteState(null, dir, SEGMENT, fieldInfos, 10000, termIndexInterval, codecInfo);
+    final SegmentWriteState state = new SegmentWriteState(null, dir, SEGMENT, fieldInfos, 10000, termIndexInterval, codecInfo, null);

     final FieldsConsumer consumer = state.segmentCodecs.codec().fieldsConsumer(state);
     Arrays.sort(fields);
Index: lucene/src/test/org/apache/lucene/index/TestIndexWriter.java
--- lucene/src/test/org/apache/lucene/index/TestIndexWriter.java	Sat Jan 29 14:51:53 2011 -0500
+++ lucene/src/test/org/apache/lucene/index/TestIndexWriter.java	Sat Jan 29 19:25:42 2011 -0500
@@ -2576,7 +2576,7 @@
         count++;
       }
     }
-    assertTrue("flush happened too quickly during " + (doIndexing ? "indexing" : "deleting") + " count=" + count, count > 2500);
+    assertTrue("flush happened too quickly during " + (doIndexing ? "indexing" : "deleting") + " count=" + count, count > 1500);
   }
   w.close();
   dir.close();
Index: lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
--- lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java	Sat Jan 29 14:51:53 2011 -0500
+++ lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java	Sat Jan 29 19:25:42 2011 -0500
@@ -157,8 +157,6 @@
     assertEquals(0, modifier.getSegmentCount());
     modifier.commit();

-    modifier.commit();
-
     IndexReader reader = IndexReader.open(dir, true);
     assertEquals(1, reader.numDocs());
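
As a sanity check of the end-to-end behavior these tests exercise, a hedged sketch in the same test style (newDirectory/newIndexWriterConfig/newField/MockAnalyzer are Lucene test-framework helpers of this era; the field construction is illustrative, not taken from the patch):

    public void testBufferedDeleteIsApplied() throws Exception {
      Directory dir = newDirectory();
      IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
      Document doc = new Document();
      doc.add(newField("id", "1", Field.Store.NO, Field.Index.NOT_ANALYZED));
      w.addDocument(doc);
      w.deleteDocuments(new Term("id", "1"));   // buffered in pendingDeletes
      w.commit();                               // flush freezes and pushes the packet
      IndexReader r = IndexReader.open(dir, true);
      assertEquals(0, r.numDocs());             // the buffered delete was applied
      r.close();
      w.close();
      dir.close();
    }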