Index: lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java
--- lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java	Mon Feb 14 11:08:15 2011 -0500
+++ lucene/contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java	Mon Feb 14 17:44:51 2011 -0500
@@ -21,6 +21,7 @@
 import org.apache.lucene.util.Version;
 import org.apache.lucene.util.LuceneTestCase;
 import org.junit.AfterClass;
+import org.junit.Assume;
 import org.junit.BeforeClass;
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -186,6 +187,7 @@
 	}

 	public void testDuplicateFilterQueryXML() throws ParserException, IOException {
+		Assume.assumeTrue(searcher.getIndexReader().getSequentialSubReaders().length == 1);
 		Query q=parse("DuplicateFilterQuery.xml");
 		int h = searcher.search(q, null, 1000).totalHits;
 		assertEquals("DuplicateFilterQuery should produce 1 result ", 1,h);
Index: lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java
--- lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java	Mon Feb 14 11:08:15 2011 -0500
+++ lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java	Mon Feb 14 17:44:51 2011 -0500
@@ -121,9 +121,13 @@
     // Current gen, for the merged segment:
     public final long gen;

-    ApplyDeletesResult(boolean anyDeletes, long gen) {
+    // If non-null, contains segments that are 100% deleted
+    public final SegmentInfos allDeleted;
+
+    ApplyDeletesResult(boolean anyDeletes, long gen, SegmentInfos allDeleted) {
       this.anyDeletes = anyDeletes;
       this.gen = gen;
+      this.allDeleted = allDeleted;
     }
   }

@@ -154,14 +158,14 @@
     final long t0 = System.currentTimeMillis();

     if (infos.size() == 0) {
-      return new ApplyDeletesResult(false, nextGen++);
+      return new ApplyDeletesResult(false, nextGen++, null);
     }

     assert checkDeleteStats();

     if (!any()) {
       message("applyDeletes: no deletes; skipping");
-      return new ApplyDeletesResult(false, nextGen++);
+      return new ApplyDeletesResult(false, nextGen++, null);
     }

     if (infoStream != null) {
@@ -178,6 +182,8 @@
     int infosIDX = infos2.size()-1;
     int delIDX = deletes.size()-1;

+    SegmentInfos allDeleted = null;
+
     while (infosIDX >= 0) {
       //System.out.println("BD: cycle delIDX=" + delIDX + " infoIDX=" + infosIDX);

@@ -199,6 +205,7 @@
           assert readerPool.infoIsLive(info);
           SegmentReader reader = readerPool.get(info, false);
           int delCount = 0;
+          final boolean segAllDeletes;
           try {
             if (coalescedDeletes != null) {
               //System.out.println("  del coalesced");
@@ -209,13 +216,21 @@
             // Don't delete by Term here; DocumentsWriter
             // already did that on flush:
             delCount += applyQueryDeletes(packet.queriesIterable(), reader);
+            segAllDeletes = reader.numDocs() == 0;
           } finally {
             readerPool.release(reader);
           }
           anyNewDeletes |= delCount > 0;

+          if (segAllDeletes) {
+            if (allDeleted == null) {
+              allDeleted = new SegmentInfos();
+            }
+            allDeleted.add(info);
+          }
+
           if (infoStream != null) {
-            message("seg=" + info + " segGen=" + segGen + " segDeletes=[" + packet + "]; coalesced deletes=[" + (coalescedDeletes == null ? "null" : coalescedDeletes) + "] delCount=" + delCount);
+            message("seg=" + info + " segGen=" + segGen + " segDeletes=[" + packet + "]; coalesced deletes=[" + (coalescedDeletes == null ? "null" : coalescedDeletes) + "] delCount=" + delCount + (segAllDeletes ? " 100% deleted" : ""));
           }

           if (coalescedDeletes == null) {
@@ -234,16 +249,25 @@
           assert readerPool.infoIsLive(info);
           SegmentReader reader = readerPool.get(info, false);
           int delCount = 0;
+          final boolean segAllDeletes;
           try {
             delCount += applyTermDeletes(coalescedDeletes.termsIterable(), reader);
             delCount += applyQueryDeletes(coalescedDeletes.queriesIterable(), reader);
+            segAllDeletes = reader.numDocs() == 0;
           } finally {
             readerPool.release(reader);
           }
           anyNewDeletes |= delCount > 0;

+          if (segAllDeletes) {
+            if (allDeleted == null) {
+              allDeleted = new SegmentInfos();
+            }
+            allDeleted.add(info);
+          }
+
           if (infoStream != null) {
-            message("seg=" + info + " segGen=" + segGen + " coalesced deletes=[" + (coalescedDeletes == null ? "null" : coalescedDeletes) + "] delCount=" + delCount);
+            message("seg=" + info + " segGen=" + segGen + " coalesced deletes=[" + (coalescedDeletes == null ? "null" : coalescedDeletes) + "] delCount=" + delCount + (segAllDeletes ? " 100% deleted" : ""));
           }
         }
         info.setBufferedDeletesGen(nextGen);
@@ -258,7 +282,7 @@
     }

     // assert infos != segmentInfos || !any() : "infos=" + infos + " segmentInfos=" + segmentInfos + " any=" + any;
-    return new ApplyDeletesResult(anyNewDeletes, nextGen++);
+    return new ApplyDeletesResult(anyNewDeletes, nextGen++, allDeleted);
   }

   public synchronized long getNextGen() {
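Note (not part of the patch): BufferedDeletesStream only records which segments became 100% deleted in the new allDeleted field; acting on it is left to the caller. A minimal sketch of the intended calling pattern, mirroring the IndexWriter hunks below (checkpoint(), readerPool, segmentInfos and keepFullyDeletedSegments are IndexWriter members, assumed here):

    final BufferedDeletesStream.ApplyDeletesResult result =
      bufferedDeletesStream.applyDeletes(readerPool, segmentInfos);
    if (result.anyDeletes) {
      checkpoint();
    }
    if (!keepFullyDeletedSegments && result.allDeleted != null) {
      // every segment in allDeleted has zero live docs; unlink it
      for(SegmentInfo info : result.allDeleted) {
        segmentInfos.remove(info);
      }
      checkpoint();
      // and drop its pooled SegmentReader, discarding pending changes
      readerPool.drop(result.allDeleted);
    }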
" 100% deleted" : "")); } if (coalescedDeletes == null) { @@ -234,16 +249,25 @@ assert readerPool.infoIsLive(info); SegmentReader reader = readerPool.get(info, false); int delCount = 0; + final boolean segAllDeletes; try { delCount += applyTermDeletes(coalescedDeletes.termsIterable(), reader); delCount += applyQueryDeletes(coalescedDeletes.queriesIterable(), reader); + segAllDeletes = reader.numDocs() == 0; } finally { readerPool.release(reader); } anyNewDeletes |= delCount > 0; + if (segAllDeletes) { + if (allDeleted == null) { + allDeleted = new SegmentInfos(); + } + allDeleted.add(info); + } + if (infoStream != null) { - message("seg=" + info + " segGen=" + segGen + " coalesced deletes=[" + (coalescedDeletes == null ? "null" : coalescedDeletes) + "] delCount=" + delCount); + message("seg=" + info + " segGen=" + segGen + " coalesced deletes=[" + (coalescedDeletes == null ? "null" : coalescedDeletes) + "] delCount=" + delCount + (segAllDeletes ? " 100% deleted" : "")); } } info.setBufferedDeletesGen(nextGen); @@ -258,7 +282,7 @@ } // assert infos != segmentInfos || !any() : "infos=" + infos + " segmentInfos=" + segmentInfos + " any=" + any; - return new ApplyDeletesResult(anyNewDeletes, nextGen++); + return new ApplyDeletesResult(anyNewDeletes, nextGen++, allDeleted); } public synchronized long getNextGen() { Index: lucene/src/java/org/apache/lucene/index/DocumentsWriter.java --- lucene/src/java/org/apache/lucene/index/DocumentsWriter.java Mon Feb 14 11:08:15 2011 -0500 +++ lucene/src/java/org/apache/lucene/index/DocumentsWriter.java Mon Feb 14 17:44:51 2011 -0500 @@ -650,6 +650,11 @@ final String delFileName = newSegment.getDelFileName(); boolean success2 = false; try { + // TODO: in the NRT case it'd be better to hand + // this del vector over to the + // shortly-to-be-opened SegmentReader and let it + // carry the changes; there's no reason to use + // filesystem as intermediary here. flushState.deletedDocs.write(directory, delFileName); success2 = true; } finally { Index: lucene/src/java/org/apache/lucene/index/IndexWriter.java --- lucene/src/java/org/apache/lucene/index/IndexWriter.java Mon Feb 14 11:08:15 2011 -0500 +++ lucene/src/java/org/apache/lucene/index/IndexWriter.java Mon Feb 14 17:44:51 2011 -0500 @@ -388,8 +388,7 @@ private final Map readerMap = new HashMap(); - /** Forcefully clear changes for the specified segments, - * and remove from the pool. This is called on successful merge. */ + /** Forcefully clear changes for the specified segments. This is called on successful merge. 
@@ -397,8 +396,9 @@
         }
       } else {
         for (final SegmentInfo info: infos) {
-          if (readerMap.containsKey(info)) {
-            readerMap.get(info).hasChanges = false;
+          final SegmentReader r = readerMap.get(info);
+          if (r != null) {
+            r.hasChanges = false;
           }
         }
       }
@@ -407,8 +407,8 @@
     // used only by asserts
     public synchronized boolean infoIsLive(SegmentInfo info) {
       int idx = segmentInfos.indexOf(info);
-      assert idx != -1;
-      assert segmentInfos.get(idx) == info;
+      assert idx != -1: "info=" + info + " isn't in pool";
+      assert segmentInfos.get(idx) == info: "info=" + info + " doesn't match live info in segmentInfos";
       return true;
     }

@@ -478,6 +478,21 @@
       return false;
     }

+    public synchronized void drop(SegmentInfos infos) throws IOException {
+      for(SegmentInfo info : infos) {
+        drop(info);
+      }
+    }
+
+    public synchronized void drop(SegmentInfo info) throws IOException {
+      final SegmentReader sr = readerMap.get(info);
+      if (sr != null) {
+        sr.hasChanges = false;
+        readerMap.remove(info);
+        sr.close();
+      }
+    }
+
     /** Remove all our references to readers, and commits
      *  any pending changes. */
     synchronized void close() throws IOException {
@@ -528,7 +543,6 @@
         if (sr.hasChanges) {
           assert infoIsLive(sr.getSegmentInfo());
           sr.doCommit(null);
-
           // Must checkpoint w/ deleter, because this
           // segment reader will have created new _X_N.del
           // file.
@@ -2558,6 +2572,18 @@
       if (result.anyDeletes) {
         checkpoint();
       }
+      if (!keepFullyDeletedSegments && result.allDeleted != null) {
+        if (infoStream != null) {
+          message("drop 100% deleted segments: " + result.allDeleted);
+        }
+        for(SegmentInfo info : result.allDeleted) {
+          segmentInfos.remove(info);
+        }
+        checkpoint();
+        if (readerPool != null) {
+          readerPool.drop(result.allDeleted);
+        }
+      }
       bufferedDeletesStream.prune(segmentInfos);
       assert !bufferedDeletesStream.any();
       flushControl.clearDeletes();
@@ -2634,9 +2660,13 @@
       SegmentInfo info = sourceSegments.info(i);
       minGen = Math.min(info.getBufferedDeletesGen(), minGen);
       int docCount = info.docCount;
-      SegmentReader previousReader = merge.readersClone[i];
+      final SegmentReader previousReader = merge.readerClones.get(i);
+      if (previousReader == null) {
+        // Reader was skipped because it was 100% deletions
+        continue;
+      }
       final Bits prevDelDocs = previousReader.getDeletedDocs();
-      SegmentReader currentReader = merge.readers[i];
+      final SegmentReader currentReader = merge.readers.get(i);
       final Bits currentDelDocs = currentReader.getDeletedDocs();

       if (previousReader.hasDeletions()) {
@@ -2719,18 +2749,21 @@
       return false;
     }
-
     ensureValidMerge(merge);
-
     commitMergedDeletes(merge, mergedReader);

     // If the doc store we are using has been closed and
     // is in now compound format (but wasn't when we
     // started), then we will switch to the compound
     // format as well:
-    setMergeDocStoreIsCompoundFile(merge);

     assert !segmentInfos.contains(merge.info);

+    final boolean allDeleted = mergedReader.numDocs() == 0;
+
+    if (infoStream != null && allDeleted) {
+      message("merged segment " + merge.info + " is 100% deleted" + (keepFullyDeletedSegments ? "" : "; skipping insert"));
+    }
+
     final Set<SegmentInfo> mergedAway = new HashSet<SegmentInfo>(merge.segments);
     int segIdx = 0;
     int newSegIdx = 0;
"" : "; skipping insert")); + } + final Set mergedAway = new HashSet(merge.segments); int segIdx = 0; int newSegIdx = 0; @@ -2739,7 +2772,7 @@ while(segIdx < curSegCount) { final SegmentInfo info = segmentInfos.info(segIdx++); if (mergedAway.contains(info)) { - if (!inserted) { + if (!inserted && (!allDeleted || keepFullyDeletedSegments)) { segmentInfos.set(segIdx-1, merge.info); inserted = true; newSegIdx++; @@ -2748,7 +2781,20 @@ segmentInfos.set(newSegIdx++, info); } } - assert newSegIdx == curSegCount - merge.segments.size() + 1; + + // Either we found place to insert segment, or, we did + // not, but only because all segments we merged became + // deleted while we are merging, in which case it should + // be the case that the new segment is also all deleted: + if (!inserted) { + assert allDeleted; + if (keepFullyDeletedSegments) { + segmentInfos.add(0, merge.info); + } else { + readerPool.drop(merge.info); + } + } + segmentInfos.subList(newSegIdx, segmentInfos.size()).clear(); if (infoStream != null) { @@ -2770,7 +2816,6 @@ // cascade the optimize: segmentsToOptimize.add(merge.info); } - return true; } @@ -2913,8 +2958,9 @@ // is running (while synchronized) to avoid race // condition where two conflicting merges from different // threads, start - for(int i=0;i BD final BufferedDeletesStream.ApplyDeletesResult result = bufferedDeletesStream.applyDeletes(readerPool, merge.segments); + if (result.anyDeletes) { checkpoint(); } + if (!keepFullyDeletedSegments && result.allDeleted != null) { + if (infoStream != null) { + message("drop 100% deleted segments: " + result.allDeleted); + } + for(SegmentInfo info : result.allDeleted) { + segmentInfos.remove(info); + if (merge.segments.contains(info)) { + mergingSegments.remove(info); + merge.segments.remove(info); + } + } + checkpoint(); + if (readerPool != null) { + readerPool.drop(result.allDeleted); + } + } + merge.info.setBufferedDeletesGen(result.gen); // Lock order: IW -> BD @@ -3032,23 +3096,6 @@ runningMerges.remove(merge); } - private synchronized void setMergeDocStoreIsCompoundFile(MergePolicy.OneMerge merge) { - final String mergeDocStoreSegment = merge.info.getDocStoreSegment(); - if (mergeDocStoreSegment != null && !merge.info.getDocStoreIsCompoundFile()) { - final int size = segmentInfos.size(); - for(int i=0;i 0) { + merger.add(clone); + } totDocCount += clone.numDocs(); + segUpto++; } if (infoStream != null) { - message("merge: total "+totDocCount+" docs"); + message("merge: total " + totDocCount + " docs"); } merge.checkAborted(directory); @@ -3160,11 +3235,11 @@ if (infoStream != null) { message("merge segmentCodecs=" + merger.getSegmentCodecs()); - message("merge store matchedCount=" + merger.getMatchedSubReaderCount() + " vs " + numSegments); + message("merge store matchedCount=" + merger.getMatchedSubReaderCount() + " vs " + merge.readers.size()); } - anyNonBulkMerges |= merger.getMatchedSubReaderCount() != numSegments; + anyNonBulkMerges |= merger.getMatchedSubReaderCount() != merge.readers.size(); - assert mergedDocCount == totDocCount; + assert mergedDocCount == totDocCount: "mergedDocCount=" + mergedDocCount + " vs " + totDocCount; // Very important to do this before opening the reader // because codec must know if prox was written for @@ -3405,9 +3480,6 @@ readerPool.commit(); toSync = (SegmentInfos) segmentInfos.clone(); - if (!keepFullyDeletedSegments) { - toSync.pruneDeletedSegments(); - } assert filesExist(toSync); Index: lucene/src/java/org/apache/lucene/index/MergePolicy.java --- 
--- lucene/src/java/org/apache/lucene/index/MergePolicy.java	Mon Feb 14 11:08:15 2011 -0500
+++ lucene/src/java/org/apache/lucene/index/MergePolicy.java	Mon Feb 14 17:44:51 2011 -0500
@@ -72,8 +72,8 @@
     long mergeGen;                  // used by IndexWriter
     boolean isExternal;             // used by IndexWriter
     int maxNumSegmentsOptimize;     // used by IndexWriter
-    SegmentReader[] readers;        // used by IndexWriter
-    SegmentReader[] readersClone;   // used by IndexWriter
+    List<SegmentReader> readers;        // used by IndexWriter
+    List<SegmentReader> readerClones;   // used by IndexWriter
     public final SegmentInfos segments;
     boolean aborted;
     Throwable error;
Index: lucene/src/test-framework/org/apache/lucene/index/RandomIndexWriter.java
--- lucene/src/test-framework/org/apache/lucene/index/RandomIndexWriter.java	Mon Feb 14 11:08:15 2011 -0500
+++ lucene/src/test-framework/org/apache/lucene/index/RandomIndexWriter.java	Mon Feb 14 17:44:51 2011 -0500
@@ -102,6 +102,17 @@
     }
   }

+  public void updateDocument(Term t, Document doc) throws IOException {
+    w.updateDocument(t, doc);
+    if (docCount++ == flushAt) {
+      if (LuceneTestCase.VERBOSE) {
+        System.out.println("RIW.updateDocument: now doing a commit");
+      }
+      w.commit();
+      flushAt += _TestUtil.nextInt(r, 10, 1000);
+    }
+  }
+
   public void addIndexes(Directory... dirs) throws CorruptIndexException, IOException {
     w.addIndexes(dirs);
   }
@@ -127,17 +138,21 @@
   }

   public IndexReader getReader() throws IOException {
+    return getReader(true);
+  }
+
+  public IndexReader getReader(boolean applyDeletions) throws IOException {
     getReaderCalled = true;
     if (r.nextInt(4) == 2)
       w.optimize();
     // If we are writing with PreFlexRW, force a full
     // IndexReader.open so terms are sorted in codepoint
     // order during searching:
-    if (!w.codecs.getDefaultFieldCodec().equals("PreFlex") && r.nextBoolean()) {
+    if (!applyDeletions || !w.codecs.getDefaultFieldCodec().equals("PreFlex") && r.nextBoolean()) {
       if (LuceneTestCase.VERBOSE) {
         System.out.println("RIW.getReader: use NRT reader");
       }
-      return w.getReader();
+      return w.getReader(applyDeletions);
     } else {
       if (LuceneTestCase.VERBOSE) {
         System.out.println("RIW.getReader: open new reader");
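Note (not part of the patch): the new RandomIndexWriter.getReader(boolean applyDeletions) lets a test open an NRT reader without resolving deletes, in which case an updated document can be counted twice. A short usage sketch, assuming a LuceneTestCase-style test where w is a RandomIndexWriter and expectedSize is the known live-doc count (both hypothetical names):

    final boolean applyDeletions = random.nextBoolean();
    final IndexReader r = w.getReader(applyDeletions);
    try {
      if (applyDeletions) {
        // numDocs() only reflects deletes when they were applied
        assertEquals(expectedSize, r.numDocs());
      }
    } finally {
      r.close();
    }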
Index: lucene/src/test-framework/org/apache/lucene/search/QueryUtils.java
--- lucene/src/test-framework/org/apache/lucene/search/QueryUtils.java	Mon Feb 14 11:08:15 2011 -0500
+++ lucene/src/test-framework/org/apache/lucene/search/QueryUtils.java	Mon Feb 14 17:44:51 2011 -0500
@@ -2,14 +2,13 @@

 import java.io.IOException;
 import java.util.Random;
-import java.lang.reflect.Method;

 import junit.framework.Assert;

 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexReader.AtomicReaderContext;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.MultiReader;
@@ -19,6 +18,7 @@
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.ReaderUtil;
+import org.apache.lucene.util._TestUtil;

 import static org.apache.lucene.util.LuceneTestCase.TEST_VERSION_CURRENT;

@@ -172,16 +172,7 @@
     }
     w.commit();
     w.deleteDocuments( new MatchAllDocsQuery() );
-    try {
-      // Carefully invoke what is a package-private (test
-      // only, internal) method on IndexWriter:
-      Method m = IndexWriter.class.getDeclaredMethod("keepFullyDeletedSegments");
-      m.setAccessible(true);
-      m.invoke(w);
-    } catch (Exception e) {
-      // Should not happen?
-      throw new RuntimeException(e);
-    }
+    _TestUtil.keepFullyDeletedSegments(w);
     w.commit();

     if (0 < numDeletedDocs)
Index: lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java
--- lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java	Mon Feb 14 11:08:15 2011 -0500
+++ lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java	Mon Feb 14 17:44:51 2011 -0500
@@ -1243,7 +1243,7 @@
     }

     @Override
-    public String toString() {
+    public synchronized String toString() {
       return "RandomCodecProvider: " + previousMappings.toString();
     }
   }
Index: lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java
--- lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java	Mon Feb 14 11:08:15 2011 -0500
+++ lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java	Mon Feb 14 17:44:51 2011 -0500
@@ -25,6 +25,7 @@
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.io.PrintStream;
+import java.lang.reflect.Method;
 import java.util.Enumeration;
 import java.util.Random;
 import java.util.Map;
@@ -305,4 +306,17 @@
     });
     Assert.assertEquals("Reflection does not produce same map", reflectedValues, map);
   }
+
+  public static void keepFullyDeletedSegments(IndexWriter w) {
+    try {
+      // Carefully invoke what is a package-private (test
+      // only, internal) method on IndexWriter:
+      Method m = IndexWriter.class.getDeclaredMethod("keepFullyDeletedSegments");
+      m.setAccessible(true);
+      m.invoke(w);
+    } catch (Exception e) {
+      // Should not happen?
+      throw new RuntimeException(e);
+    }
+  }
 }
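Note (not part of the patch): since fully deleted segments are now dropped eagerly, any test that wants to observe an empty segment has to opt out, and the reflection hack formerly inlined in QueryUtils is now reusable from _TestUtil. A sketch of the opt-in, where dir, conf and doc are hypothetical test fixtures:

    IndexWriter w = new IndexWriter(dir, conf);
    _TestUtil.keepFullyDeletedSegments(w);  // keep 100% deleted segments
    w.addDocument(doc);
    w.commit();
    w.deleteDocuments(new MatchAllDocsQuery());
    w.commit();  // without the opt-in, this commit would drop the segment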
Index: lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
--- lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java	Mon Feb 14 11:08:15 2011 -0500
+++ lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java	Mon Feb 14 17:44:51 2011 -0500
@@ -81,7 +81,7 @@
     IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(
         TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2)
         .setMaxBufferedDeleteTerms(2));
-
+    modifier.setInfoStream(VERBOSE ? System.out : null);
     int id = 0;
     int value = 100;

Index: lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
--- lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java	Mon Feb 14 11:08:15 2011 -0500
+++ lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java	Mon Feb 14 17:44:51 2011 -0500
@@ -464,11 +464,11 @@
             setReaderPooling(true).
             setMergePolicy(newLogMergePolicy(2))
     );
+    _TestUtil.keepFullyDeletedSegments(w);

     Document doc = new Document();
     doc.add(newField("f", "doctor who", Field.Store.YES, Field.Index.ANALYZED));
     w.addDocument(doc);
-
     w.commit();

     w.deleteDocuments(new Term("f", "who"));
Index: lucene/src/test/org/apache/lucene/index/TestMultiFields.java
--- lucene/src/test/org/apache/lucene/index/TestMultiFields.java	Mon Feb 14 11:08:15 2011 -0500
+++ lucene/src/test/org/apache/lucene/index/TestMultiFields.java	Mon Feb 14 17:44:51 2011 -0500
@@ -32,6 +32,7 @@
     Directory dir = newDirectory();
     IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(NoMergePolicy.COMPOUND_FILES));
+    _TestUtil.keepFullyDeletedSegments(w);

     Map<String,List<Integer>> docs = new HashMap<String,List<Integer>>();
     Set<Integer> deleted = new HashSet<Integer>();

Index: lucene/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java
--- lucene/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java	Mon Feb 14 11:08:15 2011 -0500
+++ lucene/src/test/org/apache/lucene/index/TestNRTReaderWithThreads.java	Mon Feb 14 17:44:51 2011 -0500
@@ -36,6 +36,7 @@
         setMaxBufferedDocs(10).
         setMergePolicy(newLogMergePolicy(false,2))
     );
+    writer.setInfoStream(VERBOSE ? System.out : null);
     IndexReader reader = writer.getReader(); // start pooling readers
     reader.close();
     RunThread[] indexThreads = new RunThread[4];
Index: lucene/src/test/org/apache/lucene/index/TestRollingUpdates.java
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ lucene/src/test/org/apache/lucene/index/TestRollingUpdates.java	Mon Feb 14 17:44:51 2011 -0500
@@ -0,0 +1,75 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +import org.apache.lucene.analysis.MockAnalyzer; +import org.apache.lucene.document.*; +import org.apache.lucene.store.*; +import org.apache.lucene.util.*; +import org.junit.Test; + +public class TestRollingUpdates extends LuceneTestCase { + + // Just updates the same set of N docs over and over, to + // stress out deletions + + @Test + public void testRollingUpdates() throws Exception { + final Directory dir = newDirectory(); + + final LineFileDocs docs = new LineFileDocs(random); + + final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())); + final int SIZE = 200 * RANDOM_MULTIPLIER; + int id = 0; + IndexReader r = null; + final int numUpdates = (int) (SIZE * (2+random.nextDouble())); + for(int docIter=0;docIter= SIZE && random.nextInt(50) == 17) { + if (r != null) { + r.close(); + } + final boolean applyDeletions = random.nextBoolean(); + r = w.getReader(applyDeletions); + assertTrue("applyDeletions=" + applyDeletions + " r.numDocs()=" + r.numDocs() + " vs SIZE=" + SIZE, !applyDeletions || r.numDocs() == SIZE); + } + } + + if (r != null) { + r.close(); + } + + w.commit(); + assertEquals(SIZE, w.numDocs()); + + w.close(); + docs.close(); + + dir.close(); + } +} Index: lucene/src/test/org/apache/lucene/search/TestCachingSpanFilter.java --- lucene/src/test/org/apache/lucene/search/TestCachingSpanFilter.java Mon Feb 14 11:08:15 2011 -0500 +++ lucene/src/test/org/apache/lucene/search/TestCachingSpanFilter.java Mon Feb 14 17:44:51 2011 -0500 @@ -29,6 +29,7 @@ import org.apache.lucene.search.spans.SpanTermQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util._TestUtil; public class TestCachingSpanFilter extends LuceneTestCase { @@ -73,7 +74,9 @@ docs = searcher.search(constantScore, 1); assertEquals("[just filter] Should find a hit...", 1, docs.totalHits); - // now delete the doc, refresh the reader, and see that it's not there + // now delete the doc, refresh the reader, and see that + // it's not there + _TestUtil.keepFullyDeletedSegments(writer.w); writer.deleteDocuments(new Term("id", "1")); reader = refreshReader(reader); Index: lucene/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java --- lucene/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java Mon Feb 14 11:08:15 2011 -0500 +++ lucene/src/test/org/apache/lucene/search/TestCachingWrapperFilter.java Mon Feb 14 17:44:51 2011 -0500 @@ -22,8 +22,8 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; +import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexReader.AtomicReaderContext; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.SerialMergeScheduler; import org.apache.lucene.index.SlowMultiReaderWrapper; @@ -32,6 +32,7 @@ import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.OpenBitSet; import org.apache.lucene.util.OpenBitSetDISI; +import org.apache.lucene.util._TestUtil; public class TestCachingWrapperFilter extends LuceneTestCase { @@ -196,6 +197,7 @@ assertEquals("[just filter] Should find a hit...", 1, docs.totalHits); // now delete the doc, refresh the reader, and see that it's not there + _TestUtil.keepFullyDeletedSegments(writer.w); writer.deleteDocuments(new Term("id", "1")); reader = refreshReader(reader);