Index: src/test/org/apache/lucene/index/TestIndexWriterDelete.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriterDelete.java (revision 788310)
+++ src/test/org/apache/lucene/index/TestIndexWriterDelete.java (working copy)
@@ -264,6 +264,138 @@
     }
   }
 
+  // test deleteAll()
+  public void testDeleteAll() throws IOException {
+    for (int pass=0;pass<2;pass++) {
+      boolean autoCommit = (0==pass);
+      Directory dir = new MockRAMDirectory();
+      IndexWriter modifier = new IndexWriter(dir, autoCommit,
+                                             new WhitespaceAnalyzer(), true);
+      modifier.setMaxBufferedDocs(2);
+      modifier.setMaxBufferedDeleteTerms(2);
+
+      int id = 0;
+      int value = 100;
+
+      for (int i = 0; i < 7; i++) {
+        addDoc(modifier, ++id, value);
+      }
+      modifier.commit();
+
+      IndexReader reader = IndexReader.open(dir);
+      assertEquals(7, reader.numDocs());
+      reader.close();
+
+      // Add 1 doc (so we will have something buffered)
+      addDoc(modifier, 99, value);
+
+      // Delete all
+      modifier.deleteAll();
+
+      // Delete all shouldn't be on disk yet
+      reader = IndexReader.open(dir);
+      assertEquals(7, reader.numDocs());
+      reader.close();
+
+      // Add 2 new docs (after the deleteAll, before the commit)
+      addDoc(modifier, 101, value);
+      addDoc(modifier, 102, value);
+
+      // commit the delete all
+      modifier.commit();
+
+      // Validate that only the 2 docs added after the deleteAll remain
+      reader = IndexReader.open(dir);
+      assertEquals(2, reader.numDocs());
+      reader.close();
+
+      modifier.close();
+      dir.close();
+    }
+  }
+
+  // test rollback of deleteAll()
+  public void testDeleteAllRollback() throws IOException {
+    Directory dir = new MockRAMDirectory();
+    IndexWriter modifier = new IndexWriter(dir, false,
+                                           new WhitespaceAnalyzer(), true);
+    modifier.setMaxBufferedDocs(2);
+    modifier.setMaxBufferedDeleteTerms(2);
+
+    int id = 0;
+    int value = 100;
+
+    for (int i = 0; i < 7; i++) {
+      addDoc(modifier, ++id, value);
+    }
+    modifier.commit();
+
+    addDoc(modifier, ++id, value);
+
+    IndexReader reader = IndexReader.open(dir);
+    assertEquals(7, reader.numDocs());
+    reader.close();
+
+    // Delete all
+    modifier.deleteAll();
+
+    // Roll it back
+    modifier.rollback();
+    modifier.close();
+
+    // Validate that the docs are still there
+    reader = IndexReader.open(dir);
+    assertEquals(7, reader.numDocs());
+    reader.close();
+
+    dir.close();
+  }
+
+
+  // test deleteAll() w/ near real-time reader
+  public void testDeleteAllNRT() throws IOException {
+    Directory dir = new MockRAMDirectory();
+    IndexWriter modifier = new IndexWriter(dir, false,
+                                           new WhitespaceAnalyzer(), true);
+    modifier.setMaxBufferedDocs(2);
+    modifier.setMaxBufferedDeleteTerms(2);
+
+    int id = 0;
+    int value = 100;
+
+    for (int i = 0; i < 7; i++) {
+      addDoc(modifier, ++id, value);
+    }
+    modifier.commit();
+
+    IndexReader reader = modifier.getReader();
+    assertEquals(7, reader.numDocs());
+    reader.close();
+
+    addDoc(modifier, ++id, value);
+    addDoc(modifier, ++id, value);
+
+    // Delete all
+    modifier.deleteAll();
+
+    reader = modifier.getReader();
+    assertEquals(0, reader.numDocs());
+    reader.close();
+
+
+    // Roll it back
+    modifier.rollback();
+    modifier.close();
+
+    // Validate that the docs are still there
+    reader = IndexReader.open(dir);
+    assertEquals(7, reader.numDocs());
+    reader.close();
+
+    dir.close();
+  }
+
+
 
   private void addDoc(IndexWriter modifier, int id, int value) throws IOException {
     Document doc = new Document();
Index: src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexWriter.java (revision 788310)
+++ src/java/org/apache/lucene/index/IndexWriter.java (working copy)
@@ -3236,6 +3236,55 @@
     closeInternal(false);
   }
 
+  /**
+   * Delete all documents in the index.
+   *
+   * <p>This method will drop all buffered documents and will
+   * remove all segments from the index. This change will not be
+   * visible until a {@link #commit()} has been called. This method
+   * can be rolled back using {@link #rollback()}.
+   *
+   * <p>NOTE: this method is much faster than using deleteDocuments( new MatchAllDocsQuery() ).
+   *
+   * <p>NOTE: this method will forcefully abort all merges
+   * in progress. If other threads are running {@link
+   * #optimize()} or any of the addIndexes methods, they
+   * will receive {@link MergePolicy.MergeAbortedException}s.
+   */
+  public synchronized void deleteAll() throws IOException {
+    boolean success = false;
+    docWriter.pauseAllThreads();
+    try {
+
+      // Abort any running merges
+      finishMerges(false);
+
+      // Remove any buffered docs
+      docWriter.abort();
+
+      // Remove all segments
+      segmentInfos.clear();
+
+      // Ask deleter to locate unreferenced files & remove them:
+      deleter.checkpoint(segmentInfos, false);
+      deleter.refresh();
+
+      // Don't bother saving any changes in our segmentInfos
+      readerPool.clear(null);
+
+      // Mark that the index has changed
+      ++changeCount;
+      success = true;
+    } catch (OutOfMemoryError oom) {
+      handleOOM(oom, "deleteAll");
+    } finally {
+      docWriter.resumeAllThreads();
+      if (!success && infoStream != null) {
+        message("hit exception during deleteAll");
+      }
+    }
+  }
+
   private synchronized void finishMerges(boolean waitForMerges) throws IOException {
     if (!waitForMerges) {
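For reference, a minimal standalone usage sketch of the new deleteAll() call (not part of the patch). It assumes the 2.9-era IndexWriter constructor, RAMDirectory, and WhitespaceAnalyzer; the buffered-until-commit behavior it prints is the same one the tests above assert.

import java.io.IOException;

import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class DeleteAllUsage {
  public static void main(String[] args) throws IOException {
    Directory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true,
                                         IndexWriter.MaxFieldLength.UNLIMITED);

    // Index and commit a single document
    Document doc = new Document();
    doc.add(new Field("id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
    writer.addDocument(doc);
    writer.commit();

    // deleteAll() is buffered: a reader opened on the directory still
    // sees the old documents until commit() is called
    writer.deleteAll();
    IndexReader before = IndexReader.open(dir);
    System.out.println("before commit: " + before.numDocs()); // prints 1
    before.close();

    // Make the deleteAll visible (calling rollback() instead would discard it)
    writer.commit();
    IndexReader after = IndexReader.open(dir);
    System.out.println("after commit: " + after.numDocs()); // prints 0
    after.close();

    writer.close();
    dir.close();
  }
}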