Index: lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java	(revision 0)
+++ lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java	(revision 0)
@@ -0,0 +1,525 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+import static org.apache.lucene.index.TestIndexWriter.assertNoUnreferencedFiles;
+
+/**
+ * Tests for IndexWriter when the disk runs out of space
+ */
+public class TestIndexWriterOnDiskFull extends LuceneTestCase {
+
+  /*
+   * Make sure IndexWriter cleans up on hitting a disk
+   * full exception in addDocument.
+   * TODO: how to do this on windows with FSDirectory?
+   */
+  public void testAddDocumentOnDiskFull() throws IOException {
+
+    for(int pass=0;pass<2;pass++) {
+      if (VERBOSE)
+        System.out.println("TEST: pass=" + pass);
+      boolean doAbort = pass == 1;
+      long diskFree = 200;
+      while(true) {
+        if (VERBOSE)
+          System.out.println("TEST: cycle: diskFree=" + diskFree);
+        MockDirectoryWrapper dir = new MockDirectoryWrapper(new RAMDirectory());
+        dir.setMaxSizeInBytes(diskFree);
+        IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
+        MergeScheduler ms = writer.getConfig().getMergeScheduler();
+        if (ms instanceof ConcurrentMergeScheduler)
+          // This test intentionally produces exceptions
+          // in the threads that CMS launches; we don't
+          // want to pollute test output with these.
+          ((ConcurrentMergeScheduler) ms).setSuppressExceptions();
+
+        boolean hitError = false;
+        try {
+          for(int i=0;i<200;i++) {
+            addDoc(writer);
+          }
+          writer.commit();
+        } catch (IOException e) {
+          if (VERBOSE) {
+            System.out.println("TEST: exception on addDoc");
+            e.printStackTrace(System.out);
+          }
+          hitError = true;
+        }
+
+        if (hitError) {
+          if (doAbort) {
+            writer.rollback();
+          } else {
+            try {
+              writer.close();
+            } catch (IOException e) {
+              if (VERBOSE) {
+                System.out.println("TEST: exception on close");
+                e.printStackTrace(System.out);
+              }
+              dir.setMaxSizeInBytes(0);
+              writer.close();
+            }
+          }
+
+          //_TestUtil.syncConcurrentMerges(ms);
+
+          if (_TestUtil.anyFilesExceptWriteLock(dir)) {
+            assertNoUnreferencedFiles(dir, "after disk full during addDocument");
+
+            // Make sure reader can open the index:
+            IndexReader.open(dir, true).close();
+          }
+
+          dir.close();
+          // Now try again w/ more space:
+
+          diskFree += 500;
+        } else {
+          //_TestUtil.syncConcurrentMerges(writer);
+          writer.close();
+          dir.close();
+          break;
+        }
+      }
+    }
+  }
+
+  /*
+  Test: make sure when we run out of disk space or hit
+  random IOExceptions in any of the addIndexes(*) calls
+  that 1) index is not corrupt (searcher can open/search
+  it) and 2) transactional semantics are followed:
+  either all or none of the incoming documents were in
+  fact added.
+  */
+  public void testAddIndexOnDiskFull() throws IOException
+  {
+    int START_COUNT = 57;
+    int NUM_DIR = 50;
+    int END_COUNT = START_COUNT + NUM_DIR*25;
+
+    // Build up a bunch of dirs that have indexes which we
+    // will then merge together by calling addIndexes(*):
+    Directory[] dirs = new Directory[NUM_DIR];
+    long inputDiskUsage = 0;
+    for(int i=0;i<NUM_DIR;i++) {
+      dirs[i] = new RAMDirectory();
+      IndexWriter writer = new IndexWriter(dirs[i], newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
+      for(int j=0;j<25;j++) {
+        addDocWithIndex(writer, 25*i+j);
+      }
+      writer.close();
+      String[] files = dirs[i].listAll();
+      for(int j=0;j<files.length;j++) {
+        inputDiskUsage += dirs[i].fileLength(files[j]);
+      }
+    }
[...]
+        if (diskRatio >= 2.0) {
+          rate /= 2;
+        }
+        if (diskRatio >= 4.0) {
+          rate /= 2;
+        }
+        if (diskRatio >= 6.0) {
+          rate = 0.0;
+        }
+        if (VERBOSE)
+          testName = "disk full test " + methodName + " with disk full at " + diskFree + " bytes";
+      } else {
+        thisDiskFree = 0;
+        rate = 0.0;
+        if (VERBOSE)
+          testName = "disk full test " + methodName + " with unlimited disk space";
+      }
+
+      if (VERBOSE)
+        System.out.println("\ncycle: " + testName);
+
+      dir.setTrackDiskUsage(true);
+      dir.setMaxSizeInBytes(thisDiskFree);
+      dir.setRandomIOExceptionRate(rate, diskFree);
+
+      try {
+
+        if (0 == method) {
+          writer.addIndexes(dirs);
+          writer.optimize();
+        } else if (1 == method) {
+          IndexReader readers[] = new IndexReader[dirs.length];
+          for(int i=0;i<dirs.length;i++) {
+            readers[i] = IndexReader.open(dirs[i], true);
+          }
[...]

Index: lucene/src/test/org/apache/lucene/index/TestIndexWriter.java
===================================================================
[...]
-        if (diskRatio >= 2.0) {
-          rate /= 2;
-        }
-        if (diskRatio >= 4.0) {
-          rate /= 2;
-        }
-        if (diskRatio >= 6.0) {
-          rate = 0.0;
-        }
-        if (VERBOSE)
-          testName = "disk full test " + methodName + " with disk full at " + diskFree + " bytes";
-      } else {
-        thisDiskFree = 0;
-        rate = 0.0;
-        if (VERBOSE)
-          testName = "disk full test " + methodName + " with unlimited disk space";
-      }
-
-      if (VERBOSE)
-        System.out.println("\ncycle: " + testName);
-
-      dir.setTrackDiskUsage(true);
-      dir.setMaxSizeInBytes(thisDiskFree);
-      dir.setRandomIOExceptionRate(rate, diskFree);
-
-      try {
-
-        if (0 == method) {
-          writer.addIndexes(dirs);
-          writer.optimize();
-        } else if (1 == method) {
-          IndexReader readers[] = new IndexReader[dirs.length];
-          for(int i=0;i<dirs.length;i++) {
-            readers[i] = IndexReader.open(dirs[i], true);
-          }
[...]
-            if (fullCount++ >= 5)
-              break;
-          } else {
-            if (noErrors) {
-              System.out.println(Thread.currentThread().getName() + ": ERROR: unexpected IOException:");
-              ioe.printStackTrace(System.out);
-              error = ioe;
-            }
-            break;
-          }
-        } catch (Throwable t) {
-          //t.printStackTrace(System.out);
-          if (noErrors) {
-            System.out.println(Thread.currentThread().getName() + ": ERROR: unexpected Throwable:");
-            t.printStackTrace(System.out);
-            error = t;
-          }
-          break;
-        }
-      } while(System.currentTimeMillis() < stopTime);
-    }
-  }
-
-  // LUCENE-1130: make sure we can close() even while
-  // threads are trying to add documents.  Strictly
-  // speaking, this isn't valid use of Lucene's APIs, but we
-  // still want to be robust to this case:
-  public void testCloseWithThreads() throws Exception {
-    int NUM_THREADS = 3;
-
-    for(int iter=0;iter<7;iter++) {
-      Directory dir = newDirectory();
-      IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
-        .setMaxBufferedDocs(10).setMergeScheduler(new ConcurrentMergeScheduler());
-      // We expect AlreadyClosedException
-      ((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
-      IndexWriter writer = new IndexWriter(dir, conf);
-      ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
-
-      IndexerThread[] threads = new IndexerThread[NUM_THREADS];
-
-      for(int i=0;i<NUM_THREADS;i++)
-        threads[i] = new IndexerThread(writer, false);
-
-      for(int i=0;i<NUM_THREADS;i++)
-        threads[i].start();
-
-      boolean done = false;
-      while(!done) {
-        Thread.sleep(100);
-        for(int i=0;i<NUM_THREADS;i++)
-          // only stop when at least one thread has added a doc
-          if (threads[i].addCount > 0) {
-            done = true;
-            break;
-          }
-      }
-
-      writer.close(false);
-
-      // Make sure threads that are adding docs are not hung:
-      for(int i=0;i<NUM_THREADS;i++) {
-        // Without fix for LUCENE-1130: one of the
-        // threads will hang
-        threads[i].join();
-        if (threads[i].isAlive())
-          fail("thread seems to be hung");
-      }
-
-      // Quick test to make sure index is not corrupt:
-      IndexReader reader = IndexReader.open(dir, true);
-      TermDocs tdocs = reader.termDocs(new Term("field", "aaa"));
-      int count = 0;
-      while(tdocs.next()) {
-        count++;
-      }
-      assertTrue(count > 0);
-      reader.close();
-
-      dir.close();
-    }
-  }
-
-  // LUCENE-1130: make sure immediate disk full on creating
-  // an IndexWriter (hit during DW.ThreadState.init()) is
-  // OK:
-  public void testImmediateDiskFull() throws IOException {
-    MockDirectoryWrapper dir = newDirectory();
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
-      .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler()));
-    dir.setMaxSizeInBytes(Math.max(1, dir.getRecomputedActualSizeInBytes()));
-    final Document doc = new Document();
-    doc.add(newField("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-    try {
-      writer.addDocument(doc);
-      fail("did not hit disk full");
-    } catch (IOException ioe) {
-    }
-    // Without fix for LUCENE-1130: this call will hang:
-    try {
-      writer.addDocument(doc);
-      fail("did not hit disk full");
-    } catch (IOException ioe) {
-    }
-    try {
-      writer.close(false);
-      fail("did not hit disk full");
-    } catch (IOException ioe) {
-    }
-
-    // Make sure once disk space is avail again, we can
-    // cleanly close:
-    dir.setMaxSizeInBytes(0);
-    writer.close(false);
-    dir.close();
-  }
-
-  // LUCENE-1130: make sure immediate disk full on creating
-  // an IndexWriter (hit during DW.ThreadState.init()), with
-  // multiple threads, is OK:
-  public void testImmediateDiskFullWithThreads() throws Exception {
-
-    int NUM_THREADS = 3;
-
-    for(int iter=0;iter<10;iter++) {
-      MockDirectoryWrapper dir = newDirectory();
-      IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
-        .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler());
-      // We expect disk full exceptions in the merge threads
-      ((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
-      IndexWriter writer = new IndexWriter(dir, conf);
-      ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
-      dir.setMaxSizeInBytes(4*1024+20*iter);
-
-      IndexerThread[] threads = new IndexerThread[NUM_THREADS];
-
-      for(int i=0;i<NUM_THREADS;i++)
-        threads[i] = new IndexerThread(writer, true);
[...]

Index: lucene/src/test/org/apache/lucene/index/TestIndexWriterWithThreads.java
===================================================================
[...]
+            if (fullCount++ >= 5)
+              break;
+          } else {
+            if (noErrors) {
+              System.out.println(Thread.currentThread().getName() + ": ERROR: unexpected IOException:");
+              ioe.printStackTrace(System.out);
+              error = ioe;
+            }
+            break;
+          }
+        } catch (Throwable t) {
+          //t.printStackTrace(System.out);
+          if (noErrors) {
+            System.out.println(Thread.currentThread().getName() + ": ERROR: unexpected Throwable:");
+            t.printStackTrace(System.out);
+            error = t;
+          }
+          break;
+        }
+      } while(System.currentTimeMillis() < stopTime);
+    }
+  }
+
+  // LUCENE-1130: make sure immediate disk full on creating
+  // an IndexWriter (hit during DW.ThreadState.init()), with
+  // multiple threads, is OK:
+  public void testImmediateDiskFullWithThreads() throws Exception {
+
+    int NUM_THREADS = 3;
+
+    for(int iter=0;iter<10;iter++) {
+      MockDirectoryWrapper dir = newDirectory();
+      IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
+        .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler());
+      // We expect disk full exceptions in the merge threads
+      ((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
+      IndexWriter writer = new IndexWriter(dir, conf);
+      ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
+      dir.setMaxSizeInBytes(4*1024+20*iter);
+
+      IndexerThread[] threads = new IndexerThread[NUM_THREADS];
+
+      for(int i=0;i<NUM_THREADS;i++)
+        threads[i] = new IndexerThread(writer, true);
+
+      for(int i=0;i<NUM_THREADS;i++)
+        threads[i].start();
+
+      for(int i=0;i<NUM_THREADS;i++) {
+        // Without fix for LUCENE-1130: one of the
+        // threads will hang
+        threads[i].join();
+        assertTrue("hit unexpected Throwable", threads[i].error == null);
+      }
+
+      // Make sure once disk space is avail again, we can
+      // cleanly close:
+      dir.setMaxSizeInBytes(0);
+      writer.close(false);
+      dir.close();
+    }
+  }
+
+  // LUCENE-1130: make sure we can close() even while
+  // threads are trying to add documents.  Strictly
+  // speaking, this isn't valid use of Lucene's APIs, but we
+  // still want to be robust to this case:
+  public void testCloseWithThreads() throws Exception {
+    int NUM_THREADS = 3;
+
+    for(int iter=0;iter<7;iter++) {
+      Directory dir = newDirectory();
+      IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
+        .setMaxBufferedDocs(10).setMergeScheduler(new ConcurrentMergeScheduler());
+      // We expect AlreadyClosedException
+      ((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
+      IndexWriter writer = new IndexWriter(dir, conf);
+      ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
+
+      IndexerThread[] threads = new IndexerThread[NUM_THREADS];
+
+      for(int i=0;i<NUM_THREADS;i++)
+        threads[i] = new IndexerThread(writer, false);
+
+      for(int i=0;i<NUM_THREADS;i++)
+        threads[i].start();
+
+      boolean done = false;
+      while(!done) {
+        Thread.sleep(100);
+        for(int i=0;i<NUM_THREADS;i++)
+          // only stop when at least one thread has added a doc
+          if (threads[i].addCount > 0) {
+            done = true;
+            break;
+          }
+      }
+
+      writer.close(false);
+
+      // Make sure threads that are adding docs are not hung:
+      for(int i=0;i<NUM_THREADS;i++) {
+        // Without fix for LUCENE-1130: one of the
+        // threads will hang
+        threads[i].join();
+        if (threads[i].isAlive())
+          fail("thread seems to be hung");
+      }
+
+      // Quick test to make sure index is not corrupt:
+      IndexReader reader = IndexReader.open(dir, true);
+      TermDocs tdocs = reader.termDocs(new Term("field", "aaa"));
+      int count = 0;
+      while(tdocs.next()) {
+        count++;
+      }
+      assertTrue(count > 0);
+      reader.close();
+
+      dir.close();
+    }
+  }
+
+  // Runs test, with multiple threads, using the specific
+  // failure to trigger an IOException
+  public void _testMultipleThreadsFailure(MockDirectoryWrapper.Failure failure) throws Exception {
+
+    int NUM_THREADS = 3;
+
+    for(int iter=0;iter<2;iter++) {
+      MockDirectoryWrapper dir = newDirectory();
+      IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT,
+          new MockAnalyzer()).setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler());
+      // We expect disk full exceptions in the merge threads
+      ((ConcurrentMergeScheduler) conf.getMergeScheduler()).setSuppressExceptions();
+      IndexWriter writer = new IndexWriter(dir, conf);
+      ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
+
+      IndexerThread[] threads = new IndexerThread[NUM_THREADS];
+
+      for(int i=0;i