Index: lucene/src/test/org/apache/lucene/index/TestIndexWriter.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestIndexWriter.java	(revision 1031076)
+++ lucene/src/test/org/apache/lucene/index/TestIndexWriter.java	(working copy)
@@ -339,117 +339,6 @@
     }
   }
 
-  // Simulate a writer that crashed while writing segments
-  // file: make sure we can still open the index (ie,
-  // gracefully fallback to the previous segments file),
-  // and that we can add to the index:
-  public void testSimulatedCrashedWriter() throws IOException {
-    MockDirectoryWrapper dir = newDirectory();
-    dir.setPreventDoubleWrite(false);
-
-    IndexWriter writer = null;
-
-    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
-
-    // add 100 documents
-    for (int i = 0; i < 100; i++) {
-      addDoc(writer);
-    }
-
-    // close
-    writer.close();
-
-    long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
-    assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
-
-    // Make the next segments file, with last byte
-    // missing, to simulate a writer that crashed while
-    // writing segments file:
-    String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
-    String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
-                                                               "",
-                                                               1+gen);
-    IndexInput in = dir.openInput(fileNameIn);
-    IndexOutput out = dir.createOutput(fileNameOut);
-    long length = in.length();
-    for(int i=0;i<length-1;i++) {
-      out.writeByte(in.readByte());
-    }
-    in.close();
-    out.close();
-
-    IndexReader reader = null;
-    try {
-      reader = IndexReader.open(dir, true);
-    } catch (Exception e) {
-      fail("reader failed to open on a crashed index");
-    }
-    reader.close();
-
-    try {
-      writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
-    } catch (Exception e) {
-      e.printStackTrace(System.out);
-      fail("writer failed to open on a crashed index");
-    }
-
-    // add 100 documents
-    for (int i = 0; i < 100; i++) {
-      addDoc(writer);
-    }
-
-    // close
-    writer.close();
-    dir.close();
-  }
-
-  // Simulate a corrupt index by removing last byte of
-  // latest segments file and make sure we get an
-  // IOException trying to open the index:
-  public void testSimulatedCorruptIndex1() throws IOException {
-    Directory dir = newDirectory();
-
-    IndexWriter writer = null;
-
-    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
-
-    // add 100 documents
-    for (int i = 0; i < 100; i++) {
-      addDoc(writer);
-    }
-
-    // close
-    writer.close();
-
-    long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
-    assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
-
-    String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
-    String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
-                                                               "",
-                                                               1+gen);
-    IndexInput in = dir.openInput(fileNameIn);
-    IndexOutput out = dir.createOutput(fileNameOut);
-    long length = in.length();
-    for(int i=0;i<length-1;i++) {
-      out.writeByte(in.readByte());
-    }
-    in.close();
-    out.close();
-    dir.deleteFile(fileNameIn);
-
-    IndexReader reader = null;
-    try {
-      reader = IndexReader.open(dir, true);
-      fail("reader did not hit IOException on opening a corrupt index");
-    } catch (Exception e) {
-    }
-    if (reader != null) {
-      reader.close();
-    }
-    dir.close();
-  }
-
-  // Simulate a corrupt index by removing one of the cfs
-  // files and make sure we get an IOException trying to
-  // open the index:
-  public void testSimulatedCorruptIndex2() throws IOException {
-    Directory dir = newDirectory();
-
-    IndexWriter writer = null;
-
-    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
-    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundFile(true);
-
-    // add 100 documents
-    for (int i = 0; i < 100; i++) {
-      addDoc(writer);
-    }
-
-    // close
-    writer.close();
-
-    long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
-    assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
-
-    String[] files = dir.listAll();
-    boolean corrupted = false;
-    for(int i=0;i<files.length;i++) {
-      if (files[i].endsWith(".cfs")) {
-        dir.deleteFile(files[i]);
-        corrupted = true;
-        break;
-      }
-    }
-    assertTrue("failed to find cfs file to remove", corrupted);
-
-    IndexReader reader = null;
-    try {
-      reader = IndexReader.open(dir, true);
-      fail("reader did not hit IOException on opening a corrupt index");
-    } catch (Exception e) {
-    }
-    if (reader != null) {
-      reader.close();
-    }
-    dir.close();
-  }
-
-  private static class FailOnlyOnFlush extends MockDirectoryWrapper.Failure {
-    boolean doFail = false;
-    int count;
-
-    @Override
-    public void setDoFail() {
-      this.doFail = true;
-    }
-    @Override
-    public void clearDoFail() {
-      this.doFail = false;
-    }
-
-    @Override
-    public void eval(MockDirectoryWrapper dir) throws IOException {
-      if (doFail) {
-        StackTraceElement[] trace = new Exception().getStackTrace();
-        boolean sawAppend = false;
-        boolean sawFlush = false;
-        for (int i = 0; i < trace.length; i++) {
-          if ("org.apache.lucene.index.FreqProxTermsWriter".equals(trace[i].getClassName()) && "appendPostings".equals(trace[i].getMethodName()))
-            sawAppend = true;
-          if ("doFlush".equals(trace[i].getMethodName()))
-            sawFlush = true;
-        }
-
-        if (sawAppend && sawFlush && count++ >= 30) {
-          doFail = false;
-          throw new IOException("now failing during flush");
-        }
-      }
-    }
-  }
-
-  // LUCENE-1072: make sure an errant exception on flushing
-  // one segment only takes out those docs in that one flush
-  public void testDocumentsWriterAbort() throws IOException {
-    MockDirectoryWrapper dir = newDirectory();
-    FailOnlyOnFlush failure = new FailOnlyOnFlush();
-    failure.setDoFail();
-    dir.failOn(failure);
-
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
-    Document doc = new Document();
-    String contents = "aa bb cc dd ee ff gg hh ii jj kk";
-    doc.add(newField("content", contents, Field.Store.NO,
-                     Field.Index.ANALYZED));
-    boolean hitError = false;
-    for(int i=0;i<200;i++) {
-      try {
-        writer.addDocument(doc);
-      } catch (IOException ioe) {
-        // only one flush should fail:
-        assertFalse(hitError);
-        hitError = true;
-      }
-    }
-    assertTrue(hitError);
-    writer.close();
-    IndexReader reader = IndexReader.open(dir, true);
-    assertEquals(198, reader.docFreq(new Term("content", "aa")));
-    reader.close();
-    dir.close();
-  }
-
-  private class CrashingFilter extends TokenFilter {
-    String fieldName;
-    int count;
-
-    public CrashingFilter(String fieldName, TokenStream input) {
-      super(input);
-      this.fieldName = fieldName;
-    }
-
-    @Override
-    public boolean incrementToken() throws IOException {
-      if (this.fieldName.equals("crash") && count++ >= 4)
-        throw new IOException("I'm experiencing problems");
-      return input.incrementToken();
-    }
-
-    @Override
-    public void reset() throws IOException {
-      super.reset();
-      count = 0;
-    }
-  }
-
-  public void testDocumentsWriterExceptions() throws IOException {
-    Analyzer analyzer = new Analyzer() {
-      @Override
-      public TokenStream tokenStream(String fieldName, Reader reader) {
-        return new CrashingFilter(fieldName, new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
-      }
-    };
-
-    for(int i=0;i<2;i++) {
-      MockDirectoryWrapper dir = newDirectory();
-      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer));
-      //writer.setInfoStream(System.out);
-      Document doc = new Document();
-      doc.add(newField("contents", "here are some contents", Field.Store.YES,
-                       Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-      writer.addDocument(doc);
-      writer.addDocument(doc);
-      doc.add(newField("crash", "this should crash after 4 terms", Field.Store.YES,
-                       Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-      doc.add(newField("other", "this will not get indexed", Field.Store.YES,
-                       Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-      try {
-        writer.addDocument(doc);
-        fail("did not hit expected exception");
-      } catch (IOException ioe) {
-      }
-
-      if (0 == i) {
-        doc = new Document();
-        doc.add(newField("contents", "here are some contents", Field.Store.YES,
-                         Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-        writer.addDocument(doc);
-        writer.addDocument(doc);
-      }
-      writer.close();
-
-      IndexReader reader = IndexReader.open(dir, true);
-      int expected = 3+(1-i)*2;
-      assertEquals(expected, reader.docFreq(new Term("contents", "here")));
-      assertEquals(expected, reader.maxDoc());
-      int numDel = 0;
-      final Bits delDocs = MultiFields.getDeletedDocs(reader);
-      assertNotNull(delDocs);
-      for(int j=0;j<reader.maxDoc();j++) {
-        if (delDocs.get(j))
-          numDel++;
-        else {
-          reader.document(j);
-          reader.getTermFreqVectors(j);
-        }
-      }
-      assertEquals(1, numDel);
-      reader.close();
-
-      dir.close();
-    }
-  }
-
-  // LUCENE-1044: Simulate checksum error in segments_N
-  public void testSegmentsChecksumError() throws IOException {
-    Directory dir = newDirectory();
-
-    IndexWriter writer = null;
-
-    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
-
-    // add 100 documents
-    for (int i = 0; i < 100; i++) {
-      addDoc(writer);
-    }
-
-    // close
-    writer.close();
-
-    long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
-    assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
-
-    final String segmentsFileName = SegmentInfos.getCurrentSegmentFileName(dir);
-    IndexInput in = dir.openInput(segmentsFileName);
-    IndexOutput out = dir.createOutput(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", 1+gen));
-    out.copyBytes(in, in.length()-1);
-    byte b = in.readByte();
-    out.writeByte((byte) (1+b));
-    out.close();
-    in.close();
-
-    IndexReader reader = null;
-    try {
-      reader = IndexReader.open(dir, true);
-    } catch (IOException e) {
-      e.printStackTrace(System.out);
-      fail("segmentInfos failed to retry fallback to correct segments_N file");
-    }
-    reader.close();
-    dir.close();
-  }
-
   // LUCENE-1044: test writer.commit() when ac=false
   public void testForceCommit() throws IOException {
     Directory dir = newDirectory();
@@ -1919,56 +1376,6 @@
     dir.close();
   }
 
-  // Throws IOException during MockDirectoryWrapper.sync
-  private static class FailOnlyInSync extends MockDirectoryWrapper.Failure {
-    boolean didFail;
-    @Override
-    public void eval(MockDirectoryWrapper dir) throws IOException {
-      if (doFail) {
-        StackTraceElement[] trace = new Exception().getStackTrace();
-        for (int i = 0; i < trace.length; i++) {
-          if (doFail && "org.apache.lucene.store.MockDirectoryWrapper".equals(trace[i].getClassName()) && "sync".equals(trace[i].getMethodName())) {
-            didFail = true;
-            throw new IOException("now failing on purpose during sync");
-          }
-        }
-      }
-    }
-  }
-
-  // LUCENE-1044: test exception during sync
-  public void testExceptionDuringSync() throws IOException {
-    MockDirectoryWrapper dir = newDirectory();
-    FailOnlyInSync failure = new FailOnlyInSync();
-    dir.failOn(failure);
-
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
-        .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler()));
-    failure.setDoFail();
-    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(5);
-
-    for (int i = 0; i < 23; i++) {
-      addDoc(writer);
-      if ((i-1)%2 == 0) {
-        try {
-          writer.commit();
-        } catch (IOException ioe) {
-          // expected
-        }
-      }
-    }
-
-    ((ConcurrentMergeScheduler) writer.getConfig().getMergeScheduler()).sync();
-    assertTrue(failure.didFail);
-    failure.clearDoFail();
-    writer.close();
-
-    IndexReader reader = IndexReader.open(dir, true);
-    assertEquals(23, reader.numDocs());
-    reader.close();
-    dir.close();
-  }
-
   // LUCENE-1168
   public void testTermVectorCorruption() throws IOException {
@@ -2268,123 +1675,14 @@
     dir.close();
   }
 
-  // LUCENE-1198
+
+
   private static final class MockIndexWriter extends IndexWriter {
 
     public MockIndexWriter(Directory dir, IndexWriterConfig conf) throws IOException {
       super(dir, conf);
     }
 
-    boolean doFail;
-
-    @Override
-    boolean testPoint(String name) {
-      if (doFail && name.equals("DocumentsWriter.ThreadState.init start"))
-        throw new RuntimeException("intentionally failing");
-      return true;
-    }
-  }
-
-
-  public void testExceptionDocumentsWriterInit() throws IOException {
-    Directory dir = newDirectory();
-    MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
-    Document doc = new Document();
-    doc.add(newField("field", "a field", Field.Store.YES,
-                     Field.Index.ANALYZED));
-    w.addDocument(doc);
-    w.doFail = true;
-    try {
-      w.addDocument(doc);
-      fail("did not hit exception");
-    } catch (RuntimeException re) {
-      // expected
-    }
-    w.close();
-    _TestUtil.checkIndex(dir);
-    dir.close();
-  }
-
-  // LUCENE-1208
-  public void testExceptionJustBeforeFlush() throws IOException {
-    Directory dir = newDirectory();
-    MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
-    Document doc = new Document();
-    doc.add(newField("field", "a field", Field.Store.YES,
-                     Field.Index.ANALYZED));
-    w.addDocument(doc);
-
-    Analyzer analyzer = new Analyzer() {
-      @Override
-      public TokenStream tokenStream(String fieldName, Reader reader) {
-        return new CrashingFilter(fieldName, new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
-      }
-    };
-
-    Document crashDoc = new Document();
-    crashDoc.add(newField("crash", "do it on token 4", Field.Store.YES,
-                          Field.Index.ANALYZED));
-    try {
-      w.addDocument(crashDoc, analyzer);
-      fail("did not hit expected exception");
-    } catch (IOException ioe) {
-      // expected
-    }
-    w.addDocument(doc);
-    w.close();
-    dir.close();
-  }
-
-  private static final class MockIndexWriter2 extends IndexWriter {
-
-    public MockIndexWriter2(Directory dir, IndexWriterConfig conf) throws IOException {
-      super(dir, conf);
-    }
-
-    boolean doFail;
-    boolean failed;
-
-    @Override
-    boolean testPoint(String name) {
-      if (doFail && name.equals("startMergeInit")) {
-        failed = true;
-        throw new RuntimeException("intentionally failing");
-      }
-      return true;
-    }
-  }
-
-
-  // LUCENE-1210
-  public void testExceptionOnMergeInit() throws IOException {
-    Directory dir = newDirectory();
-    IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
-        .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler());
-    ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
-    MockIndexWriter2 w = new MockIndexWriter2(dir, conf);
-    w.doFail = true;
-    Document doc = new Document();
-    doc.add(newField("field", "a field", Field.Store.YES,
-                     Field.Index.ANALYZED));
-    for(int i=0;i<10;i++)
-      try {
-        w.addDocument(doc);
-      } catch (RuntimeException re) {
-        break;
-      }
-
-    ((ConcurrentMergeScheduler) w.getConfig().getMergeScheduler()).sync();
-    assertTrue(w.failed);
-    w.close();
-    dir.close();
-  }
-
-  private static final class MockIndexWriter3 extends IndexWriter {
-
-    public MockIndexWriter3(Directory dir, IndexWriterConfig conf) throws IOException {
-      super(dir, conf);
-    }
-
     boolean afterWasCalled;
     boolean beforeWasCalled;
@@ -2403,7 +1701,7 @@
   // LUCENE-1222
   public void testDoBeforeAfterFlush() throws IOException {
     Directory dir = newDirectory();
-    MockIndexWriter3 w = new MockIndexWriter3(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
+    MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
     Document doc = new Document();
     doc.add(newField("field", "a field", Field.Store.YES,
                      Field.Index.ANALYZED));
@@ -2427,58 +1725,8 @@
     dir.close();
   }
 
-  private static class FailOnlyInCommit extends MockDirectoryWrapper.Failure {
-    boolean fail1, fail2;
-
-    @Override
-    public void eval(MockDirectoryWrapper dir) throws IOException {
-      StackTraceElement[] trace = new Exception().getStackTrace();
-      boolean isCommit = false;
-      boolean isDelete = false;
-      for (int i = 0; i < trace.length; i++) {
-        if ("org.apache.lucene.index.SegmentInfos".equals(trace[i].getClassName()) && "prepareCommit".equals(trace[i].getMethodName()))
-          isCommit = true;
-        if ("org.apache.lucene.store.MockDirectoryWrapper".equals(trace[i].getClassName()) && "deleteFile".equals(trace[i].getMethodName()))
-          isDelete = true;
-      }
-
-      if (isCommit) {
-        if (!isDelete) {
-          fail1 = true;
-          throw new RuntimeException("now fail first");
-        } else {
-          fail2 = true;
-          throw new IOException("now fail during delete");
-        }
-      }
-    }
-  }
-
-  // LUCENE-1214
-  public void testExceptionsDuringCommit() throws Throwable {
-    MockDirectoryWrapper dir = newDirectory();
-    FailOnlyInCommit failure = new FailOnlyInCommit();
-    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
-    Document doc = new Document();
-    doc.add(newField("field", "a field", Field.Store.YES,
-                     Field.Index.ANALYZED));
-    w.addDocument(doc);
-    dir.failOn(failure);
-    try {
-      w.close();
-      fail();
-    } catch (IOException ioe) {
-      fail("expected only RuntimeException");
-    } catch (RuntimeException re) {
-      // Expected
-    }
-    assertTrue(failure.fail1 && failure.fail2);
-    w.rollback();
-    dir.close();
-  }
-
   final String[] utf8Data = new String[] {
     // unpaired low surrogate
     "ab\udc17cd", "ab\ufffdcd",
@@ -2870,318 +2118,6 @@
     dir.close();
   }
 
-  private abstract class RunAddIndexesThreads {
-
-    Directory dir, dir2;
-    final static int NUM_INIT_DOCS = 17;
-    IndexWriter writer2;
-    final List<Throwable> failures = new ArrayList<Throwable>();
-    volatile boolean didClose;
-    final IndexReader[] readers;
-    final int NUM_COPY;
-    final static int NUM_THREADS = 5;
-    final Thread[] threads = new Thread[NUM_THREADS];
-
-    public RunAddIndexesThreads(int numCopy) throws Throwable {
-      NUM_COPY = numCopy;
-      dir = new MockDirectoryWrapper(new RAMDirectory());
-      IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
-          TEST_VERSION_CURRENT, new MockAnalyzer())
-          .setMaxBufferedDocs(2));
-      for (int i = 0; i < NUM_INIT_DOCS; i++)
-        addDoc(writer);
-      writer.close();
-
-      dir2 = new MockDirectoryWrapper(new RAMDirectory());
-      writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
-      writer2.commit();
-
-      readers = new IndexReader[NUM_COPY];
-      for(int i=0;i<NUM_COPY;i++)
-        readers[i] = IndexReader.open(dir, true);
-    }
-  }
-
-  public void testOutOfMemoryErrorCausesCloseToFail() throws Exception {
-
-    final List<Throwable> thrown = new ArrayList<Throwable>();
-    final Directory dir = newDirectory();
-    final IndexWriter writer = new IndexWriter(dir,
-        newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())) {
-        @Override
-        public void message(final String message) {
-          if (message.startsWith("now flush at close") && 0 == thrown.size()) {
-            thrown.add(null);
-            throw new OutOfMemoryError("fake OOME at " + message);
-          }
-        }
-      };
-
-    // need to set an info stream so message is called
-    writer.setInfoStream(new PrintStream(new ByteArrayOutputStream()));
-    try {
-      writer.close();
-      fail("OutOfMemoryError expected");
-    }
-    catch (final OutOfMemoryError expected) {}
-
-    // throws IllegalStateEx w/o bug fix
-    writer.close();
-    dir.close();
-  }
-
   // LUCENE-1442
   public void testDoubleOffsetCounting() throws Exception {
     Directory dir = newDirectory();
Index: lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java	(revision 1031073)
+++ lucene/src/test/org/apache/lucene/index/TestIndexWriterExceptions.java	(working copy)
@@ -17,16 +17,32 @@
  * limitations under the License.
  */
 
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Random;
 
+import java.io.ByteArrayOutputStream;
 import java.io.IOException;
+import java.io.PrintStream;
+import java.io.Reader;
 
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util._TestUtil;
+import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenFilter;
+import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 
 public class TestIndexWriterExceptions extends LuceneTestCase {
 
@@ -214,4 +230,861 @@
     _TestUtil.checkIndex(dir);
     dir.close();
   }
+
+  // LUCENE-1198
+  private static final class MockIndexWriter2 extends IndexWriter {
+
+    public MockIndexWriter2(Directory dir, IndexWriterConfig conf) throws IOException {
+      super(dir, conf);
+    }
+
+    boolean doFail;
+
+    @Override
+    boolean testPoint(String name) {
+      if (doFail && name.equals("DocumentsWriter.ThreadState.init start"))
+        throw new RuntimeException("intentionally failing");
+      return true;
+    }
+  }
+
+  private class CrashingFilter extends TokenFilter {
+    String fieldName;
+    int count;
+
+    public CrashingFilter(String fieldName, TokenStream input) {
+      super(input);
+      this.fieldName = fieldName;
+    }
+
+    @Override
+    public boolean incrementToken() throws IOException {
+      if (this.fieldName.equals("crash") && count++ >= 4)
+        throw new IOException("I'm experiencing problems");
+      return input.incrementToken();
+    }
+
+    @Override
+    public void reset() throws IOException {
+      super.reset();
+      count = 0;
+    }
+  }
+
+  public void testExceptionDocumentsWriterInit() throws IOException {
+    Directory dir = newDirectory();
+    MockIndexWriter2 w = new MockIndexWriter2(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
+    Document doc = new Document();
+    doc.add(newField("field", "a field", Field.Store.YES,
+                     Field.Index.ANALYZED));
+    w.addDocument(doc);
+    w.doFail = true;
+    try {
+      w.addDocument(doc);
+      fail("did not hit exception");
+    } catch (RuntimeException re) {
+      // expected
+    }
+    w.close();
+    _TestUtil.checkIndex(dir);
+    dir.close();
+  }
+
+  // LUCENE-1208
+  public void testExceptionJustBeforeFlush() throws IOException {
+    Directory dir = newDirectory();
+    MockIndexWriter w = new MockIndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
+    Document doc = new Document();
+    doc.add(newField("field", "a field", Field.Store.YES,
+                     Field.Index.ANALYZED));
+    w.addDocument(doc);
+
+    Analyzer analyzer = new Analyzer() {
+      @Override
+      public TokenStream tokenStream(String fieldName, Reader reader) {
+        return new CrashingFilter(fieldName, new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
+      }
+    };
+
+    Document crashDoc = new Document();
+    crashDoc.add(newField("crash", "do it on token 4", Field.Store.YES,
+                          Field.Index.ANALYZED));
+    try {
+      w.addDocument(crashDoc, analyzer);
+      fail("did not hit expected exception");
+    } catch (IOException ioe) {
+      // expected
+    }
+    w.addDocument(doc);
+    w.close();
+    dir.close();
+  }
+
+  private static final class MockIndexWriter3 extends IndexWriter {
+
+    public MockIndexWriter3(Directory dir, IndexWriterConfig conf) throws IOException {
+      super(dir, conf);
+    }
+
+    boolean doFail;
+    boolean failed;
+
+    @Override
+    boolean testPoint(String name) {
+      if (doFail && name.equals("startMergeInit")) {
+        failed = true;
+        throw new RuntimeException("intentionally failing");
+      }
+      return true;
+    }
+  }
+
+
+  // LUCENE-1210
+  public void testExceptionOnMergeInit() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
+        .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler());
+    ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
+    MockIndexWriter3 w = new MockIndexWriter3(dir, conf);
+    w.doFail = true;
+    Document doc = new Document();
+    doc.add(newField("field", "a field", Field.Store.YES,
+                     Field.Index.ANALYZED));
+    for(int i=0;i<10;i++)
+      try {
+        w.addDocument(doc);
+      } catch (RuntimeException re) {
+        break;
+      }
+
+    ((ConcurrentMergeScheduler) w.getConfig().getMergeScheduler()).sync();
+    assertTrue(w.failed);
+    w.close();
+    dir.close();
+  }
+
+  // LUCENE-1072
+  public void testExceptionFromTokenStream() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriterConfig conf = newIndexWriterConfig( TEST_VERSION_CURRENT, new Analyzer() {
+
+      @Override
+      public TokenStream tokenStream(String fieldName, Reader reader) {
+        return new TokenFilter(new MockTokenizer(reader, MockTokenizer.SIMPLE, true)) {
+          private int count = 0;
+
+          @Override
+          public boolean incrementToken() throws IOException {
+            if (count++ == 5) {
+              throw new IOException();
+            }
+            return input.incrementToken();
+          }
+        };
+      }
+
+    });
+    IndexWriter writer = new IndexWriter(dir, conf);
+
+    Document doc = new Document();
+    String contents = "aa bb cc dd ee ff gg hh ii jj kk";
+    doc.add(newField("content", contents, Field.Store.NO,
+                     Field.Index.ANALYZED));
+    try {
+      writer.addDocument(doc);
+      fail("did not hit expected exception");
+    } catch (Exception e) {
+    }
+
+    // Make sure we can add another normal document
+    doc = new Document();
+    doc.add(newField("content", "aa bb cc dd", Field.Store.NO,
+                     Field.Index.ANALYZED));
+    writer.addDocument(doc);
+
+    // Make sure we can add another normal document
+    doc = new Document();
+    doc.add(newField("content", "aa bb cc dd", Field.Store.NO,
+                     Field.Index.ANALYZED));
+    writer.addDocument(doc);
+
+    writer.close();
+    IndexReader reader = IndexReader.open(dir, true);
+    final Term t = new Term("content", "aa");
+    assertEquals(reader.docFreq(t), 3);
+
+    // Make sure the doc that hit the exception was marked
+    // as deleted:
+    DocsEnum tdocs = MultiFields.getTermDocsEnum(reader,
+                                                 MultiFields.getDeletedDocs(reader),
+                                                 t.field(),
+                                                 new BytesRef(t.text()));
+
+    int count = 0;
+    while(tdocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+      count++;
+    }
+    assertEquals(2, count);
+
+    assertEquals(reader.docFreq(new Term("content", "gg")), 0);
+    reader.close();
+    dir.close();
+  }
+
+  private static class FailOnlyOnFlush extends MockDirectoryWrapper.Failure {
+    boolean doFail = false;
+    int count;
+
+    @Override
+    public void setDoFail() {
+      this.doFail = true;
+    }
+    @Override
+    public void clearDoFail() {
+      this.doFail = false;
+    }
+
+    @Override
+    public void eval(MockDirectoryWrapper dir) throws IOException {
+      if (doFail) {
+        StackTraceElement[] trace = new Exception().getStackTrace();
+        boolean sawAppend = false;
+        boolean sawFlush = false;
+        for (int i = 0; i < trace.length; i++) {
+          if ("org.apache.lucene.index.FreqProxTermsWriter".equals(trace[i].getClassName()) && "appendPostings".equals(trace[i].getMethodName()))
+            sawAppend = true;
+          if ("doFlush".equals(trace[i].getMethodName()))
+            sawFlush = true;
+        }
+
+        if (sawAppend && sawFlush && count++ >= 30) {
+          doFail = false;
+          throw new IOException("now failing during flush");
+        }
+      }
+    }
+  }
+
+  // LUCENE-1072: make sure an errant exception on flushing
+  // one segment only takes out those docs in that one flush
+  public void testDocumentsWriterAbort() throws IOException {
+    MockDirectoryWrapper dir = newDirectory();
+    FailOnlyOnFlush failure = new FailOnlyOnFlush();
+    failure.setDoFail();
+    dir.failOn(failure);
+
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
+    Document doc = new Document();
+    String contents = "aa bb cc dd ee ff gg hh ii jj kk";
+    doc.add(newField("content", contents, Field.Store.NO,
+                     Field.Index.ANALYZED));
+    boolean hitError = false;
+    for(int i=0;i<200;i++) {
+      try {
+        writer.addDocument(doc);
+      } catch (IOException ioe) {
+        // only one flush should fail:
+        assertFalse(hitError);
+        hitError = true;
+      }
+    }
+    assertTrue(hitError);
+    writer.close();
+    IndexReader reader = IndexReader.open(dir, true);
+    assertEquals(198, reader.docFreq(new Term("content", "aa")));
+    reader.close();
+    dir.close();
+  }
+
+  public void testDocumentsWriterExceptions() throws IOException {
+    Analyzer analyzer = new Analyzer() {
+      @Override
+      public TokenStream tokenStream(String fieldName, Reader reader) {
+        return new CrashingFilter(fieldName, new MockTokenizer(reader, MockTokenizer.WHITESPACE, false));
+      }
+    };
+
+    for(int i=0;i<2;i++) {
+      MockDirectoryWrapper dir = newDirectory();
+      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer));
+      //writer.setInfoStream(System.out);
+      Document doc = new Document();
+      doc.add(newField("contents", "here are some contents", Field.Store.YES,
+                       Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      writer.addDocument(doc);
+      writer.addDocument(doc);
+      doc.add(newField("crash", "this should crash after 4 terms", Field.Store.YES,
+                       Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
doc.add(newField("other", "this will not get indexed", Field.Store.YES, + Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); + try { + writer.addDocument(doc); + fail("did not hit expected exception"); + } catch (IOException ioe) { + } + + if (0 == i) { + doc = new Document(); + doc.add(newField("contents", "here are some contents", Field.Store.YES, + Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); + writer.addDocument(doc); + writer.addDocument(doc); + } + writer.close(); + + IndexReader reader = IndexReader.open(dir, true); + int expected = 3+(1-i)*2; + assertEquals(expected, reader.docFreq(new Term("contents", "here"))); + assertEquals(expected, reader.maxDoc()); + int numDel = 0; + final Bits delDocs = MultiFields.getDeletedDocs(reader); + assertNotNull(delDocs); + for(int j=0;j thrown = new ArrayList(); + final Directory dir = newDirectory(); + final IndexWriter writer = new IndexWriter(dir, + newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())) { + @Override + public void message(final String message) { + if (message.startsWith("now flush at close") && 0 == thrown.size()) { + thrown.add(null); + throw new OutOfMemoryError("fake OOME at " + message); + } + } + }; + + // need to set an info stream so message is called + writer.setInfoStream(new PrintStream(new ByteArrayOutputStream())); + try { + writer.close(); + fail("OutOfMemoryError expected"); + } + catch (final OutOfMemoryError expected) {} + + // throws IllegalStateEx w/o bug fix + writer.close(); + dir.close(); + } + + // LUCENE-1347 + private static final class MockIndexWriter4 extends IndexWriter { + + public MockIndexWriter4(Directory dir, IndexWriterConfig conf) throws IOException { + super(dir, conf); + } + + boolean doFail; + + @Override + boolean testPoint(String name) { + if (doFail && name.equals("rollback before checkpoint")) + throw new RuntimeException("intentionally failing"); + return true; + } + } + + // LUCENE-1347 + public void testRollbackExceptionHang() throws Throwable { + Directory dir = newDirectory(); + MockIndexWriter4 w = new MockIndexWriter4(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())); + + addDoc(w); + w.doFail = true; + try { + w.rollback(); + fail("did not hit intentional RuntimeException"); + } catch (RuntimeException re) { + // expected + } + + w.doFail = false; + w.rollback(); + dir.close(); + } + + // LUCENE-1044: Simulate checksum error in segments_N + public void testSegmentsChecksumError() throws IOException { + Directory dir = newDirectory(); + + IndexWriter writer = null; + + writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())); + + // add 100 documents + for (int i = 0; i < 100; i++) { + addDoc(writer); + } + + // close + writer.close(); + + long gen = SegmentInfos.getCurrentSegmentGeneration(dir); + assertTrue("segment generation should be > 0 but got " + gen, gen > 0); + + final String segmentsFileName = SegmentInfos.getCurrentSegmentFileName(dir); + IndexInput in = dir.openInput(segmentsFileName); + IndexOutput out = dir.createOutput(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", 1+gen)); + out.copyBytes(in, in.length()-1); + byte b = in.readByte(); + out.writeByte((byte) (1+b)); + out.close(); + in.close(); + + IndexReader reader = null; + try { + reader = IndexReader.open(dir, true); + } catch (IOException e) { + e.printStackTrace(System.out); + fail("segmentInfos failed to retry fallback to correct segments_N file"); + } + reader.close(); + dir.close(); + 
+  }
+
+  // Simulate a corrupt index by removing last byte of
+  // latest segments file and make sure we get an
+  // IOException trying to open the index:
+  public void testSimulatedCorruptIndex1() throws IOException {
+    Directory dir = newDirectory();
+
+    IndexWriter writer = null;
+
+    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
+
+    // add 100 documents
+    for (int i = 0; i < 100; i++) {
+      addDoc(writer);
+    }
+
+    // close
+    writer.close();
+
+    long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
+    assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
+
+    String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
+    String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
+                                                               "",
+                                                               1+gen);
+    IndexInput in = dir.openInput(fileNameIn);
+    IndexOutput out = dir.createOutput(fileNameOut);
+    long length = in.length();
+    for(int i=0;i<length-1;i++) {
+      out.writeByte(in.readByte());
+    }
+    in.close();
+    out.close();
+    dir.deleteFile(fileNameIn);
+
+    IndexReader reader = null;
+    try {
+      reader = IndexReader.open(dir, true);
+      fail("reader did not hit IOException on opening a corrupt index");
+    } catch (Exception e) {
+    }
+    if (reader != null) {
+      reader.close();
+    }
+    dir.close();
+  }
+
+  // Simulate a writer that crashed while writing segments
+  // file: make sure we can still open the index (ie,
+  // gracefully fallback to the previous segments file),
+  // and that we can add to the index:
+  public void testSimulatedCrashedWriter() throws IOException {
+    MockDirectoryWrapper dir = newDirectory();
+    dir.setPreventDoubleWrite(false);
+
+    IndexWriter writer = null;
+
+    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
+
+    // add 100 documents
+    for (int i = 0; i < 100; i++) {
+      addDoc(writer);
+    }
+
+    // close
+    writer.close();
+
+    long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
+    assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
+
+    // Make the next segments file, with last byte
+    // missing, to simulate a writer that crashed while
+    // writing segments file:
+    String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
+    String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
+                                                               "",
+                                                               1+gen);
+    IndexInput in = dir.openInput(fileNameIn);
+    IndexOutput out = dir.createOutput(fileNameOut);
+    long length = in.length();
+    for(int i=0;i<length-1;i++) {
+      out.writeByte(in.readByte());
+    }
+    in.close();
+    out.close();
+
+    IndexReader reader = null;
+    try {
+      reader = IndexReader.open(dir, true);
+    } catch (Exception e) {
+      fail("reader failed to open on a crashed index");
+    }
+    reader.close();
+
+    try {
+      writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
+    } catch (Exception e) {
+      e.printStackTrace(System.out);
+      fail("writer failed to open on a crashed index");
+    }
+
+    // add 100 documents
+    for (int i = 0; i < 100; i++) {
+      addDoc(writer);
+    }
+
+    // close
+    writer.close();
+    dir.close();
+  }
+
+  // Simulate a corrupt index by removing one of the cfs
+  // files and make sure we get an IOException trying to
+  // open the index:
+  public void testSimulatedCorruptIndex2() throws IOException {
+    Directory dir = newDirectory();
+
+    IndexWriter writer = null;
+
+    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
+    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundFile(true);
+
+    // add 100 documents
+    for (int i = 0; i < 100; i++) {
+      addDoc(writer);
+    }
+
+    // close
+    writer.close();
+
+    long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
+    assertTrue("segment generation should be > 0 but got " + gen, gen > 0);
+
+    String[] files = dir.listAll();
+    boolean corrupted = false;
+    for(int i=0;i<files.length;i++) {
+      if (files[i].endsWith(".cfs")) {
+        dir.deleteFile(files[i]);
+        corrupted = true;
+        break;
+      }
+    }
+    assertTrue("failed to find cfs file to remove", corrupted);
+
+    IndexReader reader = null;
+    try {
+      reader = IndexReader.open(dir, true);
+      fail("reader did not hit IOException on opening a corrupt index");
+    } catch (Exception e) {
+    }
+    if (reader != null) {
+      reader.close();
+    }
+    dir.close();
+  }
+
+  private abstract class RunAddIndexesThreads {
+
+    Directory dir, dir2;
+    final static int NUM_INIT_DOCS = 17;
+    IndexWriter writer2;
+    final List<Throwable> failures = new ArrayList<Throwable>();
+    volatile boolean didClose;
+    final IndexReader[] readers;
+    final int NUM_COPY;
+    final static int NUM_THREADS = 5;
+    final Thread[] threads = new Thread[NUM_THREADS];
+
+    public RunAddIndexesThreads(int numCopy) throws Throwable {
+      NUM_COPY = numCopy;
+      dir = new MockDirectoryWrapper(new RAMDirectory());
+      IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
+          TEST_VERSION_CURRENT, new MockAnalyzer())
+          .setMaxBufferedDocs(2));
+      for (int i = 0; i < NUM_INIT_DOCS; i++)
+        addDoc(writer);
+      writer.close();
+
+      dir2 = new MockDirectoryWrapper(new RAMDirectory());
+      writer2 = new IndexWriter(dir2, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
+      writer2.commit();
+
+      readers = new IndexReader[NUM_COPY];
+      for(int i=0;i<NUM_COPY;i++)
+        readers[i] = IndexReader.open(dir, true);
+    }