Index: src/test/org/apache/lucene/store/MockRAMDirectory.java
===================================================================
--- src/test/org/apache/lucene/store/MockRAMDirectory.java	(revision 610858)
+++ src/test/org/apache/lucene/store/MockRAMDirectory.java	(working copy)
@@ -198,7 +198,7 @@
    * RAMOutputStream.BUFFER_SIZE (now 1024) bytes.
    */
 
-  final synchronized long getRecomputedActualSizeInBytes() {
+  public final synchronized long getRecomputedActualSizeInBytes() {
     long size = 0;
     Iterator it = fileMap.values().iterator();
     while (it.hasNext())
Index: src/test/org/apache/lucene/index/TestIndexWriter.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriter.java	(revision 610858)
+++ src/test/org/apache/lucene/index/TestIndexWriter.java	(working copy)
@@ -2112,4 +2112,115 @@
     directory.close();
   }
 
+  private class IndexerThread extends Thread {
+
+    boolean diskFull;
+    IOException error;
+    IndexWriter writer;
+
+    public IndexerThread(IndexWriter writer) {
+      this.writer = writer;
+    }
+
+    public void run() {
+
+      final Document doc = new Document();
+      doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+
+      int idUpto = 0;
+      while(true) {
+        try {
+          writer.updateDocument(new Term("id", ""+(idUpto++)), doc);
+        } catch (IOException ioe) {
+          if (ioe.getMessage().startsWith("fake disk full at"))
+            diskFull = true;
+          else {
+            System.out.println("\nERROR: unexpected IOException:");
+            ioe.printStackTrace(System.out);
+            error = ioe;
+          }
+          break;
+        }
+      }
+    }
+  }
+
+  // LUCENE-1130: make sure disk full during
+  // DW.ThreadState.init() does not lead to hang:
+  public void testDiskFullWithThreads() throws IOException {
+
+    int NUM_THREADS = 3;
+
+    for(int iter=0;iter<100;iter++) {
+      MockRAMDirectory dir = new MockRAMDirectory();
+      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer());
+      ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
+      // We expect disk full exceptions in the merge threads
+      cms.setSuppressExceptions();
+      writer.setMergeScheduler(cms);
+      writer.setMaxBufferedDocs(10);
+      writer.setMergeFactor(4);
+      dir.setMaxSizeInBytes(4*1024+20*iter);
+
+      IndexerThread[] threads = new IndexerThread[NUM_THREADS];
+      boolean diskFull = false;
+
+      for(int i=0;i<NUM_THREADS;i++)
[...]
Index: src/java/org/apache/lucene/index/DocumentsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/DocumentsWriter.java	(revision 610858)
+++ src/java/org/apache/lucene/index/DocumentsWriter.java	(working copy)
[...]
 0 && ((float) fp.numPostings) / fp.postingsHashSize < 0.2) {
@@ -1215,7 +1239,7 @@
       int fieldCount;
       Fieldable[] docFields = new Fieldable[1];
-      int lastDocID = -1;
+      int lastGen = -1;
       FieldData next;
       boolean doNorms;
@@ -2277,7 +2301,7 @@
     boolean success = false;
     try {
-      state.init(doc, nextDocID++);
+      state.init(doc, nextDocID);
       if (delTerm != null) {
         addDeleteTerm(delTerm, state.docID);
@@ -2285,16 +2309,21 @@
         state.doFlushAfter = timeToFlushDeletes();
       }
 
+      // Only increment nextDocID on successful init
+      nextDocID++;
+
       success = true;
     } finally {
      if (!success) {
        synchronized(this) {
          state.isIdle = true;
+          notifyAll();
          if (state.doFlushAfter) {
            state.doFlushAfter = false;
            flushPending = false;
          }
-          notifyAll();
+          if (state.abortOnExc)
+            abort();
        }
      }
    }
@@ -2333,6 +2362,7 @@
      if (!success) {
        synchronized(this) {
          state.isIdle = true;
+          notifyAll();
          if (state.abortOnExc)
            // Abort all buffered docs since last flush
            abort();
@@ -2342,7 +2372,6 @@
          // keeps indexing as "all or none" (atomic) when
          // adding a document:
          addDeleteDocID(state.docID);
-          notifyAll();
        }
      }
    }
@@ -2474,6 +2503,7 @@
      if (nextWriteDocID == state.docID) {
        // It's my turn, so write everything now:
        state.isIdle = true;
+        notifyAll();
        nextWriteDocID++;
        state.writeDocument();
@@ -2497,12 +2527,6 @@
            numWaiting = upto;
          }
        }
-
-        // Now notify any incoming calls to addDocument
-        // (above) that are waiting on our line to
-        // shrink
-        notifyAll();
-
      } else {
        // Another thread got a docID before me, but, it
        // hasn't finished its processing. So add myself to
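
The hunks above revolve around one concurrency pattern: a per-thread state is handed out under the DocumentsWriter lock, the shared nextDocID counter is advanced only after ThreadState.init() has succeeded, and every path that returns a state to the idle pool also calls notifyAll(), so threads blocked waiting for an idle state cannot hang when an exception (such as the simulated disk full) aborts initialization. Below is a minimal standalone sketch of that pattern, not code from the patch; the class and method names (StateSketch, PoolSketch, acquire, indexOne) are invented for illustration.

import java.io.IOException;

// Stands in for a single DocumentsWriter.ThreadState (invented for this sketch).
class StateSketch {
  boolean isIdle = true;
  int docID;
}

class PoolSketch {
  private int nextDocID;
  private final StateSketch state = new StateSketch();

  // Block until the (single) state is idle, then claim it.
  synchronized StateSketch acquire() throws InterruptedException {
    while (!state.isIdle)
      wait();
    state.isIdle = false;
    return state;
  }

  // "Index" one document.  The shared counter is advanced only after the
  // failure-prone step succeeds, and every exit that returns the state to
  // the pool also calls notifyAll(), so a thread blocked in acquire()
  // cannot hang when a (simulated) disk-full exception is thrown.
  synchronized void indexOne(StateSketch s, boolean simulateDiskFull) throws IOException {
    boolean success = false;
    try {
      if (simulateDiskFull)
        throw new IOException("fake disk full");
      s.docID = nextDocID;
      nextDocID++;           // increment only on successful init
      success = true;
      s.isIdle = true;
      notifyAll();           // normal path wakes waiters
    } finally {
      if (!success) {
        s.isIdle = true;
        notifyAll();         // failure path must wake waiters too
      }
    }
  }
}

A thread calling acquire() while another thread is inside indexOne() only makes progress because both the success and the failure paths perform the notifyAll(); dropping it from the finally block reproduces the kind of hang that testDiskFullWithThreads exercises.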