Index: lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/precedence/TestPrecedenceQueryParser.java
===================================================================
--- lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/precedence/TestPrecedenceQueryParser.java	(revision 1530072)
+++ lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/precedence/TestPrecedenceQueryParser.java	(working copy)
@@ -29,8 +29,8 @@
import java.util.TimeZone;

import org.apache.lucene.analysis.*;
+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.document.DateTools;
import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
import org.apache.lucene.queryparser.flexible.core.QueryNodeParseException;
@@ -48,6 +48,8 @@
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.automaton.BasicAutomata;
import org.apache.lucene.util.automaton.CharacterRunAutomaton;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;

/**
 *
@@ -64,8 +66,18 @@
//that it adjusts to fit the precedence requirement, adding its extra tests.
public class TestPrecedenceQueryParser extends LuceneTestCase {
- public static Analyzer qpAnalyzer = new QPTestAnalyzer();
+ public static Analyzer qpAnalyzer;
+ @BeforeClass
+ public static void beforeClass() {
+ qpAnalyzer = new QPTestAnalyzer();
+ }
+
+ @AfterClass
+ public static void afterClass() {
+ qpAnalyzer = null;
+ }
+
public static final class QPTestFilter extends TokenFilter {
/**
* Filter which discards the token 'stop' and which expands the token
Index: lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java
===================================================================
--- lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java (revision 1530072)
+++ lucene/queryparser/src/test/org/apache/lucene/queryparser/flexible/standard/TestQPHelper.java (working copy)
@@ -70,6 +70,8 @@
import org.apache.lucene.util.automaton.BasicAutomata;
import org.apache.lucene.util.automaton.CharacterRunAutomaton;
import org.apache.lucene.util.automaton.RegExp;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
import org.junit.Ignore;
/**
@@ -81,8 +83,18 @@
// TODO: really this should extend QueryParserTestBase too!
public class TestQPHelper extends LuceneTestCase {
- public static Analyzer qpAnalyzer = new QPTestAnalyzer();
+ public static Analyzer qpAnalyzer;
+ @BeforeClass
+ public static void beforeClass() {
+ qpAnalyzer = new QPTestAnalyzer();
+ }
+
+ @AfterClass
+ public static void afterClass() {
+ qpAnalyzer = null;
+ }
+
public static final class QPTestFilter extends TokenFilter {
private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
Index: lucene/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java
===================================================================
--- lucene/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java (revision 1530072)
+++ lucene/queryparser/src/test/org/apache/lucene/queryparser/util/QueryParserTestBase.java (working copy)
@@ -41,8 +41,8 @@
//import org.apache.lucene.queryparser.classic.ParseException;
//import org.apache.lucene.queryparser.classic.QueryParser;
//import org.apache.lucene.queryparser.classic.QueryParserBase;
+import org.apache.lucene.queryparser.classic.QueryParserBase;
//import org.apache.lucene.queryparser.classic.QueryParserTokenManager;
-import org.apache.lucene.queryparser.classic.QueryParserBase;
import org.apache.lucene.queryparser.flexible.standard.CommonQueryParserConfiguration;
import org.apache.lucene.search.*;
import org.apache.lucene.search.BooleanClause.Occur;
@@ -51,6 +51,8 @@
import org.apache.lucene.util.automaton.BasicAutomata;
import org.apache.lucene.util.automaton.CharacterRunAutomaton;
import org.apache.lucene.util.automaton.RegExp;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
/**
* Base Test class for QueryParser subclasses
@@ -59,8 +61,18 @@
// to the core QP and subclass/use the parts that are not in the flexible QP
public abstract class QueryParserTestBase extends LuceneTestCase {
- public static Analyzer qpAnalyzer = new QPTestAnalyzer();
+ public static Analyzer qpAnalyzer;
+ @BeforeClass
+ public static void beforeClass() {
+ qpAnalyzer = new QPTestAnalyzer();
+ }
+
+ @AfterClass
+ public static void afterClass() {
+ qpAnalyzer = null;
+ }
+
public static final class QPTestFilter extends TokenFilter {
CharTermAttribute termAtt;
OffsetAttribute offsetAtt;
@@ -102,7 +114,6 @@
}
}
-
public static final class QPTestAnalyzer extends Analyzer {
/** Filters MockTokenizer with StopFilter. */
Index: lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java
===================================================================
--- lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java (revision 1530072)
+++ lucene/core/src/test/org/apache/lucene/index/TestIndexWriterReader.java (working copy)
@@ -35,6 +35,7 @@
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper.FakeIOException;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.BytesRef;
@@ -157,6 +158,7 @@
assertEquals(1, count(new Term("id", Integer.toString(8000)), r2));
r1.close();
+ assertTrue(r2.isCurrent());
writer.close();
assertTrue(r2.isCurrent());
@@ -1042,10 +1044,6 @@
d.close();
}
- private static final class FakeIOException extends IOException {
- public FakeIOException() {}
- }
-
@Test
public void testNRTOpenExceptions() throws Exception {
// LUCENE-5262: test that several failed attempts to obtain an NRT reader
Index: lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
===================================================================
--- lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java (revision 1530072)
+++ lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java (working copy)
@@ -1025,13 +1025,23 @@
@Override
public void run() {
// LUCENE-2239: won't work with NIOFS/MMAP
- Directory dir = new MockDirectoryWrapper(random, new RAMDirectory());
+ MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory());
+
+ // When interrupt arrives in w.close(), when it's
+ // writing liveDocs, this can lead to double-write of
+ // _X_N.del:
+ //dir.setPreventDoubleWrite(false);
IndexWriter w = null;
while(!finish) {
try {
while(!finish) {
if (w != null) {
+ // If interrupt arrives inside here, it's
+ // fine: we will cycle back and the first
+ // thing we do is try to close again,
+ // i.e. we'll never try to open a new writer
+ // until this one successfully closes:
w.close();
w = null;
}
Index: lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
===================================================================
--- lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java (revision 1530072)
+++ lucene/core/src/test/org/apache/lucene/index/TestIndexWriterDelete.java (working copy)
@@ -40,6 +40,7 @@
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper.FakeIOException;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
@@ -322,7 +323,7 @@
int value = 100;
try {
latch.await();
- for (int i = 0; i < 1000; i++) {
+ for (int j = 0; j < 1000; j++) {
Document doc = new Document();
doc.add(newTextField("content", "aaa", Field.Store.NO));
doc.add(newStringField("id", String.valueOf(id++), Field.Store.YES));
@@ -1218,13 +1219,11 @@
d.close();
}
- private static class FakeIOException extends IOException {
- }
-
- // Make sure if we hit disk full, and then later disk
- // frees up, and we successfully close IW or open an NRT
+ // Make sure if we hit a transient IOException (e.g., disk
+ // full), and then the exception stops (e.g., disk frees
+ // up), and we then successfully close IW or open an NRT
// reader, we don't lose any deletes:
- public void testNoLostDeletesOnDiskFull() throws Exception {
+ public void testNoLostDeletesOnIOException() throws Exception {
int deleteCount = 0;
int docBase = 0;
@@ -1237,35 +1236,70 @@
@Override
public void eval(MockDirectoryWrapper dir) throws IOException {
StackTraceElement[] trace = new Exception().getStackTrace();
- if (shouldFail.get()) {
- for (int i = 0; i < trace.length; i++) {
- if ("writeLiveDocs".equals(trace[i].getMethodName())) {
- // Only sometimes throw the exc, so we get
- // it sometimes on creating the file, on
- // flushing buffer, on closing the file:
- if (random().nextInt(3) == 2) {
- if (VERBOSE) {
- System.out.println("TEST: now fail; exc:");
- new Throwable().printStackTrace(System.out);
- }
- shouldFail.set(false);
- throw new FakeIOException();
- } else {
- break;
- }
- }
+ if (shouldFail.get() == false) {
+ return;
+ }
+
+ boolean sawSeal = false;
+ boolean sawWrite = false;
+ for (int i = 0; i < trace.length; i++) {
+ if ("sealFlushedSegment".equals(trace[i].getMethodName())) {
+ sawSeal = true;
+ break;
}
+ if ("writeLiveDocs".equals(trace[i].getMethodName())) {
+ sawWrite = true;
+ }
}
+
+ // Don't throw exc if we are "flushing", else
+ // the segment is aborted and docs are lost:
+ if (sawWrite && sawSeal == false && random().nextInt(3) == 2) {
+ // Only sometimes throw the exc, so we get
+ // it sometimes on creating the file, on
+ // flushing buffer, on closing the file:
+ if (VERBOSE) {
+ System.out.println("TEST: now fail; thread=" + Thread.currentThread().getName() + " exc:");
+ new Throwable().printStackTrace(System.out);
+ }
+ shouldFail.set(false);
+ throw new FakeIOException();
+ }
}
});
+ RandomIndexWriter w = null;
+
for(int iter=0;iter<10*RANDOM_MULTIPLIER;iter++) {
int numDocs = atLeast(100);
if (VERBOSE) {
- System.out.println("\nTEST: iter=" + iter + " numDocs=" + numDocs + " docBase=" + docBase);
+ System.out.println("\nTEST: iter=" + iter + " numDocs=" + numDocs + " docBase=" + docBase + " delCount=" + deleteCount);
}
- IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
- IndexWriter w = new IndexWriter(dir, iwc);
+ if (w == null) {
+ IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
+ final MergeScheduler ms = iwc.getMergeScheduler();
+ if (ms instanceof ConcurrentMergeScheduler) {
+ final ConcurrentMergeScheduler suppressFakeIOE = new ConcurrentMergeScheduler() {
+ @Override
+ protected void handleMergeException(Throwable exc) {
+ // suppress only FakeIOException:
+ if (!(exc instanceof FakeIOException)) {
+ super.handleMergeException(exc);
+ }
+ }
+ };
+ final ConcurrentMergeScheduler cms = (ConcurrentMergeScheduler) ms;
+ suppressFakeIOE.setMaxMergesAndThreads(cms.getMaxMergeCount(), cms.getMaxThreadCount());
+ suppressFakeIOE.setMergeThreadPriority(cms.getMergeThreadPriority());
+ iwc.setMergeScheduler(suppressFakeIOE);
+ // (merge scheduler replaced above so FakeIOException from merges is suppressed)
+ }
+ w = new RandomIndexWriter(random(), dir, iwc);
+ // Since we hit exc during merging, a partial
+ // forceMerge can easily return when there are still
+ // too many segments in the index:
+ w.setDoRandomForceMergeAssert(false);
+ }
for(int i=0;i