Index: src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java
===================================================================
--- src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java (revision 823110)
+++ src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java (working copy)
@@ -50,10 +50,8 @@
   public static final String INDEX_PATH = "test.snapshots";

   public void testSnapshotDeletionPolicy() throws Exception {
-    File dir = new File(System.getProperty("tempDir"), INDEX_PATH);
+    File dir = _TestUtil.getTempDir(INDEX_PATH);
     try {
-      // Sometimes past test leaves the dir
-      _TestUtil.rmDir(dir);
       Directory fsDir = FSDirectory.open(dir);
       runTest(fsDir);
       fsDir.close();
@@ -70,27 +68,35 @@
     Directory dir = new MockRAMDirectory();
     SnapshotDeletionPolicy dp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
-    IndexWriter writer = new IndexWriter(dir, true,new StandardAnalyzer(), dp);
-    // Force frequent commits
+    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), dp, IndexWriter.MaxFieldLength.UNLIMITED);
+    // Force frequent flushes
     writer.setMaxBufferedDocs(2);
     Document doc = new Document();
     doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
-    for(int i=0;i<7;i++)
+    for(int i=0;i<7;i++) {
       writer.addDocument(doc);
+      if (i % 2 == 0) {
+        writer.commit();
+      }
+    }
     IndexCommit cp = (IndexCommit) dp.snapshot();
     copyFiles(dir, cp);
     writer.close();
     copyFiles(dir, cp);
-    writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp);
+    writer = new IndexWriter(dir, new StandardAnalyzer(), dp, IndexWriter.MaxFieldLength.UNLIMITED);
     copyFiles(dir, cp);
-    for(int i=0;i<7;i++)
+    for(int i=0;i<7;i++) {
       writer.addDocument(doc);
+      if (i % 2 == 0) {
+        writer.commit();
+      }
+    }
     copyFiles(dir, cp);
     writer.close();
     copyFiles(dir, cp);
     dp.release();
-    writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp);
+    writer = new IndexWriter(dir, new StandardAnalyzer(), dp, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.close();
     try {
       copyFiles(dir, cp);
@@ -106,9 +112,9 @@
     final long stopTime = System.currentTimeMillis() + 7000;

     SnapshotDeletionPolicy dp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
-    final IndexWriter writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp);
+    final IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), dp, IndexWriter.MaxFieldLength.UNLIMITED);

-    // Force frequent commits
+    // Force frequent flushes
     writer.setMaxBufferedDocs(2);

     final Thread t = new Thread() {
@@ -123,6 +129,13 @@
             t.printStackTrace(System.out);
             fail("addDocument failed");
           }
+          if (i%2 == 0) {
+            try {
+              writer.commit();
+            } catch (Exception e) {
+              throw new RuntimeException(e);
+            }
+          }
         }
         try {
           Thread.sleep(1);
Index: src/test/org/apache/lucene/search/payloads/PayloadHelper.java
===================================================================
--- src/test/org/apache/lucene/search/payloads/PayloadHelper.java (revision 823110)
+++ src/test/org/apache/lucene/search/payloads/PayloadHelper.java (working copy)
@@ -103,7 +103,7 @@
     RAMDirectory directory = new RAMDirectory();
     PayloadAnalyzer analyzer = new PayloadAnalyzer();
     IndexWriter writer
-      = new IndexWriter(directory, analyzer, true);
+      = new IndexWriter(directory, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setSimilarity(similarity);
     //writer.infoStream = System.out;
     for (int i = 0; i < numDocs; i++) {
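The TestSnapshotDeletionPolicy hunks above add explicit commit() calls now that the writer no longer auto-commits. For orientation, the pattern the test exercises — pinning a commit point so its files can be copied while indexing continues — looks roughly like this (a minimal sketch against the 2.9-era API; backupFile is a hypothetical helper):

    // Hot backup with SnapshotDeletionPolicy: while a snapshot is held,
    // the files of that commit point cannot be deleted by the writer.
    SnapshotDeletionPolicy dp =
        new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), dp,
                                         IndexWriter.MaxFieldLength.UNLIMITED);
    try {
      IndexCommit cp = (IndexCommit) dp.snapshot();
      for (Iterator it = cp.getFileNames().iterator(); it.hasNext();) {
        backupFile(dir, (String) it.next()); // copy while indexing continues
      }
    } finally {
      dp.release(); // the snapshotted commit may be deleted again
    }
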
Index: src/test/org/apache/lucene/search/spans/TestPayloadSpans.java
===================================================================
--- src/test/org/apache/lucene/search/spans/TestPayloadSpans.java (revision 823110)
+++ src/test/org/apache/lucene/search/spans/TestPayloadSpans.java (working copy)
@@ -115,7 +115,7 @@
       throws IOException {
     RAMDirectory directory = new RAMDirectory();
     PayloadAnalyzer analyzer = new PayloadAnalyzer();
-    IndexWriter writer = new IndexWriter(directory, analyzer, true);
+    IndexWriter writer = new IndexWriter(directory, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setSimilarity(similarity);

     Document doc = new Document();
@@ -308,7 +308,6 @@
     for (int i = 0; i < topDocs.scoreDocs.length; i++) {
       while (spans.next()) {
         Collection payloads = spans.getPayload();
-        int cnt = 0;
         for (Iterator it = payloads.iterator(); it.hasNext();) {
           payloadSet.add(new String((byte[]) it.next()));
         }
@@ -362,7 +361,7 @@
   public void testPayloadSpanUtil() throws Exception {
     RAMDirectory directory = new RAMDirectory();
     PayloadAnalyzer analyzer = new PayloadAnalyzer();
-    IndexWriter writer = new IndexWriter(directory, analyzer, true);
+    IndexWriter writer = new IndexWriter(directory, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setSimilarity(similarity);
     Document doc = new Document();
     doc.add(new Field(PayloadHelper.FIELD,"xx rr yy mm pp", Field.Store.YES, Field.Index.ANALYZED));
@@ -425,7 +424,7 @@
     RAMDirectory directory = new RAMDirectory();
     PayloadAnalyzer analyzer = new PayloadAnalyzer();
     String[] docs = new String[]{"xx rr yy mm pp","xx yy mm rr pp", "nopayload qq ss pp np", "one two three four five six seven eight nine ten eleven", "nine one two three four five six seven eight eleven ten"};
-    IndexWriter writer = new IndexWriter(directory, analyzer, true);
+    IndexWriter writer = new IndexWriter(directory, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setSimilarity(similarity);
Index: src/test/org/apache/lucene/index/TestStressIndexing2.java
===================================================================
--- src/test/org/apache/lucene/index/TestStressIndexing2.java (revision 823110)
+++ src/test/org/apache/lucene/index/TestStressIndexing2.java (working copy)
@@ -32,7 +32,6 @@
   static int maxFields=4;
   static int bigFieldSize=10;
   static boolean sameFieldOrder=false;
-  static boolean autoCommit=false;
   static int mergeFactor=3;
   static int maxBufferedDocs=3;
   static int seed=0;
@@ -41,8 +40,8 @@

   public class MockIndexWriter extends IndexWriter {

-    public MockIndexWriter(Directory dir, boolean autoCommit, Analyzer a, boolean create) throws IOException {
-      super(dir, autoCommit, a, create);
+    public MockIndexWriter(Directory dir, Analyzer a, boolean create, IndexWriter.MaxFieldLength mfl) throws IOException {
+      super(dir, a, create, mfl);
     }

     boolean testPoint(String name) {
@@ -88,7 +87,6 @@
     r = newRandom();
     for (int i=0; i<100; i++) { // increase iterations for better testing
       sameFieldOrder=r.nextBoolean();
-      autoCommit=r.nextBoolean();
       mergeFactor=r.nextInt(3)+2;
       maxBufferedDocs=r.nextInt(3)+2;
       seed++;
@@ -124,7 +122,7 @@
   public DocsAndWriter indexRandomIWReader(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException {
     Map docs = new HashMap();
-    IndexWriter w = new MockIndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true);
+    IndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
     w.setUseCompoundFile(false);
     /***
@@ -176,7 +174,7 @@
   public Map indexRandom(int nThreads, int iterations, int range,
                          Directory dir) throws IOException, InterruptedException {
     Map docs = new HashMap();
     for(int iter=0;iter<3;iter++) {
-      IndexWriter w = new MockIndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true);
+      IndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
       w.setUseCompoundFile(false); // force many merges
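Every converted constructor call in this patch follows the same recipe: drop the boolean autoCommit argument and append a MaxFieldLength. A minimal before/after sketch of the migration, assuming a Directory dir and an Analyzer analyzer are already in scope and buildDoc is a hypothetical helper:

    // Before (deprecated): new IndexWriter(dir, true, analyzer, true)
    // committed implicitly while indexing.
    // After: commits happen only when you ask for them.
    IndexWriter w = new IndexWriter(dir, analyzer, true,
                                    IndexWriter.MaxFieldLength.UNLIMITED);
    for (int i = 0; i < 100; i++) {
      w.addDocument(buildDoc(i));
      if (i % 10 == 0) {
        w.commit(); // durable point; newly opened readers see these docs
      }
    }
    w.close(); // close() commits any remaining buffered changes
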
Index: src/test/org/apache/lucene/index/TestIndexWriter.java
===================================================================
--- src/test/org/apache/lucene/index/TestIndexWriter.java (revision 823110)
+++ src/test/org/apache/lucene/index/TestIndexWriter.java (working copy)
@@ -117,7 +117,7 @@
     reader.close();

     // optimize the index and check that the new doc count is correct
-    writer = new IndexWriter(dir, true, new WhitespaceAnalyzer());
+    writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
     assertEquals(100, writer.maxDoc());
     assertEquals(60, writer.numDocs());
     writer.optimize();
@@ -227,7 +227,7 @@
       startDiskUsage += startDir.fileLength(files[i]);
     }

-    for(int iter=0;iter<6;iter++) {
+    for(int iter=0;iter<3;iter++) {
       if (debug)
         System.out.println("TEST: iter=" + iter);
@@ -235,8 +235,7 @@
       // Start with 100 bytes more than we are currently using:
       long diskFree = diskUsage+100;

-      boolean autoCommit = iter % 2 == 0;
-      int method = iter/2;
+      int method = iter;

       boolean success = false;
       boolean done = false;
@@ -254,7 +253,7 @@
         // Make a new dir that will enforce disk usage:
         MockRAMDirectory dir = new MockRAMDirectory(startDir);
-        writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false);
+        writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.UNLIMITED);
         IOException err = null;

         MergeScheduler ms = writer.getMergeScheduler();
@@ -290,12 +289,12 @@
             rate = 0.0;
           }
           if (debug)
-            testName = "disk full test " + methodName + " with disk full at " + diskFree + " bytes autoCommit=" + autoCommit;
+            testName = "disk full test " + methodName + " with disk full at " + diskFree + " bytes";
         } else {
           thisDiskFree = 0;
           rate = 0.0;
           if (debug)
-            testName = "disk full test " + methodName + " with unlimited disk space autoCommit=" + autoCommit;
+            testName = "disk full test " + methodName + " with unlimited disk space";
         }

         if (debug)
@@ -351,29 +350,6 @@
         // ConcurrentMergeScheduler are done
         _TestUtil.syncConcurrentMerges(writer);

-        if (autoCommit) {
-
-          // Whether we succeeded or failed, check that
-          // all un-referenced files were in fact
-          // deleted (ie, we did not create garbage).
-          // Only check this when autoCommit is true:
-          // when it's false, it's expected that there
-          // are unreferenced files (ie they won't be
-          // referenced until the "commit on close").
-
-          // Just create a new IndexFileDeleter, have it
-          // delete unreferenced files, then verify that
-          // in fact no files were deleted:
-
-          String successStr;
-          if (success) {
-            successStr = "success";
-          } else {
-            successStr = "IOException";
-          }
-          String message = methodName + " failed to delete unreferenced files after " + successStr + " (" + diskFree + " bytes)";
-          assertNoUnreferencedFiles(dir, message);
-        }
-
         if (debug) {
           System.out.println("  now test readers");
         }
@@ -390,10 +366,8 @@
         }
         int result = reader.docFreq(searchTerm);
         if (success) {
-          if (autoCommit && result != END_COUNT) {
-            fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + END_COUNT);
-          } else if (!autoCommit && result != START_COUNT) {
-            fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " [autoCommit = false]");
+          if (result != START_COUNT) {
+            fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT);
           }
         } else {
           // On hitting exception we still may have added
@@ -480,18 +454,17 @@

     boolean debug = false;

-    for(int pass=0;pass<3;pass++) {
+    for(int pass=0;pass<2;pass++) {
       if (debug)
         System.out.println("TEST: pass=" + pass);
-      boolean autoCommit = pass == 0;
-      boolean doAbort = pass == 2;
+      boolean doAbort = pass == 1;
       long diskFree = 200;
       while(true) {
         if (debug)
           System.out.println("TEST: cycle: diskFree=" + diskFree);
         MockRAMDirectory dir = new MockRAMDirectory();
         dir.setMaxSizeInBytes(diskFree);
-        IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true);
+        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);

         MergeScheduler ms = writer.getMergeScheduler();
         if (ms instanceof ConcurrentMergeScheduler)
@@ -531,7 +504,7 @@

         _TestUtil.syncConcurrentMerges(ms);

-        assertNoUnreferencedFiles(dir, "after disk full during addDocument with autoCommit=" + autoCommit);
+        assertNoUnreferencedFiles(dir, "after disk full during addDocument");

         // Make sure reader can open the index:
         IndexReader.open(dir, true).close();
@@ -947,10 +920,9 @@
   }

   /*
-   * Simple test for "commit on close": open writer with
-   * autoCommit=false, so it will only commit on close,
-   * then add a bunch of docs, making sure reader does not
-   * see these docs until writer is closed.
+   * Simple test for "commit on close": open writer then
+   * add a bunch of docs, making sure reader does not see
+   * these docs until writer is closed.
    */
   public void testCommitOnClose() throws IOException {
     Directory dir = new RAMDirectory();
@@ -975,7 +947,7 @@
     }
     searcher = new IndexSearcher(dir, false);
     hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
-    assertEquals("reader incorrectly sees changes from writer with autoCommit disabled", 14, hits.length);
+    assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
     searcher.close();
     assertTrue("reader should have still been current", reader.isCurrent());
   }
@@ -991,10 +963,9 @@
   }

   /*
-   * Simple test for "commit on close": open writer with
-   * autoCommit=false, so it will only commit on close,
-   * then add a bunch of docs, making sure reader does not
-   * see them until writer has closed. Then instead of
+   * Simple test for "commit on close": open writer, then
+   * add a bunch of docs, making sure reader does not see
+   * them until writer has closed. Then instead of
    * closing the writer, call abort and verify reader sees
    * nothing was added. Then verify we can open the index
    * and add docs to it.
@@ -1024,7 +995,7 @@
     searcher = new IndexSearcher(dir, false);
     hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
-    assertEquals("reader incorrectly sees changes from writer with autoCommit disabled", 14, hits.length);
+    assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
     searcher.close();

     // Now, close the writer:
@@ -1052,7 +1023,7 @@
     }
     searcher = new IndexSearcher(dir, false);
     hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
-    assertEquals("reader incorrectly sees changes from writer with autoCommit disabled", 14, hits.length);
+    assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
     searcher.close();
   }
@@ -1103,9 +1074,9 @@
     // and it doesn't delete intermediate segments then it
     // will exceed this 100X:
     // System.out.println("start " + startDiskUsage + "; mid " + midDiskUsage + ";end " + endDiskUsage);
-    assertTrue("writer used too much space while adding documents when autoCommit=false: mid=" + midDiskUsage + " start=" + startDiskUsage + " end=" + endDiskUsage,
+    assertTrue("writer used too much space while adding documents: mid=" + midDiskUsage + " start=" + startDiskUsage + " end=" + endDiskUsage,
                midDiskUsage < 100*startDiskUsage);
-    assertTrue("writer used too much space after close when autoCommit=false endDiskUsage=" + endDiskUsage + " startDiskUsage=" + startDiskUsage,
+    assertTrue("writer used too much space after close: endDiskUsage=" + endDiskUsage + " startDiskUsage=" + startDiskUsage,
                endDiskUsage < 100*startDiskUsage);
   }
@@ -2116,15 +2087,15 @@
     Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
     doc.add(idField);

-    for(int pass=0;pass<3;pass++) {
-      boolean autoCommit = pass%2 == 0;
-      IndexWriter writer = new IndexWriter(directory, autoCommit, new WhitespaceAnalyzer(), true);
+    for(int pass=0;pass<2;pass++) {

-      //System.out.println("TEST: pass=" + pass + " ac=" + autoCommit + " cms=" + (pass >= 2));
+      IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+
+      //System.out.println("TEST: pass=" + pass + " cms=" + (pass >= 2));
       for(int iter=0;iter<10;iter++) {
         //System.out.println("TEST: iter=" + iter);
         MergeScheduler ms;
-        if (pass >= 2)
+        if (pass == 1)
           ms = new ConcurrentMergeScheduler();
         else
           ms = new SerialMergeScheduler();
@@ -2189,7 +2160,7 @@
         reader.close();

         // Reopen
-        writer = new IndexWriter(directory, autoCommit, new WhitespaceAnalyzer(), false);
+        writer = new IndexWriter(directory, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.UNLIMITED);
       }
       writer.close();
     }
@@ -2360,7 +2331,7 @@
     for(int iter=0;iter<10;iter++) {
       MockRAMDirectory dir = new MockRAMDirectory();
-      IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer());
+      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
       ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
       // We expect disk full exceptions in the merge threads
       cms.setSuppressExceptions();
@@ -2421,7 +2392,7 @@
   public void _testSingleThreadFailure(MockRAMDirectory.Failure failure) throws IOException {
     MockRAMDirectory dir = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer());
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setMaxBufferedDocs(2);
     final Document doc = new Document();
     doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
@@ -2434,6 +2405,7 @@
     try {
       writer.addDocument(doc);
       writer.addDocument(doc);
+      writer.commit();
       fail("did not hit exception");
     } catch (IOException ioe) {
     }
@@ -2721,7 +2693,7 @@
     FailOnlyInSync failure = new FailOnlyInSync();
     dir.failOn(failure);

-    IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer());
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
     failure.setDoFail();

     ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
@@ -2731,8 +2703,16 @@
     writer.setMaxBufferedDocs(2);
     writer.setMergeFactor(5);

-    for (int i = 0; i < 23; i++)
+    for (int i = 0; i < 23; i++) {
       addDoc(writer);
+      if ((i-1)%2 == 0) {
+        try {
+          writer.commit();
+        } catch (IOException ioe) {
+          // expected
+        }
+      }
+    }

     cms.sync();
     assertTrue(failure.didFail);
@@ -2749,10 +2729,9 @@
   public void testTermVectorCorruption() throws IOException {

     Directory dir = new MockRAMDirectory();
-    for(int iter=0;iter<4;iter++) {
-      final boolean autoCommit = 1==iter/2;
+    for(int iter=0;iter<2;iter++) {
       IndexWriter writer = new IndexWriter(dir,
-                                           autoCommit, new StandardAnalyzer());
+                                           new StandardAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setMaxBufferedDocs(2);
       writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
       writer.setMergeScheduler(new SerialMergeScheduler());
@@ -2785,7 +2764,7 @@
       reader.close();

       writer = new IndexWriter(dir,
-                               autoCommit, new StandardAnalyzer());
+                               new StandardAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setMaxBufferedDocs(2);
       writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
       writer.setMergeScheduler(new SerialMergeScheduler());
@@ -2801,10 +2780,9 @@
   // LUCENE-1168
   public void testTermVectorCorruption2() throws IOException {
     Directory dir = new MockRAMDirectory();
-    for(int iter=0;iter<4;iter++) {
-      final boolean autoCommit = 1==iter/2;
+    for(int iter=0;iter<2;iter++) {
       IndexWriter writer = new IndexWriter(dir,
-                                           autoCommit, new StandardAnalyzer());
+                                           new StandardAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setMaxBufferedDocs(2);
       writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
       writer.setMergeScheduler(new SerialMergeScheduler());
@@ -3049,7 +3027,7 @@
   // LUCENE-1179
   public void testEmptyFieldName() throws IOException {
     MockRAMDirectory dir = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer());
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
     Document doc = new Document();
     doc.add(new Field("", "a b c", Field.Store.NO, Field.Index.ANALYZED));
     writer.addDocument(doc);
@@ -4034,7 +4012,7 @@

     final List thrown = new ArrayList();

-    final IndexWriter writer = new IndexWriter(new MockRAMDirectory(), new StandardAnalyzer()) {
+    final IndexWriter writer = new IndexWriter(new MockRAMDirectory(), new StandardAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED) {
         public void message(final String message) {
           if (message.startsWith("now flush at close") && 0 == thrown.size()) {
             thrown.add(null);
@@ -4324,7 +4302,7 @@
   public void testDeadlock() throws Exception {
     MockRAMDirectory dir = new MockRAMDirectory();
-    IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer());
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setMaxBufferedDocs(2);
     Document doc = new Document();
     doc.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
Index: src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java
===================================================================
--- src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java (revision 823110)
+++ src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java (working copy)
@@ -149,6 +149,7 @@

     writer.deleteDocuments(q);
     writer.optimize();
+    writer.commit();

     verifyNumDocs(dir, 1039);
     verifyTermDocs(dir, new Term("content", "aaa"), 1030);
@@ -187,6 +188,7 @@

     writer.deleteDocuments(q);
     writer.optimize();
+    writer.commit();

     verifyNumDocs(dir, 1039);
     verifyTermDocs(dir, new Term("content", "aaa"), 1030);
@@ -225,6 +227,7 @@

     writer.addIndexesNoOptimize(new Directory[] {aux});
     writer.optimize();
+    writer.commit();

     verifyNumDocs(dir, 1039);
     verifyTermDocs(dir, new Term("content", "aaa"), 1030);
@@ -425,7 +428,7 @@

   private IndexWriter newWriter(Directory dir, boolean create)
       throws IOException {
-    final IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), create);
+    final IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), create, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setMergePolicy(new LogDocMergePolicy(writer));
     return writer;
   }
Index: src/test/org/apache/lucene/index/TestCrash.java
===================================================================
--- src/test/org/apache/lucene/index/TestCrash.java (revision 823110)
+++ src/test/org/apache/lucene/index/TestCrash.java (working copy)
@@ -35,7 +35,7 @@
   private IndexWriter initIndex(MockRAMDirectory dir) throws IOException {
     dir.setLockFactory(NoLockFactory.getNoLockFactory());
-    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer());
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
     //writer.setMaxBufferedDocs(2);
     writer.setMaxBufferedDocs(10);
     ((ConcurrentMergeScheduler) writer.getMergeScheduler()).setSuppressExceptions();
Index: src/test/org/apache/lucene/index/TestThreadedOptimize.java
===================================================================
--- src/test/org/apache/lucene/index/TestThreadedOptimize.java (revision 823110)
+++ src/test/org/apache/lucene/index/TestThreadedOptimize.java (working copy)
@@ -51,9 +51,9 @@
       failed = true;
   }

-  public void runTest(Directory directory, boolean autoCommit, MergeScheduler merger) throws Exception {
+  public void runTest(Directory directory, MergeScheduler merger) throws Exception {

-    IndexWriter writer = new IndexWriter(directory, autoCommit, ANALYZER, true);
+    IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setMaxBufferedDocs(2);
     if (merger != null)
       writer.setMergeScheduler(merger);
@@ -73,8 +73,6 @@
     writer.setMergeFactor(4);
     //writer.setInfoStream(System.out);

-    final int docCount = writer.docCount();
-
     Thread[] threads = new Thread[NUM_THREADS];

     for(int i=0;i= 2.0) {
-          rate /= 2;
-        }
-        if (diskRatio >= 4.0) {
-          rate /= 2;
-        }
-        if (diskRatio >= 6.0) {
-          rate = 0.0;
-        }
-        if (debug) {
-          System.out.println("\ncycle: " + diskFree + " bytes");
-        }
-        testName = "disk full during reader.close() @ " + thisDiskFree
-          + " bytes";
-      } else {
-        thisDiskFree = 0;
+      if (0 == x) {
+        thisDiskFree = diskFree;
+        if (diskRatio >= 2.0) {
+          rate /= 2;
+        }
+        if (diskRatio >= 4.0) {
+          rate /= 2;
+        }
+        if (diskRatio >= 6.0) {
+          rate = 0.0;
-        if (debug) {
-          System.out.println("\ncycle: same writer: unlimited disk space");
-        }
-        testName = "reader re-use after disk full";
       }
+        if (debug) {
+          System.out.println("\ncycle: " + diskFree + " bytes");
+        }
+        testName = "disk full during reader.close() @ " + thisDiskFree
+          + " bytes";
+      } else {
+        thisDiskFree = 0;
+        rate = 0.0;
+        if (debug) {
+          System.out.println("\ncycle: same writer: unlimited disk space");
+        }
+        testName = "reader re-use after disk full";
+      }

-      dir.setMaxSizeInBytes(thisDiskFree);
-      dir.setRandomIOExceptionRate(rate, diskFree);
+      dir.setMaxSizeInBytes(thisDiskFree);
+      dir.setRandomIOExceptionRate(rate, diskFree);

-      try {
-        if (0 == x) {
-          int docId = 12;
-          for (int i = 0; i < 13; i++) {
-            if (updates) {
-              Document d = new Document();
-              d.add(new Field("id", Integer.toString(i), Field.Store.YES,
-                              Field.Index.NOT_ANALYZED));
-              d.add(new Field("content", "bbb " + i, Field.Store.NO,
-                              Field.Index.ANALYZED));
-              modifier.updateDocument(new Term("id", Integer.toString(docId)), d);
-            } else { // deletes
-              modifier.deleteDocuments(new Term("id", Integer.toString(docId)));
-              // modifier.setNorm(docId, "contents", (float)2.0);
-            }
-            docId += 12;
+      try {
+        if (0 == x) {
+          int docId = 12;
+          for (int i = 0; i < 13; i++) {
+            if (updates) {
+              Document d = new Document();
+              d.add(new Field("id", Integer.toString(i), Field.Store.YES,
+                              Field.Index.NOT_ANALYZED));
+              d.add(new Field("content", "bbb " + i, Field.Store.NO,
+                              Field.Index.ANALYZED));
+              modifier.updateDocument(new Term("id", Integer.toString(docId)), d);
+            } else { // deletes
+              modifier.deleteDocuments(new Term("id", Integer.toString(docId)));
+              // modifier.setNorm(docId, "contents", (float)2.0);
             }
+            docId += 12;
           }
-        modifier.close();
-        success = true;
-        if (0 == x) {
-          done = true;
-        }
       }
-      catch (IOException e) {
-        if (debug) {
-          System.out.println("  hit IOException: " + e);
-          e.printStackTrace(System.out);
-        }
-        err = e;
-        if (1 == x) {
-          e.printStackTrace();
-          fail(testName + " hit IOException after disk space was freed up");
-        }
+        modifier.close();
+        success = true;
+        if (0 == x) {
+          done = true;
       }
-
-      // If the close() succeeded, make sure there are
-      // no unreferenced files.
-      if (success)
-        TestIndexWriter.assertNoUnreferencedFiles(dir, "after writer.close");
-
-      // Finally, verify index is not corrupt, and, if
-      // we succeeded, we see all docs changed, and if
-      // we failed, we see either all docs or no docs
-      // changed (transactional semantics):
-      IndexReader newReader = null;
-      try {
-        newReader = IndexReader.open(dir, true);
+      }
+      catch (IOException e) {
+        if (debug) {
+          System.out.println("  hit IOException: " + e);
+          e.printStackTrace(System.out);
         }
-      catch (IOException e) {
+        err = e;
+        if (1 == x) {
           e.printStackTrace();
-        fail(testName
-             + ":exception when creating IndexReader after disk full during close: "
-             + e);
+          fail(testName + " hit IOException after disk space was freed up");
         }
+      }

-      IndexSearcher searcher = new IndexSearcher(newReader);
-      ScoreDoc[] hits = null;
-      try {
-        hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
-      }
-      catch (IOException e) {
-        e.printStackTrace();
-        fail(testName + ": exception when searching: " + e);
-      }
-      int result2 = hits.length;
-      if (success) {
-        if (x == 0 && result2 != END_COUNT) {
-          fail(testName
-               + ": method did not throw exception but hits.length for search on term 'aaa' is "
-               + result2 + " instead of expected " + END_COUNT);
-        } else if (x == 1 && result2 != START_COUNT && result2 != END_COUNT) {
-          // It's possible that the first exception was
-          // "recoverable" wrt pending deletes, in which
-          // case the pending deletes are retained and
-          // then re-flushing (with plenty of disk
-          // space) will succeed in flushing the
-          // deletes:
-          fail(testName
-               + ": method did not throw exception but hits.length for search on term 'aaa' is "
-               + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT);
-        }
-      } else {
-        // On hitting exception we still may have added
-        // all docs:
-        if (result2 != START_COUNT && result2 != END_COUNT) {
-          err.printStackTrace();
-          fail(testName
-               + ": method did throw exception but hits.length for search on term 'aaa' is "
-               + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT);
-        }
-      }
+      // If the close() succeeded, make sure there are
+      // no unreferenced files.
+      if (success)
+        TestIndexWriter.assertNoUnreferencedFiles(dir, "after writer.close");

-      searcher.close();
-      newReader.close();
+      // Finally, verify index is not corrupt, and, if
+      // we succeeded, we see all docs changed, and if
+      // we failed, we see either all docs or no docs
+      // changed (transactional semantics):
+      IndexReader newReader = null;
+      try {
+        newReader = IndexReader.open(dir, true);
+      }
+      catch (IOException e) {
+        e.printStackTrace();
+        fail(testName
+             + ":exception when creating IndexReader after disk full during close: "
+             + e);
+      }

-      if (result2 == END_COUNT) {
-        break;
+      IndexSearcher searcher = new IndexSearcher(newReader);
+      ScoreDoc[] hits = null;
+      try {
+        hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+      }
+      catch (IOException e) {
+        e.printStackTrace();
+        fail(testName + ": exception when searching: " + e);
+      }
+      int result2 = hits.length;
+      if (success) {
+        if (x == 0 && result2 != END_COUNT) {
+          fail(testName
+               + ": method did not throw exception but hits.length for search on term 'aaa' is "
+               + result2 + " instead of expected " + END_COUNT);
+        } else if (x == 1 && result2 != START_COUNT && result2 != END_COUNT) {
+          // It's possible that the first exception was
+          // "recoverable" wrt pending deletes, in which
+          // case the pending deletes are retained and
+          // then re-flushing (with plenty of disk
+          // space) will succeed in flushing the
+          // deletes:
+          fail(testName
+               + ": method did not throw exception but hits.length for search on term 'aaa' is "
+               + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT);
         }
+      } else {
+        // On hitting exception we still may have added
+        // all docs:
+        if (result2 != START_COUNT && result2 != END_COUNT) {
+          err.printStackTrace();
+          fail(testName
+               + ": method did throw exception but hits.length for search on term 'aaa' is "
+               + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT);
+        }
       }

-      dir.close();
+      searcher.close();
+      newReader.close();

-      // Try again with 10 more bytes of free space:
-      diskFree += 10;
+      if (result2 == END_COUNT) {
+        break;
+      }
     }
+
+    dir.close();
+
+    // Try again with 10 more bytes of free space:
+    diskFree += 10;
   }
 }
@@ -677,87 +650,84 @@
                                  "Venice has lots of canals" };
     String[] text = { "Amsterdam", "Venice" };

-    for(int pass=0;pass<2;pass++) {
-      boolean autoCommit = (0==pass);
-      MockRAMDirectory dir = new MockRAMDirectory();
-      IndexWriter modifier = new IndexWriter(dir, autoCommit,
-                                             new WhitespaceAnalyzer(), true);
-      modifier.setUseCompoundFile(true);
-      modifier.setMaxBufferedDeleteTerms(2);
+    MockRAMDirectory dir = new MockRAMDirectory();
+    IndexWriter modifier = new IndexWriter(dir,
+                                           new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    modifier.setUseCompoundFile(true);
+    modifier.setMaxBufferedDeleteTerms(2);

-      dir.failOn(failure.reset());
+    dir.failOn(failure.reset());

-      for (int i = 0; i < keywords.length; i++) {
-        Document doc = new Document();
-        doc.add(new Field("id", keywords[i], Field.Store.YES,
-                          Field.Index.NOT_ANALYZED));
-        doc.add(new Field("country", unindexed[i], Field.Store.YES,
-                          Field.Index.NO));
-        doc.add(new Field("contents", unstored[i], Field.Store.NO,
-                          Field.Index.ANALYZED));
-        doc.add(new Field("city", text[i], Field.Store.YES,
-                          Field.Index.ANALYZED));
-        modifier.addDocument(doc);
-      }
-      // flush (and commit if ac)
+    for (int i = 0; i < keywords.length; i++) {
+      Document doc = new Document();
+      doc.add(new Field("id", keywords[i], Field.Store.YES,
+                        Field.Index.NOT_ANALYZED));
+      doc.add(new Field("country", unindexed[i], Field.Store.YES,
+                        Field.Index.NO));
+      doc.add(new Field("contents", unstored[i], Field.Store.NO,
+                        Field.Index.ANALYZED));
+      doc.add(new Field("city", text[i], Field.Store.YES,
+                        Field.Index.ANALYZED));
+      modifier.addDocument(doc);
+    }
+    // flush (and commit if ac)

-      modifier.optimize();
-      modifier.commit();
+    modifier.optimize();
+    modifier.commit();

-      // one of the two files hits
+    // one of the two files hits

-      Term term = new Term("city", "Amsterdam");
-      int hitCount = getHitCount(dir, term);
-      assertEquals(1, hitCount);
+    Term term = new Term("city", "Amsterdam");
+    int hitCount = getHitCount(dir, term);
+    assertEquals(1, hitCount);

-      // open the writer again (closed above)
+    // open the writer again (closed above)

-      // delete the doc
-      // max buf del terms is two, so this is buffered
+    // delete the doc
+    // max buf del terms is two, so this is buffered

-      modifier.deleteDocuments(term);
+    modifier.deleteDocuments(term);

-      // add a doc (needed for the !ac case; see below)
-      // doc remains buffered
+    // add a doc (needed for the !ac case; see below)
+    // doc remains buffered

-      Document doc = new Document();
-      modifier.addDocument(doc);
+    Document doc = new Document();
+    modifier.addDocument(doc);

-      // commit the changes, the buffered deletes, and the new doc
+    // commit the changes, the buffered deletes, and the new doc

-      // The failure object will fail on the first write after the del
-      // file gets created when processing the buffered delete
+    // The failure object will fail on the first write after the del
+    // file gets created when processing the buffered delete

-      // in the ac case, this will be when writing the new segments
-      // files so we really don't need the new doc, but it's harmless
+    // in the ac case, this will be when writing the new segments
+    // files so we really don't need the new doc, but it's harmless

-      // in the !ac case, a new segments file won't be created but in
-      // this case, creation of the cfs file happens next so we need
-      // the doc (to test that it's okay that we don't lose deletes if
-      // failing while creating the cfs file)
+    // in the !ac case, a new segments file won't be created but in
+    // this case, creation of the cfs file happens next so we need
+    // the doc (to test that it's okay that we don't lose deletes if
+    // failing while creating the cfs file)

-      boolean failed = false;
-      try {
-        modifier.commit();
-      } catch (IOException ioe) {
-        failed = true;
-      }
+    boolean failed = false;
+    try {
+      modifier.commit();
+    } catch (IOException ioe) {
+      failed = true;
+    }

-      assertTrue(failed);
+    assertTrue(failed);

-      // The commit above failed, so we need to retry it (which will
-      // succeed, because the failure is a one-shot)
+    // The commit above failed, so we need to retry it (which will
+    // succeed, because the failure is a one-shot)

-      modifier.commit();
+    modifier.commit();

-      hitCount = getHitCount(dir, term);
+    hitCount = getHitCount(dir, term);

-      // Make sure the delete was successfully flushed:
-      assertEquals(0, hitCount);
+    // Make sure the delete was successfully flushed:
+    assertEquals(0, hitCount);

-      modifier.close();
-      dir.close();
-    }
+    modifier.close();
+    dir.close();
   }

   // This test tests that the files created by the docs writer before
@@ -787,47 +757,43 @@
                          "Venice has lots of canals" };
     String[] text = { "Amsterdam", "Venice" };

-    for(int pass=0;pass<2;pass++) {
-      boolean autoCommit = (0==pass);
-      MockRAMDirectory dir = new MockRAMDirectory();
-      IndexWriter modifier = new IndexWriter(dir, autoCommit,
-                                             new WhitespaceAnalyzer(), true);
+    MockRAMDirectory dir = new MockRAMDirectory();
+    IndexWriter modifier = new IndexWriter(dir,
+                                           new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);

-      dir.failOn(failure.reset());
+    dir.failOn(failure.reset());

-      for (int i = 0; i < keywords.length; i++) {
-        Document doc = new Document();
-        doc.add(new Field("id", keywords[i], Field.Store.YES,
-                          Field.Index.NOT_ANALYZED));
-        doc.add(new Field("country", unindexed[i], Field.Store.YES,
-                          Field.Index.NO));
-        doc.add(new Field("contents", unstored[i], Field.Store.NO,
-                          Field.Index.ANALYZED));
-        doc.add(new Field("city", text[i], Field.Store.YES,
-                          Field.Index.ANALYZED));
-        try {
-          modifier.addDocument(doc);
-        } catch (IOException io) {
-          break;
-        }
-      }
+    for (int i = 0; i < keywords.length; i++) {
+      Document doc = new Document();
+      doc.add(new Field("id", keywords[i], Field.Store.YES,
+                        Field.Index.NOT_ANALYZED));
+      doc.add(new Field("country", unindexed[i], Field.Store.YES,
+                        Field.Index.NO));
+      doc.add(new Field("contents", unstored[i], Field.Store.NO,
+                        Field.Index.ANALYZED));
+      doc.add(new Field("city", text[i], Field.Store.YES,
+                        Field.Index.ANALYZED));
+      try {
+        modifier.addDocument(doc);
+      } catch (IOException io) {
+        break;
+      }
+    }

-      String[] startFiles = dir.listAll();
-      SegmentInfos infos = new SegmentInfos();
-      infos.read(dir);
-      new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null);
-      String[] endFiles = dir.listAll();
+    String[] startFiles = dir.listAll();
+    SegmentInfos infos = new SegmentInfos();
+    infos.read(dir);
+    new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null);
+    String[] endFiles = dir.listAll();

-      if (!Arrays.equals(startFiles, endFiles)) {
-        fail("docswriter abort() failed to delete unreferenced files:\n  before delete:\n    "
-             + arrayToString(startFiles) + "\n  after delete:\n    "
-             + arrayToString(endFiles));
-      }
-
-      modifier.close();
-
-    }
+    if (!Arrays.equals(startFiles, endFiles)) {
+      fail("docswriter abort() failed to delete unreferenced files:\n  before delete:\n    "
+           + arrayToString(startFiles) + "\n  after delete:\n    "
+           + arrayToString(endFiles));
+    }

+    modifier.close();
+  }

   private String arrayToString(String[] l) {
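The reworked tests above lean on commit() being atomic: if it fails, the index stays at the previous commit point and the same buffered changes can simply be committed again. A sketch of that retry pattern, assuming the failure (here, a simulated disk-full) is transient:

    try {
      modifier.commit();
    } catch (IOException ioe) {
      // The commit did not take: readers still see the previous commit
      // point, and the buffered adds/deletes are still pending.
      // After the underlying problem is resolved, simply retry:
      modifier.commit();
    }
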
Index: src/test/org/apache/lucene/index/TestStressIndexing.java
===================================================================
--- src/test/org/apache/lucene/index/TestStressIndexing.java (revision 823110)
+++ src/test/org/apache/lucene/index/TestStressIndexing.java (working copy)
@@ -115,8 +115,8 @@
     Run one indexer and 2 searchers against single index as
     stress test.
   */
-  public void runStressTest(Directory directory, boolean autoCommit, MergeScheduler mergeScheduler) throws Exception {
-    IndexWriter modifier = new IndexWriter(directory, autoCommit, ANALYZER, true);
+  public void runStressTest(Directory directory, MergeScheduler mergeScheduler) throws Exception {
+    IndexWriter modifier = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED);

     modifier.setMaxBufferedDocs(10);
@@ -166,37 +166,17 @@
   public void testStressIndexAndSearching() throws Exception {
     RANDOM = newRandom();

-    // RAMDir
+    // With ConcurrentMergeScheduler, in RAMDir
     Directory directory = new MockRAMDirectory();
-    runStressTest(directory, true, null);
+    runStressTest(directory, new ConcurrentMergeScheduler());
     directory.close();

-    // FSDir
+    // With ConcurrentMergeScheduler, in FSDir
     File dirPath = _TestUtil.getTempDir("lucene.test.stress");
     directory = FSDirectory.open(dirPath);
-    runStressTest(directory, true, null);
+    runStressTest(directory, new ConcurrentMergeScheduler());
     directory.close();

-    // With ConcurrentMergeScheduler, in RAMDir
-    directory = new MockRAMDirectory();
-    runStressTest(directory, true, new ConcurrentMergeScheduler());
-    directory.close();
-
-    // With ConcurrentMergeScheduler, in FSDir
-    directory = FSDirectory.open(dirPath);
-    runStressTest(directory, true, new ConcurrentMergeScheduler());
-    directory.close();
-
-    // With ConcurrentMergeScheduler and autoCommit=false, in RAMDir
-    directory = new MockRAMDirectory();
-    runStressTest(directory, false, new ConcurrentMergeScheduler());
-    directory.close();
-
-    // With ConcurrentMergeScheduler and autoCommit=false, in FSDir
-    directory = FSDirectory.open(dirPath);
-    runStressTest(directory, false, new ConcurrentMergeScheduler());
-    directory.close();
-
     _TestUtil.rmDir(dirPath);
   }
 }
Index: src/test/org/apache/lucene/index/TestDeletionPolicy.java
===================================================================
--- src/test/org/apache/lucene/index/TestDeletionPolicy.java (revision 823110)
+++ src/test/org/apache/lucene/index/TestDeletionPolicy.java (working copy)
@@ -83,8 +83,8 @@
   }

   /**
-   * This is useful for adding to a big index w/ autoCommit
-   * false when you know readers are not using it.
+   * This is useful for adding to a big index when you know
+   * readers are not using it.
   */
   class KeepNoneOnInitDeletionPolicy implements IndexDeletionPolicy {
     int numOnInit;
@@ -202,12 +202,11 @@

     final double SECONDS = 2.0;

-    boolean autoCommit = false;
     boolean useCompoundFile = true;

     Directory dir = new RAMDirectory();
     ExpirationTimeDeletionPolicy policy = new ExpirationTimeDeletionPolicy(dir, SECONDS);
-    IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
+    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
     writer.setUseCompoundFile(useCompoundFile);
     writer.close();
@@ -216,7 +215,7 @@
       // Record last time when writer performed deletes of
       // past commits
       lastDeleteTime = System.currentTimeMillis();
-      writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy);
+      writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setUseCompoundFile(useCompoundFile);
       for(int j=0;j<17;j++) {
         addDoc(writer);
@@ -267,10 +266,9 @@
   */
   public void testKeepAllDeletionPolicy() throws IOException {

-    for(int pass=0;pass<4;pass++) {
+    for(int pass=0;pass<2;pass++) {

-      boolean autoCommit = pass < 2;
-      boolean useCompoundFile = (pass % 2) > 0;
+      boolean useCompoundFile = (pass % 2) != 0;

       // Never deletes a commit
       KeepAllDeletionPolicy policy = new KeepAllDeletionPolicy();
@@ -278,37 +276,30 @@
       Directory dir = new RAMDirectory();
       policy.dir = dir;

-      IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
+      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setMaxBufferedDocs(10);
       writer.setUseCompoundFile(useCompoundFile);
       writer.setMergeScheduler(new SerialMergeScheduler());
       for(int i=0;i<107;i++) {
         addDoc(writer);
-        if (autoCommit && i%10 == 0)
-          writer.commit();
       }
       writer.close();

-      writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy);
+      writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setUseCompoundFile(useCompoundFile);
       writer.optimize();
       writer.close();

       assertEquals(2, policy.numOnInit);
-      if (!autoCommit)
-        // If we are not auto committing then there should
-        // be exactly 2 commits (one per close above):
-        assertEquals(2, policy.numOnCommit);
+      // There should be exactly 2 commits (one per close above):
+      assertEquals(2, policy.numOnCommit);

+      // Test listCommits
       Collection commits = IndexReader.listCommits(dir);
-      if (!autoCommit)
-        // 1 from opening writer + 2 from closing writer
-        assertEquals(3, commits.size());
-      else
-        // 1 from opening writer + 2 from closing writer +
-        // 11 from calling writer.commit() explicitly above
-        assertEquals(14, commits.size());
+      // 1 from opening writer + 2 from closing writer
+      assertEquals(3, commits.size());

       Iterator it = commits.iterator();
       // Make sure we can open a reader on each commit:
@@ -448,21 +439,20 @@

   /* Test keeping NO commit points.  This is a viable and
-   * useful case eg where you want to build a big index with
-   * autoCommit false and you know there are no readers.
+   * useful case eg where you want to build a big index and
+   * you know there are no readers.
   */
   public void testKeepNoneOnInitDeletionPolicy() throws IOException {

-    for(int pass=0;pass<4;pass++) {
+    for(int pass=0;pass<2;pass++) {

-      boolean autoCommit = pass < 2;
-      boolean useCompoundFile = (pass % 2) > 0;
+      boolean useCompoundFile = (pass % 2) != 0;

       KeepNoneOnInitDeletionPolicy policy = new KeepNoneOnInitDeletionPolicy();

       Directory dir = new RAMDirectory();

-      IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
+      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setMaxBufferedDocs(10);
       writer.setUseCompoundFile(useCompoundFile);
       for(int i=0;i<107;i++) {
@@ -470,16 +460,15 @@
       }
       writer.close();

-      writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy);
+      writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setUseCompoundFile(useCompoundFile);
       writer.optimize();
       writer.close();

       assertEquals(2, policy.numOnInit);
-      if (!autoCommit)
-        // If we are not auto committing then there should
-        // be exactly 2 commits (one per close above):
-        assertEquals(2, policy.numOnCommit);
+      // There should be exactly 2 commits (one per close above):
+      assertEquals(2, policy.numOnCommit);

       // Simplistic check: just verify the index is in fact
       // readable:
@@ -497,17 +486,16 @@

     final int N = 5;

-    for(int pass=0;pass<4;pass++) {
+    for(int pass=0;pass<2;pass++) {

-      boolean autoCommit = pass < 2;
-      boolean useCompoundFile = (pass % 2) > 0;
+      boolean useCompoundFile = (pass % 2) != 0;

       Directory dir = new RAMDirectory();

       KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);

       for(int j=0;j 0);

       assertEquals(N+1, policy.numOnInit);
-      if (autoCommit) {
-        assertTrue(policy.numOnCommit > 1);
-      } else {
-        assertEquals(N+1, policy.numOnCommit);
-      }
+      assertEquals(N+1, policy.numOnCommit);

       // Simplistic check: just verify only the past N segments_N's still
       // exist, and, I can open a reader on each:
@@ -559,27 +543,26 @@

     final int N = 10;

-    for(int pass=0;pass<4;pass++) {
+    for(int pass=0;pass<2;pass++) {

-      boolean autoCommit = pass < 2;
-      boolean useCompoundFile = (pass % 2) > 0;
+      boolean useCompoundFile = (pass % 2) != 0;

       KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);

       Directory dir = new RAMDirectory();
-      IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
+      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setUseCompoundFile(useCompoundFile);
       writer.close();
       Term searchTerm = new Term("content", "aaa");
       Query query = new TermQuery(searchTerm);
       for(int i=0;i 1) {
-          if (i % 2 == 0) {
-            expectedCount += 1;
-          } else {
-            expectedCount -= 17;
-          }
+        // count should be.
+        searcher = new IndexSearcher(reader);
+        hits = searcher.search(query, null, 1000).scoreDocs;
+        if (i > 1) {
+          if (i % 2 == 0) {
+            expectedCount += 1;
+          } else {
+            expectedCount -= 17;
           }
-        assertEquals(expectedCount, hits.length);
-        searcher.close();
         }
+        assertEquals(expectedCount, hits.length);
+        searcher.close();
         reader.close();
         if (i == N) {
           fail("should have failed on commits before last 5");
         }
@@ -659,15 +638,14 @@

     final int N = 10;

-    for(int pass=0;pass<4;pass++) {
+    for(int pass=0;pass<2;pass++) {

-      boolean autoCommit = pass < 2;
-      boolean useCompoundFile = (pass % 2) > 0;
+      boolean useCompoundFile = (pass % 2) != 0;

       KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N);

       Directory dir = new RAMDirectory();
-      IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy);
+      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.UNLIMITED);
       writer.setMaxBufferedDocs(10);
       writer.setUseCompoundFile(useCompoundFile);
       writer.close();
@@ -676,13 +654,13 @@
       for(int i=0;i
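The policies these tests drive all implement the small IndexDeletionPolicy interface whose javadoc is updated in the next hunk. For reference, a minimal keep-everything policy looks roughly like this (an illustrative sketch, not the test's own class; the raw List types match the 2.9-era signatures):

    // Minimal IndexDeletionPolicy: never delete any commit point.
    class KeepAllCommitsPolicy implements IndexDeletionPolicy {
      public void onInit(List commits) throws IOException {
        // invoked once when the IndexWriter first opens the index
      }
      public void onCommit(List commits) throws IOException {
        // invoked on IndexWriter.commit()/close(); removing nothing here
        // keeps every commit visible to IndexReader.listCommits(dir)
      }
    }
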
Index: src/java/org/apache/lucene/index/IndexDeletionPolicy.java
===================================================================
--- src/java/org/apache/lucene/index/IndexDeletionPolicy.java (revision 823110)
+++ src/java/org/apache/lucene/index/IndexDeletionPolicy.java (working copy)
   *
-   * <p>If writer has autoCommit = true then
-   * this method will in general be called many times during
-   * one instance of {@link IndexWriter}.  If
-   * autoCommit = false then this method is
-   * only called once when {@link IndexWriter#close} is
-   * called, or not at all if the {@link IndexWriter#abort}
-   * is called.
+   *
+   * <p>This method is only called when {@link
+   * IndexWriter#commit} or {@link IndexWriter#close} is
+   * called, or possibly not at all if {@link
+   * IndexWriter#abort} is called.
   *
   * <p>Note: the last CommitPoint is the most recent one,
   * i.e. the "front index state". Be careful not to delete it,
Index: src/java/org/apache/lucene/index/DirectoryReader.java
===================================================================
--- src/java/org/apache/lucene/index/DirectoryReader.java (revision 823110)
+++ src/java/org/apache/lucene/index/DirectoryReader.java (working copy)
@@ -785,9 +785,9 @@
   /**
    * Check whether this IndexReader is still using the current (i.e., most recently committed) version of the index.  If
    * a writer has committed any changes to the index since this reader was opened, this will return false,
-   * in which case you must open a new IndexReader in order to see the changes.  See the description of the autoCommit flag which controls when the {@link IndexWriter}
-   * actually commits changes to the index.
+   * in which case you must open a new IndexReader in order
+   * to see the changes.  Use {@link IndexWriter#commit} to
+   * commit changes to the index.
   *
   * @throws CorruptIndexException if the index is corrupt
   * @throws IOException if there is a low-level IO error
Index: src/java/org/apache/lucene/index/SegmentMerger.java
===================================================================
--- src/java/org/apache/lucene/index/SegmentMerger.java (revision 823110)
+++ src/java/org/apache/lucene/index/SegmentMerger.java (working copy)
@@ -276,9 +276,7 @@
   private final int mergeFields() throws CorruptIndexException, IOException {

     if (!mergeDocStores) {
-      // When we are not merging by doc stores, that means
-      // all segments were written as part of a single
-      // autoCommit=false IndexWriter session, so their field
+      // When we are not merging by doc stores, their field
       // name -> number mapping are the same. So, we start
       // with the fieldInfos of the last segment in this
       // case, to keep that numbering.
Index: src/java/org/apache/lucene/index/IndexReader.java
===================================================================
--- src/java/org/apache/lucene/index/IndexReader.java (revision 823110)
+++ src/java/org/apache/lucene/index/IndexReader.java (working copy)
@@ -526,10 +526,9 @@
   * index.  If a writer has committed any changes to the
   * index since this reader was opened, this will return
   * false, in which case you must open a new
-   * IndexReader in order to see the changes.  See the
-   * description of the autoCommit
-   * flag which controls when the {@link IndexWriter}
-   * actually commits changes to the index.
+   * IndexReader in order to see the changes.  Changes must
+   * be committed using {@link IndexWriter#commit} to be
+   * visible to readers.
   *
   * <p>
   * Not implemented in the IndexReader base class.
Index: src/java/org/apache/lucene/index/DocumentsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/DocumentsWriter.java (revision 823110)
+++ src/java/org/apache/lucene/index/DocumentsWriter.java (working copy)
@@ -79,9 +79,8 @@
 * call).  Finally the synchronized "finishDocument" is
 * called to flush changes to the directory.
 *
- * When flush is called by IndexWriter, or, we flush
- * internally when autoCommit=false, we forcefully idle all
- * threads and flush only once they are all idle.  This
+ * When flush is called by IndexWriter we forcefully idle
+ * all threads and flush only once they are all idle.  This
 * means you can call flush with a given thread even while
 * other threads are actively adding/deleting documents.
 *
@@ -349,8 +348,7 @@
   }

   /** Returns the current doc store segment we are writing
-   *  to. This will be the same as segment when autoCommit
-   *  * is true. */
+   *  to. */
   synchronized String getDocStoreSegment() {
     return docStoreSegment;
   }
@@ -441,8 +439,9 @@
   synchronized void abort() throws IOException {
     try {
-      if (infoStream != null)
+      if (infoStream != null) {
         message("docWriter: now abort");
+      }

       // Forcefully remove waiting ThreadStates from line
       waitQueue.abort();
@@ -491,6 +490,9 @@
     } finally {
       aborting = false;
       notifyAll();
+      if (infoStream != null) {
+        message("docWriter: done abort");
+      }
     }
   }
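The DirectoryReader and IndexReader javadoc changes above spell out the reader-side consequence of removing autoCommit: visibility is tied to explicit commits. A sketch of the refresh idiom they point at, assuming a writer and reader on the same Directory dir:

    IndexReader reader = IndexReader.open(dir, true); // sees the last commit only
    writer.addDocument(doc);
    writer.commit();                                  // publish a new commit point
    if (!reader.isCurrent()) {                        // a newer commit now exists
      IndexReader newReader = reader.reopen();        // cheap refresh to that commit
      if (newReader != reader) {
        reader.close();
      }
      reader = newReader;
    }
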
Index: src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexWriter.java (revision 823110)
+++ src/java/org/apache/lucene/index/IndexWriter.java (working copy)
@@ -23,14 +23,12 @@
 import org.apache.lucene.search.Similarity;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.FSDirectory;
 import org.apache.lucene.store.Lock;
 import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.BufferedIndexInput;
 import org.apache.lucene.util.Constants;

-import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.util.List;
@@ -85,55 +83,6 @@
  addDocument calls (see below for changing the {@link
  MergeScheduler}).
-
-  <p>The optional autoCommit argument to the {@link
-  #IndexWriter(Directory, boolean, Analyzer) constructors}
-  controls visibility of the changes to {@link IndexReader}
-  instances reading the same index.  When this is
-  false, changes are not visible until {@link
-  #close()} or {@link #commit()} is called.  Note that changes will still be
-  flushed to the {@link Directory} as new files, but are
-  not committed (no new segments_N file is written
-  referencing the new files, nor are the files sync'd to stable storage)
-  until {@link #close()} or {@link #commit()} is called.  If something
-  goes terribly wrong (for example the JVM crashes), then
-  the index will reflect none of the changes made since the
-  last commit, or the starting state if commit was not called.
-  You can also call {@link #rollback()}, which closes the writer
-  without committing any changes, and removes any index
-  files that had been flushed but are now unreferenced.
-  This mode is useful for preventing readers from refreshing
-  at a bad time (for example after you've done all your
-  deletes but before you've done your adds).  It can also be
-  used to implement simple single-writer transactional
-  semantics ("all or none").  You can do a two-phase commit
-  by calling {@link #prepareCommit()}
-  followed by {@link #commit()}.  This is necessary when
-  Lucene is working with an external resource (for example,
-  a database) and both must either commit or rollback the
-  transaction.
-
-  <p>When autoCommit is true then
-  the writer will periodically commit on its own.  [Deprecated: Note that in 3.0, IndexWriter will
-  no longer accept autoCommit=true (it will be hardwired to
-  false).  You can always call {@link #commit()} yourself
-  when needed]. There is
-  no guarantee when exactly an auto commit will occur (it
-  used to be after every flush, but it is now after every
-  completed merge, as of 2.4).  If you want to force a
-  commit, call {@link #commit()}, or, close the writer.  Once
-  a commit has finished, newly opened {@link IndexReader} instances will
-  see the changes to the index as of that commit.  When
-  running in this mode, be careful not to refresh your
-  readers while optimize or segment merges are taking place
-  as this can tie up substantial disk space.
-
-  <p>Regardless of autoCommit, an {@link
-  IndexReader} or {@link org.apache.lucene.search.IndexSearcher} will only see the
-  index as of the "point in time" that it was opened.  Any
-  changes committed to the index after the reader was opened
-  are not visible until the reader is re-opened.
-
  <p>If an index will not have more documents added for a while and optimal search
  performance is desired, then either the full {@link #optimize() optimize}
  method or partial {@link #optimize(int)} method should be
@@ -183,8 +132,7 @@
  IllegalStateException.  The only course of action is to
  call {@link #close()}, which internally will call {@link
  #rollback()}, to undo any changes to the index since the
-  last commit.  If you opened the writer with autoCommit
-  false you can also just call {@link #rollback()}
+  last commit.  You can also just call {@link #rollback()}
  directly.

  <p>NOTE: {@link
@@ -199,8 +147,7 @@

 /*
  * Clarification: Check Points (and commits)
- * Being able to set autoCommit=false allows IndexWriter to flush and
- * write new index files to the directory without writing a new segments_N
+ * IndexWriter writes new index files to the directory without writing a new segments_N
  * file which references these new files. It also means that the state of
  * the in memory SegmentInfos object is different than the most recent
  * segments_N file written to the directory.
@@ -211,9 +158,6 @@
  * (generation of) segments_N file - this check point is also an
  * IndexCommit.
  *
- * With autoCommit=true, every checkPoint is also a CommitPoint.
- * With autoCommit=false, some checkPoints may not be commits.
- *
  * A new checkpoint always replaces the previous checkpoint and
  * becomes the new "front" of the index. This allows the IndexFileDeleter
  * to delete files that are referenced only by stale checkpoints.
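The class javadoc deleted above folded a two-phase-commit note into the autoCommit discussion; the prepareCommit()/commit() API itself is unchanged by this patch. The pattern it described, for coordinating Lucene with an external store (db below is a hypothetical stand-in for that resource):

    // Two-phase commit between Lucene and an external resource.
    writer.prepareCommit();  // phase 1: flush + sync, not yet visible to readers
    try {
      db.prepareCommit();    // prepare the external resource as well
      writer.commit();       // phase 2: publish the prepared Lucene commit
      db.commit();
    } catch (Exception e) {
      writer.rollback();     // abandon the prepared commit
      db.rollback();
    }
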

NOTE: autoCommit (see above) is set to false with this - * constructor. - * * @param d the index directory * @param a the analyzer to use * @param create true to create the index or overwrite @@ -943,43 +883,11 @@ } /** - * Constructs an IndexWriter for the index in d. - * Text will be analyzed with a. If create - * is true, then a new, empty index will be created in - * d, replacing the index already there, if any. - * - * @param d the index directory - * @param a the analyzer to use - * @param create true to create the index or overwrite - * the existing one; false to append to the existing - * index - * @throws CorruptIndexException if the index is corrupt - * @throws LockObtainFailedException if another writer - * has this index open (write.lock could not - * be obtained) - * @throws IOException if the directory cannot be read/written to, or - * if it does not exist and create is - * false or if there is any other low-level - * IO error - * @deprecated This constructor will be removed in the 3.0 - * release, and call {@link #commit()} when needed. - * Use {@link #IndexWriter(Directory,Analyzer,boolean,MaxFieldLength)} instead. - */ - public IndexWriter(Directory d, Analyzer a, boolean create) - throws CorruptIndexException, LockObtainFailedException, IOException { - init(d, a, create, null, true, DEFAULT_MAX_FIELD_LENGTH, null, null); - } - - /** * Constructs an IndexWriter for the index in * d, first creating it if it does not * already exist. Text will be analyzed with * a. * - *

NOTE: autoCommit (see above) is set to false with this - * constructor. - * * @param d the index directory * @param a the analyzer to use * @param mfl Maximum field length in number of terms/tokens: LIMITED, UNLIMITED, or user-specified @@ -998,96 +906,11 @@ } /** - * Constructs an IndexWriter for the index in - * d, first creating it if it does not - * already exist. Text will be analyzed with - * a. - * - * @param d the index directory - * @param a the analyzer to use - * @throws CorruptIndexException if the index is corrupt - * @throws LockObtainFailedException if another writer - * has this index open (write.lock could not - * be obtained) - * @throws IOException if the directory cannot be - * read/written to or if there is any other low-level - * IO error - * @deprecated This constructor will be removed in the 3.0 release. - * Use {@link - * #IndexWriter(Directory,Analyzer,MaxFieldLength)} - * instead, and call {@link #commit()} when needed. - */ - public IndexWriter(Directory d, Analyzer a) - throws CorruptIndexException, LockObtainFailedException, IOException { - init(d, a, null, true, DEFAULT_MAX_FIELD_LENGTH, null, null); - } - - /** - * Constructs an IndexWriter for the index in - * d, first creating it if it does not - * already exist. Text will be analyzed with - * a. - * - * @param d the index directory - * @param autoCommit see above - * @param a the analyzer to use - * @throws CorruptIndexException if the index is corrupt - * @throws LockObtainFailedException if another writer - * has this index open (write.lock could not - * be obtained) - * @throws IOException if the directory cannot be - * read/written to or if there is any other low-level - * IO error - * @deprecated This constructor will be removed in the 3.0 release. - * Use {@link - * #IndexWriter(Directory,Analyzer,MaxFieldLength)} - * instead, and call {@link #commit()} when needed. - */ - public IndexWriter(Directory d, boolean autoCommit, Analyzer a) - throws CorruptIndexException, LockObtainFailedException, IOException { - init(d, a, null, autoCommit, DEFAULT_MAX_FIELD_LENGTH, null, null); - } - - /** - * Constructs an IndexWriter for the index in d. - * Text will be analyzed with a. If create - * is true, then a new, empty index will be created in - * d, replacing the index already there, if any. - * - * @param d the index directory - * @param autoCommit see above - * @param a the analyzer to use - * @param create true to create the index or overwrite - * the existing one; false to append to the existing - * index - * @throws CorruptIndexException if the index is corrupt - * @throws LockObtainFailedException if another writer - * has this index open (write.lock could not - * be obtained) - * @throws IOException if the directory cannot be read/written to, or - * if it does not exist and create is - * false or if there is any other low-level - * IO error - * @deprecated This constructor will be removed in the 3.0 release. - * Use {@link - * #IndexWriter(Directory,Analyzer,boolean,MaxFieldLength)} - * instead, and call {@link #commit()} when needed. - */ - public IndexWriter(Directory d, boolean autoCommit, Analyzer a, boolean create) - throws CorruptIndexException, LockObtainFailedException, IOException { - init(d, a, create, null, autoCommit, DEFAULT_MAX_FIELD_LENGTH, null, null); - } - - /** * Expert: constructs an IndexWriter with a custom {@link * IndexDeletionPolicy}, for the index in d, * first creating it if it does not already exist. Text * will be analyzed with a. * - *

NOTE: autoCommit (see above) is set to false with this - * constructor. - * * @param d the index directory * @param a the analyzer to use * @param deletionPolicy see above @@ -1107,43 +930,12 @@ /** * Expert: constructs an IndexWriter with a custom {@link - * IndexDeletionPolicy}, for the index in d, - * first creating it if it does not already exist. Text - * will be analyzed with a. - * - * @param d the index directory - * @param autoCommit see above - * @param a the analyzer to use - * @param deletionPolicy see above - * @throws CorruptIndexException if the index is corrupt - * @throws LockObtainFailedException if another writer - * has this index open (write.lock could not - * be obtained) - * @throws IOException if the directory cannot be - * read/written to or if there is any other low-level - * IO error - * @deprecated This constructor will be removed in the 3.0 release. - * Use {@link - * #IndexWriter(Directory,Analyzer,IndexDeletionPolicy,MaxFieldLength)} - * instead, and call {@link #commit()} when needed. - */ - public IndexWriter(Directory d, boolean autoCommit, Analyzer a, IndexDeletionPolicy deletionPolicy) - throws CorruptIndexException, LockObtainFailedException, IOException { - init(d, a, deletionPolicy, autoCommit, DEFAULT_MAX_FIELD_LENGTH, null, null); - } - - /** - * Expert: constructs an IndexWriter with a custom {@link * IndexDeletionPolicy}, for the index in d. * Text will be analyzed with a. If * create is true, then a new, empty index * will be created in d, replacing the index * already there, if any. * - *

NOTE: autoCommit (see above) is set to false with this - * constructor. - * * @param d the index directory * @param a the analyzer to use * @param create true to create the index or overwrite @@ -1174,10 +966,6 @@ * will be created in d, replacing the index * already there, if any. * - *

NOTE: autoCommit (see above) is set to false with this - * constructor. - * * @param d the index directory * @param a the analyzer to use * @param create true to create the index or overwrite @@ -1203,39 +991,6 @@ } /** - * Expert: constructs an IndexWriter with a custom {@link - * IndexDeletionPolicy}, for the index in d. - * Text will be analyzed with a. If - * create is true, then a new, empty index - * will be created in d, replacing the index - * already there, if any. - * - * @param d the index directory - * @param autoCommit see above - * @param a the analyzer to use - * @param create true to create the index or overwrite - * the existing one; false to append to the existing - * index - * @param deletionPolicy see above - * @throws CorruptIndexException if the index is corrupt - * @throws LockObtainFailedException if another writer - * has this index open (write.lock could not - * be obtained) - * @throws IOException if the directory cannot be read/written to, or - * if it does not exist and create is - * false or if there is any other low-level - * IO error - * @deprecated This constructor will be removed in the 3.0 release. - * Use {@link - * #IndexWriter(Directory,Analyzer,boolean,IndexDeletionPolicy,MaxFieldLength)} - * instead, and call {@link #commit()} when needed. - */ - public IndexWriter(Directory d, boolean autoCommit, Analyzer a, boolean create, IndexDeletionPolicy deletionPolicy) - throws CorruptIndexException, LockObtainFailedException, IOException { - init(d, a, create, deletionPolicy, autoCommit, DEFAULT_MAX_FIELD_LENGTH, null, null); - } - - /** * Expert: constructs an IndexWriter on specific commit * point, with a custom {@link IndexDeletionPolicy}, for * the index in d. Text will be analyzed @@ -1253,10 +1008,6 @@ * {@link IndexDeletionPolicy} has preserved past * commits. * - *

NOTE: autoCommit (see above) is set to false with this - * constructor. - * * @param d the index directory * @param a the analyzer to use * @param deletionPolicy see above @@ -1290,6 +1041,8 @@ IndexDeletionPolicy deletionPolicy, boolean autoCommit, int maxFieldLength, IndexingChain indexingChain, IndexCommit commit) throws CorruptIndexException, LockObtainFailedException, IOException { + + assert !autoCommit; directory = d; analyzer = a; setMessageID(defaultInfoStream); @@ -1706,31 +1459,6 @@ return getLogMergePolicy().getMergeFactor(); } - /** - * Expert: returns max delay inserted before syncing a - * commit point. On Windows, at least, pausing before - * syncing can increase net indexing throughput. The - * delay is variable based on size of the segment's files, - * and is only inserted when using - * ConcurrentMergeScheduler for merges. - * @deprecated This will be removed in 3.0, when - * autoCommit=true is removed from IndexWriter. - */ - public double getMaxSyncPauseSeconds() { - return maxSyncPauseSeconds; - } - - /** - * Expert: sets the max delay before syncing a commit - * point. - * @see #getMaxSyncPauseSeconds - * @deprecated This will be removed in 3.0, when - * autoCommit=true is removed from IndexWriter. - */ - public void setMaxSyncPauseSeconds(double seconds) { - maxSyncPauseSeconds = seconds; - } - /** If non-null, this will be the default infoStream used * by a newly instantiated IndexWriter. * @see #setInfoStream @@ -1763,7 +1491,6 @@ private void messageState() { message("setInfoStream: dir=" + directory + - " autoCommit=" + autoCommit + " mergePolicy=" + mergePolicy + " mergeScheduler=" + mergeScheduler + " ramBufferSizeMB=" + docWriter.getRAMBufferSizeMB() + @@ -2041,6 +1768,7 @@ if (infoStream != null) message("hit exception building compound file doc store for segment " + docStoreSegment); deleter.deleteFile(compoundFileName); + docWriter.abort(); } } @@ -3020,12 +2748,8 @@ * This removes any temporary files that had been created, * after which the state of the index will be the same as * it was when commit() was last called or when this - * writer was first opened. This can only be called when - * this IndexWriter was opened with - * autoCommit=false. This also clears a - * previous call to {@link #prepareCommit}. - * @throws IllegalStateException if this is called when - * the writer was opened with autoCommit=true. + * writer was first opened. This also clears a previous + * call to {@link #prepareCommit}. * @throws IOException if there is a low-level IO error */ public void rollback() throws IOException { @@ -3768,12 +3492,11 @@ /**

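The migration implied by the constructors removed above, as a sketch (the directory and analyzer are placeholders): the autoCommit flag disappears, an explicit MaxFieldLength takes its place, and commit() is called wherever auto commit used to be relied on:

    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class ConstructorMigrationSketch {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        // Before (deprecated, removed by this patch):
        //   new IndexWriter(dir, true, new WhitespaceAnalyzer(), true);
        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
            true, IndexWriter.MaxFieldLength.UNLIMITED);
        // ... addDocument/deleteDocuments calls ...
        writer.commit(); // explicit commit replaces the old autoCommit=true behavior
        writer.close();
      }
    }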
Expert: prepare for commit, specifying * commitUserData Map (String -> String). This does the - * first phase of 2-phase commit. You can only call this - * when autoCommit is false. This method does all steps - * necessary to commit changes since this writer was - * opened: flushes pending added and deleted docs, syncs - * the index files, writes most of next segments_N file. - * After calling this you must call either {@link + * first phase of 2-phase commit. This method does all + * steps necessary to commit changes since this writer + * was opened: flushes pending added and deleted docs, + * syncs the index files, writes most of next segments_N + * file. After calling this you must call either {@link * #commit()} to finish the commit, or {@link * #rollback()} to revert the commit and undo all changes * done since the writer was opened.

@@ -3790,14 +3513,12 @@ * that's recorded into the segments file in the index, * and retrievable by {@link * IndexReader#getCommitUserData}. Note that when - * IndexWriter commits itself, for example if open with - * autoCommit=true, or, during {@link #close}, the + * IndexWriter commits itself during {@link #close}, the * commitUserData is unchanged (just carried over from * the prior commit). If this is null then the previous * commitUserData is kept. Also, the commitUserData will * only "stick" if there are actually changes in the - * index to commit. Therefore it's best to use this - * feature only when autoCommit is false. + * index to commit. */ public final void prepareCommit(Map commitUserData) throws CorruptIndexException, IOException { prepareCommit(commitUserData, false); @@ -3991,7 +3712,7 @@ flushDocStores |= autoCommit; String docStoreSegment = docWriter.getDocStoreSegment(); - assert docStoreSegment != null || numDocs == 0; + assert docStoreSegment != null || numDocs == 0: "dss=" + docStoreSegment + " numDocs=" + numDocs; if (docStoreSegment == null) flushDocStores = false; Index: src/java/org/apache/lucene/index/IndexFileDeleter.java =================================================================== --- src/java/org/apache/lucene/index/IndexFileDeleter.java (revision 823110) +++ src/java/org/apache/lucene/index/IndexFileDeleter.java (working copy) @@ -40,13 +40,6 @@ * counting to map the live SegmentInfos instances to * individual files in the Directory. * - * When autoCommit=true, IndexWriter currently commits only - * on completion of a merge (though this may change with - * time: it is not a guarantee). When autoCommit=false, - * IndexWriter only commits when it is closed. Regardless - * of autoCommit, the user may call IndexWriter.commit() to - * force a blocking commit. - * * The same directory file may be referenced by more than * one IndexCommit, i.e. more than one SegmentInfos. * Therefore we count how many commits reference each file. 
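A sketch of the commitUserData round trip described above, assuming the static IndexReader.getCommitUserData(Directory) referenced in the javadoc; the key and value are hypothetical, and the raw Map types match the 2.9-era signatures:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.Directory;

    public class CommitUserDataSketch {
      // Records an application marker with the commit; per the javadoc above it
      // only "sticks" if there are actual index changes to commit.
      static void commitWithUserData(Directory dir, IndexWriter writer) throws Exception {
        Map userData = new HashMap();
        userData.put("sourceVersion", "42"); // hypothetical key/value
        writer.prepareCommit(userData);      // phase 1, with user data
        writer.commit();                     // phase 2
        Map committed = IndexReader.getCommitUserData(dir);
        System.out.println("committed userData: " + committed);
      }
    }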
Index: contrib/swing/src/java/org/apache/lucene/swing/models/TableSearcher.java =================================================================== --- contrib/swing/src/java/org/apache/lucene/swing/models/TableSearcher.java (revision 823110) +++ contrib/swing/src/java/org/apache/lucene/swing/models/TableSearcher.java (working copy) @@ -163,7 +163,7 @@ try { // recreate the RAMDirectory directory = new RAMDirectory(); - IndexWriter writer = new IndexWriter(directory, analyzer, true); + IndexWriter writer = new IndexWriter(directory, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED); // iterate through all rows for (int row=0; row < tableModel.getRowCount(); row++){ Index: contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java =================================================================== --- contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java (revision 823110) +++ contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestIndicesEquals.java (working copy) @@ -65,7 +65,7 @@ RAMDirectory dir = new RAMDirectory(); // create dir data - IndexWriter indexWriter = new IndexWriter(dir, new StandardAnalyzer(), true); + IndexWriter indexWriter = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); for (int i = 0; i < 20; i++) { Document document = new Document(); assembleDocument(document, i); @@ -89,7 +89,7 @@ InstantiatedIndex ii = new InstantiatedIndex(); // create dir data - IndexWriter indexWriter = new IndexWriter(dir, new StandardAnalyzer(), true); + IndexWriter indexWriter = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); for (int i = 0; i < 500; i++) { Document document = new Document(); assembleDocument(document, i); Index: contrib/benchmark/conf/autoCommit.alg =================================================================== --- contrib/benchmark/conf/autoCommit.alg (revision 823110) +++ contrib/benchmark/conf/autoCommit.alg (working copy) @@ -1,70 +0,0 @@ -#/** -# * Licensed to the Apache Software Foundation (ASF) under one or more -# * contributor license agreements. See the NOTICE file distributed with -# * this work for additional information regarding copyright ownership. -# * The ASF licenses this file to You under the Apache License, Version 2.0 -# * (the "License"); you may not use this file except in compliance with -# * the License. You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. -# */ -# ------------------------------------------------------------------------------------- -# multi val params are iterated by NewRound's, added to reports, start with column name. 
-# -# based on micro-standard -# -# modified to use wikipedia sources and index entire docs -# currently just used to measure ingest rate - -#merge.factor=mrg:10:100:10:100 -#max.buffered=buf:10:10:100:100 -ram.flush.mb=ram:32 -autocommit=acommit:true:false - -max.field.length=2147483647 - - -compound=true - -analyzer=org.apache.lucene.analysis.standard.StandardAnalyzer -directory=FSDirectory - -doc.stored=true -doc.tokenized=true -doc.term.vector=false -log.step=5000 - -docs.file=temp/enwiki-20070527-pages-articles.xml - -doc.maker=org.apache.lucene.benchmark.byTask.feeds.EnwikiDocMaker - -query.maker=org.apache.lucene.benchmark.byTask.feeds.ReutersQueryMaker - -# task at this depth or less would print when they start -task.max.depth.log=2 - -log.queries=false -# ------------------------------------------------------------------------------------- - -{ "Rounds" - - ResetSystemErase - - { "Populate" - CreateIndex - { "MAddDocs" AddDoc > : 200000 - CloseIndex - } - - NewRound - -} : 4 - -RepSumByName -RepSumByPrefRound MAddDocs Index: contrib/benchmark/conf/deletepercent.alg =================================================================== --- contrib/benchmark/conf/deletepercent.alg (revision 823110) +++ contrib/benchmark/conf/deletepercent.alg (working copy) @@ -17,7 +17,6 @@ # ------------------------------------------------------------------------------------- # multi val params are iterated by NewRound's, added to reports, start with column name. -autocommit=false analyzer=org.apache.lucene.analysis.standard.StandardAnalyzer directory=FSDirectory #directory=RamDirectory Index: contrib/benchmark/conf/indexing-multithreaded.alg =================================================================== --- contrib/benchmark/conf/indexing-multithreaded.alg (revision 823110) +++ contrib/benchmark/conf/indexing-multithreaded.alg (working copy) @@ -22,7 +22,6 @@ #ram.flush.mb=flush:32:40:48:56:32:40:48:56 compound=cmpnd:true:true:true:true:false:false:false:false -autocommit=false analyzer=org.apache.lucene.analysis.standard.StandardAnalyzer directory=FSDirectory #directory=RamDirectory Index: contrib/benchmark/conf/indexing-flush-by-RAM-multithreaded.alg =================================================================== --- contrib/benchmark/conf/indexing-flush-by-RAM-multithreaded.alg (revision 823110) +++ contrib/benchmark/conf/indexing-flush-by-RAM-multithreaded.alg (working copy) @@ -22,7 +22,6 @@ ram.flush.mb=flush:32:40:48:56:32:40:48:56 compound=cmpnd:true:true:true:true:false:false:false:false -autocommit=false analyzer=org.apache.lucene.analysis.standard.StandardAnalyzer directory=FSDirectory #directory=RamDirectory Index: contrib/benchmark/conf/indexing.alg =================================================================== --- contrib/benchmark/conf/indexing.alg (revision 823110) +++ contrib/benchmark/conf/indexing.alg (working copy) @@ -22,7 +22,6 @@ #ram.flush.mb=flush:32:40:48:56:32:40:48:56 compound=cmpnd:true:true:true:true:false:false:false:false -autocommit=false analyzer=org.apache.lucene.analysis.standard.StandardAnalyzer directory=FSDirectory #directory=RamDirectory Index: contrib/benchmark/conf/indexing-flush-by-RAM.alg =================================================================== --- contrib/benchmark/conf/indexing-flush-by-RAM.alg (revision 823110) +++ contrib/benchmark/conf/indexing-flush-by-RAM.alg (working copy) @@ -22,7 +22,6 @@ ram.flush.mb=flush:32:40:48:56:32:40:48:56 compound=cmpnd:true:true:true:true:false:false:false:false -autocommit=false 
analyzer=org.apache.lucene.analysis.standard.StandardAnalyzer directory=FSDirectory #directory=RamDirectory Index: contrib/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java =================================================================== --- contrib/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java (revision 823110) +++ contrib/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java (working copy) @@ -338,7 +338,6 @@ "docs.file=" + lineFile.getAbsolutePath().replace('\\', '/'), "content.source.forever=false", "doc.reuse.fields=false", - "autocommit=false", "ram.flush.mb=4", "# ----- alg ", "ResetSystemErase", Index: contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/OpenIndexTask.java =================================================================== --- contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/OpenIndexTask.java (revision 823110) +++ contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/OpenIndexTask.java (working copy) @@ -29,8 +29,7 @@ * Open an index writer. *
Other side effects: index writer object in perfRunData is set. *
Relevant properties: merge.factor, max.buffered, - * max.field.length, ram.flush.mb [default 0], autocommit - * [default true]. + * max.field.length, ram.flush.mb [default 0]. */ public class OpenIndexTask extends PerfTask { @@ -38,7 +37,6 @@ public static final int DEFAULT_MAX_FIELD_LENGTH = IndexWriter.DEFAULT_MAX_FIELD_LENGTH; public static final int DEFAULT_MERGE_PFACTOR = LogMergePolicy.DEFAULT_MERGE_FACTOR; public static final double DEFAULT_RAM_FLUSH_MB = (int) IndexWriter.DEFAULT_RAM_BUFFER_SIZE_MB; - public static final boolean DEFAULT_AUTO_COMMIT = false; public OpenIndexTask(PerfRunData runData) { super(runData); @@ -48,9 +46,9 @@ PerfRunData runData = getRunData(); Config config = runData.getConfig(); IndexWriter writer = new IndexWriter(runData.getDirectory(), - config.get("autocommit", DEFAULT_AUTO_COMMIT), runData.getAnalyzer(), - false); + false, + IndexWriter.MaxFieldLength.UNLIMITED); CreateIndexTask.setIndexWriterConfig(writer, config); runData.setIndexWriter(writer); return 1; Index: contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/CreateIndexTask.java =================================================================== --- contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/CreateIndexTask.java (revision 823110) +++ contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/CreateIndexTask.java (working copy) @@ -34,7 +34,7 @@ * Create an index.
* Other side effects: index writer object in perfRunData is set.
* Relevant properties: merge.factor, max.buffered, - * max.field.length, ram.flush.mb [default 0], autocommit - * [default true]. + * max.field.length, ram.flush.mb [default 0]. *

* This task also supports a "writer.info.stream" property with the following @@ -129,9 +129,9 @@ IndexDeletionPolicy indexDeletionPolicy = getIndexDeletionPolicy(config); IndexWriter writer = new IndexWriter(runData.getDirectory(), - runData.getConfig().get("autocommit", OpenIndexTask.DEFAULT_AUTO_COMMIT), runData.getAnalyzer(), - true, indexDeletionPolicy); + true, indexDeletionPolicy, + IndexWriter.MaxFieldLength.LIMITED); setIndexWriterConfig(writer, config); runData.setIndexWriter(writer); return 1; Index: contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/package.html =================================================================== --- contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/package.html (revision 823110) +++ contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/package.html (working copy) @@ -583,7 +583,6 @@

  • max.buffered
  • directory
  • ram.flush.mb -
  • autocommit
  • Index: contrib/misc/src/test/org/apache/lucene/index/TestTermVectorAccessor.java =================================================================== --- contrib/misc/src/test/org/apache/lucene/index/TestTermVectorAccessor.java (revision 823110) +++ contrib/misc/src/test/org/apache/lucene/index/TestTermVectorAccessor.java (working copy) @@ -29,7 +29,7 @@ public void test() throws Exception { Directory dir = new RAMDirectory(); - IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(Collections.EMPTY_SET), true); + IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(Collections.EMPTY_SET), true, IndexWriter.MaxFieldLength.UNLIMITED); Document doc; Index: contrib/misc/src/test/org/apache/lucene/misc/ChainedFilterTest.java =================================================================== --- contrib/misc/src/test/org/apache/lucene/misc/ChainedFilterTest.java (revision 823110) +++ contrib/misc/src/test/org/apache/lucene/misc/ChainedFilterTest.java (working copy) @@ -57,7 +57,7 @@ public void setUp() throws Exception { directory = new RAMDirectory(); IndexWriter writer = - new IndexWriter(directory, new WhitespaceAnalyzer(), true); + new IndexWriter(directory, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); Calendar cal = Calendar.getInstance(); cal.setTimeInMillis(1041397200000L); // 2003 January 01 Index: contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java =================================================================== --- contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java (revision 823110) +++ contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestParser.java (working copy) @@ -61,7 +61,7 @@ { BufferedReader d = new BufferedReader(new InputStreamReader(TestParser.class.getResourceAsStream("reuters21578.txt"))); dir=new RAMDirectory(); - IndexWriter writer=new IndexWriter(dir,analyzer,true); + IndexWriter writer=new IndexWriter(dir,analyzer,true, IndexWriter.MaxFieldLength.UNLIMITED); String line = d.readLine(); while(line!=null) { Index: contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestQueryTemplateManager.java =================================================================== --- contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestQueryTemplateManager.java (revision 823110) +++ contrib/xml-query-parser/src/test/org/apache/lucene/xmlparser/TestQueryTemplateManager.java (working copy) @@ -141,7 +141,7 @@ //Create an index RAMDirectory dir=new RAMDirectory(); - IndexWriter w=new IndexWriter(dir,analyzer,true); + IndexWriter w=new IndexWriter(dir,analyzer,true, IndexWriter.MaxFieldLength.UNLIMITED); for (int i = 0; i < docFieldValues.length; i++) { w.addDocument(getDocumentFromString(docFieldValues[i])); Index: contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java =================================================================== --- contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java (revision 823110) +++ contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java (working copy) @@ -75,7 +75,7 @@ protected void setUp() throws IOException { directory = new RAMDirectory(); - IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true); + IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); setUpPlotter( 2, 15); Index: contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestDistance.java 
=================================================================== --- contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestDistance.java (revision 823110) +++ contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestDistance.java (working copy) @@ -49,7 +49,7 @@ @Override protected void setUp() throws IOException { directory = new RAMDirectory(); - writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true); + writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); addData(writer); } Index: contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java =================================================================== --- contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (revision 823110) +++ contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (working copy) @@ -1210,7 +1210,7 @@ public void testMultiSearcher() throws Exception { // setup index 1 RAMDirectory ramDir1 = new RAMDirectory(); - IndexWriter writer1 = new IndexWriter(ramDir1, new StandardAnalyzer(), true); + IndexWriter writer1 = new IndexWriter(ramDir1, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); Document d = new Document(); Field f = new Field(FIELD_NAME, "multiOne", Field.Store.YES, Field.Index.ANALYZED); d.add(f); @@ -1221,7 +1221,7 @@ // setup index 2 RAMDirectory ramDir2 = new RAMDirectory(); - IndexWriter writer2 = new IndexWriter(ramDir2, new StandardAnalyzer(), true); + IndexWriter writer2 = new IndexWriter(ramDir2, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); d = new Document(); f = new Field(FIELD_NAME, "multiTwo", Field.Store.YES, Field.Index.ANALYZED); d.add(f); @@ -1601,7 +1601,7 @@ protected void setUp() throws Exception { super.setUp(); ramDir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(ramDir, new StandardAnalyzer(), true); + IndexWriter writer = new IndexWriter(ramDir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); for (int i = 0; i < texts.length; i++) { addDoc(writer, texts[i]); } Index: contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java =================================================================== --- contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java (revision 823110) +++ contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java (working copy) @@ -47,7 +47,7 @@ public void setUp() throws Exception { - IndexWriter writer = new IndexWriter(store, new WhitespaceAnalyzer(), true); + IndexWriter writer = new IndexWriter(store, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); Document doc; Index: contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestSpellChecker.java =================================================================== --- contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestSpellChecker.java (revision 823110) +++ contrib/spellchecker/src/test/org/apache/lucene/search/spell/TestSpellChecker.java (working copy) @@ -46,7 +46,7 @@ //create a user index userindex = new RAMDirectory(); - IndexWriter writer = new IndexWriter(userindex, new SimpleAnalyzer(), true); + IndexWriter writer = new IndexWriter(userindex, new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); for (int i = 0; i < 1000; i++) { Document doc = new Document(); Index: 
contrib/spellchecker/src/java/org/apache/lucene/search/spell/SpellChecker.java =================================================================== --- contrib/spellchecker/src/java/org/apache/lucene/search/spell/SpellChecker.java (revision 823110) +++ contrib/spellchecker/src/java/org/apache/lucene/search/spell/SpellChecker.java (working copy) @@ -106,7 +106,7 @@ public void setSpellIndex(Directory spellIndex) throws IOException { this.spellIndex = spellIndex; if (!IndexReader.indexExists(spellIndex)) { - IndexWriter writer = new IndexWriter(spellIndex, null, true); + IndexWriter writer = new IndexWriter(spellIndex, null, true, IndexWriter.MaxFieldLength.UNLIMITED); writer.close(); } // close the old searcher, if there was one @@ -299,7 +299,7 @@ * @throws IOException */ public void clearIndex() throws IOException { - IndexWriter writer = new IndexWriter(spellIndex, null, true); + IndexWriter writer = new IndexWriter(spellIndex, null, true, IndexWriter.MaxFieldLength.UNLIMITED); writer.close(); //close the old searcher @@ -325,7 +325,7 @@ * @throws IOException */ public void indexDictionary(Dictionary dict, int mergeFactor, int ramMB) throws IOException { - IndexWriter writer = new IndexWriter(spellIndex, true, new WhitespaceAnalyzer()); + IndexWriter writer = new IndexWriter(spellIndex, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED); writer.setMergeFactor(mergeFactor); writer.setRAMBufferSizeMB(ramMB); Index: contrib/lucli/src/java/lucli/LuceneMethods.java =================================================================== --- contrib/lucli/src/java/lucli/LuceneMethods.java (revision 823110) +++ contrib/lucli/src/java/lucli/LuceneMethods.java (working copy) @@ -174,7 +174,7 @@ public void optimize() throws IOException { //open the index writer. 
False: don't create a new one - IndexWriter indexWriter = new IndexWriter(indexName, createAnalyzer(), false); + IndexWriter indexWriter = new IndexWriter(indexName, createAnalyzer(), false, IndexWriter.MaxFieldLength.UNLIMITED); message("Starting to optimize index."); long start = System.currentTimeMillis(); indexWriter.optimize(); Index: contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java =================================================================== --- contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java (revision 823110) +++ contrib/analyzers/common/src/test/org/apache/lucene/analysis/shingle/ShingleAnalyzerWrapperTest.java (working copy) @@ -59,7 +59,7 @@ */ public IndexSearcher setUpSearcher(Analyzer analyzer) throws Exception { Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, analyzer, true); + IndexWriter writer = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED); Document doc; doc = new Document(); Index: contrib/regex/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java =================================================================== --- contrib/regex/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java (revision 823110) +++ contrib/regex/src/test/org/apache/lucene/search/regex/TestSpanRegexQuery.java (working copy) @@ -44,7 +44,7 @@ public void testSpanRegex() throws Exception { RAMDirectory directory = new RAMDirectory(); - IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(), true); + IndexWriter writer = new IndexWriter(directory, new SimpleAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); Document doc = new Document(); // doc.add(new Field("field", "the quick brown fox jumps over the lazy dog", // Field.Store.NO, Field.Index.ANALYZED)); Index: contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java =================================================================== --- contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java (revision 823110) +++ contrib/queries/src/test/org/apache/lucene/search/DuplicateFilterTest.java (working copy) @@ -42,7 +42,7 @@ protected void setUp() throws Exception { directory = new RAMDirectory(); - IndexWriter writer = new IndexWriter(directory, new StandardAnalyzer(), true); + IndexWriter writer = new IndexWriter(directory, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); //Add series of docs with filterable fields : url, text and dates flags addDoc(writer, "http://lucene.apache.org", "lucene 1.4.3 available", "20040101"); Index: contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java =================================================================== --- contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java (revision 823110) +++ contrib/queries/src/test/org/apache/lucene/search/BooleanFilterTest.java (working copy) @@ -37,7 +37,7 @@ protected void setUp() throws Exception { directory = new RAMDirectory(); - IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true); + IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); //Add series of docs with filterable fields : acces rights, prices, dates and "in-stock" flags addDoc(writer, "admin guest", "010", "20040101","Y"); Index: 
tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java =================================================================== --- tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java (revision 823230) +++ tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java (working copy) @@ -50,10 +50,8 @@ public static final String INDEX_PATH = "test.snapshots"; public void testSnapshotDeletionPolicy() throws Exception { - File dir = new File(System.getProperty("tempDir"), INDEX_PATH); + File dir = _TestUtil.getTempDir(INDEX_PATH); try { - // Sometimes past test leaves the dir - _TestUtil.rmDir(dir); Directory fsDir = FSDirectory.open(dir); runTest(fsDir); fsDir.close(); @@ -70,27 +68,36 @@ Directory dir = new MockRAMDirectory(); SnapshotDeletionPolicy dp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()); - IndexWriter writer = new IndexWriter(dir, true,new StandardAnalyzer(), dp); + IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), dp, IndexWriter.MaxFieldLength.UNLIMITED); // Force frequent commits writer.setMaxBufferedDocs(2); Document doc = new Document(); doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); - for(int i=0;i<7;i++) + for(int i=0;i<7;i++) { writer.addDocument(doc); + if (i % 2 == 0) { + writer.commit(); + } + } IndexCommit cp = (IndexCommit) dp.snapshot(); copyFiles(dir, cp); writer.close(); copyFiles(dir, cp); - writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp); + writer = new IndexWriter(dir, new StandardAnalyzer(), dp, IndexWriter.MaxFieldLength.UNLIMITED); copyFiles(dir, cp); - for(int i=0;i<7;i++) + for(int i=0;i<7;i++) { writer.addDocument(doc); + if (i % 2 == 0) { + writer.commit(); + } + } + copyFiles(dir, cp); writer.close(); copyFiles(dir, cp); dp.release(); - writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp); + writer = new IndexWriter(dir, new StandardAnalyzer(), dp, IndexWriter.MaxFieldLength.UNLIMITED); writer.close(); try { copyFiles(dir, cp); @@ -106,7 +113,7 @@ final long stopTime = System.currentTimeMillis() + 7000; SnapshotDeletionPolicy dp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()); - final IndexWriter writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp); + final IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), dp, IndexWriter.MaxFieldLength.UNLIMITED); // Force frequent commits writer.setMaxBufferedDocs(2); @@ -123,6 +130,13 @@ t.printStackTrace(System.out); fail("addDocument failed"); } + if (i%2 == 0) { + try { + writer.commit(); + } catch (Exception e) { + throw new RuntimeException(e); + } + } } try { Thread.sleep(1); Index: tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/search/payloads/PayloadHelper.java =================================================================== --- tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/search/payloads/PayloadHelper.java (revision 823230) +++ tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/search/payloads/PayloadHelper.java (working copy) @@ -103,7 +103,7 @@ RAMDirectory directory = new RAMDirectory(); PayloadAnalyzer analyzer = new PayloadAnalyzer(); IndexWriter writer - = new IndexWriter(directory, analyzer, true); + = new IndexWriter(directory, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED); writer.setSimilarity(similarity); 
//writer.infoStream = System.out; for (int i = 0; i < numDocs; i++) { Index: tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java =================================================================== --- tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java (revision 823230) +++ tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/search/spans/TestPayloadSpans.java (working copy) @@ -115,7 +115,7 @@ throws IOException { RAMDirectory directory = new RAMDirectory(); PayloadAnalyzer analyzer = new PayloadAnalyzer(); - IndexWriter writer = new IndexWriter(directory, analyzer, true); + IndexWriter writer = new IndexWriter(directory, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED); writer.setSimilarity(similarity); Document doc = new Document(); @@ -362,7 +362,7 @@ public void testPayloadSpanUtil() throws Exception { RAMDirectory directory = new RAMDirectory(); PayloadAnalyzer analyzer = new PayloadAnalyzer(); - IndexWriter writer = new IndexWriter(directory, analyzer, true); + IndexWriter writer = new IndexWriter(directory, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED); writer.setSimilarity(similarity); Document doc = new Document(); doc.add(new Field(PayloadHelper.FIELD,"xx rr yy mm pp", Field.Store.YES, Field.Index.ANALYZED)); @@ -425,7 +425,7 @@ RAMDirectory directory = new RAMDirectory(); PayloadAnalyzer analyzer = new PayloadAnalyzer(); String[] docs = new String[]{"xx rr yy mm pp","xx yy mm rr pp", "nopayload qq ss pp np", "one two three four five six seven eight nine ten eleven", "nine one two three four five six seven eight eleven ten"}; - IndexWriter writer = new IndexWriter(directory, analyzer, true); + IndexWriter writer = new IndexWriter(directory, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED); writer.setSimilarity(similarity); Index: tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestStressIndexing2.java =================================================================== --- tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestStressIndexing2.java (revision 823230) +++ tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestStressIndexing2.java (working copy) @@ -32,7 +32,6 @@ static int maxFields=4; static int bigFieldSize=10; static boolean sameFieldOrder=false; - static boolean autoCommit=false; static int mergeFactor=3; static int maxBufferedDocs=3; static int seed=0; @@ -41,8 +40,8 @@ public class MockIndexWriter extends IndexWriter { - public MockIndexWriter(Directory dir, boolean autoCommit, Analyzer a, boolean create) throws IOException { - super(dir, autoCommit, a, create); + public MockIndexWriter(Directory dir, Analyzer a, boolean create, IndexWriter.MaxFieldLength mfl) throws IOException { + super(dir, a, create, mfl); } boolean testPoint(String name) { @@ -88,7 +87,6 @@ r = newRandom(); for (int i=0; i<100; i++) { // increase iterations for better testing sameFieldOrder=r.nextBoolean(); - autoCommit=r.nextBoolean(); mergeFactor=r.nextInt(3)+2; maxBufferedDocs=r.nextInt(3)+2; seed++; @@ -124,7 +122,7 @@ public DocsAndWriter indexRandomIWReader(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException { Map docs = new HashMap(); - IndexWriter w = new MockIndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true); + IndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(), true, 
IndexWriter.MaxFieldLength.UNLIMITED); w.setUseCompoundFile(false); /*** @@ -176,7 +174,7 @@ public Map indexRandom(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException { Map docs = new HashMap(); for(int iter=0;iter<3;iter++) { - IndexWriter w = new MockIndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true); + IndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); w.setUseCompoundFile(false); // force many merges Index: tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestIndexWriter.java =================================================================== --- tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestIndexWriter.java (revision 823230) +++ tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestIndexWriter.java (working copy) @@ -121,7 +121,7 @@ reader.close(); // optimize the index and check that the new doc count is correct - writer = new IndexWriter(dir, true, new WhitespaceAnalyzer()); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED); assertEquals(100, writer.maxDoc()); assertEquals(60, writer.numDocs()); writer.optimize(); @@ -231,7 +231,7 @@ startDiskUsage += startDir.fileLength(files[i]); } - for(int iter=0;iter<6;iter++) { + for(int iter=0;iter<3;iter++) { if (debug) System.out.println("TEST: iter=" + iter); @@ -239,8 +239,7 @@ // Start with 100 bytes more than we are currently using: long diskFree = diskUsage+100; - boolean autoCommit = iter % 2 == 0; - int method = iter/2; + int method = iter; boolean success = false; boolean done = false; @@ -258,7 +257,7 @@ // Make a new dir that will enforce disk usage: MockRAMDirectory dir = new MockRAMDirectory(startDir); - writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.UNLIMITED); IOException err = null; MergeScheduler ms = writer.getMergeScheduler(); @@ -294,12 +293,12 @@ rate = 0.0; } if (debug) - testName = "disk full test " + methodName + " with disk full at " + diskFree + " bytes autoCommit=" + autoCommit; + testName = "disk full test " + methodName + " with disk full at " + diskFree + " bytes"; } else { thisDiskFree = 0; rate = 0.0; if (debug) - testName = "disk full test " + methodName + " with unlimited disk space autoCommit=" + autoCommit; + testName = "disk full test " + methodName + " with unlimited disk space"; } if (debug) @@ -355,29 +354,6 @@ // ConcurrentMergeScheduler are done _TestUtil.syncConcurrentMerges(writer); - if (autoCommit) { - - // Whether we succeeded or failed, check that - // all un-referenced files were in fact - // deleted (ie, we did not create garbage). - // Only check this when autoCommit is true: - // when it's false, it's expected that there - // are unreferenced files (ie they won't be - // referenced until the "commit on close"). 
- Just create a new IndexFileDeleter, have it - delete unreferenced files, then verify that - in fact no files were deleted: - - String successStr; - if (success) { - successStr = "success"; - } else { - successStr = "IOException"; - } - String message = methodName + " failed to delete unreferenced files after " + successStr + " (" + diskFree + " bytes)"; - assertNoUnreferencedFiles(dir, message); - } - if (debug) { System.out.println(" now test readers"); } @@ -394,10 +370,8 @@ } int result = reader.docFreq(searchTerm); if (success) { - if (autoCommit && result != END_COUNT) { - fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + END_COUNT); - } else if (!autoCommit && result != START_COUNT) { - fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " [autoCommit = false]"); + if (result != START_COUNT) { + fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT); } } else { // On hitting exception we still may have added @@ -484,18 +458,17 @@ boolean debug = false; - for(int pass=0;pass<3;pass++) { + for(int pass=0;pass<2;pass++) { if (debug) System.out.println("TEST: pass=" + pass); - boolean autoCommit = pass == 0; - boolean doAbort = pass == 2; + boolean doAbort = pass == 1; long diskFree = 200; while(true) { if (debug) System.out.println("TEST: cycle: diskFree=" + diskFree); MockRAMDirectory dir = new MockRAMDirectory(); dir.setMaxSizeInBytes(diskFree); - IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); MergeScheduler ms = writer.getMergeScheduler(); if (ms instanceof ConcurrentMergeScheduler) @@ -535,7 +508,7 @@ _TestUtil.syncConcurrentMerges(ms); - assertNoUnreferencedFiles(dir, "after disk full during addDocument with autoCommit=" + autoCommit); + assertNoUnreferencedFiles(dir, "after disk full during addDocument"); // Make sure reader can open the index: IndexReader.open(dir, true).close(); @@ -2120,15 +2093,14 @@ Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED); doc.add(idField); - for(int pass=0;pass<3;pass++) { - boolean autoCommit = pass%2 == 0; - IndexWriter writer = new IndexWriter(directory, autoCommit, new WhitespaceAnalyzer(), true); + for(int pass=0;pass<2;pass++) { + IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); //System.out.println("TEST: pass=" + pass + " ac=" + autoCommit + " cms=" + (pass >= 2)); for(int iter=0;iter<10;iter++) { //System.out.println("TEST: iter=" + iter); MergeScheduler ms; - if (pass >= 2) + if (pass == 1) ms = new ConcurrentMergeScheduler(); else ms = new SerialMergeScheduler(); @@ -2193,7 +2165,7 @@ reader.close(); // Reopen - writer = new IndexWriter(directory, autoCommit, new WhitespaceAnalyzer(), false); + writer = new IndexWriter(directory, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.UNLIMITED); } writer.close(); } @@ -2364,7 +2336,7 @@ for(int iter=0;iter<10;iter++) { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer()); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED); ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler(); // We
expect disk full exceptions in the merge threads cms.setSuppressExceptions(); @@ -2425,7 +2397,7 @@ public void _testSingleThreadFailure(MockRAMDirectory.Failure failure) throws IOException { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer()); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED); writer.setMaxBufferedDocs(2); final Document doc = new Document(); doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); @@ -2438,6 +2410,7 @@ try { writer.addDocument(doc); writer.addDocument(doc); + writer.commit(); fail("did not hit exception"); } catch (IOException ioe) { } @@ -2725,7 +2698,7 @@ FailOnlyInSync failure = new FailOnlyInSync(); dir.failOn(failure); - IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer()); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED); failure.setDoFail(); ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler(); @@ -2735,8 +2708,16 @@ writer.setMaxBufferedDocs(2); writer.setMergeFactor(5); - for (int i = 0; i < 23; i++) + for (int i = 0; i < 23; i++) { addDoc(writer); + if ((i-1)%2 == 0) { + try { + writer.commit(); + } catch (IOException ioe) { + // expected + } + } + } cms.sync(); assertTrue(failure.didFail); @@ -2753,10 +2734,9 @@ public void testTermVectorCorruption() throws IOException { Directory dir = new MockRAMDirectory(); - for(int iter=0;iter<4;iter++) { - final boolean autoCommit = 1==iter/2; + for(int iter=0;iter<2;iter++) { IndexWriter writer = new IndexWriter(dir, - autoCommit, new StandardAnalyzer()); + new StandardAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED); writer.setMaxBufferedDocs(2); writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH); writer.setMergeScheduler(new SerialMergeScheduler()); @@ -2789,7 +2769,7 @@ reader.close(); writer = new IndexWriter(dir, - autoCommit, new StandardAnalyzer()); + new StandardAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED); writer.setMaxBufferedDocs(2); writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH); writer.setMergeScheduler(new SerialMergeScheduler()); @@ -2805,10 +2785,9 @@ // LUCENE-1168 public void testTermVectorCorruption2() throws IOException { Directory dir = new MockRAMDirectory(); - for(int iter=0;iter<4;iter++) { - final boolean autoCommit = 1==iter/2; + for(int iter=0;iter<2;iter++) { IndexWriter writer = new IndexWriter(dir, - autoCommit, new StandardAnalyzer()); + new StandardAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED); writer.setMaxBufferedDocs(2); writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH); writer.setMergeScheduler(new SerialMergeScheduler()); @@ -3053,7 +3032,7 @@ // LUCENE-1179 public void testEmptyFieldName() throws IOException { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer()); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED); Document doc = new Document(); doc.add(new Field("", "a b c", Field.Store.NO, Field.Index.ANALYZED)); writer.addDocument(doc); @@ -4038,7 +4017,7 @@ final List thrown = new ArrayList(); - final IndexWriter writer = new IndexWriter(new MockRAMDirectory(), new StandardAnalyzer()) { + final IndexWriter writer = new IndexWriter(new MockRAMDirectory(), new StandardAnalyzer(), 
IndexWriter.MaxFieldLength.UNLIMITED) { public void message(final String message) { if (message.startsWith("now flush at close") && 0 == thrown.size()) { thrown.add(null); @@ -4328,7 +4307,7 @@ public void testDeadlock() throws Exception { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer()); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED); writer.setMaxBufferedDocs(2); Document doc = new Document(); doc.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES, Index: tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java =================================================================== --- tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java (revision 823230) +++ tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestAddIndexesNoOptimize.java (working copy) @@ -149,6 +149,7 @@ writer.deleteDocuments(q); writer.optimize(); + writer.commit(); verifyNumDocs(dir, 1039); verifyTermDocs(dir, new Term("content", "aaa"), 1030); @@ -187,6 +188,7 @@ writer.deleteDocuments(q); writer.optimize(); + writer.commit(); verifyNumDocs(dir, 1039); verifyTermDocs(dir, new Term("content", "aaa"), 1030); @@ -225,6 +227,7 @@ writer.addIndexesNoOptimize(new Directory[] {aux}); writer.optimize(); + writer.commit(); verifyNumDocs(dir, 1039); verifyTermDocs(dir, new Term("content", "aaa"), 1030); @@ -425,7 +428,7 @@ private IndexWriter newWriter(Directory dir, boolean create) throws IOException { - final IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), create); + final IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), create, IndexWriter.MaxFieldLength.UNLIMITED); writer.setMergePolicy(new LogDocMergePolicy(writer)); return writer; } Index: tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestCrash.java =================================================================== --- tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestCrash.java (revision 823230) +++ tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestCrash.java (working copy) @@ -35,7 +35,7 @@ private IndexWriter initIndex(MockRAMDirectory dir) throws IOException { dir.setLockFactory(NoLockFactory.getNoLockFactory()); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer()); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED); //writer.setMaxBufferedDocs(2); writer.setMaxBufferedDocs(10); ((ConcurrentMergeScheduler) writer.getMergeScheduler()).setSuppressExceptions(); Index: tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestThreadedOptimize.java =================================================================== --- tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestThreadedOptimize.java (revision 823230) +++ tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestThreadedOptimize.java (working copy) @@ -51,9 +51,9 @@ failed = true; } - public void runTest(Directory directory, boolean autoCommit, MergeScheduler merger) throws Exception { + public void runTest(Directory directory, MergeScheduler merger) throws Exception { - IndexWriter writer = new IndexWriter(directory, autoCommit, ANALYZER, 
true); + IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED); writer.setMaxBufferedDocs(2); if (merger != null) writer.setMergeScheduler(merger); @@ -118,11 +118,9 @@ assertEquals(expectedDocCount, writer.docCount()); - if (!autoCommit) { - writer.close(); - writer = new IndexWriter(directory, autoCommit, ANALYZER, false); - writer.setMaxBufferedDocs(2); - } + writer.close(); + writer = new IndexWriter(directory, ANALYZER, false, IndexWriter.MaxFieldLength.UNLIMITED); + writer.setMaxBufferedDocs(2); IndexReader reader = IndexReader.open(directory, true); assertTrue(reader.isOptimized()); @@ -138,10 +136,8 @@ */ public void testThreadedOptimize() throws Exception { Directory directory = new MockRAMDirectory(); - runTest(directory, false, new SerialMergeScheduler()); - runTest(directory, true, new SerialMergeScheduler()); - runTest(directory, false, new ConcurrentMergeScheduler()); - runTest(directory, true, new ConcurrentMergeScheduler()); + runTest(directory, new SerialMergeScheduler()); + runTest(directory, new ConcurrentMergeScheduler()); directory.close(); String tempDir = System.getProperty("tempDir"); @@ -150,10 +146,8 @@ String dirName = tempDir + "/luceneTestThreadedOptimize"; directory = FSDirectory.open(new File(dirName)); - runTest(directory, false, new SerialMergeScheduler()); - runTest(directory, true, new SerialMergeScheduler()); - runTest(directory, false, new ConcurrentMergeScheduler()); - runTest(directory, true, new ConcurrentMergeScheduler()); + runTest(directory, new SerialMergeScheduler()); + runTest(directory, new ConcurrentMergeScheduler()); directory.close(); _TestUtil.rmDir(dirName); } Index: tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java =================================================================== --- tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java (revision 823230) +++ tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java (working copy) @@ -33,10 +33,12 @@ private static final Analyzer ANALYZER = new SimpleAnalyzer(); private static class FailOnlyOnFlush extends MockRAMDirectory.Failure { - boolean doFail = false; + boolean doFail; + boolean hitExc; public void setDoFail() { this.doFail = true; + hitExc = false; } public void clearDoFail() { this.doFail = false; @@ -47,6 +49,7 @@ StackTraceElement[] trace = new Exception().getStackTrace(); for (int i = 0; i < trace.length; i++) { if ("doFlush".equals(trace[i].getMethodName())) { + hitExc = true; //new RuntimeException().printStackTrace(System.out); throw new IOException("now failing during flush"); } @@ -63,33 +66,42 @@ FailOnlyOnFlush failure = new FailOnlyOnFlush(); directory.failOn(failure); - IndexWriter writer = new IndexWriter(directory, true, ANALYZER, true); + IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED); ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler(); writer.setMergeScheduler(cms); writer.setMaxBufferedDocs(2); Document doc = new Document(); Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED); doc.add(idField); + int extraCount = 0; for(int i=0;i<10;i++) { for(int j=0;j<20;j++) { idField.setValue(Integer.toString(i*20+j)); writer.addDocument(doc); } - writer.addDocument(doc); - - failure.setDoFail(); - try { - writer.flush(); - fail("failed to hit IOException"); - } catch 
(IOException ioe) { - failure.clearDoFail(); + // must cycle here because sometimes the merge flushes + // the doc we just added and so there's nothing to + // flush, and we don't hit the exception + while(true) { + writer.addDocument(doc); + failure.setDoFail(); + try { + writer.flush(); + if (failure.hitExc) { + fail("failed to hit IOException"); + } + extraCount++; + } catch (IOException ioe) { + failure.clearDoFail(); + break; + } } } writer.close(); IndexReader reader = IndexReader.open(directory, true); - assertEquals(200, reader.numDocs()); + assertEquals(200+extraCount, reader.numDocs()); reader.close(); directory.close(); } @@ -100,7 +112,7 @@ RAMDirectory directory = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(directory, true, ANALYZER, true); + IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED); ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler(); writer.setMergeScheduler(cms); @@ -142,32 +154,28 @@ RAMDirectory directory = new MockRAMDirectory(); - for(int pass=0;pass<2;pass++) { + IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED); - boolean autoCommit = pass==0; - IndexWriter writer = new IndexWriter(directory, autoCommit, ANALYZER, true); + for(int iter=0;iter<7;iter++) { + ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler(); + writer.setMergeScheduler(cms); + writer.setMaxBufferedDocs(2); - for(int iter=0;iter<7;iter++) { - ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler(); - writer.setMergeScheduler(cms); - writer.setMaxBufferedDocs(2); - - for(int j=0;j<21;j++) { - Document doc = new Document(); - doc.add(new Field("content", "a b c", Field.Store.NO, Field.Index.ANALYZED)); - writer.addDocument(doc); - } + for(int j=0;j<21;j++) { + Document doc = new Document(); + doc.add(new Field("content", "a b c", Field.Store.NO, Field.Index.ANALYZED)); + writer.addDocument(doc); + } - writer.close(); - TestIndexWriter.assertNoUnreferencedFiles(directory, "testNoExtraFiles autoCommit=" + autoCommit); + writer.close(); + TestIndexWriter.assertNoUnreferencedFiles(directory, "testNoExtraFiles"); - // Reopen - writer = new IndexWriter(directory, autoCommit, ANALYZER, false); - } - - writer.close(); + // Reopen + writer = new IndexWriter(directory, ANALYZER, false, IndexWriter.MaxFieldLength.UNLIMITED); } + writer.close(); + directory.close(); } @@ -178,45 +186,41 @@ Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED); doc.add(idField); - for(int pass=0;pass<2;pass++) { - boolean autoCommit = pass==0; - IndexWriter writer = new IndexWriter(directory, autoCommit, ANALYZER, true); + IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED); - for(int iter=0;iter<10;iter++) { - ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler(); - writer.setMergeScheduler(cms); - writer.setMaxBufferedDocs(2); - writer.setMergeFactor(100); + for(int iter=0;iter<10;iter++) { + ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler(); + writer.setMergeScheduler(cms); + writer.setMaxBufferedDocs(2); + writer.setMergeFactor(100); - for(int j=0;j<201;j++) { - idField.setValue(Integer.toString(iter*201+j)); - writer.addDocument(doc); - } + for(int j=0;j<201;j++) { + idField.setValue(Integer.toString(iter*201+j)); + writer.addDocument(doc); + } - int delID = iter*201; - for(int j=0;j<20;j++) { - writer.deleteDocuments(new Term("id", Integer.toString(delID))); - delID += 5; - } + 
int delID = iter*201; + for(int j=0;j<20;j++) { + writer.deleteDocuments(new Term("id", Integer.toString(delID))); + delID += 5; + } - // Force a bunch of merge threads to kick off so we - // stress out aborting them on close: - writer.setMergeFactor(3); - writer.addDocument(doc); - writer.flush(); + // Force a bunch of merge threads to kick off so we + // stress out aborting them on close: + writer.setMergeFactor(3); + writer.addDocument(doc); + writer.flush(); - writer.close(false); + writer.close(false); - IndexReader reader = IndexReader.open(directory, true); - assertEquals((1+iter)*182, reader.numDocs()); - reader.close(); + IndexReader reader = IndexReader.open(directory, true); + assertEquals((1+iter)*182, reader.numDocs()); + reader.close(); - // Reopen - writer = new IndexWriter(directory, autoCommit, ANALYZER, false); - } - writer.close(); + // Reopen + writer = new IndexWriter(directory, ANALYZER, false, IndexWriter.MaxFieldLength.UNLIMITED); } - + writer.close(); directory.close(); } } Index: tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java =================================================================== --- tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java (revision 823230) +++ tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java (working copy) @@ -125,7 +125,7 @@ public void testMaxBufferedDocsChange() throws IOException { Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), true); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); writer.setMaxBufferedDocs(101); writer.setMergeFactor(101); writer.setMergePolicy(new LogDocMergePolicy(writer)); @@ -139,7 +139,7 @@ } writer.close(); - writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), false); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.UNLIMITED); writer.setMaxBufferedDocs(101); writer.setMergeFactor(101); writer.setMergePolicy(new LogDocMergePolicy(writer)); @@ -158,6 +158,9 @@ for (int i = 100; i < 1000; i++) { addDoc(writer); } + writer.commit(); + ((ConcurrentMergeScheduler) writer.getMergeScheduler()).sync(); + writer.commit(); checkInvariants(writer); writer.close(); @@ -167,7 +170,7 @@ public void testMergeDocCount0() throws IOException { Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), true); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); writer.setMergePolicy(new LogDocMergePolicy(writer)); writer.setMaxBufferedDocs(10); writer.setMergeFactor(100); @@ -182,7 +185,7 @@ reader.deleteDocuments(new Term("content", "aaa")); reader.close(); - writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), false); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.UNLIMITED); writer.setMergePolicy(new LogDocMergePolicy(writer)); writer.setMaxBufferedDocs(10); writer.setMergeFactor(5); @@ -191,6 +194,9 @@ for (int i = 0; i < 10; i++) { addDoc(writer); } + writer.commit(); + ((ConcurrentMergeScheduler) writer.getMergeScheduler()).sync(); + writer.commit(); checkInvariants(writer); assertEquals(10, writer.docCount()); Index: 
tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestAtomicUpdate.java =================================================================== --- tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestAtomicUpdate.java (revision 823230) +++ tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestAtomicUpdate.java (working copy) @@ -33,8 +33,8 @@ public class MockIndexWriter extends IndexWriter { - public MockIndexWriter(Directory dir, boolean autoCommit, Analyzer a, boolean create) throws IOException { - super(dir, autoCommit, a, create); + public MockIndexWriter(Directory dir, Analyzer a, boolean create, IndexWriter.MaxFieldLength mfl) throws IOException { + super(dir, a, create, mfl); } boolean testPoint(String name) { @@ -125,7 +125,7 @@ TimedThread[] threads = new TimedThread[4]; - IndexWriter writer = new MockIndexWriter(directory, true, ANALYZER, true); + IndexWriter writer = new MockIndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED); writer.setMaxBufferedDocs(7); writer.setMergeFactor(3); Index: tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestIndexWriterDelete.java =================================================================== --- tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestIndexWriterDelete.java (revision 823230) +++ tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestIndexWriterDelete.java (working copy) @@ -40,237 +40,217 @@ "Venice has lots of canals" }; String[] text = { "Amsterdam", "Venice" }; - for(int pass=0;pass<2;pass++) { - boolean autoCommit = (0==pass); + Directory dir = new MockRAMDirectory(); + IndexWriter modifier = new IndexWriter(dir, + new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); + modifier.setUseCompoundFile(true); + modifier.setMaxBufferedDeleteTerms(1); - Directory dir = new MockRAMDirectory(); - IndexWriter modifier = new IndexWriter(dir, autoCommit, - new WhitespaceAnalyzer(), true); - modifier.setUseCompoundFile(true); - modifier.setMaxBufferedDeleteTerms(1); + for (int i = 0; i < keywords.length; i++) { + Document doc = new Document(); + doc.add(new Field("id", keywords[i], Field.Store.YES, + Field.Index.NOT_ANALYZED)); + doc.add(new Field("country", unindexed[i], Field.Store.YES, + Field.Index.NO)); + doc.add(new Field("contents", unstored[i], Field.Store.NO, + Field.Index.ANALYZED)); + doc + .add(new Field("city", text[i], Field.Store.YES, + Field.Index.ANALYZED)); + modifier.addDocument(doc); + } + modifier.optimize(); + modifier.commit(); - for (int i = 0; i < keywords.length; i++) { - Document doc = new Document(); - doc.add(new Field("id", keywords[i], Field.Store.YES, - Field.Index.NOT_ANALYZED)); - doc.add(new Field("country", unindexed[i], Field.Store.YES, - Field.Index.NO)); - doc.add(new Field("contents", unstored[i], Field.Store.NO, - Field.Index.ANALYZED)); - doc - .add(new Field("city", text[i], Field.Store.YES, - Field.Index.ANALYZED)); - modifier.addDocument(doc); - } - modifier.optimize(); - modifier.commit(); + Term term = new Term("city", "Amsterdam"); + int hitCount = getHitCount(dir, term); + assertEquals(1, hitCount); + modifier.deleteDocuments(term); + modifier.commit(); + hitCount = getHitCount(dir, term); + assertEquals(0, hitCount); - Term term = new Term("city", "Amsterdam"); - int hitCount = getHitCount(dir, term); - assertEquals(1, hitCount); - modifier.deleteDocuments(term); - modifier.commit(); - 
hitCount = getHitCount(dir, term); - assertEquals(0, hitCount); + modifier.close(); + dir.close(); - modifier.close(); - dir.close(); - } } // test when delete terms only apply to disk segments public void testNonRAMDelete() throws IOException { - for(int pass=0;pass<2;pass++) { - boolean autoCommit = (0==pass); - Directory dir = new MockRAMDirectory(); - IndexWriter modifier = new IndexWriter(dir, autoCommit, - new WhitespaceAnalyzer(), true); - modifier.setMaxBufferedDocs(2); - modifier.setMaxBufferedDeleteTerms(2); + Directory dir = new MockRAMDirectory(); + IndexWriter modifier = new IndexWriter(dir, + new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); + modifier.setMaxBufferedDocs(2); + modifier.setMaxBufferedDeleteTerms(2); - int id = 0; - int value = 100; + int id = 0; + int value = 100; - for (int i = 0; i < 7; i++) { - addDoc(modifier, ++id, value); - } - modifier.commit(); + for (int i = 0; i < 7; i++) { + addDoc(modifier, ++id, value); + } + modifier.commit(); - assertEquals(0, modifier.getNumBufferedDocuments()); - assertTrue(0 < modifier.getSegmentCount()); + assertEquals(0, modifier.getNumBufferedDocuments()); + assertTrue(0 < modifier.getSegmentCount()); - modifier.commit(); + modifier.commit(); - IndexReader reader = IndexReader.open(dir, true); - assertEquals(7, reader.numDocs()); - reader.close(); + IndexReader reader = IndexReader.open(dir, true); + assertEquals(7, reader.numDocs()); + reader.close(); - modifier.deleteDocuments(new Term("value", String.valueOf(value))); + modifier.deleteDocuments(new Term("value", String.valueOf(value))); - modifier.commit(); + modifier.commit(); - reader = IndexReader.open(dir, true); - assertEquals(0, reader.numDocs()); - reader.close(); - modifier.close(); - dir.close(); - } + reader = IndexReader.open(dir, true); + assertEquals(0, reader.numDocs()); + reader.close(); + modifier.close(); + dir.close(); } public void testMaxBufferedDeletes() throws IOException { - for(int pass=0;pass<2;pass++) { - boolean autoCommit = (0==pass); - Directory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, autoCommit, - new WhitespaceAnalyzer(), true); - writer.setMaxBufferedDeleteTerms(1); - writer.deleteDocuments(new Term("foobar", "1")); - writer.deleteDocuments(new Term("foobar", "1")); - writer.deleteDocuments(new Term("foobar", "1")); - assertEquals(3, writer.getFlushDeletesCount()); - writer.close(); - dir.close(); - } + Directory dir = new MockRAMDirectory(); + IndexWriter writer = new IndexWriter(dir, + new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); + writer.setMaxBufferedDeleteTerms(1); + writer.deleteDocuments(new Term("foobar", "1")); + writer.deleteDocuments(new Term("foobar", "1")); + writer.deleteDocuments(new Term("foobar", "1")); + assertEquals(3, writer.getFlushDeletesCount()); + writer.close(); + dir.close(); } // test when delete terms only apply to ram segments public void testRAMDeletes() throws IOException { - for(int pass=0;pass<2;pass++) { - for(int t=0;t<2;t++) { - boolean autoCommit = (0==pass); - Directory dir = new MockRAMDirectory(); - IndexWriter modifier = new IndexWriter(dir, autoCommit, - new WhitespaceAnalyzer(), true); - modifier.setMaxBufferedDocs(4); - modifier.setMaxBufferedDeleteTerms(4); + for(int t=0;t<2;t++) { + Directory dir = new MockRAMDirectory(); + IndexWriter modifier = new IndexWriter(dir, + new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); + modifier.setMaxBufferedDocs(4); + modifier.setMaxBufferedDeleteTerms(4); - 
int id = 0; - int value = 100; + int id = 0; + int value = 100; - addDoc(modifier, ++id, value); - if (0 == t) - modifier.deleteDocuments(new Term("value", String.valueOf(value))); - else - modifier.deleteDocuments(new TermQuery(new Term("value", String.valueOf(value)))); - addDoc(modifier, ++id, value); - if (0 == t) { - modifier.deleteDocuments(new Term("value", String.valueOf(value))); - assertEquals(2, modifier.getNumBufferedDeleteTerms()); - assertEquals(1, modifier.getBufferedDeleteTermsSize()); - } - else - modifier.deleteDocuments(new TermQuery(new Term("value", String.valueOf(value)))); + addDoc(modifier, ++id, value); + if (0 == t) + modifier.deleteDocuments(new Term("value", String.valueOf(value))); + else + modifier.deleteDocuments(new TermQuery(new Term("value", String.valueOf(value)))); + addDoc(modifier, ++id, value); + if (0 == t) { + modifier.deleteDocuments(new Term("value", String.valueOf(value))); + assertEquals(2, modifier.getNumBufferedDeleteTerms()); + assertEquals(1, modifier.getBufferedDeleteTermsSize()); + } + else + modifier.deleteDocuments(new TermQuery(new Term("value", String.valueOf(value)))); - addDoc(modifier, ++id, value); - assertEquals(0, modifier.getSegmentCount()); - modifier.flush(); + addDoc(modifier, ++id, value); + assertEquals(0, modifier.getSegmentCount()); + modifier.flush(); - modifier.commit(); + modifier.commit(); - IndexReader reader = IndexReader.open(dir, true); - assertEquals(1, reader.numDocs()); + IndexReader reader = IndexReader.open(dir, true); + assertEquals(1, reader.numDocs()); - int hitCount = getHitCount(dir, new Term("id", String.valueOf(id))); - assertEquals(1, hitCount); - reader.close(); - modifier.close(); - dir.close(); - } + int hitCount = getHitCount(dir, new Term("id", String.valueOf(id))); + assertEquals(1, hitCount); + reader.close(); + modifier.close(); + dir.close(); } } // test when delete terms apply to both disk and ram segments public void testBothDeletes() throws IOException { - for(int pass=0;pass<2;pass++) { - boolean autoCommit = (0==pass); - Directory dir = new MockRAMDirectory(); - IndexWriter modifier = new IndexWriter(dir, autoCommit, - new WhitespaceAnalyzer(), true); - modifier.setMaxBufferedDocs(100); - modifier.setMaxBufferedDeleteTerms(100); + Directory dir = new MockRAMDirectory(); + IndexWriter modifier = new IndexWriter(dir, + new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); + modifier.setMaxBufferedDocs(100); + modifier.setMaxBufferedDeleteTerms(100); - int id = 0; - int value = 100; + int id = 0; + int value = 100; - for (int i = 0; i < 5; i++) { - addDoc(modifier, ++id, value); - } + for (int i = 0; i < 5; i++) { + addDoc(modifier, ++id, value); + } - value = 200; - for (int i = 0; i < 5; i++) { - addDoc(modifier, ++id, value); - } - modifier.commit(); + value = 200; + for (int i = 0; i < 5; i++) { + addDoc(modifier, ++id, value); + } + modifier.commit(); - for (int i = 0; i < 5; i++) { - addDoc(modifier, ++id, value); - } - modifier.deleteDocuments(new Term("value", String.valueOf(value))); + for (int i = 0; i < 5; i++) { + addDoc(modifier, ++id, value); + } + modifier.deleteDocuments(new Term("value", String.valueOf(value))); - modifier.commit(); + modifier.commit(); - IndexReader reader = IndexReader.open(dir, true); - assertEquals(5, reader.numDocs()); - modifier.close(); - } + IndexReader reader = IndexReader.open(dir, true); + assertEquals(5, reader.numDocs()); + modifier.close(); } // test that batched delete terms are flushed together public void testBatchDeletes() 
throws IOException { - for(int pass=0;pass<2;pass++) { - boolean autoCommit = (0==pass); - Directory dir = new MockRAMDirectory(); - IndexWriter modifier = new IndexWriter(dir, autoCommit, - new WhitespaceAnalyzer(), true); - modifier.setMaxBufferedDocs(2); - modifier.setMaxBufferedDeleteTerms(2); + Directory dir = new MockRAMDirectory(); + IndexWriter modifier = new IndexWriter(dir, + new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); + modifier.setMaxBufferedDocs(2); + modifier.setMaxBufferedDeleteTerms(2); - int id = 0; - int value = 100; + int id = 0; + int value = 100; - for (int i = 0; i < 7; i++) { - addDoc(modifier, ++id, value); - } - modifier.commit(); + for (int i = 0; i < 7; i++) { + addDoc(modifier, ++id, value); + } + modifier.commit(); - IndexReader reader = IndexReader.open(dir, true); - assertEquals(7, reader.numDocs()); - reader.close(); + IndexReader reader = IndexReader.open(dir, true); + assertEquals(7, reader.numDocs()); + reader.close(); - id = 0; - modifier.deleteDocuments(new Term("id", String.valueOf(++id))); - modifier.deleteDocuments(new Term("id", String.valueOf(++id))); + id = 0; + modifier.deleteDocuments(new Term("id", String.valueOf(++id))); + modifier.deleteDocuments(new Term("id", String.valueOf(++id))); - modifier.commit(); + modifier.commit(); - reader = IndexReader.open(dir, true); - assertEquals(5, reader.numDocs()); - reader.close(); + reader = IndexReader.open(dir, true); + assertEquals(5, reader.numDocs()); + reader.close(); - Term[] terms = new Term[3]; - for (int i = 0; i < terms.length; i++) { - terms[i] = new Term("id", String.valueOf(++id)); - } - modifier.deleteDocuments(terms); - modifier.commit(); - reader = IndexReader.open(dir, true); - assertEquals(2, reader.numDocs()); - reader.close(); - - modifier.close(); - dir.close(); + Term[] terms = new Term[3]; + for (int i = 0; i < terms.length; i++) { + terms[i] = new Term("id", String.valueOf(++id)); } + modifier.deleteDocuments(terms); + modifier.commit(); + reader = IndexReader.open(dir, true); + assertEquals(2, reader.numDocs()); + reader.close(); + + modifier.close(); + dir.close(); } // test deleteAll() public void testDeleteAll() throws IOException { - for (int pass=0;pass<2;pass++) { - boolean autoCommit = (0==pass); Directory dir = new MockRAMDirectory(); - IndexWriter modifier = new IndexWriter(dir, autoCommit, - new WhitespaceAnalyzer(), true); + IndexWriter modifier = new IndexWriter(dir, + new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); modifier.setMaxBufferedDocs(2); modifier.setMaxBufferedDeleteTerms(2); @@ -311,14 +291,13 @@ modifier.close(); dir.close(); - } } // test rollback of deleteAll() public void testDeleteAllRollback() throws IOException { Directory dir = new MockRAMDirectory(); - IndexWriter modifier = new IndexWriter(dir, false, - new WhitespaceAnalyzer(), true); + IndexWriter modifier = new IndexWriter(dir, + new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); modifier.setMaxBufferedDocs(2); modifier.setMaxBufferedDeleteTerms(2); @@ -355,8 +334,8 @@ // test deleteAll() w/ near real-time reader public void testDeleteAllNRT() throws IOException { Directory dir = new MockRAMDirectory(); - IndexWriter modifier = new IndexWriter(dir, false, - new WhitespaceAnalyzer(), true); + IndexWriter modifier = new IndexWriter(dir, + new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); modifier.setMaxBufferedDocs(2); modifier.setMaxBufferedDeleteTerms(2); @@ -445,13 +424,10 @@ int START_COUNT = 157; int 
END_COUNT = 144; - for(int pass=0;pass<2;pass++) { - boolean autoCommit = (0==pass); - // First build up a starting index: MockRAMDirectory startDir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(startDir, autoCommit, - new WhitespaceAnalyzer(), true); + IndexWriter writer = new IndexWriter(startDir, + new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); for (int i = 0; i < 157; i++) { Document d = new Document(); d.add(new Field("id", Integer.toString(i), Field.Store.YES, @@ -473,8 +449,8 @@ while (!done) { MockRAMDirectory dir = new MockRAMDirectory(startDir); dir.setPreventDoubleWrite(false); - IndexWriter modifier = new IndexWriter(dir, autoCommit, - new WhitespaceAnalyzer()); + IndexWriter modifier = new IndexWriter(dir, + new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED); modifier.setMaxBufferedDocs(1000); // use flush or close modifier.setMaxBufferedDeleteTerms(1000); // use flush or close @@ -626,7 +602,6 @@ // Try again with 10 more bytes of free space: diskFree += 10; } - } } // This test tests that buffered deletes are cleared when @@ -677,11 +652,9 @@ "Venice has lots of canals" }; String[] text = { "Amsterdam", "Venice" }; - for(int pass=0;pass<2;pass++) { - boolean autoCommit = (0==pass); MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter modifier = new IndexWriter(dir, autoCommit, - new WhitespaceAnalyzer(), true); + IndexWriter modifier = new IndexWriter(dir, + new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); modifier.setUseCompoundFile(true); modifier.setMaxBufferedDeleteTerms(2); @@ -757,7 +730,6 @@ modifier.close(); dir.close(); - } } // This test tests that the files created by the docs writer before @@ -787,11 +759,9 @@ "Venice has lots of canals" }; String[] text = { "Amsterdam", "Venice" }; - for(int pass=0;pass<2;pass++) { - boolean autoCommit = (0==pass); MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter modifier = new IndexWriter(dir, autoCommit, - new WhitespaceAnalyzer(), true); + IndexWriter modifier = new IndexWriter(dir, + new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); dir.failOn(failure.reset()); @@ -825,9 +795,6 @@ } modifier.close(); - - } - } private String arrayToString(String[] l) { Index: tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestStressIndexing.java =================================================================== --- tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestStressIndexing.java (revision 823230) +++ tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestStressIndexing.java (working copy) @@ -115,8 +115,8 @@ Run one indexer and 2 searchers against single index as stress test. 
*/ - public void runStressTest(Directory directory, boolean autoCommit, MergeScheduler mergeScheduler) throws Exception { - IndexWriter modifier = new IndexWriter(directory, autoCommit, ANALYZER, true); + public void runStressTest(Directory directory, MergeScheduler mergeScheduler) throws Exception { + IndexWriter modifier = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.UNLIMITED); modifier.setMaxBufferedDocs(10); @@ -166,35 +166,20 @@ public void testStressIndexAndSearching() throws Exception { RANDOM = newRandom(); - // RAMDir - Directory directory = new MockRAMDirectory(); - runStressTest(directory, true, null); - directory.close(); - - // FSDir + // With ConcurrentMergeScheduler, in FSDir File dirPath = _TestUtil.getTempDir("lucene.test.stress"); - directory = FSDirectory.open(dirPath); - runStressTest(directory, true, null); + Directory directory = FSDirectory.open(dirPath); + runStressTest(directory, new ConcurrentMergeScheduler()); directory.close(); - // With ConcurrentMergeScheduler, in RAMDir - directory = new MockRAMDirectory(); - runStressTest(directory, true, new ConcurrentMergeScheduler()); - directory.close(); - - // With ConcurrentMergeScheduler, in FSDir - directory = FSDirectory.open(dirPath); - runStressTest(directory, true, new ConcurrentMergeScheduler()); - directory.close(); - - // With ConcurrentMergeScheduler and autoCommit=false, in RAMDir + // With ConcurrentMergeScheduler, in RAMDir directory = new MockRAMDirectory(); - runStressTest(directory, false, new ConcurrentMergeScheduler()); + runStressTest(directory, new ConcurrentMergeScheduler()); directory.close(); - // With ConcurrentMergeScheduler and autoCommit=false, in FSDir + // With ConcurrentMergeScheduler, in FSDir directory = FSDirectory.open(dirPath); - runStressTest(directory, false, new ConcurrentMergeScheduler()); + runStressTest(directory, new ConcurrentMergeScheduler()); directory.close(); _TestUtil.rmDir(dirPath); Index: tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestDeletionPolicy.java =================================================================== --- tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestDeletionPolicy.java (revision 823230) +++ tags/lucene_2_9_back_compat_tests_20091007c/src/test/org/apache/lucene/index/TestDeletionPolicy.java (working copy) @@ -202,12 +202,11 @@ final double SECONDS = 2.0; - boolean autoCommit = false; boolean useCompoundFile = true; Directory dir = new RAMDirectory(); ExpirationTimeDeletionPolicy policy = new ExpirationTimeDeletionPolicy(dir, SECONDS); - IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.UNLIMITED); writer.setUseCompoundFile(useCompoundFile); writer.close(); @@ -216,7 +215,7 @@ // Record last time when writer performed deletes of // past commits lastDeleteTime = System.currentTimeMillis(); - writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.UNLIMITED); writer.setUseCompoundFile(useCompoundFile); for(int j=0;j<17;j++) { addDoc(writer); @@ -267,10 +266,9 @@ */ public void testKeepAllDeletionPolicy() throws IOException { - for(int pass=0;pass<4;pass++) { + for(int pass=0;pass<2;pass++) { - boolean autoCommit = pass < 2; - boolean useCompoundFile = (pass % 2) > 0; + boolean useCompoundFile = (pass % 2) != 0; // Never deletes a commit KeepAllDeletionPolicy policy = new 
KeepAllDeletionPolicy(); @@ -278,37 +276,29 @@ Directory dir = new RAMDirectory(); policy.dir = dir; - IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.UNLIMITED); writer.setMaxBufferedDocs(10); writer.setUseCompoundFile(useCompoundFile); writer.setMergeScheduler(new SerialMergeScheduler()); for(int i=0;i<107;i++) { addDoc(writer); - if (autoCommit && i%10 == 0) - writer.commit(); } writer.close(); - writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.UNLIMITED); writer.setUseCompoundFile(useCompoundFile); writer.optimize(); writer.close(); assertEquals(2, policy.numOnInit); - if (!autoCommit) - // If we are not auto committing then there should - // be exactly 2 commits (one per close above): - assertEquals(2, policy.numOnCommit); + // If we are not auto committing then there should + // be exactly 2 commits (one per close above): + assertEquals(2, policy.numOnCommit); // Test listCommits Collection commits = IndexReader.listCommits(dir); - if (!autoCommit) - // 1 from opening writer + 2 from closing writer - assertEquals(3, commits.size()); - else - // 1 from opening writer + 2 from closing writer + - // 11 from calling writer.commit() explicitly above - assertEquals(14, commits.size()); + // 1 from opening writer + 2 from closing writer + assertEquals(3, commits.size()); Iterator it = commits.iterator(); // Make sure we can open a reader on each commit: @@ -453,16 +443,15 @@ */ public void testKeepNoneOnInitDeletionPolicy() throws IOException { - for(int pass=0;pass<4;pass++) { + for(int pass=0;pass<2;pass++) { - boolean autoCommit = pass < 2; - boolean useCompoundFile = (pass % 2) > 0; + boolean useCompoundFile = (pass % 2) != 0; KeepNoneOnInitDeletionPolicy policy = new KeepNoneOnInitDeletionPolicy(); Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.UNLIMITED); writer.setMaxBufferedDocs(10); writer.setUseCompoundFile(useCompoundFile); for(int i=0;i<107;i++) { @@ -470,16 +459,15 @@ } writer.close(); - writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.UNLIMITED); writer.setUseCompoundFile(useCompoundFile); writer.optimize(); writer.close(); assertEquals(2, policy.numOnInit); - if (!autoCommit) - // If we are not auto committing then there should - // be exactly 2 commits (one per close above): - assertEquals(2, policy.numOnCommit); + // If we are not auto committing then there should + // be exactly 2 commits (one per close above): + assertEquals(2, policy.numOnCommit); // Simplistic check: just verify the index is in fact // readable: @@ -497,17 +485,16 @@ final int N = 5; - for(int pass=0;pass<4;pass++) { + for(int pass=0;pass<2;pass++) { - boolean autoCommit = pass < 2; - boolean useCompoundFile = (pass % 2) > 0; + boolean useCompoundFile = (pass % 2) != 0; Directory dir = new RAMDirectory(); KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N); for(int j=0;j<N+1;j++) { [...] assertTrue(policy.numDelete > 0); assertEquals(N+1, policy.numOnInit); - if (autoCommit) { - assertTrue(policy.numOnCommit > 1); - } else { 
- assertEquals(N+1, policy.numOnCommit); - } + assertEquals(N+1, policy.numOnCommit); // Simplistic check: just verify only the past N segments_N's still // exist, and, I can open a reader on each: @@ -559,22 +542,21 @@ final int N = 10; - for(int pass=0;pass<4;pass++) { + for(int pass=0;pass<2;pass++) { - boolean autoCommit = pass < 2; - boolean useCompoundFile = (pass % 2) > 0; + boolean useCompoundFile = (pass % 2) != 0; KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N); Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.UNLIMITED); writer.setUseCompoundFile(useCompoundFile); writer.close(); Term searchTerm = new Term("content", "aaa"); Query query = new TermQuery(searchTerm); for(int i=0;i<N+1;i++) { [...] - if (i > 1) { - if (i % 2 == 0) { - expectedCount += 1; - } else { - expectedCount -= 17; - } + searcher = new IndexSearcher(reader); + hits = searcher.search(query, null, 1000).scoreDocs; + if (i > 1) { + if (i % 2 == 0) { + expectedCount += 1; + } else { + expectedCount -= 17; } - assertEquals(expectedCount, hits.length); - searcher.close(); } + assertEquals(expectedCount, hits.length); + searcher.close(); reader.close(); if (i == N) { fail("should have failed on commits before last 5"); @@ -659,15 +638,14 @@ final int N = 10; - for(int pass=0;pass<4;pass++) { + for(int pass=0;pass<2;pass++) { - boolean autoCommit = pass < 2; - boolean useCompoundFile = (pass % 2) > 0; + boolean useCompoundFile = (pass % 2) != 0; KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N); Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.UNLIMITED); writer.setMaxBufferedDocs(10); writer.setUseCompoundFile(useCompoundFile); writer.close(); @@ -676,7 +654,7 @@ for(int i=0;i<N+1;i++) { [...] Index: src/java/org/apache/lucene/index/IndexWriter.java =================================================================== [...] - * Constructs an IndexWriter for the index in path. - * Text will be analyzed with a. If create - * is true, then a new, empty index will be created in - * path, replacing the index already there, if any. * - * @param path the path to the index directory - * @param a the analyzer to use - * @param create true to create the index or overwrite - * the existing one; false to append to the existing - * index - * @throws CorruptIndexException if the index is corrupt - * @throws LockObtainFailedException if another writer - * has this index open (write.lock could not - * be obtained) - * @throws IOException if the directory cannot be read/written to, or - * if it does not exist and create is - * false or if there is any other low-level - * IO error - * @deprecated This constructor will be removed in the 3.0 release. - * Use {@link - * #IndexWriter(Directory,Analyzer,boolean,MaxFieldLength)} - * instead, and call {@link #commit()} when needed. - */ - public IndexWriter(String path, Analyzer a, boolean create) - throws CorruptIndexException, LockObtainFailedException, IOException { - init(FSDirectory.getDirectory(path), a, create, true, null, true, DEFAULT_MAX_FIELD_LENGTH, null, null); - } - - /** - * Constructs an IndexWriter for the index in path. - * Text will be analyzed with a. If create - * is true, then a new, empty index will be created in - * path, replacing the index already there, if any. - * *

    NOTE: autoCommit (see above) is set to false with this * constructor. @@ -1008,35 +979,6 @@ } /** - * Constructs an IndexWriter for the index in path. - * Text will be analyzed with a. If create - * is true, then a new, empty index will be created in - * path, replacing the index already there, if any. - * - * @param path the path to the index directory - * @param a the analyzer to use - * @param create true to create the index or overwrite - * the existing one; false to append to the existing - * index - * @throws CorruptIndexException if the index is corrupt - * @throws LockObtainFailedException if another writer - * has this index open (write.lock could not - * be obtained) - * @throws IOException if the directory cannot be read/written to, or - * if it does not exist and create is - * false or if there is any other low-level - * IO error - * @deprecated This constructor will be removed in the 3.0 release. - * Use {@link - * #IndexWriter(Directory,Analyzer,boolean,MaxFieldLength)} - * instead, and call {@link #commit()} when needed. - */ - public IndexWriter(File path, Analyzer a, boolean create) - throws CorruptIndexException, LockObtainFailedException, IOException { - init(FSDirectory.getDirectory(path), a, create, true, null, true, DEFAULT_MAX_FIELD_LENGTH, null, null); - } - - /** * Constructs an IndexWriter for the index in d. * Text will be analyzed with a. If create * is true, then a new, empty index will be created in @@ -1068,34 +1010,6 @@ } /** - * Constructs an IndexWriter for the index in d. - * Text will be analyzed with a. If create - * is true, then a new, empty index will be created in - * d, replacing the index already there, if any. - * - * @param d the index directory - * @param a the analyzer to use - * @param create true to create the index or overwrite - * the existing one; false to append to the existing - * index - * @throws CorruptIndexException if the index is corrupt - * @throws LockObtainFailedException if another writer - * has this index open (write.lock could not - * be obtained) - * @throws IOException if the directory cannot be read/written to, or - * if it does not exist and create is - * false or if there is any other low-level - * IO error - * @deprecated This constructor will be removed in the 3.0 - * release, and call {@link #commit()} when needed. - * Use {@link #IndexWriter(Directory,Analyzer,boolean,MaxFieldLength)} instead. - */ - public IndexWriter(Directory d, Analyzer a, boolean create) - throws CorruptIndexException, LockObtainFailedException, IOException { - init(d, a, create, false, null, true, DEFAULT_MAX_FIELD_LENGTH, null, null); - } - - /** * Constructs an IndexWriter for the index in * path, first creating it if it does not * already exist. Text will be analyzed with @@ -1129,30 +1043,6 @@ * already exist. Text will be analyzed with * a. * - * @param path the path to the index directory - * @param a the analyzer to use - * @throws CorruptIndexException if the index is corrupt - * @throws LockObtainFailedException if another writer - * has this index open (write.lock could not - * be obtained) - * @throws IOException if the directory cannot be - * read/written to or if there is any other low-level - * IO error - * @deprecated This constructor will be removed in the 3.0 - * release, and call {@link #commit()} when needed. - * Use {@link #IndexWriter(Directory,Analyzer,MaxFieldLength)} instead. 
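Every @deprecated note in the hunks above points at the same caller-side migration. A minimal sketch of what it looks like against Lucene 2.9, assuming only what this patch shows; the class name, index path and empty document are placeholders, not code from the patch:

    import java.io.File;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    public class MigrationSketch {
      public static void main(String[] args) throws Exception {
        // before (deprecated): new IndexWriter("/path/to/index", new StandardAnalyzer(), true)
        // opened the directory internally and committed automatically on each flush
        Directory dir = FSDirectory.open(new File("/path/to/index"));
        IndexWriter w = new IndexWriter(dir, new StandardAnalyzer(), true,
                                        IndexWriter.MaxFieldLength.UNLIMITED);
        w.addDocument(new Document());
        w.commit(); // commits are explicit now; close() also commits
        w.close();
      }
    }

The test changes in this patch follow the same pattern, inserting explicit commit() calls where autoCommit used to publish flushed segments automatically.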
- */ - public IndexWriter(String path, Analyzer a) - throws CorruptIndexException, LockObtainFailedException, IOException { - init(FSDirectory.getDirectory(path), a, true, null, true, DEFAULT_MAX_FIELD_LENGTH, null, null); - } - - /** - * Constructs an IndexWriter for the index in - * path, first creating it if it does not - * already exist. Text will be analyzed with - * a. - * *

    NOTE: autoCommit (see above) is set to false with this * constructor. @@ -1178,30 +1068,6 @@ /** * Constructs an IndexWriter for the index in - * path, first creating it if it does not - * already exist. Text will be analyzed with - * a. - * - * @param path the path to the index directory - * @param a the analyzer to use - * @throws CorruptIndexException if the index is corrupt - * @throws LockObtainFailedException if another writer - * has this index open (write.lock could not - * be obtained) - * @throws IOException if the directory cannot be - * read/written to or if there is any other low-level - * IO error - * @deprecated This constructor will be removed in the 3.0 release. - * Use {@link #IndexWriter(Directory,Analyzer,MaxFieldLength)} - * instead, and call {@link #commit()} when needed. - */ - public IndexWriter(File path, Analyzer a) - throws CorruptIndexException, LockObtainFailedException, IOException { - init(FSDirectory.getDirectory(path), a, true, null, true, DEFAULT_MAX_FIELD_LENGTH, null, null); - } - - /** - * Constructs an IndexWriter for the index in * d, first creating it if it does not * already exist. Text will be analyzed with * a. @@ -1228,87 +1094,6 @@ } /** - * Constructs an IndexWriter for the index in - * d, first creating it if it does not - * already exist. Text will be analyzed with - * a. - * - * @param d the index directory - * @param a the analyzer to use - * @throws CorruptIndexException if the index is corrupt - * @throws LockObtainFailedException if another writer - * has this index open (write.lock could not - * be obtained) - * @throws IOException if the directory cannot be - * read/written to or if there is any other low-level - * IO error - * @deprecated This constructor will be removed in the 3.0 release. - * Use {@link - * #IndexWriter(Directory,Analyzer,MaxFieldLength)} - * instead, and call {@link #commit()} when needed. - */ - public IndexWriter(Directory d, Analyzer a) - throws CorruptIndexException, LockObtainFailedException, IOException { - init(d, a, false, null, true, DEFAULT_MAX_FIELD_LENGTH, null, null); - } - - /** - * Constructs an IndexWriter for the index in - * d, first creating it if it does not - * already exist. Text will be analyzed with - * a. - * - * @param d the index directory - * @param autoCommit see above - * @param a the analyzer to use - * @throws CorruptIndexException if the index is corrupt - * @throws LockObtainFailedException if another writer - * has this index open (write.lock could not - * be obtained) - * @throws IOException if the directory cannot be - * read/written to or if there is any other low-level - * IO error - * @deprecated This constructor will be removed in the 3.0 release. - * Use {@link - * #IndexWriter(Directory,Analyzer,MaxFieldLength)} - * instead, and call {@link #commit()} when needed. - */ - public IndexWriter(Directory d, boolean autoCommit, Analyzer a) - throws CorruptIndexException, LockObtainFailedException, IOException { - init(d, a, false, null, autoCommit, DEFAULT_MAX_FIELD_LENGTH, null, null); - } - - /** - * Constructs an IndexWriter for the index in d. - * Text will be analyzed with a. If create - * is true, then a new, empty index will be created in - * d, replacing the index already there, if any. 
- * - * @param d the index directory - * @param autoCommit see above - * @param a the analyzer to use - * @param create true to create the index or overwrite - * the existing one; false to append to the existing - * index - * @throws CorruptIndexException if the index is corrupt - * @throws LockObtainFailedException if another writer - * has this index open (write.lock could not - * be obtained) - * @throws IOException if the directory cannot be read/written to, or - * if it does not exist and create is - * false or if there is any other low-level - * IO error - * @deprecated This constructor will be removed in the 3.0 release. - * Use {@link - * #IndexWriter(Directory,Analyzer,boolean,MaxFieldLength)} - * instead, and call {@link #commit()} when needed. - */ - public IndexWriter(Directory d, boolean autoCommit, Analyzer a, boolean create) - throws CorruptIndexException, LockObtainFailedException, IOException { - init(d, a, create, false, null, autoCommit, DEFAULT_MAX_FIELD_LENGTH, null, null); - } - - /** * Expert: constructs an IndexWriter with a custom {@link * IndexDeletionPolicy}, for the index in d, * first creating it if it does not already exist. Text @@ -1337,33 +1122,6 @@ /** * Expert: constructs an IndexWriter with a custom {@link - * IndexDeletionPolicy}, for the index in d, - * first creating it if it does not already exist. Text - * will be analyzed with a. - * - * @param d the index directory - * @param autoCommit see above - * @param a the analyzer to use - * @param deletionPolicy see above - * @throws CorruptIndexException if the index is corrupt - * @throws LockObtainFailedException if another writer - * has this index open (write.lock could not - * be obtained) - * @throws IOException if the directory cannot be - * read/written to or if there is any other low-level - * IO error - * @deprecated This constructor will be removed in the 3.0 release. - * Use {@link - * #IndexWriter(Directory,Analyzer,IndexDeletionPolicy,MaxFieldLength)} - * instead, and call {@link #commit()} when needed. - */ - public IndexWriter(Directory d, boolean autoCommit, Analyzer a, IndexDeletionPolicy deletionPolicy) - throws CorruptIndexException, LockObtainFailedException, IOException { - init(d, a, false, deletionPolicy, autoCommit, DEFAULT_MAX_FIELD_LENGTH, null, null); - } - - /** - * Expert: constructs an IndexWriter with a custom {@link * IndexDeletionPolicy}, for the index in d. * Text will be analyzed with a. If * create is true, then a new, empty index @@ -1433,39 +1191,6 @@ } /** - * Expert: constructs an IndexWriter with a custom {@link - * IndexDeletionPolicy}, for the index in d. - * Text will be analyzed with a. If - * create is true, then a new, empty index - * will be created in d, replacing the index - * already there, if any. - * - * @param d the index directory - * @param autoCommit see above - * @param a the analyzer to use - * @param create true to create the index or overwrite - * the existing one; false to append to the existing - * index - * @param deletionPolicy see above - * @throws CorruptIndexException if the index is corrupt - * @throws LockObtainFailedException if another writer - * has this index open (write.lock could not - * be obtained) - * @throws IOException if the directory cannot be read/written to, or - * if it does not exist and create is - * false or if there is any other low-level - * IO error - * @deprecated This constructor will be removed in the 3.0 release. 
- * Use {@link - * #IndexWriter(Directory,Analyzer,boolean,IndexDeletionPolicy,MaxFieldLength)} - * instead, and call {@link #commit()} when needed. - */ - public IndexWriter(Directory d, boolean autoCommit, Analyzer a, boolean create, IndexDeletionPolicy deletionPolicy) - throws CorruptIndexException, LockObtainFailedException, IOException { - init(d, a, create, false, deletionPolicy, autoCommit, DEFAULT_MAX_FIELD_LENGTH, null, null); - } - - /** * Expert: constructs an IndexWriter on specific commit * point, with a custom {@link IndexDeletionPolicy}, for * the index in d. Text will be analyzed
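The deletion-policy variants removed here migrate the same way: the policy argument survives, the autoCommit flag does not. A rough sketch under the same assumptions as above (hypothetical class name, placeholder RAMDirectory and document):

    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;

    public class PolicySketch {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), true,
                                        new KeepOnlyLastCommitDeletionPolicy(),
                                        IndexWriter.MaxFieldLength.UNLIMITED);
        w.addDocument(new Document());
        // with autoCommit gone, the policy's onCommit() fires only at explicit
        // commit() and at close(), which is what the updated tests assert
        w.commit();
        w.close();
      }
    }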