Index: CHANGES.txt =================================================================== --- CHANGES.txt (revision 698225) +++ CHANGES.txt (working copy) @@ -67,7 +67,8 @@ 1. LUCENE-1084: Changed all IndexWriter constructors to take an explicit parameter for maximum field size. Deprecated all the pre-existing constructors; these will be removed in release 3.0. - (Steven Rowe via Mike McCandless) + NOTE: these new constructors set autoCommit to false. (Steven + Rowe via Mike McCandless) 2. LUCENE-584: Changed Filter API to return a DocIdSet instead of a java.util.BitSet. This allows using more efficient data structures Index: src/test/org/apache/lucene/TestMergeSchedulerExternal.java =================================================================== --- src/test/org/apache/lucene/TestMergeSchedulerExternal.java (revision 698225) +++ src/test/org/apache/lucene/TestMergeSchedulerExternal.java (working copy) @@ -91,7 +91,7 @@ Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED); doc.add(idField); - IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); MyMergeScheduler ms = new MyMergeScheduler(); writer.setMergeScheduler(ms); writer.setMaxBufferedDocs(2); Index: src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java =================================================================== --- src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java (revision 698225) +++ src/test/org/apache/lucene/TestSnapshotDeletionPolicy.java (working copy) @@ -67,8 +67,7 @@ Directory dir = new MockRAMDirectory(); SnapshotDeletionPolicy dp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()); - IndexWriter writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp, - IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, true,new StandardAnalyzer(), dp); // Force frequent commits writer.setMaxBufferedDocs(2); Document doc = new Document(); @@ -80,8 +79,7 @@ writer.close(); copyFiles(dir, cp); - writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp, - IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp); copyFiles(dir, cp); for(int i=0;i<7;i++) writer.addDocument(doc); @@ -89,8 +87,7 @@ writer.close(); copyFiles(dir, cp); dp.release(); - writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp, - IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp); writer.close(); try { copyFiles(dir, cp); @@ -106,8 +103,7 @@ final long stopTime = System.currentTimeMillis() + 7000; SnapshotDeletionPolicy dp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()); - final IndexWriter writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp, - IndexWriter.MaxFieldLength.LIMITED); + final IndexWriter writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp); // Force frequent commits writer.setMaxBufferedDocs(2); Index: src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java =================================================================== --- src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java (revision 698225) +++ src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java (working copy) @@ -63,7 +63,7 @@ FailOnlyOnFlush failure = new FailOnlyOnFlush(); directory.failOn(failure); - IndexWriter writer = new 
IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(directory, true, ANALYZER, true); ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler(); writer.setMergeScheduler(cms); writer.setMaxBufferedDocs(2); @@ -100,7 +100,7 @@ RAMDirectory directory = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(directory, ANALYZER, true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(directory, true, ANALYZER, true); ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler(); writer.setMergeScheduler(cms); @@ -145,7 +145,7 @@ for(int pass=0;pass<2;pass++) { boolean autoCommit = pass==0; - IndexWriter writer = new IndexWriter(directory, autoCommit, ANALYZER, true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(directory, autoCommit, ANALYZER, true); for(int iter=0;iter<7;iter++) { ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler(); @@ -162,7 +162,7 @@ TestIndexWriter.assertNoUnreferencedFiles(directory, "testNoExtraFiles autoCommit=" + autoCommit); // Reopen - writer = new IndexWriter(directory, autoCommit, ANALYZER, false, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(directory, autoCommit, ANALYZER, false); } writer.close(); @@ -180,7 +180,7 @@ for(int pass=0;pass<2;pass++) { boolean autoCommit = pass==0; - IndexWriter writer = new IndexWriter(directory, autoCommit, ANALYZER, true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(directory, autoCommit, ANALYZER, true); for(int iter=0;iter<10;iter++) { ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler(); @@ -212,7 +212,7 @@ reader.close(); // Reopen - writer = new IndexWriter(directory, autoCommit, ANALYZER, false, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(directory, autoCommit, ANALYZER, false); } writer.close(); } Index: src/test/org/apache/lucene/index/TestThreadedOptimize.java =================================================================== --- src/test/org/apache/lucene/index/TestThreadedOptimize.java (revision 698225) +++ src/test/org/apache/lucene/index/TestThreadedOptimize.java (working copy) @@ -53,7 +53,7 @@ public void runTest(Directory directory, boolean autoCommit, MergeScheduler merger) throws Exception { - IndexWriter writer = new IndexWriter(directory, autoCommit, ANALYZER, true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(directory, autoCommit, ANALYZER, true); writer.setMaxBufferedDocs(2); if (merger != null) writer.setMergeScheduler(merger); @@ -120,7 +120,7 @@ if (!autoCommit) { writer.close(); - writer = new IndexWriter(directory, autoCommit, ANALYZER, false, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(directory, autoCommit, ANALYZER, false); writer.setMaxBufferedDocs(2); } Index: src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java =================================================================== --- src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java (revision 698225) +++ src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java (working copy) @@ -125,7 +125,7 @@ public void testMaxBufferedDocsChange() throws IOException { Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), true); writer.setMaxBufferedDocs(101); writer.setMergeFactor(101); 
writer.setMergePolicy(new LogDocMergePolicy()); @@ -139,7 +139,7 @@ } writer.close(); - writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), false); writer.setMaxBufferedDocs(101); writer.setMergeFactor(101); writer.setMergePolicy(new LogDocMergePolicy()); @@ -167,7 +167,7 @@ public void testMergeDocCount0() throws IOException { Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), true); writer.setMergePolicy(new LogDocMergePolicy()); writer.setMaxBufferedDocs(10); writer.setMergeFactor(100); @@ -182,7 +182,7 @@ reader.deleteDocuments(new Term("content", "aaa")); reader.close(); - writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), false); writer.setMergePolicy(new LogDocMergePolicy()); writer.setMaxBufferedDocs(10); writer.setMergeFactor(5); Index: src/test/org/apache/lucene/index/TestAtomicUpdate.java =================================================================== --- src/test/org/apache/lucene/index/TestAtomicUpdate.java (revision 698225) +++ src/test/org/apache/lucene/index/TestAtomicUpdate.java (working copy) @@ -33,8 +33,8 @@ public class MockIndexWriter extends IndexWriter { - public MockIndexWriter(Directory dir, boolean autoCommit, Analyzer a, boolean create, MaxFieldLength mfl) throws IOException { - super(dir, autoCommit, a, create, mfl); + public MockIndexWriter(Directory dir, boolean autoCommit, Analyzer a, boolean create) throws IOException { + super(dir, autoCommit, a, create); } boolean testPoint(String name) { @@ -125,7 +125,7 @@ TimedThread[] threads = new TimedThread[4]; - IndexWriter writer = new MockIndexWriter(directory, true, ANALYZER, true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new MockIndexWriter(directory, true, ANALYZER, true); writer.setMaxBufferedDocs(7); writer.setMergeFactor(3); Index: src/test/org/apache/lucene/index/TestIndexWriterDelete.java =================================================================== --- src/test/org/apache/lucene/index/TestIndexWriterDelete.java (revision 698225) +++ src/test/org/apache/lucene/index/TestIndexWriterDelete.java (working copy) @@ -45,7 +45,7 @@ Directory dir = new MockRAMDirectory(); IndexWriter modifier = new IndexWriter(dir, autoCommit, - new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + new WhitespaceAnalyzer(), true); modifier.setUseCompoundFile(true); modifier.setMaxBufferedDeleteTerms(1); @@ -85,7 +85,7 @@ Directory dir = new MockRAMDirectory(); IndexWriter modifier = new IndexWriter(dir, autoCommit, - new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + new WhitespaceAnalyzer(), true); modifier.setMaxBufferedDocs(2); modifier.setMaxBufferedDeleteTerms(2); @@ -123,7 +123,7 @@ boolean autoCommit = (0==pass); Directory dir = new MockRAMDirectory(); IndexWriter writer = new IndexWriter(dir, autoCommit, - new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + new WhitespaceAnalyzer(), true); writer.setMaxBufferedDeleteTerms(1); writer.deleteDocuments(new Term("foobar", "1")); writer.deleteDocuments(new Term("foobar", "1")); @@ -141,7 +141,7 @@ boolean autoCommit = (0==pass); Directory dir = new MockRAMDirectory(); IndexWriter modifier = new 
IndexWriter(dir, autoCommit, - new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + new WhitespaceAnalyzer(), true); modifier.setMaxBufferedDocs(4); modifier.setMaxBufferedDeleteTerms(4); @@ -187,7 +187,7 @@ Directory dir = new MockRAMDirectory(); IndexWriter modifier = new IndexWriter(dir, autoCommit, - new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + new WhitespaceAnalyzer(), true); modifier.setMaxBufferedDocs(100); modifier.setMaxBufferedDeleteTerms(100); @@ -223,7 +223,7 @@ boolean autoCommit = (0==pass); Directory dir = new MockRAMDirectory(); IndexWriter modifier = new IndexWriter(dir, autoCommit, - new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + new WhitespaceAnalyzer(), true); modifier.setMaxBufferedDocs(2); modifier.setMaxBufferedDeleteTerms(2); @@ -307,7 +307,7 @@ // First build up a starting index: MockRAMDirectory startDir = new MockRAMDirectory(); IndexWriter writer = new IndexWriter(startDir, autoCommit, - new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + new WhitespaceAnalyzer(), true); for (int i = 0; i < 157; i++) { Document d = new Document(); d.add(new Field("id", Integer.toString(i), Field.Store.YES, @@ -330,7 +330,7 @@ MockRAMDirectory dir = new MockRAMDirectory(startDir); dir.setPreventDoubleWrite(false); IndexWriter modifier = new IndexWriter(dir, autoCommit, - new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); + new WhitespaceAnalyzer()); modifier.setMaxBufferedDocs(1000); // use flush or close modifier.setMaxBufferedDeleteTerms(1000); // use flush or close @@ -537,7 +537,7 @@ boolean autoCommit = (0==pass); MockRAMDirectory dir = new MockRAMDirectory(); IndexWriter modifier = new IndexWriter(dir, autoCommit, - new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + new WhitespaceAnalyzer(), true); modifier.setUseCompoundFile(true); modifier.setMaxBufferedDeleteTerms(2); @@ -647,7 +647,7 @@ boolean autoCommit = (0==pass); MockRAMDirectory dir = new MockRAMDirectory(); IndexWriter modifier = new IndexWriter(dir, autoCommit, - new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + new WhitespaceAnalyzer(), true); dir.failOn(failure.reset()); Index: src/test/org/apache/lucene/index/TestStressIndexing2.java =================================================================== --- src/test/org/apache/lucene/index/TestStressIndexing2.java (revision 698225) +++ src/test/org/apache/lucene/index/TestStressIndexing2.java (working copy) @@ -40,8 +40,8 @@ public class MockIndexWriter extends IndexWriter { - public MockIndexWriter(Directory dir, boolean autoCommit, Analyzer a, boolean create, MaxFieldLength mfl) throws IOException { - super(dir, autoCommit, a, create, mfl); + public MockIndexWriter(Directory dir, boolean autoCommit, Analyzer a, boolean create) throws IOException { + super(dir, autoCommit, a, create); } boolean testPoint(String name) { @@ -103,7 +103,7 @@ public Map indexRandom(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException { Map docs = new HashMap(); for(int iter=0;iter<3;iter++) { - IndexWriter w = new MockIndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter w = new MockIndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true); w.setUseCompoundFile(false); /*** Index: src/test/org/apache/lucene/index/TestIndexWriter.java =================================================================== --- 
src/test/org/apache/lucene/index/TestIndexWriter.java (revision 698225) +++ src/test/org/apache/lucene/index/TestIndexWriter.java (working copy) @@ -104,7 +104,7 @@ reader.close(); // optimize the index and check that the new doc count is correct - writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, true, new WhitespaceAnalyzer()); assertEquals(100, writer.maxDoc()); assertEquals(60, writer.numDocs()); writer.optimize(); @@ -241,7 +241,7 @@ // Make a new dir that will enforce disk usage: MockRAMDirectory dir = new MockRAMDirectory(startDir); - writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false); IOException err = null; MergeScheduler ms = writer.getMergeScheduler(); @@ -478,7 +478,7 @@ System.out.println("TEST: cycle: diskFree=" + diskFree); MockRAMDirectory dir = new MockRAMDirectory(); dir.setMaxSizeInBytes(diskFree); - IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true); MergeScheduler ms = writer.getMergeScheduler(); if (ms instanceof ConcurrentMergeScheduler) @@ -1027,7 +1027,7 @@ IndexReader reader = IndexReader.open(dir); - writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); for(int i=0;i<3;i++) { for(int j=0;j<11;j++) { addDoc(writer); @@ -1073,7 +1073,7 @@ assertEquals("first number of hits", 14, hits.length); searcher.close(); - writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED); writer.setMaxBufferedDocs(10); for(int j=0;j<17;j++) { addDoc(writer); @@ -1098,7 +1098,7 @@ // Now make sure we can re-open the index, add docs, // and all is good: - writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED); writer.setMaxBufferedDocs(10); // On abort, writer in fact may write to the same @@ -1141,7 +1141,7 @@ dir.resetMaxUsedSizeInBytes(); long startDiskUsage = dir.getMaxUsedSizeInBytes(); - writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED); writer.setMaxBufferedDocs(10); writer.setMergeScheduler(new SerialMergeScheduler()); for(int j=0;j<1470;j++) { @@ -1183,7 +1183,7 @@ } writer.close(); - writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED); writer.optimize(); // Open a reader before closing (commiting) the writer: @@ -1205,7 +1205,7 @@ assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized()); reader.close(); - writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED); writer.optimize(); 
writer.close(); assertNoUnreferencedFiles(dir, "aborted writer after optimize"); @@ -2130,7 +2130,7 @@ MockRAMDirectory dir = new MockRAMDirectory(); int delID = 0; for(int i=0;i<20;i++) { - IndexWriter writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); writer.setMaxBufferedDocs(2); writer.setMergeFactor(2); writer.setUseCompoundFile(false); @@ -2166,7 +2166,7 @@ reader.close(); if (0 == i % 4) { - writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); writer.setUseCompoundFile(false); writer.optimize(); writer.close(); @@ -2183,7 +2183,7 @@ for(int pass=0;pass<3;pass++) { boolean autoCommit = pass%2 == 0; - IndexWriter writer = new IndexWriter(directory, autoCommit, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(directory, autoCommit, new WhitespaceAnalyzer(), true); //System.out.println("TEST: pass=" + pass + " ac=" + autoCommit + " cms=" + (pass >= 2)); for(int iter=0;iter<10;iter++) { @@ -2261,7 +2261,7 @@ reader.close(); // Reopen - writer = new IndexWriter(directory, autoCommit, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(directory, autoCommit, new WhitespaceAnalyzer(), false); } writer.close(); } @@ -2513,7 +2513,7 @@ public void _testSingleThreadFailure(MockRAMDirectory.Failure failure) throws IOException { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer()); writer.setMaxBufferedDocs(2); final Document doc = new Document(); doc.add(new Field("field", "aaa bbb ccc ddd eee fff ggg hhh iii jjj", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); @@ -2774,7 +2774,7 @@ public void testForceCommit() throws IOException { Directory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); writer.setMaxBufferedDocs(2); writer.setMergeFactor(5); @@ -2827,7 +2827,7 @@ FailOnlyInSync failure = new FailOnlyInSync(); dir.failOn(failure); - IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer()); failure.setDoFail(); ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler(); @@ -2858,8 +2858,7 @@ for(int iter=0;iter<4;iter++) { final boolean autoCommit = 1==iter/2; IndexWriter writer = new IndexWriter(dir, - autoCommit, new StandardAnalyzer(), - IndexWriter.MaxFieldLength.LIMITED); + autoCommit, new StandardAnalyzer()); writer.setMaxBufferedDocs(2); writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH); writer.setMergeScheduler(new SerialMergeScheduler()); @@ -2892,8 +2891,7 @@ reader.close(); writer = new IndexWriter(dir, - autoCommit, new StandardAnalyzer(), - IndexWriter.MaxFieldLength.LIMITED); + autoCommit, new StandardAnalyzer()); writer.setMaxBufferedDocs(2); writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH); writer.setMergeScheduler(new 
SerialMergeScheduler()); @@ -2912,8 +2910,7 @@ for(int iter=0;iter<4;iter++) { final boolean autoCommit = 1==iter/2; IndexWriter writer = new IndexWriter(dir, - autoCommit, new StandardAnalyzer(), - IndexWriter.MaxFieldLength.LIMITED); + autoCommit, new StandardAnalyzer()); writer.setMaxBufferedDocs(2); writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH); writer.setMergeScheduler(new SerialMergeScheduler()); @@ -2950,7 +2947,7 @@ public void testTermVectorCorruption3() throws IOException { Directory dir = new MockRAMDirectory(); IndexWriter writer = new IndexWriter(dir, - false, new StandardAnalyzer(), + new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); writer.setMaxBufferedDocs(2); writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH); @@ -2972,7 +2969,7 @@ writer.close(); writer = new IndexWriter(dir, - false, new StandardAnalyzer(), + new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); writer.setMaxBufferedDocs(2); writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH); @@ -3020,7 +3017,7 @@ public void testExpungeDeletes() throws IOException { Directory dir = new MockRAMDirectory(); IndexWriter writer = new IndexWriter(dir, - false, new StandardAnalyzer(), + new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); writer.setMaxBufferedDocs(2); writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH); @@ -3048,7 +3045,7 @@ ir.close(); writer = new IndexWriter(dir, - false, new StandardAnalyzer(), + new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); assertEquals(8, writer.numDocs()); assertEquals(10, writer.maxDoc()); @@ -3066,7 +3063,7 @@ public void testExpungeDeletes2() throws IOException { Directory dir = new MockRAMDirectory(); IndexWriter writer = new IndexWriter(dir, - false, new StandardAnalyzer(), + new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); writer.setMaxBufferedDocs(2); writer.setMergeFactor(50); @@ -3095,7 +3092,7 @@ ir.close(); writer = new IndexWriter(dir, - false, new StandardAnalyzer(), + new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); writer.setMergeFactor(3); assertEquals(49, writer.numDocs()); @@ -3113,7 +3110,7 @@ public void testExpungeDeletes3() throws IOException { Directory dir = new MockRAMDirectory(); IndexWriter writer = new IndexWriter(dir, - false, new StandardAnalyzer(), + new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); writer.setMaxBufferedDocs(2); writer.setMergeFactor(50); @@ -3142,7 +3139,7 @@ ir.close(); writer = new IndexWriter(dir, - false, new StandardAnalyzer(), + new StandardAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); // Force many merges to happen writer.setMergeFactor(3); @@ -3168,8 +3165,8 @@ // LUCENE-1198 public class MockIndexWriter extends IndexWriter { - public MockIndexWriter(Directory dir, boolean autoCommit, Analyzer a, boolean create, MaxFieldLength mfl) throws IOException { - super(dir, autoCommit, a, create, mfl); + public MockIndexWriter(Directory dir, Analyzer a, boolean create, MaxFieldLength mfl) throws IOException { + super(dir, a, create, mfl); } boolean doFail; @@ -3183,7 +3180,7 @@ public void testExceptionDocumentsWriterInit() throws IOException { MockRAMDirectory dir = new MockRAMDirectory(); - MockIndexWriter w = new MockIndexWriter(dir, false, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); + MockIndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); Document doc = new Document(); doc.add(new Field("field", "a field", Field.Store.YES, 
Field.Index.ANALYZED)); @@ -3203,7 +3200,7 @@ // LUCENE-1208 public void testExceptionJustBeforeFlush() throws IOException { MockRAMDirectory dir = new MockRAMDirectory(); - MockIndexWriter w = new MockIndexWriter(dir, false, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); + MockIndexWriter w = new MockIndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); w.setMaxBufferedDocs(2); Document doc = new Document(); doc.add(new Field("field", "a field", Field.Store.YES, @@ -3232,8 +3229,8 @@ public class MockIndexWriter2 extends IndexWriter { - public MockIndexWriter2(Directory dir, boolean autoCommit, Analyzer a, boolean create, MaxFieldLength mfl) throws IOException { - super(dir, autoCommit, a, create, mfl); + public MockIndexWriter2(Directory dir, Analyzer a, boolean create, MaxFieldLength mfl) throws IOException { + super(dir, a, create, mfl); } boolean doFail; @@ -3251,7 +3248,7 @@ // LUCENE-1210 public void testExceptionOnMergeInit() throws IOException { MockRAMDirectory dir = new MockRAMDirectory(); - MockIndexWriter2 w = new MockIndexWriter2(dir, false, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); + MockIndexWriter2 w = new MockIndexWriter2(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); w.setMaxBufferedDocs(2); w.setMergeFactor(2); w.doFail = true; @@ -3274,8 +3271,8 @@ public class MockIndexWriter3 extends IndexWriter { - public MockIndexWriter3(Directory dir, boolean autoCommit, Analyzer a, boolean create, IndexWriter.MaxFieldLength mfl) throws IOException { - super(dir, autoCommit, a, create, mfl); + public MockIndexWriter3(Directory dir, Analyzer a, boolean create, IndexWriter.MaxFieldLength mfl) throws IOException { + super(dir, a, create, mfl); } boolean wasCalled; @@ -3288,7 +3285,7 @@ // LUCENE-1222 public void testDoAfterFlush() throws IOException { MockRAMDirectory dir = new MockRAMDirectory(); - MockIndexWriter3 w = new MockIndexWriter3(dir, false, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + MockIndexWriter3 w = new MockIndexWriter3(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); Document doc = new Document(); doc.add(new Field("field", "a field", Field.Store.YES, Field.Index.ANALYZED)); @@ -3340,7 +3337,7 @@ public void testExceptionsDuringCommit() throws Throwable { MockRAMDirectory dir = new MockRAMDirectory(); FailOnlyInCommit failure = new FailOnlyInCommit(); - IndexWriter w = new IndexWriter(dir, false, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); Document doc = new Document(); doc.add(new Field("field", "a field", Field.Store.YES, Field.Index.ANALYZED)); @@ -3388,7 +3385,7 @@ // LUCENE-510 public void testInvalidUTF16() throws Throwable { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(dir, false, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); Document doc = new Document(); final int count = utf8Data.length/2; @@ -3592,7 +3589,7 @@ tokens.add(t); MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(dir, false, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), true, 
IndexWriter.MaxFieldLength.UNLIMITED); Document doc = new Document(); doc.add(new Field("field", tokens)); w.addDocument(doc); @@ -3624,7 +3621,7 @@ public void testPrepareCommit() throws IOException { Directory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); writer.setMaxBufferedDocs(2); writer.setMergeFactor(5); @@ -3676,7 +3673,7 @@ MockRAMDirectory dir = new MockRAMDirectory(); dir.setPreventDoubleWrite(false); - IndexWriter writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); writer.setMaxBufferedDocs(2); writer.setMergeFactor(5); @@ -3701,7 +3698,7 @@ reader.close(); reader2.close(); - writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); for (int i = 0; i < 17; i++) addDoc(writer); @@ -3729,7 +3726,7 @@ public void testPrepareCommitNoChanges() throws IOException { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); writer.prepareCommit(); writer.commit(); writer.close(); @@ -3756,14 +3753,14 @@ public RunAddIndexesThreads(int numCopy) throws Throwable { NUM_COPY = numCopy; dir = new MockRAMDirectory(); - IndexWriter writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); writer.setMaxBufferedDocs(2); for (int i = 0; i < NUM_INIT_DOCS; i++) addDoc(writer); writer.close(); dir2 = new MockRAMDirectory(); - writer2 = new IndexWriter(dir2, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); + writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); cms = (ConcurrentMergeScheduler) writer2.getMergeScheduler(); readers = new IndexReader[NUM_COPY]; @@ -4015,8 +4012,8 @@ // LUCENE-1347 public class MockIndexWriter4 extends IndexWriter { - public MockIndexWriter4(Directory dir, boolean autoCommit, Analyzer a, boolean create, MaxFieldLength mfl) throws IOException { - super(dir, autoCommit, a, create, mfl); + public MockIndexWriter4(Directory dir, Analyzer a, boolean create, MaxFieldLength mfl) throws IOException { + super(dir, a, create, mfl); } boolean doFail; @@ -4031,7 +4028,7 @@ // LUCENE-1347 public void testRollbackExceptionHang() throws Throwable { MockRAMDirectory dir = new MockRAMDirectory(); - MockIndexWriter4 w = new MockIndexWriter4(dir, false, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); + MockIndexWriter4 w = new MockIndexWriter4(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); addDoc(w); w.doFail = true; @@ -4050,7 +4047,7 @@ // LUCENE-1219 public void testBinaryFieldOffsetLength() throws IOException { MockRAMDirectory dir = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(dir, false, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), 
true, IndexWriter.MaxFieldLength.UNLIMITED); byte[] b = new byte[50]; for(int i=0;i<50;i++) b[i] = (byte) (i+77); @@ -4121,7 +4118,7 @@ public void testOptimizeExceptions() throws IOException { RAMDirectory startDir = new MockRAMDirectory(); - IndexWriter w = new IndexWriter(startDir, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED); + IndexWriter w = new IndexWriter(startDir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED); w.setMaxBufferedDocs(2); w.setMergeFactor(100); for(int i=0;i<27;i++) @@ -4130,7 +4127,7 @@ for(int i=0;i<200;i++) { MockRAMDirectory dir = new MockRAMDirectory(startDir); - w = new IndexWriter(dir, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED); + w = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED); ((ConcurrentMergeScheduler) w.getMergeScheduler()).setSuppressExceptions(); dir.setRandomIOExceptionRate(0.5, 100); try { Index: src/test/org/apache/lucene/index/TestIndexWriterExceptions.java =================================================================== --- src/test/org/apache/lucene/index/TestIndexWriterExceptions.java (revision 698225) +++ src/test/org/apache/lucene/index/TestIndexWriterExceptions.java (working copy) @@ -114,7 +114,7 @@ Random r = new java.util.Random(17); public MockIndexWriter(Directory dir, Analyzer a, boolean create, MaxFieldLength mfl) throws IOException { - super(dir, false, a, create, mfl); + super(dir, a, create, mfl); } boolean testPoint(String name) { Index: src/test/org/apache/lucene/index/TestTransactions.java =================================================================== --- src/test/org/apache/lucene/index/TestTransactions.java (revision 698225) +++ src/test/org/apache/lucene/index/TestTransactions.java (working copy) @@ -83,12 +83,12 @@ public void doWork() throws Throwable { - IndexWriter writer1 = new IndexWriter(dir1, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer1 = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); writer1.setMaxBufferedDocs(3); writer1.setMergeFactor(2); ((ConcurrentMergeScheduler) writer1.getMergeScheduler()).setSuppressExceptions(); - IndexWriter writer2 = new IndexWriter(dir2, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); // Intentionally use different params so flush/merge // happen @ different times writer2.setMaxBufferedDocs(2); @@ -172,7 +172,7 @@ } public void initIndex(Directory dir) throws Throwable { - IndexWriter writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); for(int j=0; j<7; j++) { Document d = new Document(); int n = RANDOM.nextInt(); Index: src/test/org/apache/lucene/index/TestStressIndexing.java =================================================================== --- src/test/org/apache/lucene/index/TestStressIndexing.java (revision 698225) +++ src/test/org/apache/lucene/index/TestStressIndexing.java (working copy) @@ -116,7 +116,7 @@ stress test. 
*/ public void runStressTest(Directory directory, boolean autoCommit, MergeScheduler mergeScheduler) throws Exception { - IndexWriter modifier = new IndexWriter(directory, autoCommit, ANALYZER, true, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter modifier = new IndexWriter(directory, autoCommit, ANALYZER, true); modifier.setMaxBufferedDocs(10); Index: src/test/org/apache/lucene/index/TestDeletionPolicy.java =================================================================== --- src/test/org/apache/lucene/index/TestDeletionPolicy.java (revision 698225) +++ src/test/org/apache/lucene/index/TestDeletionPolicy.java (working copy) @@ -206,7 +206,7 @@ Directory dir = new RAMDirectory(); ExpirationTimeDeletionPolicy policy = new ExpirationTimeDeletionPolicy(dir, SECONDS); - IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy); writer.setUseCompoundFile(useCompoundFile); writer.close(); @@ -215,7 +215,7 @@ // Record last time when writer performed deletes of // past commits lastDeleteTime = System.currentTimeMillis(); - writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy); writer.setUseCompoundFile(useCompoundFile); for(int j=0;j<17;j++) { addDoc(writer); @@ -277,7 +277,7 @@ Directory dir = new RAMDirectory(); policy.dir = dir; - IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy); writer.setMaxBufferedDocs(10); writer.setUseCompoundFile(useCompoundFile); writer.setMergeScheduler(new SerialMergeScheduler()); @@ -288,7 +288,7 @@ } writer.close(); - writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy); writer.setUseCompoundFile(useCompoundFile); writer.optimize(); writer.close(); @@ -333,7 +333,7 @@ // Open & close a writer and assert that it // actually removed something: int preCount = dir.list().length; - writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.LIMITED); writer.close(); int postCount = dir.list().length; assertTrue(postCount < preCount); @@ -359,7 +359,7 @@ Directory dir = new RAMDirectory(); - IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy, IndexWriter.MaxFieldLength.LIMITED); + IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true, policy); writer.setMaxBufferedDocs(10); writer.setUseCompoundFile(useCompoundFile); for(int i=0;i<107;i++) { @@ -367,7 +367,7 @@ } writer.close(); - writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy, IndexWriter.MaxFieldLength.LIMITED); + writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false, policy); writer.setUseCompoundFile(useCompoundFile); writer.optimize(); writer.close(); @@ -404,7 +404,7 @@ KeepLastNDeletionPolicy policy = new KeepLastNDeletionPolicy(N); for(int j=0;jWhen autoCommit is true then the writer will 
periodically commit on its own. [Deprecated: Note that in 3.0, IndexWriter will no longer accept autoCommit=true (it will be hardwired to - false). You can always call {@link IndexWriter#commit()} yourself + false). You can always call {@link #commit()} yourself when needed]. There is no guarantee when exactly an auto commit will occur (it used to be after every flush, but it is now after every @@ -523,9 +523,12 @@ * Text will be analyzed with a. If create * is true, then a new, empty index will be created in * path, replacing the index already there, - * if any. Note that autoCommit defaults to true, but - * starting in 3.0 it will be hardwired to false. + * if any. * + *
NOTE: autoCommit (see above) is set to false with this + * constructor. + * * @param path the path to the index directory * @param a the analyzer to use * @param create true to create the index or overwrite @@ -544,7 +547,7 @@ */ public IndexWriter(String path, Analyzer a, boolean create, MaxFieldLength mfl) throws CorruptIndexException, LockObtainFailedException, IOException { - init(FSDirectory.getDirectory(path), a, create, true, null, true, mfl.getLimit()); + init(FSDirectory.getDirectory(path), a, create, true, null, false, mfl.getLimit()); } /** @@ -567,7 +570,9 @@ * false or if there is any other low-level * IO error * @deprecated This constructor will be removed in the 3.0 release. - * Use {@link #IndexWriter(String,Analyzer,boolean,MaxFieldLength)} instead. + * Use {@link + * #IndexWriter(String,Analyzer,boolean,MaxFieldLength)} + * instead, and call {@link #commit()} when needed. */ public IndexWriter(String path, Analyzer a, boolean create) throws CorruptIndexException, LockObtainFailedException, IOException { @@ -579,9 +584,11 @@ * Text will be analyzed with a. If create * is true, then a new, empty index will be created in * path, replacing the index already there, if any. - * Note that autoCommit defaults to true, but starting in 3.0 - * it will be hardwired to false. * + *
NOTE: autoCommit (see above) is set to false with this + * constructor. + * * @param path the path to the index directory * @param a the analyzer to use * @param create true to create the index or overwrite @@ -600,7 +607,7 @@ */ public IndexWriter(File path, Analyzer a, boolean create, MaxFieldLength mfl) throws CorruptIndexException, LockObtainFailedException, IOException { - init(FSDirectory.getDirectory(path), a, create, true, null, true, mfl.getLimit()); + init(FSDirectory.getDirectory(path), a, create, true, null, false, mfl.getLimit()); } /** @@ -623,7 +630,9 @@ * false or if there is any other low-level * IO error * @deprecated This constructor will be removed in the 3.0 release. - * Use {@link #IndexWriter(File,Analyzer,boolean,MaxFieldLength)} instead. + * Use {@link + * #IndexWriter(File,Analyzer,boolean,MaxFieldLength)} + * instead, and call {@link #commit()} when needed. */ public IndexWriter(File path, Analyzer a, boolean create) throws CorruptIndexException, LockObtainFailedException, IOException { @@ -635,9 +644,11 @@ * Text will be analyzed with a. If create * is true, then a new, empty index will be created in * d, replacing the index already there, if any. - * Note that autoCommit defaults to true, but starting in 3.0 - * it will be hardwired to false. * + *
NOTE: autoCommit (see above) is set to false with this + * constructor. + * * @param d the index directory * @param a the analyzer to use * @param create true to create the index or overwrite @@ -656,7 +667,7 @@ */ public IndexWriter(Directory d, Analyzer a, boolean create, MaxFieldLength mfl) throws CorruptIndexException, LockObtainFailedException, IOException { - init(d, a, create, false, null, true, mfl.getLimit()); + init(d, a, create, false, null, false, mfl.getLimit()); } /** @@ -678,7 +689,8 @@ * if it does not exist and create is * false or if there is any other low-level * IO error - * @deprecated This constructor will be removed in the 3.0 release. + * @deprecated This constructor will be removed in the 3.0 + * release, and call {@link #commit()} when needed. * Use {@link #IndexWriter(Directory,Analyzer,boolean,MaxFieldLength)} instead. */ public IndexWriter(Directory d, Analyzer a, boolean create) @@ -691,9 +703,11 @@ * path, first creating it if it does not * already exist. Text will be analyzed with * a. - * Note that autoCommit defaults to true, but starting in 3.0 - * it will be hardwired to false. * + *
NOTE: autoCommit (see above) is set to false with this + * constructor. + * * @param path the path to the index directory * @param a the analyzer to use * @param mfl Maximum field length: LIMITED, UNLIMITED, or user-specified @@ -708,7 +722,7 @@ */ public IndexWriter(String path, Analyzer a, MaxFieldLength mfl) throws CorruptIndexException, LockObtainFailedException, IOException { - init(FSDirectory.getDirectory(path), a, true, null, true, mfl.getLimit()); + init(FSDirectory.getDirectory(path), a, true, null, false, mfl.getLimit()); } /** @@ -726,7 +740,8 @@ * @throws IOException if the directory cannot be * read/written to or if there is any other low-level * IO error - * @deprecated This constructor will be removed in the 3.0 release. + * @deprecated This constructor will be removed in the 3.0 + * release, and call {@link #commit()} when needed. * Use {@link #IndexWriter(String,Analyzer,MaxFieldLength)} instead. */ public IndexWriter(String path, Analyzer a) @@ -739,9 +754,11 @@ * path, first creating it if it does not * already exist. Text will be analyzed with * a. - * Note that autoCommit defaults to true, but starting in 3.0 - * it will be hardwired to false. * + *
NOTE: autoCommit (see above) is set to false with this + * constructor. + * * @param path the path to the index directory * @param a the analyzer to use * @param mfl Maximum field length: LIMITED, UNLIMITED, or user-specified @@ -756,7 +773,7 @@ */ public IndexWriter(File path, Analyzer a, MaxFieldLength mfl) throws CorruptIndexException, LockObtainFailedException, IOException { - init(FSDirectory.getDirectory(path), a, true, null, true, mfl.getLimit()); + init(FSDirectory.getDirectory(path), a, true, null, false, mfl.getLimit()); } /** @@ -775,7 +792,8 @@ * read/written to or if there is any other low-level * IO error * @deprecated This constructor will be removed in the 3.0 release. - * Use {@link #IndexWriter(File,Analyzer,MaxFieldLength)} instead. + * Use {@link #IndexWriter(File,Analyzer,MaxFieldLength)} + * instead, and call {@link #commit()} when needed. */ public IndexWriter(File path, Analyzer a) throws CorruptIndexException, LockObtainFailedException, IOException { @@ -787,9 +805,11 @@ * d, first creating it if it does not * already exist. Text will be analyzed with * a. - * Note that autoCommit defaults to true, but starting in 3.0 - * it will be hardwired to false. * + *
NOTE: autoCommit (see above) is set to false with this + * constructor. + * * @param d the index directory * @param a the analyzer to use * @param mfl Maximum field length: LIMITED, UNLIMITED, or user-specified @@ -804,7 +824,7 @@ */ public IndexWriter(Directory d, Analyzer a, MaxFieldLength mfl) throws CorruptIndexException, LockObtainFailedException, IOException { - init(d, a, false, null, true, mfl.getLimit()); + init(d, a, false, null, false, mfl.getLimit()); } /** @@ -823,7 +843,9 @@ * read/written to or if there is any other low-level * IO error * @deprecated This constructor will be removed in the 3.0 release. - * Use {@link #IndexWriter(Directory,Analyzer,MaxFieldLength)} instead. + * Use {@link + * #IndexWriter(Directory,Analyzer,MaxFieldLength)} + * instead, and call {@link #commit()} when needed. */ public IndexWriter(Directory d, Analyzer a) throws CorruptIndexException, LockObtainFailedException, IOException { @@ -839,8 +861,6 @@ * @param d the index directory * @param autoCommit see above * @param a the analyzer to use - * @param mfl Maximum field length: LIMITED, UNLIMITED, or user-specified - * via the MaxFieldLength constructor. * @throws CorruptIndexException if the index is corrupt * @throws LockObtainFailedException if another writer * has this index open (write.lock could not @@ -848,34 +868,10 @@ * @throws IOException if the directory cannot be * read/written to or if there is any other low-level * IO error - * @deprecated This will be removed in 3.0, when - * autoCommit will be hardwired to false. Use {@link - * #IndexWriter(Directory,Analyzer,MaxFieldLength)} - * instead, and call {@link #commit()} when needed. - */ - public IndexWriter(Directory d, boolean autoCommit, Analyzer a, MaxFieldLength mfl) - throws CorruptIndexException, LockObtainFailedException, IOException { - init(d, a, false, null, autoCommit, mfl.getLimit()); - } - - /** - * Constructs an IndexWriter for the index in - * d, first creating it if it does not - * already exist. Text will be analyzed with - * a. - * - * @param d the index directory - * @param autoCommit see above - * @param a the analyzer to use - * @throws CorruptIndexException if the index is corrupt - * @throws LockObtainFailedException if another writer - * has this index open (write.lock could not - * be obtained) - * @throws IOException if the directory cannot be - * read/written to or if there is any other low-level - * IO error * @deprecated This constructor will be removed in the 3.0 release. - * Use {@link #IndexWriter(Directory,Analyzer,MaxFieldLength)} instead. + * Use {@link + * #IndexWriter(Directory,Analyzer,MaxFieldLength)} + * instead, and call {@link #commit()} when needed. */ public IndexWriter(Directory d, boolean autoCommit, Analyzer a) throws CorruptIndexException, LockObtainFailedException, IOException { @@ -894,8 +890,6 @@ * @param create true to create the index or overwrite * the existing one; false to append to the existing * index - * @param mfl Maximum field length: LIMITED, UNLIMITED, or user-specified - * via the MaxFieldLength constructor. * @throws CorruptIndexException if the index is corrupt * @throws LockObtainFailedException if another writer * has this index open (write.lock could not @@ -904,38 +898,10 @@ * if it does not exist and create is * false or if there is any other low-level * IO error - * @deprecated This will be removed in 3.0, when - * autoCommit will be hardwired to false. 
Use {@link - * #IndexWriter(Directory,Analyzer,boolean,MaxFieldLength)} - * instead, and call {@link #commit()} when needed. - */ - public IndexWriter(Directory d, boolean autoCommit, Analyzer a, boolean create, MaxFieldLength mfl) - throws CorruptIndexException, LockObtainFailedException, IOException { - init(d, a, create, false, null, autoCommit, mfl.getLimit()); - } - - /** - * Constructs an IndexWriter for the index in d. - * Text will be analyzed with a. If create - * is true, then a new, empty index will be created in - * d, replacing the index already there, if any. - * - * @param d the index directory - * @param autoCommit see above - * @param a the analyzer to use - * @param create true to create the index or overwrite - * the existing one; false to append to the existing - * index - * @throws CorruptIndexException if the index is corrupt - * @throws LockObtainFailedException if another writer - * has this index open (write.lock could not - * be obtained) - * @throws IOException if the directory cannot be read/written to, or - * if it does not exist and create is - * false or if there is any other low-level - * IO error * @deprecated This constructor will be removed in the 3.0 release. - * Use {@link #IndexWriter(Directory,Analyzer,boolean,MaxFieldLength)} instead. + * Use {@link + * #IndexWriter(Directory,Analyzer,boolean,MaxFieldLength)} + * instead, and call {@link #commit()} when needed. */ public IndexWriter(Directory d, boolean autoCommit, Analyzer a, boolean create) throws CorruptIndexException, LockObtainFailedException, IOException { @@ -947,9 +913,11 @@ * IndexDeletionPolicy}, for the index in d, * first creating it if it does not already exist. Text * will be analyzed with a. - * Note that autoCommit defaults to true, but starting in 3.0 - * it will be hardwired to false. * + *
NOTE: autoCommit (see above) is set to false with this + * constructor. + * * @param d the index directory * @param a the analyzer to use * @param deletionPolicy see above @@ -964,7 +932,7 @@ */ public IndexWriter(Directory d, Analyzer a, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl) throws CorruptIndexException, LockObtainFailedException, IOException { - init(d, a, false, deletionPolicy, true, mfl.getLimit()); + init(d, a, false, deletionPolicy, false, mfl.getLimit()); } /** @@ -977,8 +945,6 @@ * @param autoCommit see above * @param a the analyzer to use * @param deletionPolicy see above - * @param mfl Maximum field length: LIMITED, UNLIMITED, or user-specified - * via the MaxFieldLength constructor. * @throws CorruptIndexException if the index is corrupt * @throws LockObtainFailedException if another writer * has this index open (write.lock could not @@ -986,35 +952,10 @@ * @throws IOException if the directory cannot be * read/written to or if there is any other low-level * IO error - * @deprecated This will be removed in 3.0, when - * autoCommit will be hardwired to false. Use {@link - * #IndexWriter(Directory,Analyzer,IndexDeletionPolicy,MaxFieldLength)} - * instead, and call {@link #commit()} when needed. - */ - public IndexWriter(Directory d, boolean autoCommit, Analyzer a, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl) - throws CorruptIndexException, LockObtainFailedException, IOException { - init(d, a, false, deletionPolicy, autoCommit, mfl.getLimit()); - } - - /** - * Expert: constructs an IndexWriter with a custom {@link - * IndexDeletionPolicy}, for the index in d, - * first creating it if it does not already exist. Text - * will be analyzed with a. - * - * @param d the index directory - * @param autoCommit see above - * @param a the analyzer to use - * @param deletionPolicy see above - * @throws CorruptIndexException if the index is corrupt - * @throws LockObtainFailedException if another writer - * has this index open (write.lock could not - * be obtained) - * @throws IOException if the directory cannot be - * read/written to or if there is any other low-level - * IO error * @deprecated This constructor will be removed in the 3.0 release. - * Use {@link #IndexWriter(Directory,Analyzer,IndexDeletionPolicy,MaxFieldLength)} instead. + * Use {@link + * #IndexWriter(Directory,Analyzer,IndexDeletionPolicy,MaxFieldLength)} + * instead, and call {@link #commit()} when needed. */ public IndexWriter(Directory d, boolean autoCommit, Analyzer a, IndexDeletionPolicy deletionPolicy) throws CorruptIndexException, LockObtainFailedException, IOException { @@ -1028,9 +969,11 @@ * create is true, then a new, empty index * will be created in d, replacing the index * already there, if any. - * Note that autoCommit defaults to true, but starting in 3.0 - * it will be hardwired to false. * + *
NOTE: autoCommit (see above) is set to false with this + * constructor. + * * @param d the index directory * @param a the analyzer to use * @param create true to create the index or overwrite @@ -1049,7 +992,7 @@ */ public IndexWriter(Directory d, Analyzer a, boolean create, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl) throws CorruptIndexException, LockObtainFailedException, IOException { - init(d, a, create, false, deletionPolicy, true, mfl.getLimit()); + init(d, a, create, false, deletionPolicy, false, mfl.getLimit()); } /** @@ -1067,8 +1010,6 @@ * the existing one; false to append to the existing * index * @param deletionPolicy see above - * @param mfl Maximum field length: LIMITED, UNLIMITED, or user-specified - * via the MaxFieldLength constructor. * @throws CorruptIndexException if the index is corrupt * @throws LockObtainFailedException if another writer * has this index open (write.lock could not @@ -1077,41 +1018,10 @@ * if it does not exist and create is * false or if there is any other low-level * IO error - * @deprecated This will be removed in 3.0, when - * autoCommit will be hardwired to false. Use {@link - * #IndexWriter(Directory,Analyzer,boolean,IndexDeletionPolicy,MaxFieldLength)} - * instead, and call {@link #commit()} when needed. - */ - public IndexWriter(Directory d, boolean autoCommit, Analyzer a, boolean create, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl) - throws CorruptIndexException, LockObtainFailedException, IOException { - init(d, a, create, false, deletionPolicy, autoCommit, mfl.getLimit()); - } - - /** - * Expert: constructs an IndexWriter with a custom {@link - * IndexDeletionPolicy}, for the index in d. - * Text will be analyzed with a. If - * create is true, then a new, empty index - * will be created in d, replacing the index - * already there, if any. - * - * @param d the index directory - * @param autoCommit see above - * @param a the analyzer to use - * @param create true to create the index or overwrite - * the existing one; false to append to the existing - * index - * @param deletionPolicy see above - * @throws CorruptIndexException if the index is corrupt - * @throws LockObtainFailedException if another writer - * has this index open (write.lock could not - * be obtained) - * @throws IOException if the directory cannot be read/written to, or - * if it does not exist and create is - * false or if there is any other low-level - * IO error * @deprecated This constructor will be removed in the 3.0 release. - * Use {@link #IndexWriter(Directory,Analyzer,boolean,IndexDeletionPolicy,MaxFieldLength)} instead. + * Use {@link + * #IndexWriter(Directory,Analyzer,boolean,IndexDeletionPolicy,MaxFieldLength)} + * instead, and call {@link #commit()} when needed. */ public IndexWriter(Directory d, boolean autoCommit, Analyzer a, boolean create, IndexDeletionPolicy deletionPolicy) throws CorruptIndexException, LockObtainFailedException, IOException { @@ -1309,7 +1219,7 @@ * documents are large, be sure to set this value high enough to accomodate * the expected size. If you set it to Integer.MAX_VALUE, then the only limit * is your memory, but you should anticipate an OutOfMemoryError.
- * By default, no more than {@link IndexWriter#DEFAULT_MAX_FIELD_LENGTH} terms + * By default, no more than {@link #DEFAULT_MAX_FIELD_LENGTH} terms * will be indexed for a field. */ public void setMaxFieldLength(int maxFieldLength) { @@ -4795,7 +4705,7 @@ /** * Specifies maximum field length in {@link IndexWriter} constructors. - * {@link IndexWriter#setMaxFieldLength(int)} overrides the value set by + * {@link #setMaxFieldLength(int)} overrides the value set by * the constructor. */ public static final class MaxFieldLength { @@ -4838,7 +4748,7 @@ /** * Sets the maximum field length to - * {@link IndexWriter#DEFAULT_MAX_FIELD_LENGTH} + * {@link #DEFAULT_MAX_FIELD_LENGTH} * */ public static final MaxFieldLength LIMITED = new MaxFieldLength("LIMITED", DEFAULT_MAX_FIELD_LENGTH); Index: contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/OpenIndexTask.java =================================================================== --- contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/OpenIndexTask.java (revision 698225) +++ contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/OpenIndexTask.java (working copy) @@ -50,7 +50,7 @@ IndexWriter writer = new IndexWriter(runData.getDirectory(), config.get("autocommit", DEFAULT_AUTO_COMMIT), runData.getAnalyzer(), - false, IndexWriter.MaxFieldLength.LIMITED); + false); CreateIndexTask.setIndexWriterConfig(writer, config); runData.setIndexWriter(writer); return 1; Index: contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/CreateIndexTask.java =================================================================== --- contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/CreateIndexTask.java (revision 698225) +++ contrib/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/CreateIndexTask.java (working copy) @@ -97,8 +97,7 @@ Config config = runData.getConfig(); IndexWriter writer = new IndexWriter(runData.getDirectory(), runData.getConfig().get("autocommit", OpenIndexTask.DEFAULT_AUTO_COMMIT), - runData.getAnalyzer(), - true, IndexWriter.MaxFieldLength.LIMITED); + runData.getAnalyzer()); CreateIndexTask.setIndexWriterConfig(writer, config); runData.setIndexWriter(writer); return 1;
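For reference, a minimal usage sketch of the behaviour change this patch documents, not part of the patch itself. The class name AutoCommitMigrationSketch and the sample field values are illustrative assumptions; the IndexWriter constructor taking MaxFieldLength, commit(), RAMDirectory, WhitespaceAnalyzer, and Field.Index.NOT_ANALYZED are the stock 2.4 APIs already referenced in the hunks above. With the MaxFieldLength constructors, autoCommit is false, so buffered changes only become visible to readers after commit() or close():

import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class AutoCommitMigrationSketch {
  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory();

    // Deprecated style (removed in 3.0): autoCommit=true let readers see
    // changes without an explicit commit.
    // IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), true);

    // New style: the MaxFieldLength constructors run with autoCommit=false,
    // so nothing is visible to readers until commit() or close() is called.
    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true,
                                         IndexWriter.MaxFieldLength.LIMITED);
    Document doc = new Document();
    doc.add(new Field("id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
    writer.addDocument(doc);

    writer.commit();                       // make the pending document visible
    IndexReader reader = IndexReader.open(dir);
    System.out.println(reader.numDocs());  // prints 1
    reader.close();

    writer.close();                        // close() also commits
    dir.close();
  }
}

This is also why several test hunks above switch to the deprecated constructors that take an explicit autoCommit flag rather than to the MaxFieldLength variants: those tests still exercise the old periodic-commit behaviour and must keep passing autoCommit explicitly until the flag is removed in 3.0.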