Index: lucene/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java	(revision 1132419)
+++ lucene/src/test/org/apache/lucene/search/TestMultiValuedNumericRangeQuery.java	(working copy)
@@ -46,7 +46,7 @@
     
     DecimalFormat format = new DecimalFormat("00000000000", new DecimalFormatSymbols(Locale.US));
     
-    int num = 5000 * RANDOM_MULTIPLIER;
+    int num = (TEST_NIGHTLY ? 5000 : 500) * RANDOM_MULTIPLIER;
     for (int l = 0; l < num; l++) {
       Document doc = new Document();
       for (int m=0, c=random.nextInt(10); m<=c; m++) {
Index: lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java	(revision 1132419)
+++ lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery32.java	(working copy)
@@ -44,7 +44,7 @@
   // shift the starting of the values to the left, to also have negative values:
   private static final int startOffset = - 1 << 15;
   // number of docs to generate for testing
-  private static final int noDocs = 10000 * RANDOM_MULTIPLIER;
+  private static final int noDocs = (TEST_NIGHTLY ? 10000 : 5000) * RANDOM_MULTIPLIER;
   
   private static Directory directory = null;
   private static IndexReader reader = null;
Index: lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java	(revision 1132419)
+++ lucene/src/test/org/apache/lucene/search/TestNumericRangeQuery64.java	(working copy)
@@ -41,7 +41,7 @@
   // shift the starting of the values to the left, to also have negative values:
   private static final long startOffset = - 1L << 31;
   // number of docs to generate for testing
-  private static final int noDocs = 10000 * RANDOM_MULTIPLIER;
+  private static final int noDocs = (TEST_NIGHTLY ? 10000 : 5000) * RANDOM_MULTIPLIER;
   
   private static Directory directory = null;
   private static IndexReader reader = null;
Index: lucene/src/test/org/apache/lucene/index/TestIndexWriterCommit.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestIndexWriterCommit.java	(revision 0)
+++ lucene/src/test/org/apache/lucene/index/TestIndexWriterCommit.java	(revision 0)
@@ -0,0 +1,675 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.io.Reader;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockFixedLengthPayloadFilter;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+public class TestIndexWriterCommit extends LuceneTestCase {
+  /*
+   * Simple test for "commit on close": open writer, then
+   * add a bunch of docs, making sure reader does not see
+   * these docs until writer is closed.
+   */
+  public void testCommitOnClose() throws IOException {
+      Directory dir = newDirectory();
+      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+      for (int i = 0; i < 14; i++) {
+        TestIndexWriter.addDoc(writer);
+      }
+      writer.close();
+
+      Term searchTerm = new Term("content", "aaa");
+      IndexSearcher searcher = new IndexSearcher(dir, false);
+      ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+      assertEquals("first number of hits", 14, hits.length);
+      searcher.close();
+
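+      // Hold a point-in-time reader on the current commit; it should remain current until the writer commits again.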
+      IndexReader reader = IndexReader.open(dir, true);
+
+      writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+      for(int i=0;i<3;i++) {
+        for(int j=0;j<11;j++) {
+          TestIndexWriter.addDoc(writer);
+        }
+        searcher = new IndexSearcher(dir, false);
+        hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+        assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
+        searcher.close();
+        assertTrue("reader should have still been current", reader.isCurrent());
+      }
+
+      // Now, close the writer:
+      writer.close();
+      assertFalse("reader should not be current now", reader.isCurrent());
+
+      searcher = new IndexSearcher(dir, false);
+      hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+      assertEquals("reader did not see changes after writer was closed", 47, hits.length);
+      searcher.close();
+      reader.close();
+      dir.close();
+  }
+
+  /*
+   * Simple test for "commit on close": open writer, then
+   * add a bunch of docs, making sure reader does not see
+   * them until writer has closed.  Then instead of
+   * closing the writer, call abort and verify reader sees
+   * nothing was added.  Then verify we can open the index
+   * and add docs to it.
+   */
+  public void testCommitOnCloseAbort() throws IOException {
+    MockDirectoryWrapper dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(10));
+    for (int i = 0; i < 14; i++) {
+      TestIndexWriter.addDoc(writer);
+    }
+    writer.close();
+
+    Term searchTerm = new Term("content", "aaa");
+    IndexSearcher searcher = new IndexSearcher(dir, false);
+    ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+    assertEquals("first number of hits", 14, hits.length);
+    searcher.close();
+
+    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))
+      .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
+    for(int j=0;j<17;j++) {
+      TestIndexWriter.addDoc(writer);
+    }
+    // Delete all docs:
+    writer.deleteDocuments(searchTerm);
+
+    searcher = new IndexSearcher(dir, false);
+    hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+    assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
+    searcher.close();
+
+    // Now, rollback (rather than close) the writer:
+    writer.rollback();
+
+    TestIndexWriter.assertNoUnreferencedFiles(dir, "unreferenced files remain after rollback()");
+
+    searcher = new IndexSearcher(dir, false);
+    hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+    assertEquals("saw changes after writer.abort", 14, hits.length);
+    searcher.close();
+
+    // Now make sure we can re-open the index, add docs,
+    // and all is good:
+    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))
+      .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
+
+    // On abort, writer in fact may write to the same
+    // segments_N file:
+    dir.setPreventDoubleWrite(false);
+
+    for(int i=0;i<12;i++) {
+      for(int j=0;j<17;j++) {
+        TestIndexWriter.addDoc(writer);
+      }
+      searcher = new IndexSearcher(dir, false);
+      hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+      assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
+      searcher.close();
+    }
+
+    writer.close();
+    searcher = new IndexSearcher(dir, false);
+    hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+    assertEquals("didn't see changes after close", 218, hits.length);
+    searcher.close();
+
+    dir.close();
+  }
+
+  /*
+   * Verify that a writer with "commit on close" indeed
+   * cleans up the temp segments created after opening
+   * that are not referenced by the starting segments
+   * file.  We check this by using MockDirectoryWrapper to
+   * measure max temp disk space used.
+   */
+  public void testCommitOnCloseDiskUsage() throws IOException {
+    MockDirectoryWrapper dir = newDirectory();
+    Analyzer analyzer;
+    if (random.nextBoolean()) {
+      // no payloads
+      analyzer = new Analyzer() {
+        @Override
+        public TokenStream tokenStream(String fieldName, Reader reader) {
+          return new MockTokenizer(reader, MockTokenizer.WHITESPACE, true);
+        }
+      };
+    } else {
+      // fixed length payloads
+      final int length = random.nextInt(200);
+      analyzer = new Analyzer() {
+        @Override
+        public TokenStream tokenStream(String fieldName, Reader reader) {
+          return new MockFixedLengthPayloadFilter(random,
+              new MockTokenizer(reader, MockTokenizer.WHITESPACE, true),
+              length);
+        }
+      };
+    }
+    
+    IndexWriter writer  = new IndexWriter(
+        dir,
+        newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer).
+            setMaxBufferedDocs(10).
+            setReaderPooling(false).
+            setMergePolicy(newLogMergePolicy(10))
+    );
+    for(int j=0;j<30;j++) {
+      TestIndexWriter.addDocWithIndex(writer, j);
+    }
+    writer.close();
+    dir.resetMaxUsedSizeInBytes();
+
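+    // Track peak disk usage from here on, so we can bound the transient space used while adding docs and optimizing.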
+    dir.setTrackDiskUsage(true);
+    long startDiskUsage = dir.getMaxUsedSizeInBytes();
+    writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer).
+            setOpenMode(OpenMode.APPEND).
+            setMaxBufferedDocs(10).
+            setMergeScheduler(new SerialMergeScheduler()).
+            setReaderPooling(false).
+            setMergePolicy(newLogMergePolicy(10))
+    );
+    for(int j=0;j<1470;j++) {
+      TestIndexWriter.addDocWithIndex(writer, j);
+    }
+    long midDiskUsage = dir.getMaxUsedSizeInBytes();
+    dir.resetMaxUsedSizeInBytes();
+    writer.optimize();
+    writer.close();
+
+    IndexReader.open(dir, true).close();
+
+    long endDiskUsage = dir.getMaxUsedSizeInBytes();
+
+    // The ending index is ~50X as large as the starting
+    // index; since normal disk usage is ~3X, we allow up
+    // to 150X max transient usage.  If something is wrong
+    // w/ the deleter and it fails to delete intermediate
+    // segments, usage will exceed this 150X bound:
+    // System.out.println("start " + startDiskUsage + "; mid " + midDiskUsage + ";end " + endDiskUsage);
+    assertTrue("writer used too much space while adding documents: mid=" + midDiskUsage + " start=" + startDiskUsage + " end=" + endDiskUsage + " max=" + (startDiskUsage*150),
+               midDiskUsage < 150*startDiskUsage);
+    assertTrue("writer used too much space after close: endDiskUsage=" + endDiskUsage + " startDiskUsage=" + startDiskUsage + " max=" + (startDiskUsage*150),
+               endDiskUsage < 150*startDiskUsage);
+    dir.close();
+  }
+
+
+  /*
+   * Verify that calling optimize when writer is open for
+   * "commit on close" works correctly both for rollback()
+   * and close().
+   */
+  public void testCommitOnCloseOptimize() throws IOException {
+    MockDirectoryWrapper dir = newDirectory();
+    // Must disable throwing exc on double-write: this
+    // test uses IW.rollback which easily results in
+    // writing to same file more than once
+    dir.setPreventDoubleWrite(false);
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMaxBufferedDocs(10).
+            setMergePolicy(newLogMergePolicy(10))
+    );
+    for(int j=0;j<17;j++) {
+      TestIndexWriter.addDocWithIndex(writer, j);
+    }
+    writer.close();
+
+    writer  = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+    writer.optimize();
+
+    if (VERBOSE) {
+      writer.setInfoStream(System.out);
+    }
+
+    // Open a reader before closing (committing) the writer:
+    IndexReader reader = IndexReader.open(dir, true);
+
+    // Reader should see index as unoptimized at this
+    // point:
+    assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
+    reader.close();
+
+    // Abort the writer:
+    writer.rollback();
+    TestIndexWriter.assertNoUnreferencedFiles(dir, "aborted writer after optimize");
+
+    // Open a reader after aborting writer:
+    reader = IndexReader.open(dir, true);
+
+    // Reader should still see index as unoptimized:
+    assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
+    reader.close();
+
+    if (VERBOSE) {
+      System.out.println("TEST: do real optimize");
+    }
+    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+    if (VERBOSE) {
+      writer.setInfoStream(System.out);
+    }
+    writer.optimize();
+    writer.close();
+
+    if (VERBOSE) {
+      System.out.println("TEST: writer closed");
+    }
+    TestIndexWriter.assertNoUnreferencedFiles(dir, "aborted writer after optimize");
+
+    // Open a reader after closing the writer:
+    reader = IndexReader.open(dir, true);
+
+    // Reader should now see the index as optimized:
+    assertTrue("Reader incorrectly sees that the index is unoptimized", reader.isOptimized());
+    reader.close();
+    dir.close();
+  }
+  
+  // LUCENE-2095: make sure with multiple threads commit
+  // doesn't return until all changes are in fact in the
+  // index
+  public void testCommitThreadSafety() throws Throwable {
+    final int NUM_THREADS = 5;
+    final double RUN_SEC = 0.5;
+    final Directory dir = newDirectory();
+    final RandomIndexWriter w = new RandomIndexWriter(random, dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
+            .setMergePolicy(newLogMergePolicy()));
+    _TestUtil.reduceOpenFiles(w.w);
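+    // Make an initial commit so every thread can open an IndexReader right away.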
+    w.commit();
+    final AtomicBoolean failed = new AtomicBoolean();
+    Thread[] threads = new Thread[NUM_THREADS];
+    final long endTime = System.currentTimeMillis()+((long) (RUN_SEC*1000));
+    for(int i=0;i<NUM_THREADS;i++) {
+      final int finalI = i;
+      threads[i] = new Thread() {
+          @Override
+          public void run() {
+            try {
+              final Document doc = new Document();
+              IndexReader r = IndexReader.open(dir);
+              Field f = newField("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
+              doc.add(f);
+              int count = 0;
+              do {
+                if (failed.get()) break;
+                for(int j=0;j<10;j++) {
+                  final String s = finalI + "_" + String.valueOf(count++);
+                  f.setValue(s);
+                  w.addDocument(doc);
+                  w.commit();
+                  IndexReader r2 = r.reopen();
+                  assertTrue(r2 != r);
+                  r.close();
+                  r = r2;
+                  assertEquals("term=f:" + s + "; r=" + r, 1, r.docFreq(new Term("f", s)));
+                }
+              } while(System.currentTimeMillis() < endTime);
+              r.close();
+            } catch (Throwable t) {
+              failed.set(true);
+              throw new RuntimeException(t);
+            }
+          }
+        };
+      threads[i].start();
+    }
+    for(int i=0;i<NUM_THREADS;i++) {
+      threads[i].join();
+    }
+    assertFalse(failed.get());
+    w.close();
+    dir.close();
+  }
+
+  // LUCENE-1044: test writer.commit() when autoCommit=false
+  public void testForceCommit() throws IOException {
+    Directory dir = newDirectory();
+
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMaxBufferedDocs(2).
+            setMergePolicy(newLogMergePolicy(5))
+    );
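+    // Commit the empty index so IndexReader.open below succeeds; a newly created writer does not commit by itself (see testNoCommits).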
+    writer.commit();
+
+    for (int i = 0; i < 23; i++)
+      TestIndexWriter.addDoc(writer);
+
+    IndexReader reader = IndexReader.open(dir, true);
+    assertEquals(0, reader.numDocs());
+    writer.commit();
+    IndexReader reader2 = reader.reopen();
+    assertEquals(0, reader.numDocs());
+    assertEquals(23, reader2.numDocs());
+    reader.close();
+
+    for (int i = 0; i < 17; i++)
+      TestIndexWriter.addDoc(writer);
+    assertEquals(23, reader2.numDocs());
+    reader2.close();
+    reader = IndexReader.open(dir, true);
+    assertEquals(23, reader.numDocs());
+    reader.close();
+    writer.commit();
+
+    reader = IndexReader.open(dir, true);
+    assertEquals(40, reader.numDocs());
+    reader.close();
+    writer.close();
+    dir.close();
+  }
+  
+  public void testFutureCommit() throws Exception {
+    Directory dir = newDirectory();
+
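+    // NoDeletionPolicy keeps every commit point, so earlier commits remain openable via their "tag" user data.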
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE));
+    Document doc = new Document();
+    w.addDocument(doc);
+
+    // commit to "first"
+    Map<String,String> commitData = new HashMap<String,String>();
+    commitData.put("tag", "first");
+    w.commit(commitData);
+
+    // commit to "second"
+    w.addDocument(doc);
+    commitData.put("tag", "second");
+    w.commit(commitData);
+    w.close();
+
+    // open "first" with IndexWriter
+    IndexCommit commit = null;
+    for(IndexCommit c : IndexReader.listCommits(dir)) {
+      if (c.getUserData().get("tag").equals("first")) {
+        commit = c;
+        break;
+      }
+    }
+
+    assertNotNull(commit);
+
+    w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE).setIndexCommit(commit));
+
+    assertEquals(1, w.numDocs());
+
+    // commit IndexWriter to "third"
+    w.addDocument(doc);
+    commitData.put("tag", "third");
+    w.commit(commitData);
+    w.close();
+
+    // make sure "second" commit is still there
+    commit = null;
+    for(IndexCommit c : IndexReader.listCommits(dir)) {
+      if (c.getUserData().get("tag").equals("second")) {
+        commit = c;
+        break;
+      }
+    }
+
+    assertNotNull(commit);
+
+    IndexReader r = IndexReader.open(commit, true);
+    assertEquals(2, r.numDocs());
+    r.close();
+
+    // open "second", w/ writeable IndexReader & commit
+    r = IndexReader.open(commit, NoDeletionPolicy.INSTANCE, false);
+    assertEquals(2, r.numDocs());
+    r.deleteDocument(0);
+    r.deleteDocument(1);
+    commitData.put("tag", "fourth");
+    r.commit(commitData);
+    r.close();
+
+    // make sure "third" commit is still there
+    commit = null;
+    for(IndexCommit c : IndexReader.listCommits(dir)) {
+      if (c.getUserData().get("tag").equals("third")) {
+        commit = c;
+        break;
+      }
+    }
+    assertNotNull(commit);
+
+    dir.close();
+  }
+  
+  public void testNoCommits() throws Exception {
+    // Tests that if we don't call commit(), the directory has 0 commits. This
+    // changed with LUCENE-2386; before that, IW would always commit on a
+    // newly created index.
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    try {
+      IndexReader.listCommits(dir);
+      fail("listCommits should have thrown an exception over empty index");
+    } catch (IndexNotFoundException e) {
+      // that's expected !
+    }
+    // Closing with no changes should still create a commit, because the index is new.
+    writer.close();
+    assertEquals("expected 1 commits!", 1, IndexReader.listCommits(dir).size());
+    dir.close();
+  }
+  
+  // LUCENE-1274: test writer.prepareCommit()
+  public void testPrepareCommit() throws IOException {
+    Directory dir = newDirectory();
+
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMaxBufferedDocs(2).
+            setMergePolicy(newLogMergePolicy(5))
+    );
+    writer.commit();
+
+    for (int i = 0; i < 23; i++)
+      TestIndexWriter.addDoc(writer);
+
+    IndexReader reader = IndexReader.open(dir, true);
+    assertEquals(0, reader.numDocs());
+
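+    // Phase one of two-phase commit: the new segments file is written but stays invisible to readers until commit().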
+    writer.prepareCommit();
+
+    IndexReader reader2 = IndexReader.open(dir, true);
+    assertEquals(0, reader2.numDocs());
+
+    writer.commit();
+
+    IndexReader reader3 = reader.reopen();
+    assertEquals(0, reader.numDocs());
+    assertEquals(0, reader2.numDocs());
+    assertEquals(23, reader3.numDocs());
+    reader.close();
+    reader2.close();
+
+    for (int i = 0; i < 17; i++)
+      TestIndexWriter.addDoc(writer);
+
+    assertEquals(23, reader3.numDocs());
+    reader3.close();
+    reader = IndexReader.open(dir, true);
+    assertEquals(23, reader.numDocs());
+    reader.close();
+
+    writer.prepareCommit();
+
+    reader = IndexReader.open(dir, true);
+    assertEquals(23, reader.numDocs());
+    reader.close();
+
+    writer.commit();
+    reader = IndexReader.open(dir, true);
+    assertEquals(40, reader.numDocs());
+    reader.close();
+    writer.close();
+    dir.close();
+  }
+
+  // LUCENE-1274: test writer.prepareCommit()
+  public void testPrepareCommitRollback() throws IOException {
+    MockDirectoryWrapper dir = newDirectory();
+    dir.setPreventDoubleWrite(false);
+
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMaxBufferedDocs(2).
+            setMergePolicy(newLogMergePolicy(5))
+    );
+    writer.commit();
+
+    for (int i = 0; i < 23; i++)
+      TestIndexWriter.addDoc(writer);
+
+    IndexReader reader = IndexReader.open(dir, true);
+    assertEquals(0, reader.numDocs());
+
+    writer.prepareCommit();
+
+    IndexReader reader2 = IndexReader.open(dir, true);
+    assertEquals(0, reader2.numDocs());
+
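+    // rollback() discards the prepared-but-uncommitted changes.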
+    writer.rollback();
+
+    IndexReader reader3 = reader.reopen();
+    assertEquals(0, reader.numDocs());
+    assertEquals(0, reader2.numDocs());
+    assertEquals(0, reader3.numDocs());
+    reader.close();
+    reader2.close();
+
+    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    for (int i = 0; i < 17; i++)
+      TestIndexWriter.addDoc(writer);
+
+    assertEquals(0, reader3.numDocs());
+    reader3.close();
+    reader = IndexReader.open(dir, true);
+    assertEquals(0, reader.numDocs());
+    reader.close();
+
+    writer.prepareCommit();
+
+    reader = IndexReader.open(dir, true);
+    assertEquals(0, reader.numDocs());
+    reader.close();
+
+    writer.commit();
+    reader = IndexReader.open(dir, true);
+    assertEquals(17, reader.numDocs());
+    reader.close();
+    writer.close();
+    dir.close();
+  }
+
+  // LUCENE-1274
+  public void testPrepareCommitNoChanges() throws IOException {
+    Directory dir = newDirectory();
+
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    writer.prepareCommit();
+    writer.commit();
+    writer.close();
+
+    IndexReader reader = IndexReader.open(dir, true);
+    assertEquals(0, reader.numDocs());
+    reader.close();
+    dir.close();
+  }
+  
+  // LUCENE-1382
+  public void testCommitUserData() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
+    for(int j=0;j<17;j++)
+      TestIndexWriter.addDoc(w);
+    w.close();
+
+    assertEquals(0, IndexReader.getCommitUserData(dir).size());
+
+    IndexReader r = IndexReader.open(dir, true);
+    // commit(Map) never called for this index
+    assertEquals(0, r.getCommitUserData().size());
+    r.close();
+
+    w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
+    for(int j=0;j<17;j++)
+      TestIndexWriter.addDoc(w);
+    Map<String,String> data = new HashMap<String,String>();
+    data.put("label", "test1");
+    w.commit(data);
+    w.close();
+
+    assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
+
+    r = IndexReader.open(dir, true);
+    assertEquals("test1", r.getCommitUserData().get("label"));
+    r.close();
+
+    w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    w.optimize();
+    w.close();
+
+    assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
+
+    dir.close();
+  }
+}

Property changes on: lucene/src/test/org/apache/lucene/index/TestIndexWriterCommit.java
___________________________________________________________________
Added: svn:eol-style
   + native

Index: lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java	(revision 1132419)
+++ lucene/src/test/org/apache/lucene/index/TestDocsAndPositions.java	(working copy)
@@ -57,7 +57,7 @@
     IndexReader reader = writer.getReader();
     writer.close();
 
-    for (int i = 0; i < 39 * RANDOM_MULTIPLIER; i++) {
+    for (int i = 0; i < (TEST_NIGHTLY ? 39 : 13) * RANDOM_MULTIPLIER; i++) {
       BytesRef bytes = new BytesRef("1");
       ReaderContext topReaderContext = reader.getTopReaderContext();
       AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
@@ -120,7 +120,8 @@
       Document doc = new Document();
       ArrayList<Integer> positions = new ArrayList<Integer>();
       StringBuilder builder = new StringBuilder();
-      for (int j = 0; j < 3049; j++) {
+      int num = TEST_NIGHTLY ? 3049 : 499;
+      for (int j = 0; j < num; j++) {
         int nextInt = random.nextInt(max);
         builder.append(nextInt).append(" ");
         if (nextInt == term) {
@@ -129,7 +130,7 @@
       }
       if (positions.size() == 0) {
         builder.append(term);
-        positions.add(3049);
+        positions.add(num);
       }
       doc.add(newField(fieldName, builder.toString(), Field.Store.YES,
           Field.Index.ANALYZED));
@@ -140,7 +141,7 @@
     IndexReader reader = writer.getReader();
     writer.close();
 
-    for (int i = 0; i < 39 * RANDOM_MULTIPLIER; i++) {
+    for (int i = 0; i < (TEST_NIGHTLY ? 39 : 13) * RANDOM_MULTIPLIER; i++) {
       BytesRef bytes = new BytesRef("" + term);
       ReaderContext topReaderContext = reader.getTopReaderContext();
       AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
@@ -214,7 +215,7 @@
     IndexReader reader = writer.getReader();
     writer.close();
 
-    for (int i = 0; i < 39 * RANDOM_MULTIPLIER; i++) {
+    for (int i = 0; i < (TEST_NIGHTLY ? 39 : 13) * RANDOM_MULTIPLIER; i++) {
       BytesRef bytes = new BytesRef("" + term);
       ReaderContext topReaderContext = reader.getTopReaderContext();
       AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
@@ -290,7 +291,7 @@
     IndexReader reader = writer.getReader();
     writer.close();
 
-    for (int i = 0; i < 39 * RANDOM_MULTIPLIER; i++) {
+    for (int i = 0; i < (TEST_NIGHTLY ? 39 : 13) * RANDOM_MULTIPLIER; i++) {
       BytesRef bytes = new BytesRef("even");
 
       ReaderContext topReaderContext = reader.getTopReaderContext();
Index: lucene/src/test/org/apache/lucene/index/TestIndexWriterOptimize.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestIndexWriterOptimize.java	(revision 0)
+++ lucene/src/test/org/apache/lucene/index/TestIndexWriterOptimize.java	(revision 0)
@@ -0,0 +1,217 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.Field.TermVector;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+public class TestIndexWriterOptimize extends LuceneTestCase {
+  public void testOptimizeMaxNumSegments() throws IOException {
+
+    MockDirectoryWrapper dir = newDirectory();
+
+    final Document doc = new Document();
+    doc.add(newField("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
+    final int incrMin = TEST_NIGHTLY ? 15 : 40;
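+    // Nightly runs use a smaller increment, so the loop below covers more doc counts.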
+    for(int numDocs=10;numDocs<500;numDocs += _TestUtil.nextInt(random, incrMin, 5*incrMin)) {
+      LogDocMergePolicy ldmp = new LogDocMergePolicy();
+      ldmp.setMinMergeDocs(1);
+      ldmp.setMergeFactor(5);
+      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(2).setMergePolicy(
+            ldmp));
+      for(int j=0;j<numDocs;j++)
+        writer.addDocument(doc);
+      writer.close();
+
+      SegmentInfos sis = new SegmentInfos();
+      sis.read(dir);
+      final int segCount = sis.size();
+
+      ldmp = new LogDocMergePolicy();
+      ldmp.setMergeFactor(5);
+      writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT,
+        new MockAnalyzer(random)).setMergePolicy(ldmp));
+      writer.optimize(3);
+      writer.close();
+
+      sis = new SegmentInfos();
+      sis.read(dir);
+      final int optSegCount = sis.size();
+
+      if (segCount < 3)
+        assertEquals(segCount, optSegCount);
+      else
+        assertEquals(3, optSegCount);
+    }
+    dir.close();
+  }
+
+  public void testOptimizeMaxNumSegments2() throws IOException {
+    MockDirectoryWrapper dir = newDirectory();
+
+    final Document doc = new Document();
+    doc.add(newField("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
+
+    LogDocMergePolicy ldmp = new LogDocMergePolicy();
+    ldmp.setMinMergeDocs(1);
+    ldmp.setMergeFactor(4);
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+      TEST_VERSION_CURRENT, new MockAnalyzer(random))
+      .setMaxBufferedDocs(2).setMergePolicy(ldmp).setMergeScheduler(new ConcurrentMergeScheduler()));
+
+    for(int iter=0;iter<10;iter++) {
+      for(int i=0;i<19;i++)
+        writer.addDocument(doc);
+
+      writer.commit();
+      writer.waitForMerges();
+      writer.commit();
+
+      SegmentInfos sis = new SegmentInfos();
+      sis.read(dir);
+
+      final int segCount = sis.size();
+
+      writer.optimize(7);
+      writer.commit();
+      writer.waitForMerges();
+
+      sis = new SegmentInfos();
+      sis.read(dir);
+      final int optSegCount = sis.size();
+
+      if (segCount < 7)
+        assertEquals(segCount, optSegCount);
+      else
+        assertEquals(7, optSegCount);
+    }
+    writer.close();
+    dir.close();
+  }
+
+  /**
+   * Make sure optimize doesn't use more than 4X the
+   * starting index size as its temporary free space
+   * (the bound asserted below).
+   */
+  public void testOptimizeTempSpaceUsage() throws IOException {
+
+    MockDirectoryWrapper dir = newDirectory();
+    IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(10).setMergePolicy(newLogMergePolicy()));
+    if (VERBOSE) {
+      System.out.println("TEST: config1=" + writer.getConfig());
+    }
+
+    for(int j=0;j<500;j++) {
+      TestIndexWriter.addDocWithIndex(writer, j);
+    }
+    final int termIndexInterval = writer.getConfig().getTermIndexInterval();
+    // force one extra segment w/ different doc store so
+    // we see the doc stores get merged
+    writer.commit();
+    TestIndexWriter.addDocWithIndex(writer, 500);
+    writer.close();
+
+    if (VERBOSE) {
+      System.out.println("TEST: start disk usage");
+    }
+    long startDiskUsage = 0;
+    String[] files = dir.listAll();
+    for(int i=0;i<files.length;i++) {
+      startDiskUsage += dir.fileLength(files[i]);
+      if (VERBOSE) {
+        System.out.println(files[i] + ": " + dir.fileLength(files[i]));
+      }
+    }
+
+    dir.resetMaxUsedSizeInBytes();
+    dir.setTrackDiskUsage(true);
+
+    // Important to use the same term index interval, else a
+    // smaller one here could increase the disk usage and
+    // cause a false failure:
+    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND).setTermIndexInterval(termIndexInterval).setMergePolicy(newLogMergePolicy()));
+    writer.setInfoStream(VERBOSE ? System.out : null);
+    writer.optimize();
+    writer.close();
+    long maxDiskUsage = dir.getMaxUsedSizeInBytes();
+    assertTrue("optimize used too much temporary space: starting usage was " + startDiskUsage + " bytes; max temp usage was " + maxDiskUsage + " but should have been " + (4*startDiskUsage) + " (= 4X starting usage)",
+               maxDiskUsage <= 4*startDiskUsage);
+    dir.close();
+  }
+  
+  // Test calling optimize(false), whereby the optimize is
+  // kicked off but we don't wait for it to finish
+  // (writer.close(), however, does wait)
+  public void testBackgroundOptimize() throws IOException {
+
+    Directory dir = newDirectory();
+    for(int pass=0;pass<2;pass++) {
+      IndexWriter writer = new IndexWriter(
+          dir,
+          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+              setOpenMode(OpenMode.CREATE).
+              setMaxBufferedDocs(2).
+              setMergePolicy(newLogMergePolicy(51))
+      );
+      Document doc = new Document();
+      doc.add(newField("field", "aaa", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
+      for(int i=0;i<100;i++)
+        writer.addDocument(doc);
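+      // Kick off the optimize without waiting for it to finish (doWait=false); writer.close() below will wait.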
+      writer.optimize(false);
+
+      if (0 == pass) {
+        writer.close();
+        IndexReader reader = IndexReader.open(dir, true);
+        assertTrue(reader.isOptimized());
+        reader.close();
+      } else {
+        // Get another segment to flush so we can verify it is
+        // NOT included in the optimization
+        writer.addDocument(doc);
+        writer.addDocument(doc);
+        writer.close();
+
+        IndexReader reader = IndexReader.open(dir, true);
+        assertTrue(!reader.isOptimized());
+        reader.close();
+
+        SegmentInfos infos = new SegmentInfos();
+        infos.read(dir);
+        assertEquals(2, infos.size());
+      }
+    }
+
+    dir.close();
+  }
+}

Property changes on: lucene/src/test/org/apache/lucene/index/TestIndexWriterOptimize.java
___________________________________________________________________
Added: svn:eol-style
   + native

Index: lucene/src/test/org/apache/lucene/index/TestIndexReaderOnDiskFull.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestIndexReaderOnDiskFull.java	(revision 0)
+++ lucene/src/test/org/apache/lucene/index/TestIndexReaderOnDiskFull.java	(revision 0)
@@ -0,0 +1,229 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.search.DefaultSimilarity;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Similarity;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestIndexReaderOnDiskFull extends LuceneTestCase {
+  /**
+   * Make sure if reader tries to commit but hits disk
+   * full that reader remains consistent and usable.
+   */
+  public void testDiskFull() throws IOException {
+
+    Term searchTerm = new Term("content", "aaa");
+    int START_COUNT = 157;
+    int END_COUNT = 144;
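+    // 157 docs are indexed below; the first pass deletes 13 of them, leaving 144 hits.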
+    
+    // First build up a starting index:
+    MockDirectoryWrapper startDir = newDirectory();
+    IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+    if (VERBOSE) {
+      System.out.println("TEST: create initial index");
+      writer.setInfoStream(System.out);
+    }
+    for(int i=0;i<157;i++) {
+      Document d = new Document();
+      d.add(newField("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
+      d.add(newField("content", "aaa " + i, Field.Store.NO, Field.Index.ANALYZED));
+      writer.addDocument(d);
+      if (0==i%10)
+        writer.commit();
+    }
+    writer.close();
+
+    {
+      IndexReader r = IndexReader.open(startDir);
+      IndexSearcher searcher = newSearcher(r);
+      ScoreDoc[] hits = null;
+      try {
+        hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+      } catch (IOException e) {
+        e.printStackTrace();
+        fail("exception when init searching: " + e);
+      }
+      searcher.close();
+      r.close();
+    }
+
+    long diskUsage = startDir.getRecomputedActualSizeInBytes();
+    long diskFree = diskUsage+100;
+
+    IOException err = null;
+
+    boolean done = false;
+    boolean gotExc = false;
+
+    // Iterate w/ ever increasing free disk space:
+    while(!done) {
+      MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory(startDir));
+
+      // If IndexReader hits disk full, it can write to
+      // the same files again.
+      dir.setPreventDoubleWrite(false);
+
+      IndexReader reader = IndexReader.open(dir, false);
+
+      // For each disk size, first try to commit against
+      // dir that will hit random IOExceptions & disk
+      // full; after, give it infinite disk space & turn
+      // off random IOExceptions & retry w/ same reader:
+      boolean success = false;
+
+      for(int x=0;x<2;x++) {
+
+        double rate = 0.05;
+        double diskRatio = ((double) diskFree)/diskUsage;
+        long thisDiskFree;
+        String testName;
+
+        if (0 == x) {
+          thisDiskFree = diskFree;
+          if (diskRatio >= 2.0) {
+            rate /= 2;
+          }
+          if (diskRatio >= 4.0) {
+            rate /= 2;
+          }
+          if (diskRatio >= 6.0) {
+            rate = 0.0;
+          }
+          if (VERBOSE) {
+            System.out.println("\ncycle: " + diskFree + " bytes");
+          }
+          testName = "disk full during reader.close() @ " + thisDiskFree + " bytes";
+        } else {
+          thisDiskFree = 0;
+          rate = 0.0;
+          if (VERBOSE) {
+            System.out.println("\ncycle: same writer: unlimited disk space");
+          }
+          testName = "reader re-use after disk full";
+        }
+
+        dir.setMaxSizeInBytes(thisDiskFree);
+        dir.setRandomIOExceptionRate(rate);
+        Similarity sim = new DefaultSimilarity();
+        try {
+          if (0 == x) {
+            int docId = 12;
+            for(int i=0;i<13;i++) {
+              reader.deleteDocument(docId);
+              reader.setNorm(docId, "content", sim.encodeNormValue(2.0f));
+              docId += 12;
+            }
+          }
+          reader.close();
+          success = true;
+          if (0 == x) {
+            done = true;
+          }
+        } catch (IOException e) {
+          if (VERBOSE) {
+            System.out.println("  hit IOException: " + e);
+            e.printStackTrace(System.out);
+          }
+          err = e;
+          gotExc = true;
+          if (1 == x) {
+            e.printStackTrace();
+            fail(testName + " hit IOException after disk space was freed up");
+          }
+        }
+
+        // Finally, verify index is not corrupt, and, if
+        // we succeeded, we see all docs changed, and if
+        // we failed, we see either all docs or no docs
+        // changed (transactional semantics):
+        IndexReader newReader = null;
+        try {
+          newReader = IndexReader.open(dir, false);
+        } catch (IOException e) {
+          e.printStackTrace();
+          fail(testName + ":exception when creating IndexReader after disk full during close: " + e);
+        }
+        /*
+        int result = newReader.docFreq(searchTerm);
+        if (success) {
+          if (result != END_COUNT) {
+            fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + END_COUNT);
+          }
+        } else {
+          // On hitting exception we still may have added
+          // all docs:
+          if (result != START_COUNT && result != END_COUNT) {
+            err.printStackTrace();
+            fail(testName + ": method did throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " or " + END_COUNT);
+          }
+        }
+        */
+
+        IndexSearcher searcher = newSearcher(newReader);
+        ScoreDoc[] hits = null;
+        try {
+          hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
+        } catch (IOException e) {
+          e.printStackTrace();
+          fail(testName + ": exception when searching: " + e);
+        }
+        int result2 = hits.length;
+        if (success) {
+          if (result2 != END_COUNT) {
+            fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + END_COUNT);
+          }
+        } else {
+          // On hitting exception we still may have added
+          // all docs:
+          if (result2 != START_COUNT && result2 != END_COUNT) {
+            err.printStackTrace();
+            fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT);
+          }
+        }
+
+        searcher.close();
+        newReader.close();
+
+        if (result2 == END_COUNT) {
+          if (!gotExc)
+            fail("never hit disk full");
+          break;
+        }
+      }
+
+      dir.close();
+
+      // Try again with 10 more bytes of free space:
+      diskFree += 10;
+    }
+
+    startDir.close();
+  }
+}

Property changes on: lucene/src/test/org/apache/lucene/index/TestIndexReaderOnDiskFull.java
___________________________________________________________________
Added: svn:eol-style
   + native

Index: lucene/src/test/org/apache/lucene/index/TestIndexReader.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestIndexReader.java	(revision 1132419)
+++ lucene/src/test/org/apache/lucene/index/TestIndexReader.java	(working copy)
@@ -871,200 +871,8 @@
       deleteReaderReaderConflict(true);
     }
 
-    /**
-     * Make sure if reader tries to commit but hits disk
-     * full that reader remains consistent and usable.
-     */
-    public void testDiskFull() throws IOException {
 
-      Term searchTerm = new Term("content", "aaa");
-      int START_COUNT = 157;
-      int END_COUNT = 144;
-      
-      // First build up a starting index:
-      MockDirectoryWrapper startDir = newDirectory();
-      IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
-      if (VERBOSE) {
-        System.out.println("TEST: create initial index");
-        writer.setInfoStream(System.out);
-      }
-      for(int i=0;i<157;i++) {
-        Document d = new Document();
-        d.add(newField("id", Integer.toString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
-        d.add(newField("content", "aaa " + i, Field.Store.NO, Field.Index.ANALYZED));
-        writer.addDocument(d);
-        if (0==i%10)
-          writer.commit();
-      }
-      writer.close();
 
-      {
-        IndexReader r = IndexReader.open(startDir);
-        IndexSearcher searcher = newSearcher(r);
-        ScoreDoc[] hits = null;
-        try {
-          hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
-        } catch (IOException e) {
-          e.printStackTrace();
-          fail("exception when init searching: " + e);
-        }
-        searcher.close();
-        r.close();
-      }
-
-      long diskUsage = startDir.getRecomputedActualSizeInBytes();
-      long diskFree = diskUsage+100;
-
-      IOException err = null;
-
-      boolean done = false;
-      boolean gotExc = false;
-
-      // Iterate w/ ever increasing free disk space:
-      while(!done) {
-        MockDirectoryWrapper dir = new MockDirectoryWrapper(random, new RAMDirectory(startDir));
-
-        // If IndexReader hits disk full, it can write to
-        // the same files again.
-        dir.setPreventDoubleWrite(false);
-
-        IndexReader reader = IndexReader.open(dir, false);
-
-        // For each disk size, first try to commit against
-        // dir that will hit random IOExceptions & disk
-        // full; after, give it infinite disk space & turn
-        // off random IOExceptions & retry w/ same reader:
-        boolean success = false;
-
-        for(int x=0;x<2;x++) {
-
-          double rate = 0.05;
-          double diskRatio = ((double) diskFree)/diskUsage;
-          long thisDiskFree;
-          String testName;
-
-          if (0 == x) {
-            thisDiskFree = diskFree;
-            if (diskRatio >= 2.0) {
-              rate /= 2;
-            }
-            if (diskRatio >= 4.0) {
-              rate /= 2;
-            }
-            if (diskRatio >= 6.0) {
-              rate = 0.0;
-            }
-            if (VERBOSE) {
-              System.out.println("\ncycle: " + diskFree + " bytes");
-            }
-            testName = "disk full during reader.close() @ " + thisDiskFree + " bytes";
-          } else {
-            thisDiskFree = 0;
-            rate = 0.0;
-            if (VERBOSE) {
-              System.out.println("\ncycle: same writer: unlimited disk space");
-            }
-            testName = "reader re-use after disk full";
-          }
-
-          dir.setMaxSizeInBytes(thisDiskFree);
-          dir.setRandomIOExceptionRate(rate);
-          Similarity sim = new DefaultSimilarity();
-          try {
-            if (0 == x) {
-              int docId = 12;
-              for(int i=0;i<13;i++) {
-                reader.deleteDocument(docId);
-                reader.setNorm(docId, "content", sim.encodeNormValue(2.0f));
-                docId += 12;
-              }
-            }
-            reader.close();
-            success = true;
-            if (0 == x) {
-              done = true;
-            }
-          } catch (IOException e) {
-            if (VERBOSE) {
-              System.out.println("  hit IOException: " + e);
-              e.printStackTrace(System.out);
-            }
-            err = e;
-            gotExc = true;
-            if (1 == x) {
-              e.printStackTrace();
-              fail(testName + " hit IOException after disk space was freed up");
-            }
-          }
-
-          // Finally, verify index is not corrupt, and, if
-          // we succeeded, we see all docs changed, and if
-          // we failed, we see either all docs or no docs
-          // changed (transactional semantics):
-          IndexReader newReader = null;
-          try {
-            newReader = IndexReader.open(dir, false);
-          } catch (IOException e) {
-            e.printStackTrace();
-            fail(testName + ":exception when creating IndexReader after disk full during close: " + e);
-          }
-          /*
-          int result = newReader.docFreq(searchTerm);
-          if (success) {
-            if (result != END_COUNT) {
-              fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + END_COUNT);
-            }
-          } else {
-            // On hitting exception we still may have added
-            // all docs:
-            if (result != START_COUNT && result != END_COUNT) {
-              err.printStackTrace();
-              fail(testName + ": method did throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " or " + END_COUNT);
-            }
-          }
-          */
-
-          IndexSearcher searcher = newSearcher(newReader);
-          ScoreDoc[] hits = null;
-          try {
-            hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
-          } catch (IOException e) {
-            e.printStackTrace();
-            fail(testName + ": exception when searching: " + e);
-          }
-          int result2 = hits.length;
-          if (success) {
-            if (result2 != END_COUNT) {
-              fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + END_COUNT);
-            }
-          } else {
-            // On hitting exception we still may have added
-            // all docs:
-            if (result2 != START_COUNT && result2 != END_COUNT) {
-              err.printStackTrace();
-              fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT);
-            }
-          }
-
-          searcher.close();
-          newReader.close();
-
-          if (result2 == END_COUNT) {
-            if (!gotExc)
-              fail("never hit disk full");
-            break;
-          }
-        }
-
-        dir.close();
-
-        // Try again with 10 more bytes of free space:
-        diskFree += 10;
-      }
-
-      startDir.close();
-    }
-
     public void testDocsOutOfOrderJIRA140() throws IOException {
       Directory dir = newDirectory();      
       IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
@@ -1692,54 +1500,6 @@
     dir.close();
   }
 
-  // LUCENE-1579: Make sure all SegmentReaders are new when
-  // reopen switches readOnly
-  public void testReopenChangeReadonly() throws Exception {
-    Directory dir = newDirectory();
-    IndexWriter writer = new IndexWriter(
-        dir,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
-            setMaxBufferedDocs(-1).
-            setMergePolicy(newLogMergePolicy(10))
-    );
-    Document doc = new Document();
-    doc.add(newField("number", "17", Field.Store.NO, Field.Index.NOT_ANALYZED));
-    writer.addDocument(doc);
-    writer.commit();
-
-    // Open reader1
-    IndexReader r = IndexReader.open(dir, false);
-    assertTrue(r instanceof DirectoryReader);
-    IndexReader r1 = getOnlySegmentReader(r);
-    final int[] ints = FieldCache.DEFAULT.getInts(r1, "number");
-    assertEquals(1, ints.length);
-    assertEquals(17, ints[0]);
-
-    // Reopen to readonly w/ no chnages
-    IndexReader r3 = r.reopen(true);
-    assertTrue(((DirectoryReader) r3).readOnly);
-    r3.close();
-
-    // Add new segment
-    writer.addDocument(doc);
-    writer.commit();
-
-    // Reopen reader1 --> reader2
-    IndexReader r2 = r.reopen(true);
-    r.close();
-    assertTrue(((DirectoryReader) r2).readOnly);
-    IndexReader[] subs = r2.getSequentialSubReaders();
-    final int[] ints2 = FieldCache.DEFAULT.getInts(subs[0], "number");
-    r2.close();
-
-    assertTrue(((SegmentReader) subs[0]).readOnly);
-    assertTrue(((SegmentReader) subs[1]).readOnly);
-    assertTrue(ints == ints2);
-
-    writer.close();
-    dir.close();
-  }
-
   // LUCENE-1586: getUniqueTermCount
   public void testUniqueTermCount() throws Exception {
     Directory dir = newDirectory();
Index: lucene/src/test/org/apache/lucene/index/TestGlobalFieldNumbers.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestGlobalFieldNumbers.java	(revision 1132419)
+++ lucene/src/test/org/apache/lucene/index/TestGlobalFieldNumbers.java	(working copy)
@@ -42,7 +42,7 @@
 public class TestGlobalFieldNumbers extends LuceneTestCase {
 
   public void testGlobalFieldNumberFiles() throws IOException {
-    for (int i = 0; i < 39; i++) {
+    for (int i = 0; i < (TEST_NIGHTLY ? 39 : 13); i++) {
       Directory dir = newDirectory();
       {
         IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT,
@@ -113,7 +113,7 @@
   }
 
   public void testIndexReaderCommit() throws IOException {
-    for (int i = 0; i < 39; i++) {
+    for (int i = 0; i < (TEST_NIGHTLY ? 39 : 13); i++) {
       Directory dir = newDirectory();
       {
         IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT,
@@ -156,7 +156,7 @@
   }
 
   public void testGlobalFieldNumberFilesAcrossCommits() throws IOException {
-    for (int i = 0; i < 39; i++) {
+    for (int i = 0; i < (TEST_NIGHTLY ? 39 : 13); i++) {
       Directory dir = newDirectory();
       {
         IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
@@ -207,7 +207,7 @@
   }
 
   public void testGlobalFieldNumberOnOldCommit() throws IOException {
-    for (int i = 0; i < 39; i++) {
+    for (int i = 0; i < (TEST_NIGHTLY ? 39 : 13); i++) {
       Directory dir = newDirectory();
       IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
           TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy(
Index: lucene/src/test/org/apache/lucene/index/TestIndexWriter.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestIndexWriter.java	(revision 1132419)
+++ lucene/src/test/org/apache/lucene/index/TestIndexWriter.java	(working copy)
@@ -133,14 +133,14 @@
         dir.close();
     }
 
-    private void addDoc(IndexWriter writer) throws IOException
+    static void addDoc(IndexWriter writer) throws IOException
     {
         Document doc = new Document();
         doc.add(newField("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
         writer.addDocument(doc);
     }
 
-    private void addDocWithIndex(IndexWriter writer, int index) throws IOException
+    static void addDocWithIndex(IndexWriter writer, int index) throws IOException
     {
         Document doc = new Document();
         doc.add(newField("content", "aaa " + index, Field.Store.YES, Field.Index.ANALYZED));
@@ -165,142 +165,6 @@
       }
     }
 
-    public void testOptimizeMaxNumSegments() throws IOException {
-
-      MockDirectoryWrapper dir = newDirectory();
-
-      final Document doc = new Document();
-      doc.add(newField("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
-      final int incrMin = TEST_NIGHTLY ? 15 : 40;
-      for(int numDocs=10;numDocs<500;numDocs += _TestUtil.nextInt(random, incrMin, 5*incrMin)) {
-        LogDocMergePolicy ldmp = new LogDocMergePolicy();
-        ldmp.setMinMergeDocs(1);
-        ldmp.setMergeFactor(5);
-        IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
-          TEST_VERSION_CURRENT, new MockAnalyzer(random))
-          .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(2).setMergePolicy(
-              ldmp));
-        for(int j=0;j<numDocs;j++)
-          writer.addDocument(doc);
-        writer.close();
-
-        SegmentInfos sis = new SegmentInfos();
-        sis.read(dir);
-        final int segCount = sis.size();
-
-        ldmp = new LogDocMergePolicy();
-        ldmp.setMergeFactor(5);
-        writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT,
-          new MockAnalyzer(random)).setMergePolicy(ldmp));
-        writer.optimize(3);
-        writer.close();
-
-        sis = new SegmentInfos();
-        sis.read(dir);
-        final int optSegCount = sis.size();
-
-        if (segCount < 3)
-          assertEquals(segCount, optSegCount);
-        else
-          assertEquals(3, optSegCount);
-      }
-      dir.close();
-    }
-
-    public void testOptimizeMaxNumSegments2() throws IOException {
-      MockDirectoryWrapper dir = newDirectory();
-
-      final Document doc = new Document();
-      doc.add(newField("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
-
-      LogDocMergePolicy ldmp = new LogDocMergePolicy();
-      ldmp.setMinMergeDocs(1);
-      ldmp.setMergeFactor(4);
-      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer(random))
-        .setMaxBufferedDocs(2).setMergePolicy(ldmp).setMergeScheduler(new ConcurrentMergeScheduler()));
-
-      for(int iter=0;iter<10;iter++) {
-        for(int i=0;i<19;i++)
-          writer.addDocument(doc);
-
-        writer.commit();
-        writer.waitForMerges();
-        writer.commit();
-
-        SegmentInfos sis = new SegmentInfos();
-        sis.read(dir);
-
-        final int segCount = sis.size();
-
-        writer.optimize(7);
-        writer.commit();
-        writer.waitForMerges();
-
-        sis = new SegmentInfos();
-        sis.read(dir);
-        final int optSegCount = sis.size();
-
-        if (segCount < 7)
-          assertEquals(segCount, optSegCount);
-        else
-          assertEquals(7, optSegCount);
-      }
-      writer.close();
-      dir.close();
-    }
-
-    /**
-     * Make sure optimize doesn't use any more than 1X
-     * starting index size as its temporary free space
-     * required.
-     */
-    public void testOptimizeTempSpaceUsage() throws IOException {
-
-      MockDirectoryWrapper dir = newDirectory();
-      IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(10).setMergePolicy(newLogMergePolicy()));
-      if (VERBOSE) {
-        System.out.println("TEST: config1=" + writer.getConfig());
-      }
-
-      for(int j=0;j<500;j++) {
-        addDocWithIndex(writer, j);
-      }
-      final int termIndexInterval = writer.getConfig().getTermIndexInterval();
-      // force one extra segment w/ different doc store so
-      // we see the doc stores get merged
-      writer.commit();
-      addDocWithIndex(writer, 500);
-      writer.close();
-
-      if (VERBOSE) {
-        System.out.println("TEST: start disk usage");
-      }
-      long startDiskUsage = 0;
-      String[] files = dir.listAll();
-      for(int i=0;i<files.length;i++) {
-        startDiskUsage += dir.fileLength(files[i]);
-        if (VERBOSE) {
-          System.out.println(files[i] + ": " + dir.fileLength(files[i]));
-        }
-      }
-
-      dir.resetMaxUsedSizeInBytes();
-      dir.setTrackDiskUsage(true);
-
-      // Import to use same term index interval else a
-      // smaller one here could increase the disk usage and
-      // cause a false failure:
-      writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND).setTermIndexInterval(termIndexInterval).setMergePolicy(newLogMergePolicy()));
-      writer.setInfoStream(VERBOSE ? System.out : null);
-      writer.optimize();
-      writer.close();
-      long maxDiskUsage = dir.getMaxUsedSizeInBytes();
-      assertTrue("optimize used too much temporary space: starting usage was " + startDiskUsage + " bytes; max temp usage was " + maxDiskUsage + " but should have been " + (4*startDiskUsage) + " (= 4X starting usage)",
-                 maxDiskUsage <= 4*startDiskUsage);
-      dir.close();
-    }
-
     static String arrayToString(String[] l) {
       String s = "";
       for(int i=0;i<l.length;i++) {
@@ -361,278 +225,8 @@
         dir.close();
     }
 
-    /*
-     * Simple test for "commit on close": open writer then
-     * add a bunch of docs, making sure reader does not see
-     * these docs until writer is closed.
-     */
-    public void testCommitOnClose() throws IOException {
-        Directory dir = newDirectory();
-        IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
-        for (int i = 0; i < 14; i++) {
-          addDoc(writer);
-        }
-        writer.close();
 
-        Term searchTerm = new Term("content", "aaa");
-        IndexSearcher searcher = new IndexSearcher(dir, false);
-        ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
-        assertEquals("first number of hits", 14, hits.length);
-        searcher.close();
 
-        IndexReader reader = IndexReader.open(dir, true);
-
-        writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
-        for(int i=0;i<3;i++) {
-          for(int j=0;j<11;j++) {
-            addDoc(writer);
-          }
-          searcher = new IndexSearcher(dir, false);
-          hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
-          assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
-          searcher.close();
-          assertTrue("reader should have still been current", reader.isCurrent());
-        }
-
-        // Now, close the writer:
-        writer.close();
-        assertFalse("reader should not be current now", reader.isCurrent());
-
-        searcher = new IndexSearcher(dir, false);
-        hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
-        assertEquals("reader did not see changes after writer was closed", 47, hits.length);
-        searcher.close();
-        reader.close();
-        dir.close();
-    }
-
-    /*
-     * Simple test for "commit on close": open writer, then
-     * add a bunch of docs, making sure reader does not see
-     * them until writer has closed.  Then instead of
-     * closing the writer, call abort and verify reader sees
-     * nothing was added.  Then verify we can open the index
-     * and add docs to it.
-     */
-    public void testCommitOnCloseAbort() throws IOException {
-      MockDirectoryWrapper dir = newDirectory();
-      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(10));
-      for (int i = 0; i < 14; i++) {
-        addDoc(writer);
-      }
-      writer.close();
-
-      Term searchTerm = new Term("content", "aaa");
-      IndexSearcher searcher = new IndexSearcher(dir, false);
-      ScoreDoc[] hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
-      assertEquals("first number of hits", 14, hits.length);
-      searcher.close();
-
-      writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))
-        .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
-      for(int j=0;j<17;j++) {
-        addDoc(writer);
-      }
-      // Delete all docs:
-      writer.deleteDocuments(searchTerm);
-
-      searcher = new IndexSearcher(dir, false);
-      hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
-      assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
-      searcher.close();
-
-      // Now, close the writer:
-      writer.rollback();
-
-      assertNoUnreferencedFiles(dir, "unreferenced files remain after rollback()");
-
-      searcher = new IndexSearcher(dir, false);
-      hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
-      assertEquals("saw changes after writer.abort", 14, hits.length);
-      searcher.close();
-
-      // Now make sure we can re-open the index, add docs,
-      // and all is good:
-      writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random))
-        .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
-
-      // On abort, writer in fact may write to the same
-      // segments_N file:
-      dir.setPreventDoubleWrite(false);
-
-      for(int i=0;i<12;i++) {
-        for(int j=0;j<17;j++) {
-          addDoc(writer);
-        }
-        searcher = new IndexSearcher(dir, false);
-        hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
-        assertEquals("reader incorrectly sees changes from writer", 14, hits.length);
-        searcher.close();
-      }
-
-      writer.close();
-      searcher = new IndexSearcher(dir, false);
-      hits = searcher.search(new TermQuery(searchTerm), null, 1000).scoreDocs;
-      assertEquals("didn't see changes after close", 218, hits.length);
-      searcher.close();
-
-      dir.close();
-    }
-
-    /*
-     * Verify that a writer with "commit on close" indeed
-     * cleans up the temp segments created after opening
-     * that are not referenced by the starting segments
-     * file.  We check this by using MockDirectoryWrapper to
-     * measure max temp disk space used.
-     */
-    public void testCommitOnCloseDiskUsage() throws IOException {
-      MockDirectoryWrapper dir = newDirectory();
-      Analyzer analyzer;
-      if (random.nextBoolean()) {
-        // no payloads
-       analyzer = new Analyzer() {
-          @Override
-          public TokenStream tokenStream(String fieldName, Reader reader) {
-            return new MockTokenizer(reader, MockTokenizer.WHITESPACE, true);
-          }
-        };
-      } else {
-        // fixed length payloads
-        final int length = random.nextInt(200);
-        analyzer = new Analyzer() {
-          @Override
-          public TokenStream tokenStream(String fieldName, Reader reader) {
-            return new MockFixedLengthPayloadFilter(random,
-                new MockTokenizer(reader, MockTokenizer.WHITESPACE, true),
-                length);
-          }
-        };
-      }
-      
-      IndexWriter writer  = new IndexWriter(
-          dir,
-          newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer).
-              setMaxBufferedDocs(10).
-              setReaderPooling(false).
-              setMergePolicy(newLogMergePolicy(10))
-      );
-      for(int j=0;j<30;j++) {
-        addDocWithIndex(writer, j);
-      }
-      writer.close();
-      dir.resetMaxUsedSizeInBytes();
-
-      dir.setTrackDiskUsage(true);
-      long startDiskUsage = dir.getMaxUsedSizeInBytes();
-      writer = new IndexWriter(
-          dir,
-          newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer)
-              .setOpenMode(OpenMode.APPEND).
-              setMaxBufferedDocs(10).
-              setMergeScheduler(new SerialMergeScheduler()).
-              setReaderPooling(false).
-              setMergePolicy(newLogMergePolicy(10))
-
-      );
-      for(int j=0;j<1470;j++) {
-        addDocWithIndex(writer, j);
-      }
-      long midDiskUsage = dir.getMaxUsedSizeInBytes();
-      dir.resetMaxUsedSizeInBytes();
-      writer.optimize();
-      writer.close();
-
-      IndexReader.open(dir, true).close();
-
-      long endDiskUsage = dir.getMaxUsedSizeInBytes();
-
-      // Ending index is 50X as large as starting index; due
-      // to 3X disk usage normally we allow 150X max
-      // transient usage.  If something is wrong w/ deleter
-      // and it doesn't delete intermediate segments then it
-      // will exceed this 150X:
-      // System.out.println("start " + startDiskUsage + "; mid " + midDiskUsage + ";end " + endDiskUsage);
-      assertTrue("writer used too much space while adding documents: mid=" + midDiskUsage + " start=" + startDiskUsage + " end=" + endDiskUsage + " max=" + (startDiskUsage*150),
-                 midDiskUsage < 150*startDiskUsage);
-      assertTrue("writer used too much space after close: endDiskUsage=" + endDiskUsage + " startDiskUsage=" + startDiskUsage + " max=" + (startDiskUsage*150),
-                 endDiskUsage < 150*startDiskUsage);
-      dir.close();
-    }
-
-
-    /*
-     * Verify that calling optimize when writer is open for
-     * "commit on close" works correctly both for rollback()
-     * and close().
-     */
-    public void testCommitOnCloseOptimize() throws IOException {
-      MockDirectoryWrapper dir = newDirectory();
-      // Must disable throwing exc on double-write: this
-      // test uses IW.rollback which easily results in
-      // writing to same file more than once
-      dir.setPreventDoubleWrite(false);
-      IndexWriter writer = new IndexWriter(
-          dir,
-          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
-              setMaxBufferedDocs(10).
-              setMergePolicy(newLogMergePolicy(10))
-      );
-      for(int j=0;j<17;j++) {
-        addDocWithIndex(writer, j);
-      }
-      writer.close();
-
-      writer  = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
-      writer.optimize();
-
-      if (VERBOSE) {
-        writer.setInfoStream(System.out);
-      }
-
-      // Open a reader before closing (commiting) the writer:
-      IndexReader reader = IndexReader.open(dir, true);
-
-      // Reader should see index as unoptimized at this
-      // point:
-      assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
-      reader.close();
-
-      // Abort the writer:
-      writer.rollback();
-      assertNoUnreferencedFiles(dir, "aborted writer after optimize");
-
-      // Open a reader after aborting writer:
-      reader = IndexReader.open(dir, true);
-
-      // Reader should still see index as unoptimized:
-      assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
-      reader.close();
-
-      if (VERBOSE) {
-        System.out.println("TEST: do real optimize");
-      }
-      writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
-      if (VERBOSE) {
-        writer.setInfoStream(System.out);
-      }
-      writer.optimize();
-      writer.close();
-
-      if (VERBOSE) {
-        System.out.println("TEST: writer closed");
-      }
-      assertNoUnreferencedFiles(dir, "aborted writer after optimize");
-
-      // Open a reader after aborting writer:
-      reader = IndexReader.open(dir, true);
-
-      // Reader should still see index as unoptimized:
-      assertTrue("Reader incorrectly sees that the index is unoptimized", reader.isOptimized());
-      reader.close();
-      dir.close();
-    }
-
     public void testIndexNoDocuments() throws IOException {
       MockDirectoryWrapper dir = newDirectory();
       IndexWriter writer  = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
@@ -1034,51 +628,8 @@
       dir.close();
     }
 
-    // Test calling optimize(false) whereby optimize is kicked
-    // off but we don't wait for it to finish (but
-    // writer.close()) does wait
-    public void testBackgroundOptimize() throws IOException {
 
-      Directory dir = newDirectory();
-      for(int pass=0;pass<2;pass++) {
-        IndexWriter writer = new IndexWriter(
-            dir,
-            newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
-                setOpenMode(OpenMode.CREATE).
-                setMaxBufferedDocs(2).
-                setMergePolicy(newLogMergePolicy(51))
-        );
-        Document doc = new Document();
-        doc.add(newField("field", "aaa", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
-        for(int i=0;i<100;i++)
-          writer.addDocument(doc);
-        writer.optimize(false);
 
-        if (0 == pass) {
-          writer.close();
-          IndexReader reader = IndexReader.open(dir, true);
-          assertTrue(reader.isOptimized());
-          reader.close();
-        } else {
-          // Get another segment to flush so we can verify it is
-          // NOT included in the optimization
-          writer.addDocument(doc);
-          writer.addDocument(doc);
-          writer.close();
-
-          IndexReader reader = IndexReader.open(dir, true);
-          assertTrue(!reader.isOptimized());
-          reader.close();
-
-          SegmentInfos infos = new SegmentInfos();
-          infos.read(dir);
-          assertEquals(2, infos.size());
-        }
-      }
-
-      dir.close();
-    }
-
   /**
    * Test that no NullPointerException will be raised,
    * when adding one document with a single, empty field
@@ -1121,51 +672,6 @@
     }
   }
 
-  // Just intercepts all merges & verifies that we are never
-  // merging a segment with >= 20 (maxMergeDocs) docs
-  private class MyMergeScheduler extends MergeScheduler {
-    @Override
-    synchronized public void merge(IndexWriter writer)
-      throws CorruptIndexException, IOException {
-
-      while(true) {
-        MergePolicy.OneMerge merge = writer.getNextMerge();
-        if (merge == null) {
-          break;
-        }
-        for(int i=0;i<merge.segments.size();i++) {
-          assert merge.segments.get(i).docCount < 20;
-        }
-        writer.merge(merge);
-      }
-    }
-
-    @Override
-    public void close() {}
-  }
-
-  // LUCENE-1013
-  public void testSetMaxMergeDocs() throws IOException {
-    Directory dir = newDirectory();
-    IndexWriterConfig conf = newIndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer(random))
-      .setMergeScheduler(new MyMergeScheduler()).setMaxBufferedDocs(2).setMergePolicy(newLogMergePolicy());
-    LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
-    lmp.setMaxMergeDocs(20);
-    lmp.setMergeFactor(2);
-    IndexWriter iw = new IndexWriter(dir, conf);
-    iw.setInfoStream(VERBOSE ? System.out : null);
-    Document document = new Document();
-    document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
-                           Field.TermVector.YES));
-    for(int i=0;i<177;i++)
-      iw.addDocument(document);
-    iw.close();
-    dir.close();
-  }
-
-
-
   public void testVariableSchema() throws Exception {
     Directory dir = newDirectory();
     int delID = 0;
@@ -1339,185 +845,8 @@
     dir.close();
   }
 
-  // LUCENE-1044: test writer.commit() when ac=false
-  public void testForceCommit() throws IOException {
-    Directory dir = newDirectory();
 
-    IndexWriter writer = new IndexWriter(
-        dir,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
-            setMaxBufferedDocs(2).
-            setMergePolicy(newLogMergePolicy(5))
-    );
-    writer.commit();
 
-    for (int i = 0; i < 23; i++)
-      addDoc(writer);
-
-    IndexReader reader = IndexReader.open(dir, true);
-    assertEquals(0, reader.numDocs());
-    writer.commit();
-    IndexReader reader2 = reader.reopen();
-    assertEquals(0, reader.numDocs());
-    assertEquals(23, reader2.numDocs());
-    reader.close();
-
-    for (int i = 0; i < 17; i++)
-      addDoc(writer);
-    assertEquals(23, reader2.numDocs());
-    reader2.close();
-    reader = IndexReader.open(dir, true);
-    assertEquals(23, reader.numDocs());
-    reader.close();
-    writer.commit();
-
-    reader = IndexReader.open(dir, true);
-    assertEquals(40, reader.numDocs());
-    reader.close();
-    writer.close();
-    dir.close();
-  }
-
-  // LUCENE-325: test expungeDeletes, when 2 singular merges
-  // are required
-  public void testExpungeDeletes() throws IOException {
-    Directory dir = newDirectory();
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer(random))
-        .setMaxBufferedDocs(2).setRAMBufferSizeMB(
-                                                  IndexWriterConfig.DISABLE_AUTO_FLUSH));
-    writer.setInfoStream(VERBOSE ? System.out : null);
-    Document document = new Document();
-
-    document = new Document();
-    Field storedField = newField("stored", "stored", Field.Store.YES,
-                                  Field.Index.NO);
-    document.add(storedField);
-    Field termVectorField = newField("termVector", "termVector",
-                                      Field.Store.NO, Field.Index.NOT_ANALYZED,
-                                      Field.TermVector.WITH_POSITIONS_OFFSETS);
-    document.add(termVectorField);
-    for(int i=0;i<10;i++)
-      writer.addDocument(document);
-    writer.close();
-
-    IndexReader ir = IndexReader.open(dir, false);
-    assertEquals(10, ir.maxDoc());
-    assertEquals(10, ir.numDocs());
-    ir.deleteDocument(0);
-    ir.deleteDocument(7);
-    assertEquals(8, ir.numDocs());
-    ir.close();
-
-    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
-    assertEquals(8, writer.numDocs());
-    assertEquals(10, writer.maxDoc());
-    writer.expungeDeletes();
-    assertEquals(8, writer.numDocs());
-    writer.close();
-    ir = IndexReader.open(dir, true);
-    assertEquals(8, ir.maxDoc());
-    assertEquals(8, ir.numDocs());
-    ir.close();
-    dir.close();
-  }
-
-  // LUCENE-325: test expungeDeletes, when many adjacent merges are required
-  public void testExpungeDeletes2() throws IOException {
-    Directory dir = newDirectory();
-    IndexWriter writer = new IndexWriter(
-        dir,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
-            setMaxBufferedDocs(2).
-            setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).
-            setMergePolicy(newLogMergePolicy(50))
-    );
-
-    Document document = new Document();
-
-    document = new Document();
-    Field storedField = newField("stored", "stored", Store.YES,
-                                  Index.NO);
-    document.add(storedField);
-    Field termVectorField = newField("termVector", "termVector",
-                                      Store.NO, Index.NOT_ANALYZED,
-                                      TermVector.WITH_POSITIONS_OFFSETS);
-    document.add(termVectorField);
-    for(int i=0;i<98;i++)
-      writer.addDocument(document);
-    writer.close();
-
-    IndexReader ir = IndexReader.open(dir, false);
-    assertEquals(98, ir.maxDoc());
-    assertEquals(98, ir.numDocs());
-    for(int i=0;i<98;i+=2)
-      ir.deleteDocument(i);
-    assertEquals(49, ir.numDocs());
-    ir.close();
-
-    writer = new IndexWriter(
-        dir,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
-            setMergePolicy(newLogMergePolicy(3))
-    );
-    assertEquals(49, writer.numDocs());
-    writer.expungeDeletes();
-    writer.close();
-    ir = IndexReader.open(dir, true);
-    assertEquals(49, ir.maxDoc());
-    assertEquals(49, ir.numDocs());
-    ir.close();
-    dir.close();
-  }
-
-  // LUCENE-325: test expungeDeletes without waiting, when
-  // many adjacent merges are required
-  public void testExpungeDeletes3() throws IOException {
-    Directory dir = newDirectory();
-    IndexWriter writer = new IndexWriter(
-        dir,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
-            setMaxBufferedDocs(2).
-            setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).
-            setMergePolicy(newLogMergePolicy(50))
-    );
-
-    Document document = new Document();
-
-    document = new Document();
-    Field storedField = newField("stored", "stored", Field.Store.YES,
-                                  Field.Index.NO);
-    document.add(storedField);
-    Field termVectorField = newField("termVector", "termVector",
-                                      Field.Store.NO, Field.Index.NOT_ANALYZED,
-                                      Field.TermVector.WITH_POSITIONS_OFFSETS);
-    document.add(termVectorField);
-    for(int i=0;i<98;i++)
-      writer.addDocument(document);
-    writer.close();
-
-    IndexReader ir = IndexReader.open(dir, false);
-    assertEquals(98, ir.maxDoc());
-    assertEquals(98, ir.numDocs());
-    for(int i=0;i<98;i+=2)
-      ir.deleteDocument(i);
-    assertEquals(49, ir.numDocs());
-    ir.close();
-
-    writer = new IndexWriter(
-        dir,
-        newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).
-            setMergePolicy(newLogMergePolicy(3))
-    );
-    writer.expungeDeletes(false);
-    writer.close();
-    ir = IndexReader.open(dir, true);
-    assertEquals(49, ir.maxDoc());
-    assertEquals(49, ir.numDocs());
-    ir.close();
-    dir.close();
-  }
-
   // LUCENE-1179
   public void testEmptyFieldName() throws IOException {
     Directory dir = newDirectory();
@@ -1797,133 +1126,8 @@
     dir.close();
   }
 
-  // LUCENE-1274: test writer.prepareCommit()
-  public void testPrepareCommit() throws IOException {
-    Directory dir = newDirectory();
 
-    IndexWriter writer = new IndexWriter(
-        dir,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
-            setMaxBufferedDocs(2).
-            setMergePolicy(newLogMergePolicy(5))
-    );
-    writer.commit();
 
-    for (int i = 0; i < 23; i++)
-      addDoc(writer);
-
-    IndexReader reader = IndexReader.open(dir, true);
-    assertEquals(0, reader.numDocs());
-
-    writer.prepareCommit();
-
-    IndexReader reader2 = IndexReader.open(dir, true);
-    assertEquals(0, reader2.numDocs());
-
-    writer.commit();
-
-    IndexReader reader3 = reader.reopen();
-    assertEquals(0, reader.numDocs());
-    assertEquals(0, reader2.numDocs());
-    assertEquals(23, reader3.numDocs());
-    reader.close();
-    reader2.close();
-
-    for (int i = 0; i < 17; i++)
-      addDoc(writer);
-
-    assertEquals(23, reader3.numDocs());
-    reader3.close();
-    reader = IndexReader.open(dir, true);
-    assertEquals(23, reader.numDocs());
-    reader.close();
-
-    writer.prepareCommit();
-
-    reader = IndexReader.open(dir, true);
-    assertEquals(23, reader.numDocs());
-    reader.close();
-
-    writer.commit();
-    reader = IndexReader.open(dir, true);
-    assertEquals(40, reader.numDocs());
-    reader.close();
-    writer.close();
-    dir.close();
-  }
-
-  // LUCENE-1274: test writer.prepareCommit()
-  public void testPrepareCommitRollback() throws IOException {
-    MockDirectoryWrapper dir = newDirectory();
-    dir.setPreventDoubleWrite(false);
-
-    IndexWriter writer = new IndexWriter(
-        dir,
-        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
-            setMaxBufferedDocs(2).
-            setMergePolicy(newLogMergePolicy(5))
-    );
-    writer.commit();
-
-    for (int i = 0; i < 23; i++)
-      addDoc(writer);
-
-    IndexReader reader = IndexReader.open(dir, true);
-    assertEquals(0, reader.numDocs());
-
-    writer.prepareCommit();
-
-    IndexReader reader2 = IndexReader.open(dir, true);
-    assertEquals(0, reader2.numDocs());
-
-    writer.rollback();
-
-    IndexReader reader3 = reader.reopen();
-    assertEquals(0, reader.numDocs());
-    assertEquals(0, reader2.numDocs());
-    assertEquals(0, reader3.numDocs());
-    reader.close();
-    reader2.close();
-
-    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
-    for (int i = 0; i < 17; i++)
-      addDoc(writer);
-
-    assertEquals(0, reader3.numDocs());
-    reader3.close();
-    reader = IndexReader.open(dir, true);
-    assertEquals(0, reader.numDocs());
-    reader.close();
-
-    writer.prepareCommit();
-
-    reader = IndexReader.open(dir, true);
-    assertEquals(0, reader.numDocs());
-    reader.close();
-
-    writer.commit();
-    reader = IndexReader.open(dir, true);
-    assertEquals(17, reader.numDocs());
-    reader.close();
-    writer.close();
-    dir.close();
-  }
-
-  // LUCENE-1274
-  public void testPrepareCommitNoChanges() throws IOException {
-    Directory dir = newDirectory();
-
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
-    writer.prepareCommit();
-    writer.commit();
-    writer.close();
-
-    IndexReader reader = IndexReader.open(dir, true);
-    assertEquals(0, reader.numDocs());
-    reader.close();
-    dir.close();
-  }
-
   // LUCENE-1219
   public void testBinaryFieldOffsetLength() throws IOException {
     Directory dir = newDirectory();
@@ -1954,45 +1158,6 @@
     dir.close();
   }
 
-  // LUCENE-1382
-  public void testCommitUserData() throws IOException {
-    Directory dir = newDirectory();
-    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
-    for(int j=0;j<17;j++)
-      addDoc(w);
-    w.close();
-
-    assertEquals(0, IndexReader.getCommitUserData(dir).size());
-
-    IndexReader r = IndexReader.open(dir, true);
-    // commit(Map) never called for this index
-    assertEquals(0, r.getCommitUserData().size());
-    r.close();
-
-    w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
-    for(int j=0;j<17;j++)
-      addDoc(w);
-    Map<String,String> data = new HashMap<String,String>();
-    data.put("label", "test1");
-    w.commit(data);
-    w.close();
-
-    assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
-
-    r = IndexReader.open(dir, true);
-    assertEquals("test1", r.getCommitUserData().get("label"));
-    r.close();
-
-    w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
-    w.optimize();
-    w.close();
-
-    assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
-
-    dir.close();
-  }
-
-
   // LUCENE-2529
   public void testPositionIncrementGapEmptyField() throws Exception {
     Directory dir = newDirectory();
@@ -2330,62 +1495,6 @@
     dir.close();
   }
 
-  // LUCENE-2095: make sure with multiple threads commit
-  // doesn't return until all changes are in fact in the
-  // index
-  public void testCommitThreadSafety() throws Throwable {
-    final int NUM_THREADS = 5;
-    final double RUN_SEC = 0.5;
-    final Directory dir = newDirectory();
-    final RandomIndexWriter w = new RandomIndexWriter(random, dir, newIndexWriterConfig(
-                                                                                        TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
-    _TestUtil.reduceOpenFiles(w.w);
-    w.commit();
-    final AtomicBoolean failed = new AtomicBoolean();
-    Thread[] threads = new Thread[NUM_THREADS];
-    final long endTime = System.currentTimeMillis()+((long) (RUN_SEC*1000));
-    for(int i=0;i<NUM_THREADS;i++) {
-      final int finalI = i;
-      threads[i] = new Thread() {
-          @Override
-          public void run() {
-            try {
-              final Document doc = new Document();
-              IndexReader r = IndexReader.open(dir);
-              Field f = newField("f", "", Field.Store.NO, Field.Index.NOT_ANALYZED);
-              doc.add(f);
-              int count = 0;
-              do {
-                if (failed.get()) break;
-                for(int j=0;j<10;j++) {
-                  final String s = finalI + "_" + String.valueOf(count++);
-                  f.setValue(s);
-                  w.addDocument(doc);
-                  w.commit();
-                  IndexReader r2 = r.reopen();
-                  assertTrue(r2 != r);
-                  r.close();
-                  r = r2;
-                  assertEquals("term=f:" + s + "; r=" + r, 1, r.docFreq(new Term("f", s)));
-                }
-              } while(System.currentTimeMillis() < endTime);
-              r.close();
-            } catch (Throwable t) {
-              failed.set(true);
-              throw new RuntimeException(t);
-            }
-          }
-        };
-      threads[i].start();
-    }
-    for(int i=0;i<NUM_THREADS;i++) {
-      threads[i].join();
-    }
-    assertFalse(failed.get());
-    w.close();
-    dir.close();
-  }
-
   // both start & end are inclusive
   private final int getInt(Random r, int start, int end) {
     return start + r.nextInt(1+end-start);
@@ -2638,67 +1747,6 @@
     dir.close();
   }
 
-  public void testIndexingThenDeleting() throws Exception {
-    final Random r = random;
-    Directory dir = newDirectory();
-    // note this test explicitly disables payloads
-    final Analyzer analyzer = new Analyzer() {
-      @Override
-      public TokenStream tokenStream(String fieldName, Reader reader) {
-        return new MockTokenizer(reader, MockTokenizer.WHITESPACE, true);
-      }
-    };
-    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer).setRAMBufferSizeMB(1.0).setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH).setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH));
-    w.setInfoStream(VERBOSE ? System.out : null);
-    Document doc = new Document();
-    doc.add(newField("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
-    int num = TEST_NIGHTLY ? 6 * RANDOM_MULTIPLIER : 3 * RANDOM_MULTIPLIER;
-    for (int iter = 0; iter < num; iter++) {
-      int count = 0;
-
-      final boolean doIndexing = r.nextBoolean();
-      if (VERBOSE) {
-        System.out.println("TEST: iter doIndexing=" + doIndexing);
-      }
-      if (doIndexing) {
-        // Add docs until a flush is triggered
-        final int startFlushCount = w.getFlushCount();
-        while(w.getFlushCount() == startFlushCount) {
-          w.addDocument(doc);
-          count++;
-        }
-      } else {
-        // Delete docs until a flush is triggered
-        final int startFlushCount = w.getFlushCount();
-        while(w.getFlushCount() == startFlushCount) {
-          w.deleteDocuments(new Term("foo", ""+count));
-          count++;
-        }
-      }
-      assertTrue("flush happened too quickly during " + (doIndexing ? "indexing" : "deleting") + " count=" + count, count > 3000);
-    }
-    w.close();
-    dir.close();
-  }
-
-  public void testNoCommits() throws Exception {
-    // Tests that if we don't call commit(), the directory has 0 commits. This has
-    // changed since LUCENE-2386, where before IW would always commit on a fresh
-    // new index.
-    Directory dir = newDirectory();
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
-    try {
-      IndexReader.listCommits(dir);
-      fail("listCommits should have thrown an exception over empty index");
-    } catch (IndexNotFoundException e) {
-      // that's expected !
-    }
-    // No changes still should generate a commit, because it's a new index.
-    writer.close();
-    assertEquals("expected 1 commits!", 1, IndexReader.listCommits(dir).size());
-    dir.close();
-  }
-
   public void testEmptyFSDirWithNoLock() throws Exception {
     // Tests that if FSDir is opened w/ a NoLockFactory (or SingleInstanceLF),
     // then IndexWriter ctor succeeds. Previously (LUCENE-2386) it failed
@@ -2775,82 +1823,6 @@
     dir.close();
   }
 
-  public void testFutureCommit() throws Exception {
-    Directory dir = newDirectory();
-
-    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE));
-    Document doc = new Document();
-    w.addDocument(doc);
-
-    // commit to "first"
-    Map<String,String> commitData = new HashMap<String,String>();
-    commitData.put("tag", "first");
-    w.commit(commitData);
-
-    // commit to "second"
-    w.addDocument(doc);
-    commitData.put("tag", "second");
-    w.commit(commitData);
-    w.close();
-
-    // open "first" with IndexWriter
-    IndexCommit commit = null;
-    for(IndexCommit c : IndexReader.listCommits(dir)) {
-      if (c.getUserData().get("tag").equals("first")) {
-        commit = c;
-        break;
-      }
-    }
-
-    assertNotNull(commit);
-
-    w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE).setIndexCommit(commit));
-
-    assertEquals(1, w.numDocs());
-
-    // commit IndexWriter to "third"
-    w.addDocument(doc);
-    commitData.put("tag", "third");
-    w.commit(commitData);
-    w.close();
-
-    // make sure "second" commit is still there
-    commit = null;
-    for(IndexCommit c : IndexReader.listCommits(dir)) {
-      if (c.getUserData().get("tag").equals("second")) {
-        commit = c;
-        break;
-      }
-    }
-
-    assertNotNull(commit);
-
-    IndexReader r = IndexReader.open(commit, true);
-    assertEquals(2, r.numDocs());
-    r.close();
-
-    // open "second", w/ writeable IndexReader & commit
-    r = IndexReader.open(commit, NoDeletionPolicy.INSTANCE, false);
-    assertEquals(2, r.numDocs());
-    r.deleteDocument(0);
-    r.deleteDocument(1);
-    commitData.put("tag", "fourth");
-    r.commit(commitData);
-    r.close();
-
-    // make sure "third" commit is still there
-    commit = null;
-    for(IndexCommit c : IndexReader.listCommits(dir)) {
-      if (c.getUserData().get("tag").equals("third")) {
-        commit = c;
-        break;
-      }
-    }
-    assertNotNull(commit);
-
-    dir.close();
-  }
-
   public void testRandomStoredFields() throws IOException {
     Directory dir = newDirectory();
     Random rand = random;
@@ -2980,38 +1952,6 @@
     dir.close();
   }
 
-  public void testDeleteAllSlowly() throws Exception {
-    final Directory dir = newDirectory();
-    RandomIndexWriter w = new RandomIndexWriter(random, dir);
-    final int NUM_DOCS = 1000 * RANDOM_MULTIPLIER;
-    final List<Integer> ids = new ArrayList<Integer>(NUM_DOCS);
-    for(int id=0;id<NUM_DOCS;id++) {
-      ids.add(id);
-    }
-    Collections.shuffle(ids, random);
-    for(int id : ids) {
-      Document doc = new Document();
-      doc.add(newField("id", ""+id, Field.Index.NOT_ANALYZED));
-      w.addDocument(doc);
-    }
-    Collections.shuffle(ids, random);
-    int upto = 0;
-    while(upto < ids.size()) {
-      final int left = ids.size() - upto;
-      final int inc = Math.min(left, _TestUtil.nextInt(random, 1, 20));
-      final int limit = upto + inc;
-      while(upto < limit) {
-        w.deleteDocuments(new Term("id", ""+ids.get(upto++)));
-      }
-      final IndexReader r = w.getReader();
-      assertEquals(NUM_DOCS - upto, r.numDocs());
-      r.close();
-    }
-
-    w.close();
-    dir.close();
-  }
-
   private static class StringSplitAnalyzer extends Analyzer {
     @Override
     public TokenStream tokenStream(String fieldName, Reader reader) {
Index: lucene/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java	(revision 1132419)
+++ lucene/src/test/org/apache/lucene/index/TestIndexWriterMergePolicy.java	(working copy)
@@ -104,9 +104,13 @@
         dir,
         newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
             setMaxBufferedDocs(10).
-            setMergePolicy(newLogMergePolicy())
+            setMergePolicy(newLogMergePolicy()).
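+            // run merges synchronously in the indexing thread so checkInvariants sees a deterministic segment structure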
+            setMergeScheduler(new SerialMergeScheduler())
     );
 
+    writer.setInfoStream(VERBOSE ? System.out : null);
+
     for (int i = 0; i < 250; i++) {
       addDoc(writer);
       checkInvariants(writer);
Index: lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java	(revision 1132419)
+++ lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java	(working copy)
@@ -18,11 +18,21 @@
  */
 
 import java.io.IOException;
+import java.io.Reader;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Random;
 
+import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.Field.TermVector;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TermQuery;
@@ -860,4 +870,81 @@
     modifier.close();
     dir.close();
   }
+  
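+  // moved from TestIndexWriter: deletes all docs in small random batches, checking the reader's numDocs after each batch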
+  public void testDeleteAllSlowly() throws Exception {
+    final Directory dir = newDirectory();
+    RandomIndexWriter w = new RandomIndexWriter(random, dir);
+    final int NUM_DOCS = 1000 * RANDOM_MULTIPLIER;
+    final List<Integer> ids = new ArrayList<Integer>(NUM_DOCS);
+    for(int id=0;id<NUM_DOCS;id++) {
+      ids.add(id);
+    }
+    Collections.shuffle(ids, random);
+    for(int id : ids) {
+      Document doc = new Document();
+      doc.add(newField("id", ""+id, Field.Index.NOT_ANALYZED));
+      w.addDocument(doc);
+    }
+    Collections.shuffle(ids, random);
+    int upto = 0;
+    while(upto < ids.size()) {
+      final int left = ids.size() - upto;
+      final int inc = Math.min(left, _TestUtil.nextInt(random, 1, 20));
+      final int limit = upto + inc;
+      while(upto < limit) {
+        w.deleteDocuments(new Term("id", ""+ids.get(upto++)));
+      }
+      final IndexReader r = w.getReader();
+      assertEquals(NUM_DOCS - upto, r.numDocs());
+      r.close();
+    }
+
+    w.close();
+    dir.close();
+  }
+  
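+  // moved from TestIndexWriter: alternately buffers adds or deletes until a flush triggers, asserting the flush did not happen too early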
+  public void testIndexingThenDeleting() throws Exception {
+    final Random r = random;
+    Directory dir = newDirectory();
+    // note this test explicitly disables payloads
+    final Analyzer analyzer = new Analyzer() {
+      @Override
+      public TokenStream tokenStream(String fieldName, Reader reader) {
+        return new MockTokenizer(reader, MockTokenizer.WHITESPACE, true);
+      }
+    };
+    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer).setRAMBufferSizeMB(1.0).setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH).setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH));
+    w.setInfoStream(VERBOSE ? System.out : null);
+    Document doc = new Document();
+    doc.add(newField("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO, Field.Index.ANALYZED));
+    int num = TEST_NIGHTLY ? 6 * RANDOM_MULTIPLIER : 3 * RANDOM_MULTIPLIER;
+    for (int iter = 0; iter < num; iter++) {
+      int count = 0;
+
+      final boolean doIndexing = r.nextBoolean();
+      if (VERBOSE) {
+        System.out.println("TEST: iter doIndexing=" + doIndexing);
+      }
+      if (doIndexing) {
+        // Add docs until a flush is triggered
+        final int startFlushCount = w.getFlushCount();
+        while(w.getFlushCount() == startFlushCount) {
+          w.addDocument(doc);
+          count++;
+        }
+      } else {
+        // Delete docs until a flush is triggered
+        final int startFlushCount = w.getFlushCount();
+        while(w.getFlushCount() == startFlushCount) {
+          w.deleteDocuments(new Term("foo", ""+count));
+          count++;
+        }
+      }
+      assertTrue("flush happened too quickly during " + (doIndexing ? "indexing" : "deleting") + " count=" + count, count > 3000);
+    }
+    w.close();
+    dir.close();
+  }
 }
Index: lucene/src/test/org/apache/lucene/index/TestRollingUpdates.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestRollingUpdates.java	(revision 1132419)
+++ lucene/src/test/org/apache/lucene/index/TestRollingUpdates.java	(working copy)
@@ -36,7 +36,7 @@
     final LineFileDocs docs = new LineFileDocs(random);
 
     final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
-    final int SIZE = 200 * RANDOM_MULTIPLIER;
+    final int SIZE = (TEST_NIGHTLY ? 200 : 20) * RANDOM_MULTIPLIER;
     int id = 0;
     IndexReader r = null;
     final int numUpdates = (int) (SIZE * (2+random.nextDouble()));
@@ -82,9 +82,8 @@
     for (int r = 0; r < 3; r++) {
       final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
           TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMaxBufferedDocs(2));
-      final int SIZE = 200 * RANDOM_MULTIPLIER;
-      final int numUpdates = (int) (SIZE * (2 + random.nextDouble()));
-      int numThreads = 3 + random.nextInt(12);
+      final int numUpdates = (TEST_NIGHTLY ? 200 : 20) * RANDOM_MULTIPLIER;
+      int numThreads = _TestUtil.nextInt(random, 2, 6);
       IndexingThread[] threads = new IndexingThread[numThreads];
       for (int i = 0; i < numThreads; i++) {
         threads[i] = new IndexingThread(docs, w, numUpdates);
@@ -97,6 +96,7 @@
 
       w.close();
     }
+
     IndexReader open = IndexReader.open(dir);
     assertEquals(1, open.numDocs());
     open.close();
@@ -123,9 +123,10 @@
           Document doc = new Document();// docs.nextDoc();
           doc.add(newField("id", "test", Index.NOT_ANALYZED));
           writer.updateDocument(new Term("id", "test"), doc);
-          if (random.nextInt(10) == 0) {
-            if (open == null)
+          if (random.nextInt(3) == 0) {
+            if (open == null) {
               open = IndexReader.open(writer, true);
+            }
             IndexReader reader = open.reopen();
             if (reader != open) {
               open.close();
@@ -134,11 +135,12 @@
             assertEquals("iter: " + i + " numDocs: "+ open.numDocs() + " del: " + open.numDeletedDocs() + " max: " + open.maxDoc(), 1, open.numDocs());
           }
         }
-        open.close();
+        if (open != null) {
+          open.close();
+        }
       } catch (Exception e) {
-        fail(e.getMessage());
+        throw new RuntimeException(e);
       }
-      
     }
   }
 }
Index: lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java	(revision 1132419)
+++ lucene/src/test/org/apache/lucene/index/TestIndexReaderReopen.java	(working copy)
@@ -36,6 +36,7 @@
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.search.DefaultSimilarity;
+import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.Similarity;
@@ -1244,4 +1245,53 @@
     r.close();
     dir.close();
   }
+  
+  // LUCENE-1579: Make sure all SegmentReaders are new when
+  // reopen switches readOnly
+  public void testReopenChangeReadonly() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMaxBufferedDocs(-1).
+            setMergePolicy(newLogMergePolicy(10))
+    );
+    Document doc = new Document();
+    doc.add(newField("number", "17", Field.Store.NO, Field.Index.NOT_ANALYZED));
+    writer.addDocument(doc);
+    writer.commit();
+
+    // Open reader1
+    IndexReader r = IndexReader.open(dir, false);
+    assertTrue(r instanceof DirectoryReader);
+    IndexReader r1 = getOnlySegmentReader(r);
+    final int[] ints = FieldCache.DEFAULT.getInts(r1, "number");
+    assertEquals(1, ints.length);
+    assertEquals(17, ints[0]);
+
+    // Reopen to readonly w/ no changes
+    IndexReader r3 = r.reopen(true);
+    assertTrue(((DirectoryReader) r3).readOnly);
+    r3.close();
+
+    // Add new segment
+    writer.addDocument(doc);
+    writer.commit();
+
+    // Reopen reader1 --> reader2
+    IndexReader r2 = r.reopen(true);
+    r.close();
+    assertTrue(((DirectoryReader) r2).readOnly);
+    IndexReader[] subs = r2.getSequentialSubReaders();
+    final int[] ints2 = FieldCache.DEFAULT.getInts(subs[0], "number");
+    r2.close();
+
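+    // both sub-readers must be read-only; the first segment must share its FieldCache entry with reader1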
+    assertTrue(((SegmentReader) subs[0]).readOnly);
+    assertTrue(((SegmentReader) subs[1]).readOnly);
+    assertTrue(ints == ints2);
+
+    writer.close();
+    dir.close();
+  }
 }
Index: lucene/src/test/org/apache/lucene/index/TestLazyBug.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestLazyBug.java	(revision 1132419)
+++ lucene/src/test/org/apache/lucene/index/TestLazyBug.java	(working copy)
@@ -25,6 +25,8 @@
 import org.apache.lucene.document.*;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
 
 
 /**
@@ -49,6 +51,19 @@
   private static Set<String> dataset = asSet(data);
 
   private static String MAGIC_FIELD = "f"+(NUM_FIELDS/3);
+  
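+  // index shared by all tests; built once in beforeClass and closed in afterClass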
+  private static Directory directory;
+  
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    directory = makeIndex();
+  }
+  
+  @AfterClass
+  public static void afterClass() throws Exception {
+    directory.close();
+  }
 
   private static FieldSelector SELECTOR = new FieldSelector() {
       public FieldSelectorResult accept(String f) {
@@ -59,7 +74,7 @@
       }
     };
 
-  private Directory makeIndex() throws Exception {
+  private static Directory makeIndex() throws Exception {
     Directory dir = newDirectory();
     try {
       IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
@@ -85,8 +100,7 @@
   }
 
   public void doTest(int[] docs) throws Exception {
-    Directory dir = makeIndex();
-    IndexReader reader = IndexReader.open(dir, true);
+    IndexReader reader = IndexReader.open(directory, true);
     for (int i = 0; i < docs.length; i++) {
       Document d = reader.document(docs[i], SELECTOR);
       d.get(MAGIC_FIELD);
@@ -109,7 +123,6 @@
       }
     }
     reader.close();
-    dir.close();
   }
 
   public void testLazyWorks() throws Exception {
Index: lucene/src/test/org/apache/lucene/index/TestIndexWriterMerging.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestIndexWriterMerging.java	(revision 1132419)
+++ lucene/src/test/org/apache/lucene/index/TestIndexWriterMerging.java	(working copy)
@@ -19,6 +19,9 @@
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.Field.TermVector;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.util.LuceneTestCase;
 
@@ -113,4 +116,187 @@
     }
     writer.close();
   }
+  
+  // LUCENE-325: test expungeDeletes, when 2 singular merges
+  // are required
+  public void testExpungeDeletes() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random))
+        .setMaxBufferedDocs(2).setRAMBufferSizeMB(
+                                                  IndexWriterConfig.DISABLE_AUTO_FLUSH));
+    writer.setInfoStream(VERBOSE ? System.out : null);
+    Document document = new Document();
+
+    document = new Document();
+    Field storedField = newField("stored", "stored", Field.Store.YES,
+                                  Field.Index.NO);
+    document.add(storedField);
+    Field termVectorField = newField("termVector", "termVector",
+                                      Field.Store.NO, Field.Index.NOT_ANALYZED,
+                                      Field.TermVector.WITH_POSITIONS_OFFSETS);
+    document.add(termVectorField);
+    for(int i=0;i<10;i++)
+      writer.addDocument(document);
+    writer.close();
+
+    IndexReader ir = IndexReader.open(dir, false);
+    assertEquals(10, ir.maxDoc());
+    assertEquals(10, ir.numDocs());
+    ir.deleteDocument(0);
+    ir.deleteDocument(7);
+    assertEquals(8, ir.numDocs());
+    ir.close();
+
+    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+    assertEquals(8, writer.numDocs());
+    assertEquals(10, writer.maxDoc());
+    writer.expungeDeletes();
+    assertEquals(8, writer.numDocs());
+    writer.close();
+    ir = IndexReader.open(dir, true);
+    assertEquals(8, ir.maxDoc());
+    assertEquals(8, ir.numDocs());
+    ir.close();
+    dir.close();
+  }
+
+  // LUCENE-325: test expungeDeletes, when many adjacent merges are required
+  public void testExpungeDeletes2() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMaxBufferedDocs(2).
+            setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).
+            setMergePolicy(newLogMergePolicy(50))
+    );
+
+    Document document = new Document();
+
+    document = new Document();
+    Field storedField = newField("stored", "stored", Store.YES,
+                                  Index.NO);
+    document.add(storedField);
+    Field termVectorField = newField("termVector", "termVector",
+                                      Store.NO, Index.NOT_ANALYZED,
+                                      TermVector.WITH_POSITIONS_OFFSETS);
+    document.add(termVectorField);
+    for(int i=0;i<98;i++)
+      writer.addDocument(document);
+    writer.close();
+
+    IndexReader ir = IndexReader.open(dir, false);
+    assertEquals(98, ir.maxDoc());
+    assertEquals(98, ir.numDocs());
+    for(int i=0;i<98;i+=2)
+      ir.deleteDocument(i);
+    assertEquals(49, ir.numDocs());
+    ir.close();
+
+    writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMergePolicy(newLogMergePolicy(3))
+    );
+    assertEquals(49, writer.numDocs());
+    writer.expungeDeletes();
+    writer.close();
+    ir = IndexReader.open(dir, true);
+    assertEquals(49, ir.maxDoc());
+    assertEquals(49, ir.numDocs());
+    ir.close();
+    dir.close();
+  }
+
+  // LUCENE-325: test expungeDeletes without waiting, when
+  // many adjacent merges are required
+  public void testExpungeDeletes3() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMaxBufferedDocs(2).
+            setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).
+            setMergePolicy(newLogMergePolicy(50))
+    );
+
+    Document document = new Document();
+    Field storedField = newField("stored", "stored", Field.Store.YES,
+                                  Field.Index.NO);
+    document.add(storedField);
+    Field termVectorField = newField("termVector", "termVector",
+                                      Field.Store.NO, Field.Index.NOT_ANALYZED,
+                                      Field.TermVector.WITH_POSITIONS_OFFSETS);
+    document.add(termVectorField);
+    for(int i=0;i<98;i++)
+      writer.addDocument(document);
+    writer.close();
+
+    IndexReader ir = IndexReader.open(dir, false);
+    assertEquals(98, ir.maxDoc());
+    assertEquals(98, ir.numDocs());
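+    // again delete every other doc so deletions span all segments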
+    for(int i=0;i<98;i+=2)
+      ir.deleteDocument(i);
+    assertEquals(49, ir.numDocs());
+    ir.close();
+
+    writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
+            setMergePolicy(newLogMergePolicy(3))
+    );
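+    // kick off the merges but don't wait for them; close() below will wait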
+    writer.expungeDeletes(false);
+    writer.close();
+    ir = IndexReader.open(dir, true);
+    assertEquals(49, ir.maxDoc());
+    assertEquals(49, ir.numDocs());
+    ir.close();
+    dir.close();
+  }
+  
+  // Just intercepts all merges & verifies that we never
+  // merge a segment with >= 20 (maxMergeDocs) docs
+  private class MyMergeScheduler extends MergeScheduler {
+    @Override
+    synchronized public void merge(IndexWriter writer)
+      throws CorruptIndexException, IOException {
+
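+      // drain the merge queue ourselves, checking each merge's segments first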
+      while(true) {
+        MergePolicy.OneMerge merge = writer.getNextMerge();
+        if (merge == null) {
+          break;
+        }
+        for(int i=0;i<merge.segments.size();i++) {
+          assert merge.segments.get(i).docCount < 20;
+        }
+        writer.merge(merge);
+      }
+    }
+
+    @Override
+    public void close() {}
+  }
+
+  // LUCENE-1013: verify that maxMergeDocs is respected during merging
+  public void testSetMaxMergeDocs() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriterConfig conf = newIndexWriterConfig(
+        TEST_VERSION_CURRENT, new MockAnalyzer(random))
+      .setMergeScheduler(new MyMergeScheduler()).setMaxBufferedDocs(2).setMergePolicy(newLogMergePolicy());
+    LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
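+    // cap merged segments at 20 docs; MyMergeScheduler asserts the cap is honored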
+    lmp.setMaxMergeDocs(20);
+    lmp.setMergeFactor(2);
+    IndexWriter iw = new IndexWriter(dir, conf);
+    iw.setInfoStream(VERBOSE ? System.out : null);
+    Document document = new Document();
+    document.add(newField("tvtest", "a b c", Field.Store.NO, Field.Index.ANALYZED,
+                           Field.TermVector.YES));
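+    // 177 docs with maxBufferedDocs=2 and mergeFactor=2 force a cascade of merges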
+    for(int i=0;i<177;i++)
+      iw.addDocument(document);
+    iw.close();
+    dir.close();
+  }
 }
Index: lucene/src/test/org/apache/lucene/index/codecs/preflex/TestSurrogates.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/codecs/preflex/TestSurrogates.java	(revision 1132419)
+++ lucene/src/test/org/apache/lucene/index/codecs/preflex/TestSurrogates.java	(working copy)
@@ -287,7 +287,7 @@
 
     for(int f=0;f<numField;f++) {
       String field = "f" + f;
-      final int numTerms = 10000 * RANDOM_MULTIPLIER;
+      final int numTerms = (TEST_NIGHTLY ? 10000 : 1000) * RANDOM_MULTIPLIER;
 
       final Set<String> uniqueTerms = new HashSet<String>();
 
Index: lucene/src/test/org/apache/lucene/index/TestFlushByRamOrCountsPolicy.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestFlushByRamOrCountsPolicy.java	(revision 1132419)
+++ lucene/src/test/org/apache/lucene/index/TestFlushByRamOrCountsPolicy.java	(working copy)
@@ -231,8 +231,8 @@
     for (int i = 0; i < numThreads.length; i++) {
       AtomicInteger numDocs = new AtomicInteger(numDocumentsToIndex);
       MockDirectoryWrapper dir = newDirectory();
-      // mock a very slow harddisk here so that flushing is very slow
-      dir.setThrottling(MockDirectoryWrapper.Throttling.ALWAYS);
+      // sometimes mock a very slow hard disk here so that flushing is very slow
+      dir.setThrottling(MockDirectoryWrapper.Throttling.SOMETIMES);
       IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT,
           new MockAnalyzer(random));
       iwc.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
Index: lucene/src/test-framework/org/apache/lucene/index/RandomIndexWriter.java
===================================================================
--- lucene/src/test-framework/org/apache/lucene/index/RandomIndexWriter.java	(revision 1132419)
+++ lucene/src/test-framework/org/apache/lucene/index/RandomIndexWriter.java	(working copy)
@@ -272,7 +272,7 @@
   public void close() throws IOException {
     // if someone isn't using getReader() API, we want to be sure to
     // maybeOptimize since presumably they might open a reader on the dir.
-    if (getReaderCalled == false && r.nextInt(4) == 2) {
+    if (getReaderCalled == false && r.nextInt(8) == 2) {
       doRandomOptimize();
     }
     w.close();
Index: lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java
===================================================================
--- lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java	(revision 1132419)
+++ lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java	(working copy)
@@ -836,14 +836,22 @@
       c.setMergeScheduler(new SerialMergeScheduler());
     }
     if (r.nextBoolean()) {
-      if (r.nextInt(20) == 17) {
-        c.setMaxBufferedDocs(2);
+      if ((TEST_NIGHTLY && random.nextBoolean()) || r.nextInt(20) == 17) {
+        // crazy value
+        c.setMaxBufferedDocs(_TestUtil.nextInt(r, 2, 7));
       } else {
-        c.setMaxBufferedDocs(_TestUtil.nextInt(r, 2, 1000));
+        // reasonable value
+        c.setMaxBufferedDocs(_TestUtil.nextInt(r, 8, 1000));
       }
     }
     if (r.nextBoolean()) {
-      c.setTermIndexInterval(_TestUtil.nextInt(r, 1, 1000));
+      if ((TEST_NIGHTLY && random.nextBoolean()) || r.nextInt(20) == 17) {
+        // crazy value
+        c.setTermIndexInterval(random.nextBoolean() ? _TestUtil.nextInt(r, 1, 31) : _TestUtil.nextInt(r, 129, 1000));
+      } else {
+        // reasonable value
+        c.setTermIndexInterval(_TestUtil.nextInt(r, 32, 128));
+      }
     }
     if (r.nextBoolean()) {
       c.setIndexerThreadPool(new ThreadAffinityDocumentsWriterThreadPool(_TestUtil.nextInt(r, 1, 20)));
@@ -874,22 +882,22 @@
     LogMergePolicy logmp = r.nextBoolean() ? new LogDocMergePolicy() : new LogByteSizeMergePolicy();
     logmp.setUseCompoundFile(r.nextBoolean());
     logmp.setCalibrateSizeByDeletes(r.nextBoolean());
-    if (r.nextInt(3) == 2) {
-      logmp.setMergeFactor(2);
+    if ((TEST_NIGHTLY && random.nextBoolean()) || r.nextInt(20) == 17) {
+      logmp.setMergeFactor(_TestUtil.nextInt(r, 2, 4));
     } else {
-      logmp.setMergeFactor(_TestUtil.nextInt(r, 2, 20));
+      logmp.setMergeFactor(_TestUtil.nextInt(r, 5, 50));
     }
     return logmp;
   }
 
   public static TieredMergePolicy newTieredMergePolicy(Random r) {
     TieredMergePolicy tmp = new TieredMergePolicy();
-    if (r.nextInt(3) == 2) {
-      tmp.setMaxMergeAtOnce(2);
-      tmp.setMaxMergeAtOnceExplicit(2);
+    if ((TEST_NIGHTLY && random.nextBoolean()) || r.nextInt(20) == 17) {
+      tmp.setMaxMergeAtOnce(_TestUtil.nextInt(r, 2, 4));
+      tmp.setMaxMergeAtOnceExplicit(_TestUtil.nextInt(r, 2, 4));
     } else {
-      tmp.setMaxMergeAtOnce(_TestUtil.nextInt(r, 2, 20));
-      tmp.setMaxMergeAtOnceExplicit(_TestUtil.nextInt(r, 2, 30));
+      tmp.setMaxMergeAtOnce(_TestUtil.nextInt(r, 5, 50));
+      tmp.setMaxMergeAtOnceExplicit(_TestUtil.nextInt(r, 5, 50));
     }
     tmp.setMaxMergedSegmentMB(0.2 + r.nextDouble() * 2.0);
     tmp.setFloorSegmentMB(0.2 + r.nextDouble() * 2.0);
@@ -1115,7 +1123,7 @@
   };
 
   public static String randomDirectory(Random random) {
-    if (random.nextInt(10) == 0) {
+    if (random.nextInt(20) == 0) {
       return CORE_DIRECTORIES[random.nextInt(CORE_DIRECTORIES.length)];
     } else {
       return "RAMDirectory";
@@ -1179,7 +1187,7 @@
   public static IndexSearcher newSearcher(IndexReader r, boolean maybeWrap) throws IOException {
 
     if (random.nextBoolean()) {
-      if (maybeWrap && random.nextBoolean()) {
+      if (maybeWrap && random.nextInt(20) == 0) {
         return new IndexSearcher(new SlowMultiReaderWrapper(r));
       } else {
         return new IndexSearcher(r);
@@ -1408,6 +1416,10 @@
       Codec codec = previousMappings.get(name);
       if (codec == null) {
         codec = knownCodecs.get(Math.abs(perFieldSeed ^ name.hashCode()) % knownCodecs.size());
+        if (codec instanceof SimpleTextCodec && perFieldSeed % 5 != 0) {
+          // make simpletext rarer, choose again
+          codec = knownCodecs.get(Math.abs(perFieldSeed ^ name.toUpperCase(Locale.ENGLISH).hashCode()) % knownCodecs.size());
+        }
         previousMappings.put(name, codec);
       }
       return codec.name;
