Index: modules/suggest/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java
===================================================================
--- modules/suggest/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java (revision 1200448)
+++ modules/suggest/src/test/org/apache/lucene/search/spell/TestLuceneDictionary.java (working copy)
@@ -70,7 +70,7 @@
doc.add(newField("zzz", "bar", TextField.TYPE_STORED));
writer.addDocument(doc);
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
}
Index: modules/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java
===================================================================
--- modules/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java (revision 1200448)
+++ modules/suggest/src/java/org/apache/lucene/search/spell/SpellChecker.java (working copy)
@@ -481,11 +481,11 @@
* Indexes the data from the given {@link Dictionary}.
* @param dict Dictionary to index
* @param config {@link IndexWriterConfig} to use
- * @param optimize whether or not the spellcheck index should be optimized
+ * @param fullMerge whether or not the spellcheck index should be fully merged
* @throws AlreadyClosedException if the Spellchecker is already closed
* @throws IOException
*/
- public final void indexDictionary(Dictionary dict, IndexWriterConfig config, boolean optimize) throws IOException {
+ public final void indexDictionary(Dictionary dict, IndexWriterConfig config, boolean fullMerge) throws IOException {
synchronized (modifyCurrentIndexLock) {
ensureOpen();
final Directory dir = this.spellIndex;
@@ -536,9 +536,10 @@
} finally {
releaseSearcher(indexSearcher);
}
+ if (fullMerge) {
+ writer.forceMerge(1);
+ }
// close writer
- if (optimize)
- writer.optimize();
writer.close();
// TODO: this isn't that great, maybe in the future SpellChecker should take
// IWC in its ctor / keep its writer open?
Index: modules/queryparser/src/test/org/apache/lucene/queryparser/xml/TestQueryTemplateManager.java
===================================================================
--- modules/queryparser/src/test/org/apache/lucene/queryparser/xml/TestQueryTemplateManager.java (revision 1200448)
+++ modules/queryparser/src/test/org/apache/lucene/queryparser/xml/TestQueryTemplateManager.java (working copy)
@@ -145,7 +145,7 @@
for (String docFieldValue : docFieldValues) {
w.addDocument(getDocumentFromString(docFieldValue));
}
- w.optimize();
+ w.forceMerge(1);
w.close();
searcher = new IndexSearcher(dir, true);
Index: modules/facet/src/java/org/apache/lucene/facet/index/FacetsPayloadProcessorProvider.java
===================================================================
--- modules/facet/src/java/org/apache/lucene/facet/index/FacetsPayloadProcessorProvider.java (revision 1200448)
+++ modules/facet/src/java/org/apache/lucene/facet/index/FacetsPayloadProcessorProvider.java (working copy)
@@ -58,7 +58,7 @@
* conf.setMergePolicy(new ForceOptimizeMergePolicy());
* IndexWriter writer = new IndexWriter(oldDir, conf);
* writer.setPayloadProcessorProvider(fppp);
- * writer.optimize();
+ * writer.forceMerge(1);
* writer.close();
*
* // merge that directory with the new index.
Index: modules/benchmark/conf/sort-standard.alg
===================================================================
--- modules/benchmark/conf/sort-standard.alg (revision 1200448)
+++ modules/benchmark/conf/sort-standard.alg (working copy)
@@ -50,7 +50,7 @@
{ "Populate"
-CreateIndex
{ "MAddDocs" AddDoc(100) > : 500000
- -Optimize
+ -ForceMerge(1)
-CloseIndex
}
Index: modules/benchmark/conf/collector-small.alg
===================================================================
--- modules/benchmark/conf/collector-small.alg (revision 1200448)
+++ modules/benchmark/conf/collector-small.alg (working copy)
@@ -52,7 +52,7 @@
{ "Populate"
CreateIndex
{ "MAddDocs" AddDoc } : 200000
- Optimize
+ ForceMerge(1)
CloseIndex
}
Index: modules/benchmark/conf/highlight-profile.alg
===================================================================
--- modules/benchmark/conf/highlight-profile.alg (revision 1200448)
+++ modules/benchmark/conf/highlight-profile.alg (working copy)
@@ -44,7 +44,7 @@
{ "Populate"
CreateIndex
{ "MAddDocs" AddDoc } : 20000
- Optimize
+ ForceMerge(1)
CloseIndex
}
{ "Rounds"
Index: modules/benchmark/conf/indexing.alg
===================================================================
--- modules/benchmark/conf/indexing.alg (revision 1200448)
+++ modules/benchmark/conf/indexing.alg (working copy)
@@ -54,7 +54,7 @@
{ "Populate"
CreateIndex
{ "MAddDocs" AddDoc } : 20000
- Optimize
+ ForceMerge(1)
CloseIndex
}
Index: modules/benchmark/conf/indexing-flush-by-RAM.alg
===================================================================
--- modules/benchmark/conf/indexing-flush-by-RAM.alg (revision 1200448)
+++ modules/benchmark/conf/indexing-flush-by-RAM.alg (working copy)
@@ -54,7 +54,7 @@
{ "Populate"
CreateIndex
{ "MAddDocs" AddDoc } : 20000
- Optimize
+ ForceMerge(1)
CloseIndex
}
Index: modules/benchmark/conf/sample.alg
===================================================================
--- modules/benchmark/conf/sample.alg (revision 1200448)
+++ modules/benchmark/conf/sample.alg (working copy)
@@ -62,7 +62,7 @@
{ "PopulateShort"
CreateIndex
{ AddDoc(4000) > : 20000
- Optimize
+ ForceMerge(1)
CloseIndex
>
@@ -71,7 +71,7 @@
{ "PopulateLong"
CreateIndex
{ AddDoc(8000) > : 10000
- Optimize
+ ForceMerge(1)
CloseIndex
>
Index: modules/benchmark/conf/highlight-vs-vector-highlight.alg
===================================================================
--- modules/benchmark/conf/highlight-vs-vector-highlight.alg (revision 1200448)
+++ modules/benchmark/conf/highlight-vs-vector-highlight.alg (working copy)
@@ -48,7 +48,7 @@
{ "Populate"
CreateIndex
{ "MAddDocs" AddDoc } : 20000
- Optimize
+ ForceMerge(1)
CloseIndex
}
{
Index: modules/benchmark/conf/standard-highlights-notv.alg
===================================================================
--- modules/benchmark/conf/standard-highlights-notv.alg (revision 1200448)
+++ modules/benchmark/conf/standard-highlights-notv.alg (working copy)
@@ -44,7 +44,7 @@
{ "Populate"
CreateIndex
{ "MAddDocs" AddDoc } : 20000
- Optimize
+ ForceMerge(1)
CloseIndex
}
{ "Rounds"
Index: modules/benchmark/conf/analyzer.alg
===================================================================
--- modules/benchmark/conf/analyzer.alg (revision 1200448)
+++ modules/benchmark/conf/analyzer.alg (working copy)
@@ -55,7 +55,7 @@
{ "Populate"
CreateIndex
{ "MAddDocs" AddDoc > : 2000
- Optimize
+ ForceMerge(1)
CloseIndex
}
Index: modules/benchmark/conf/collector.alg
===================================================================
--- modules/benchmark/conf/collector.alg (revision 1200448)
+++ modules/benchmark/conf/collector.alg (working copy)
@@ -52,7 +52,7 @@
{ "Populate"
CreateIndex
{ "MAddDocs" AddDoc } : 2000000
- Optimize
+ ForceMerge(1)
CloseIndex
}
Index: modules/benchmark/conf/standard-flush-by-RAM.alg
===================================================================
--- modules/benchmark/conf/standard-flush-by-RAM.alg (revision 1200448)
+++ modules/benchmark/conf/standard-flush-by-RAM.alg (working copy)
@@ -53,7 +53,7 @@
{ "Populate"
CreateIndex
{ "MAddDocs" AddDoc } : 20000
- Optimize
+ ForceMerge(1)
CloseIndex
}
Index: modules/benchmark/conf/micro-standard.alg
===================================================================
--- modules/benchmark/conf/micro-standard.alg (revision 1200448)
+++ modules/benchmark/conf/micro-standard.alg (working copy)
@@ -52,7 +52,7 @@
{ "Populate"
-CreateIndex
{ "MAddDocs" AddDoc > : 2000
- -Optimize
+ -ForceMerge(1)
-CloseIndex
}
Index: modules/benchmark/conf/standard-highlights-tv.alg
===================================================================
--- modules/benchmark/conf/standard-highlights-tv.alg (revision 1200448)
+++ modules/benchmark/conf/standard-highlights-tv.alg (working copy)
@@ -44,7 +44,7 @@
{ "Populate"
CreateIndex
{ "MAddDocs" AddDoc } : 20000
- Optimize
+ ForceMerge(1)
CloseIndex
}
{ "Rounds"
Index: modules/benchmark/conf/deletes.alg
===================================================================
--- modules/benchmark/conf/deletes.alg (revision 1200448)
+++ modules/benchmark/conf/deletes.alg (working copy)
@@ -58,7 +58,7 @@
{ "Populate"
OpenIndex
{ AddDoc(10) > : 200000
- Optimize
+ ForceMerge(1)
CloseIndex
>
Index: modules/benchmark/conf/indexing-multithreaded.alg
===================================================================
--- modules/benchmark/conf/indexing-multithreaded.alg (revision 1200448)
+++ modules/benchmark/conf/indexing-multithreaded.alg (working copy)
@@ -54,7 +54,7 @@
{ "Populate"
CreateIndex
[{ "MAddDocs" AddDoc } : 5000] : 4
- Optimize
+ ForceMerge(1)
CommitIndex(commit1)
CloseIndex
}
Index: modules/benchmark/conf/standard.alg
===================================================================
--- modules/benchmark/conf/standard.alg (revision 1200448)
+++ modules/benchmark/conf/standard.alg (working copy)
@@ -53,7 +53,7 @@
{ "Populate"
CreateIndex
{ "MAddDocs" AddDoc } : 20000
- Optimize
+ ForceMerge(1)
CloseIndex
}
Index: modules/benchmark/conf/indexing-flush-by-RAM-multithreaded.alg
===================================================================
--- modules/benchmark/conf/indexing-flush-by-RAM-multithreaded.alg (revision 1200448)
+++ modules/benchmark/conf/indexing-flush-by-RAM-multithreaded.alg (working copy)
@@ -54,7 +54,7 @@
{ "Populate"
CreateIndex
[{ "MAddDocs" AddDoc } : 5000] : 4
- Optimize
+ ForceMerge(1)
CloseIndex
}
Index: modules/benchmark/conf/facets.alg
===================================================================
--- modules/benchmark/conf/facets.alg (revision 1200448)
+++ modules/benchmark/conf/facets.alg (working copy)
@@ -52,7 +52,7 @@
-CreateIndex
-CreateTaxonomyIndex
{ "MAddDocs" AddFacetedDoc > : *
- -Optimize
+ -ForceMerge(1)
-CloseIndex
-CloseTaxonomyIndex
}
Index: modules/benchmark/conf/vector-highlight-profile.alg
===================================================================
--- modules/benchmark/conf/vector-highlight-profile.alg (revision 1200448)
+++ modules/benchmark/conf/vector-highlight-profile.alg (working copy)
@@ -44,7 +44,7 @@
{ "Populate"
CreateIndex
{ "MAddDocs" AddDoc } : 20000
- Optimize
+ ForceMerge(1)
CloseIndex
}
{ "Rounds"
Index: modules/benchmark/conf/sloppy-phrase.alg
===================================================================
--- modules/benchmark/conf/sloppy-phrase.alg (revision 1200448)
+++ modules/benchmark/conf/sloppy-phrase.alg (working copy)
@@ -52,7 +52,7 @@
{ "Populate"
CreateIndex
{ "MAddDocs" AddDoc(2000) > : 20000
- Optimize
+ ForceMerge(1)
CloseIndex
}
Index: modules/benchmark/conf/micro-standard-flush-by-ram.alg
===================================================================
--- modules/benchmark/conf/micro-standard-flush-by-ram.alg (revision 1200448)
+++ modules/benchmark/conf/micro-standard-flush-by-ram.alg (working copy)
@@ -53,7 +53,7 @@
{ "Populate"
CreateIndex
{ "MAddDocs" AddDoc > : 2000
- Optimize
+ ForceMerge(1)
CloseIndex
}
Index: modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java
===================================================================
--- modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java (revision 1200448)
+++ modules/benchmark/src/test/org/apache/lucene/benchmark/byTask/TestPerfTasksLogic.java (working copy)
@@ -77,7 +77,7 @@
"ResetSystemErase",
"CreateIndex",
"{ AddDoc } : 1000",
- "Optimize",
+ "ForceMerge(1)",
"CloseIndex",
"OpenReader",
"{ CountingSearchTest } : 200",
@@ -114,7 +114,7 @@
"ResetSystemErase",
"CreateIndex",
"{ AddDoc } : 100",
- "Optimize",
+ "ForceMerge(1)",
"CloseIndex",
"OpenReader",
"{ CountingSearchTest } : .5s",
@@ -137,7 +137,7 @@
"ResetSystemErase",
"CreateIndex",
"{ AddDoc } : 1000",
- "Optimize",
+ "ForceMerge(1)",
"CloseIndex",
"OpenReader",
"{",
@@ -163,7 +163,7 @@
"ResetSystemErase",
"CreateIndex",
"{ AddDoc } : 100",
- "Optimize",
+ "ForceMerge(1)",
"CloseIndex",
"OpenReader(true)",
"{ CountingHighlighterTest(size[1],highlight[1],mergeContiguous[true],maxFrags[1],fields[body]) } : 200",
@@ -202,7 +202,7 @@
"ResetSystemErase",
"CreateIndex",
"{ AddDoc } : 1000",
- "Optimize",
+ "ForceMerge(1)",
"CloseIndex",
"OpenReader(false)",
"{ CountingHighlighterTest(size[1],highlight[1],mergeContiguous[true],maxFrags[1],fields[body]) } : 200",
@@ -240,7 +240,7 @@
"ResetSystemErase",
"CreateIndex",
"{ AddDoc } : 1000",
- "Optimize",
+ "ForceMerge(1)",
"CloseIndex",
"OpenReader",
"{ CountingHighlighterTest(size[1],highlight[1],mergeContiguous[true],maxFrags[1],fields[body]) } : 200",
@@ -277,7 +277,7 @@
"# ----- alg ",
"CreateIndex",
"{ AddDoc } : * ",
- "Optimize",
+ "ForceMerge(1)",
"CloseIndex",
"OpenReader",
"{ CountingSearchTest } : 100",
@@ -818,9 +818,9 @@
}
/**
- * Test that we can call optimize(maxNumSegments).
+ * Test that we can call forceMerge(maxNumSegments).
*/
- public void testOptimizeMaxNumSegments() throws Exception {
+ public void testForceMerge() throws Exception {
// 1. alg definition (required in every "logic" test)
String algLines[] = {
"# ----- properties ",
@@ -841,7 +841,7 @@
" ResetSystemErase",
" CreateIndex",
" { \"AddDocs\" AddDoc > : * ",
- " Optimize(3)",
+ " ForceMerge(3)",
" CloseIndex()",
"} : 2",
};
Index: modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ForceMergeTask.java
===================================================================
--- modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ForceMergeTask.java (revision 0)
+++ modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ForceMergeTask.java (working copy)
@@ -0,0 +1,56 @@
+package org.apache.lucene.benchmark.byTask.tasks;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.benchmark.byTask.PerfRunData;
+import org.apache.lucene.index.IndexWriter;
+
+/**
+ * Runs forceMerge on the index.
+ *
+ * <br>Other side effects: none.
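+ * <p>Example .alg usage: {@code ForceMerge(1)} merges the index down to a single segment.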
+ */
+public class ForceMergeTask extends PerfTask {
+
+ public ForceMergeTask(PerfRunData runData) {
+ super(runData);
+ }
+
+ int maxNumSegments = -1;
+
+ @Override
+ public int doLogic() throws Exception {
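+ // unlike the old OptimizeTask, which defaulted to one segment, the target segment count must be given explicitly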
+ if (maxNumSegments == -1) {
+ throw new IllegalStateException("required argument (maxNumSegments) was not specified");
+ }
+ IndexWriter iw = getRunData().getIndexWriter();
+ iw.forceMerge(maxNumSegments);
+ //System.out.println("forceMerge called");
+ return 1;
+ }
+
+ @Override
+ public void setParams(String params) {
+ super.setParams(params);
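+ // the task parameter is parsed as a number and truncated to an int, so both ForceMerge(1) and ForceMerge(1.0) are accepted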
+ maxNumSegments = Double.valueOf(params).intValue();
+ }
+
+ @Override
+ public boolean supportsParams() {
+ return true;
+ }
+}
Property changes on: modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/ForceMergeTask.java
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
Index: modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/OptimizeTask.java
===================================================================
--- modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/OptimizeTask.java (revision 1200448)
+++ modules/benchmark/src/java/org/apache/lucene/benchmark/byTask/tasks/OptimizeTask.java (working copy)
@@ -1,53 +0,0 @@
-package org.apache.lucene.benchmark.byTask.tasks;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.benchmark.byTask.PerfRunData;
-import org.apache.lucene.index.IndexWriter;
-
-/**
- * Optimize the index.
- *
- * <br>Other side effects: none.
- */
-public class OptimizeTask extends PerfTask {
-
- public OptimizeTask(PerfRunData runData) {
- super(runData);
- }
-
- int maxNumSegments = 1;
-
- @Override
- public int doLogic() throws Exception {
- IndexWriter iw = getRunData().getIndexWriter();
- iw.optimize(maxNumSegments);
- //System.out.println("optimize called");
- return 1;
- }
-
- @Override
- public void setParams(String params) {
- super.setParams(params);
- maxNumSegments = Double.valueOf(params).intValue();
- }
-
- @Override
- public boolean supportsParams() {
- return true;
- }
-}
Index: modules/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java
===================================================================
--- modules/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java (revision 1200448)
+++ modules/queries/src/test/org/apache/lucene/queries/function/FunctionTestSetup.java (working copy)
@@ -116,9 +116,9 @@
}
if (!doMultiSegment) {
if (VERBOSE) {
- System.out.println("TEST: setUp optimize");
+ System.out.println("TEST: setUp full merge");
}
- iw.optimize();
+ iw.forceMerge(1);
}
iw.close();
if (VERBOSE) {
Index: lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java
===================================================================
--- lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/TestMergeSchedulerExternal.java (working copy)
@@ -142,7 +142,7 @@
writer.commit(); // trigger flush
writer.addDocument(new Document());
writer.commit(); // trigger flush
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
dir.close();
}
Index: lucene/src/test/org/apache/lucene/TestExternalCodecs.java
===================================================================
--- lucene/src/test/org/apache/lucene/TestExternalCodecs.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/TestExternalCodecs.java (working copy)
@@ -25,11 +25,9 @@
import org.apache.lucene.index.*;
import org.apache.lucene.index.codecs.*;
import org.apache.lucene.index.codecs.lucene40.Lucene40Codec;
-import org.apache.lucene.index.codecs.perfield.PerFieldPostingsFormat;
import org.apache.lucene.search.*;
import org.apache.lucene.store.*;
import org.apache.lucene.util.*;
-import org.apache.lucene.util.Bits;
/* Intentionally outside of oal.index to verify fully
external codecs work fine */
@@ -104,7 +102,7 @@
System.out.println("\nTEST: now delete 2nd doc");
}
w.deleteDocuments(new Term("id", "44"));
- w.optimize();
+ w.forceMerge(1);
r = IndexReader.open(w, true);
assertEquals(NUM_DOCS-2, r.maxDoc());
assertEquals(NUM_DOCS-2, r.numDocs());
Index: lucene/src/test/org/apache/lucene/store/TestRAMDirectory.java
===================================================================
--- lucene/src/test/org/apache/lucene/store/TestRAMDirectory.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/store/TestRAMDirectory.java (working copy)
@@ -104,7 +104,7 @@
final IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
- writer.optimize();
+ writer.forceMerge(1);
assertEquals(ramDir.sizeInBytes(), ramDir.getRecomputedSizeInBytes());
@@ -131,7 +131,7 @@
for (int i=0; i<
* <p>This method is not thread safe, be sure to only call it when no filter is running!
* If you re-use the same filter instance for another
* search, be sure to first reset the term counter
Index: lucene/src/test/org/apache/lucene/search/TestScorerPerf.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestScorerPerf.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/search/TestScorerPerf.java (working copy)
@@ -71,7 +71,7 @@
}
iw.addDocument(d);
}
- iw.optimize();
+ iw.forceMerge(1);
iw.close();
}
@@ -103,7 +103,7 @@
@Override
public void collect(int doc) {
count++;
- sum += docBase + doc; // use it to avoid any possibility of being optimized away
+ sum += docBase + doc; // use it to avoid any possibility of being eliminated by hotspot
}
public int getCount() { return count; }
Index: lucene/src/test/org/apache/lucene/index/TestPayloadProcessorProvider.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestPayloadProcessorProvider.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/TestPayloadProcessorProvider.java (working copy)
@@ -245,7 +245,7 @@
processors.put(dir, new PerTermPayloadProcessor());
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
writer.setPayloadProcessorProvider(new PerDirPayloadProcessor(processors));
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
verifyPayloadExists(dir, "p", new BytesRef("p1"), 0);
Index: lucene/src/test/org/apache/lucene/index/TestIndexWriterCommit.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestIndexWriterCommit.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/TestIndexWriterCommit.java (working copy)
@@ -224,7 +224,7 @@
}
long midDiskUsage = dir.getMaxUsedSizeInBytes();
dir.resetMaxUsedSizeInBytes();
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
IndexReader.open(dir, true).close();
@@ -246,11 +246,11 @@
/*
- * Verify that calling optimize when writer is open for
+ * Verify that calling forceMerge when writer is open for
* "commit on close" works correctly both for rollback()
* and close().
*/
- public void testCommitOnCloseOptimize() throws IOException {
+ public void testCommitOnCloseForceMerge() throws IOException {
MockDirectoryWrapper dir = newDirectory();
// Must disable throwing exc on double-write: this
// test uses IW.rollback which easily results in
@@ -268,44 +268,44 @@
writer.close();
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
- writer.optimize();
+ writer.forceMerge(1);
// Open a reader before closing (commiting) the writer:
IndexReader reader = IndexReader.open(dir, true);
- // Reader should see index as unoptimized at this
+ // Reader should see index as multi-seg at this
// point:
- assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
+ assertTrue("Reader incorrectly sees one segment", reader.getSequentialSubReaders().length > 1);
reader.close();
// Abort the writer:
writer.rollback();
- TestIndexWriter.assertNoUnreferencedFiles(dir, "aborted writer after optimize");
+ TestIndexWriter.assertNoUnreferencedFiles(dir, "aborted writer after forceMerge");
// Open a reader after aborting writer:
reader = IndexReader.open(dir, true);
- // Reader should still see index as unoptimized:
- assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
+ // Reader should still see index as multi-segment
+ assertTrue("Reader incorrectly sees one segment", reader.getSequentialSubReaders().length > 1);
reader.close();
if (VERBOSE) {
- System.out.println("TEST: do real optimize");
+ System.out.println("TEST: do real full merge");
}
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
if (VERBOSE) {
System.out.println("TEST: writer closed");
}
- TestIndexWriter.assertNoUnreferencedFiles(dir, "aborted writer after optimize");
+ TestIndexWriter.assertNoUnreferencedFiles(dir, "aborted writer after forceMerge");
// Open a reader after aborting writer:
reader = IndexReader.open(dir, true);
- // Reader should still see index as unoptimized:
- assertTrue("Reader incorrectly sees that the index is unoptimized", reader.isOptimized());
+ // Reader should see index as one segment
+ assertEquals("Reader incorrectly sees more than one segment", 1, reader.getSequentialSubReaders().length);
reader.close();
dir.close();
}
@@ -657,7 +657,7 @@
r.close();
w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
- w.optimize();
+ w.forceMerge(1);
w.close();
assertEquals("test1", IndexReader.getCommitUserData(dir).get("label"));
Index: lucene/src/test/org/apache/lucene/index/Test2BPostings.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/Test2BPostings.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/Test2BPostings.java (working copy)
@@ -73,7 +73,7 @@
System.out.println(i + " of " + numDocs + "...");
}
}
- w.optimize();
+ w.forceMerge(1);
w.close();
CheckIndex ci = new CheckIndex(dir);
if (VERBOSE) {
Index: lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/TestIndexWriterOnDiskFull.java (working copy)
@@ -24,7 +24,6 @@
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
-import org.apache.lucene.index.codecs.Codec;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
@@ -180,7 +179,7 @@
}
// Now, build a starting index that has START_COUNT docs. We
- // will then try to addIndexesNoOptimize into a copy of this:
+ // will then try to addIndexes into a copy of this:
MockDirectoryWrapper startDir = newDirectory();
IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
for(int j=0;j<START_COUNT;j++) {
> 0);
readers1[0].close();
readers1[1].close();
Index: lucene/src/test/org/apache/lucene/index/TestAddIndexes.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestAddIndexes.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/TestAddIndexes.java (working copy)
@@ -39,13 +39,7 @@
import org.apache.lucene.index.codecs.PostingsFormat;
import org.apache.lucene.index.codecs.SegmentInfosFormat;
import org.apache.lucene.index.codecs.lucene40.Lucene40Codec;
-import org.apache.lucene.index.codecs.lucene40.Lucene40PostingsBaseFormat;
-import org.apache.lucene.index.codecs.lucene40.Lucene40PostingsFormat;
-import org.apache.lucene.index.codecs.mocksep.MockSepPostingsFormat;
-import org.apache.lucene.index.codecs.perfield.PerFieldPostingsFormat;
import org.apache.lucene.index.codecs.pulsing.Pulsing40PostingsFormat;
-import org.apache.lucene.index.codecs.pulsing.PulsingPostingsFormat;
-import org.apache.lucene.index.codecs.simpletext.SimpleTextPostingsFormat;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.store.AlreadyClosedException;
@@ -114,7 +108,7 @@
assertEquals(40, writer.maxDoc());
writer.close();
- // test doc count before segments are merged/index is optimized
+ // test doc count before segments are merged
writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
assertEquals(190, writer.maxDoc());
writer.addIndexes(aux3);
@@ -128,9 +122,9 @@
verifyTermDocs(dir, new Term("content", "bbb"), 50);
- // now optimize it.
+ // now fully merge it.
writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
// make sure the new index is correct
@@ -186,7 +180,7 @@
q.add(new Term("content", "14"));
writer.deleteDocuments(q);
- writer.optimize();
+ writer.forceMerge(1);
writer.commit();
verifyNumDocs(dir, 1039);
@@ -224,7 +218,7 @@
q.add(new Term("content", "14"));
writer.deleteDocuments(q);
- writer.optimize();
+ writer.forceMerge(1);
writer.commit();
verifyNumDocs(dir, 1039);
@@ -262,7 +256,7 @@
writer.addIndexes(aux);
- writer.optimize();
+ writer.forceMerge(1);
writer.commit();
verifyNumDocs(dir, 1039);
@@ -729,10 +723,10 @@
switch(j%5) {
case 0:
if (VERBOSE) {
- System.out.println(Thread.currentThread().getName() + ": TEST: addIndexes(Dir[]) then optimize");
+ System.out.println(Thread.currentThread().getName() + ": TEST: addIndexes(Dir[]) then full merge");
}
writer2.addIndexes(dirs);
- writer2.optimize();
+ writer2.forceMerge(1);
break;
case 1:
if (VERBOSE) {
@@ -834,10 +828,10 @@
switch(j%5) {
case 0:
if (VERBOSE) {
- System.out.println("TEST: " + Thread.currentThread().getName() + ": addIndexes + optimize");
+ System.out.println("TEST: " + Thread.currentThread().getName() + ": addIndexes + full merge");
}
writer2.addIndexes(dirs);
- writer2.optimize();
+ writer2.forceMerge(1);
break;
case 1:
if (VERBOSE) {
@@ -853,9 +847,9 @@
break;
case 3:
if (VERBOSE) {
- System.out.println("TEST: " + Thread.currentThread().getName() + ": optimize");
+ System.out.println("TEST: " + Thread.currentThread().getName() + ": full merge");
}
- writer2.optimize();
+ writer2.forceMerge(1);
break;
case 4:
if (VERBOSE) {
@@ -1214,7 +1208,7 @@
}
try {
- IndexReader indexReader = IndexReader.open(toAdd);
+ IndexReader.open(toAdd);
fail("no such codec");
} catch (IllegalArgumentException ex) {
// expected
Index: lucene/src/test/org/apache/lucene/index/TestPayloads.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestPayloads.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/TestPayloads.java (working copy)
@@ -134,7 +134,7 @@
writer.addDocument(d);
// force merge
- writer.optimize();
+ writer.forceMerge(1);
// flush
writer.close();
@@ -204,7 +204,7 @@
writer.addDocument(d, analyzer);
}
- writer.optimize();
+ writer.forceMerge(1);
// flush
writer.close();
@@ -322,7 +322,7 @@
writer.addDocument(d);
- writer.optimize();
+ writer.forceMerge(1);
// flush
writer.close();
@@ -621,7 +621,7 @@
doc.add(new Field("hasMaybepayload2", "here we go", TextField.TYPE_STORED));
writer.addDocument(doc);
writer.addDocument(doc);
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
dir.close();
Index: lucene/src/test/org/apache/lucene/index/TestThreadedForceMerge.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestThreadedForceMerge.java (revision 0)
+++ lucene/src/test/org/apache/lucene/index/TestThreadedForceMerge.java (working copy)
@@ -0,0 +1,143 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldType;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.util.English;
+
+import org.apache.lucene.util.LuceneTestCase;
+
+import java.util.Random;
+
+public class TestThreadedForceMerge extends LuceneTestCase {
+
+ private static final Analyzer ANALYZER = new MockAnalyzer(random, MockTokenizer.SIMPLE, true);
+
+ private final static int NUM_THREADS = 3;
+ //private final static int NUM_THREADS = 5;
+
+ private final static int NUM_ITER = 1;
+
+ private final static int NUM_ITER2 = 1;
+
+ private volatile boolean failed;
+
+ private void setFailed() {
+ failed = true;
+ }
+
+ public void runTest(Random random, Directory directory) throws Exception {
+
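+ // small maxBufferedDocs keeps flushing tiny segments, so there is always something left to merge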
+ IndexWriter writer = new IndexWriter(
+ directory,
+ newIndexWriterConfig(TEST_VERSION_CURRENT, ANALYZER).
+ setOpenMode(OpenMode.CREATE).
+ setMaxBufferedDocs(2).
+ setMergePolicy(newLogMergePolicy())
+ );
+
+ for(int iter=0;iter<NUM_ITER;iter++) {
- List<String> names = new ArrayList<String>(oldNames.length + oldOptimizedNames.length);
+ List<String> names = new ArrayList<String>(oldNames.length + oldSingleSegmentNames.length);
names.addAll(Arrays.asList(oldNames));
- names.addAll(Arrays.asList(oldOptimizedNames));
+ names.addAll(Arrays.asList(oldSingleSegmentNames));
for(String name : names) {
if (VERBOSE) {
System.out.println("testUpgradeOldIndex: index=" +name);
@@ -764,16 +764,16 @@
}
}
- public void testUpgradeOldOptimizedIndexWithAdditions() throws Exception {
- for (String name : oldOptimizedNames) {
+ public void testUpgradeOldSingleSegmentIndexWithAdditions() throws Exception {
+ for (String name : oldSingleSegmentNames) {
if (VERBOSE) {
- System.out.println("testUpgradeOldOptimizedIndexWithAdditions: index=" +name);
+ System.out.println("testUpgradeOldSingleSegmentIndexWithAdditions: index=" +name);
}
File oldIndxeDir = _TestUtil.getTempDir(name);
_TestUtil.unzip(getDataFile("index." + name + ".zip"), oldIndxeDir);
Directory dir = newFSDirectory(oldIndxeDir);
- assertEquals("Original index must be optimized", 1, getNumberOfSegments(dir));
+ assertEquals("Original index must be single segment", 1, getNumberOfSegments(dir));
// create a bunch of dummy segments
int id = 40;
@@ -791,7 +791,8 @@
w.close(false);
}
- // add dummy segments (which are all in current version) to optimized index
+ // add dummy segments (which are all in current
+ // version) to single segment index
MergePolicy mp = random.nextBoolean() ? newLogMergePolicy() : newTieredMergePolicy();
IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, null)
.setMergePolicy(mp);
Index: lucene/src/test/org/apache/lucene/index/TestNorms.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestNorms.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/TestNorms.java (working copy)
@@ -36,7 +36,7 @@
/**
* Test that norms info is preserved during index life - including
- * separate norms, addDocument, addIndexes, optimize.
+ * separate norms, addDocument, addIndexes, forceMerge.
*/
public class TestNorms extends LuceneTestCase {
@@ -74,7 +74,7 @@
* Test that norms values are preserved as the index is maintained.
* Including separate norms.
* Including merging indexes with seprate norms.
- * Including optimize.
+ * Including forceMerge.
*/
public void testNorms() throws IOException {
Directory dir1 = newDirectory();
@@ -111,7 +111,7 @@
setMergePolicy(newLogMergePolicy(3))
);
iw.addIndexes(dir1,dir2);
- iw.optimize();
+ iw.forceMerge(1);
iw.close();
norms1.addAll(norms);
@@ -124,7 +124,7 @@
verifyIndex(dir3);
doTestNorms(random, dir3);
- // now with optimize
+ // now with single segment
iw = new IndexWriter(
dir3,
newIndexWriterConfig(TEST_VERSION_CURRENT, anlzr).
@@ -132,7 +132,7 @@
setMaxBufferedDocs(5).
setMergePolicy(newLogMergePolicy(3))
);
- iw.optimize();
+ iw.forceMerge(1);
iw.close();
verifyIndex(dir3);
Index: lucene/src/test/org/apache/lucene/index/TestIndexReader.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestIndexReader.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/TestIndexReader.java (working copy)
@@ -95,18 +95,18 @@
IndexReader r3 = IndexReader.openIfChanged(r2);
assertNotNull(r3);
assertFalse(c.equals(r3.getIndexCommit()));
- assertFalse(r2.getIndexCommit().isOptimized());
+ assertFalse(r2.getIndexCommit().getSegmentCount() == 1);
r3.close();
writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random))
.setOpenMode(OpenMode.APPEND));
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
r3 = IndexReader.openIfChanged(r2);
assertNotNull(r3);
- assertTrue(r3.getIndexCommit().isOptimized());
+ assertEquals(1, r3.getIndexCommit().getSegmentCount());
r2.close();
r3.close();
d.close();
@@ -381,11 +381,11 @@
assertEquals(bin[i], bytesRef.bytes[i + bytesRef.offset]);
}
reader.close();
- // force optimize
+ // force merge
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND).setMergePolicy(newLogMergePolicy()));
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
reader = IndexReader.open(dir, false);
doc2 = reader.document(reader.maxDoc() - 1);
@@ -721,7 +721,7 @@
// [incorrectly] hit a "docs out of order"
// IllegalStateException because above out-of-bounds
// deleteDocument corrupted the index:
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
if (!gotException) {
fail("delete of out-of-bounds doc number failed to hit exception");
@@ -846,7 +846,9 @@
assertEquals("IndexReaders have different values for numDocs.", index1.numDocs(), index2.numDocs());
assertEquals("IndexReaders have different values for maxDoc.", index1.maxDoc(), index2.maxDoc());
assertEquals("Only one IndexReader has deletions.", index1.hasDeletions(), index2.hasDeletions());
- assertEquals("Only one index is optimized.", index1.isOptimized(), index2.isOptimized());
+ if (!(index1 instanceof ParallelReader)) {
+ assertEquals("Single segment test differs.", index1.getSequentialSubReaders().length == 1, index2.getSequentialSubReaders().length == 1);
+ }
// check field names
Collection<String> fields1 = index1.getFieldNames(FieldOption.ALL);
@@ -970,19 +972,19 @@
IndexReader r2 = IndexReader.openIfChanged(r);
assertNotNull(r2);
assertFalse(c.equals(r2.getIndexCommit()));
- assertFalse(r2.getIndexCommit().isOptimized());
+ assertFalse(r2.getIndexCommit().getSegmentCount() == 1);
r2.close();
writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random))
.setOpenMode(OpenMode.APPEND));
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
r2 = IndexReader.openIfChanged(r);
assertNotNull(r2);
assertNull(IndexReader.openIfChanged(r2));
- assertTrue(r2.getIndexCommit().isOptimized());
+ assertEquals(1, r2.getIndexCommit().getSegmentCount());
r.close();
r2.close();
@@ -1032,7 +1034,7 @@
writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random))
.setOpenMode(OpenMode.APPEND));
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
// Make sure reopen to a single segment is still readonly:
Index: lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/TestMultiLevelSkipList.java (working copy)
@@ -77,7 +77,7 @@
writer.addDocument(d1);
}
writer.commit();
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
IndexReader reader = getOnlySegmentReader(IndexReader.open(dir));
Index: lucene/src/test/org/apache/lucene/index/TestForceMergeForever.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestForceMergeForever.java (revision 0)
+++ lucene/src/test/org/apache/lucene/index/TestForceMergeForever.java (working copy)
@@ -0,0 +1,104 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LineFileDocs;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+public class TestForceMergeForever extends LuceneTestCase {
+
+ // Just counts how many merges are done
+ private static class MyIndexWriter extends IndexWriter {
+
+ AtomicInteger mergeCount = new AtomicInteger();
+ private boolean first;
+
+ public MyIndexWriter(Directory dir, IndexWriterConfig conf) throws Exception {
+ super(dir, conf);
+ }
+
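+ // a merge requested by forceMerge carries the requested maxNumSegments; ordinary merges use -1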
+ @Override
+ public void merge(MergePolicy.OneMerge merge) throws CorruptIndexException, IOException {
+ if (merge.maxNumSegments != -1 && (first || merge.segments.size() == 1)) {
+ first = false;
+ if (VERBOSE) {
+ System.out.println("TEST: maxNumSegments merge");
+ }
+ mergeCount.incrementAndGet();
+ }
+ super.merge(merge);
+ }
+ }
+
+ public void test() throws Exception {
+ final Directory d = newDirectory();
+ final MyIndexWriter w = new MyIndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+
+ // Try to make an index that requires merging:
+ w.getConfig().setMaxBufferedDocs(_TestUtil.nextInt(random, 2, 11));
+ final int numStartDocs = atLeast(20);
+ final LineFileDocs docs = new LineFileDocs(random);
+ for(int docIDX=0;docIDX<numStartDocs;docIDX++) {
- int maxSegmentCount, Set<SegmentInfo> segmentsToOptimize)
+ public MergeSpecification findForcedMerges(SegmentInfos segmentInfos,
+ int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToMerge)
throws CorruptIndexException, IOException {
return null;
}
Index: lucene/src/test/org/apache/lucene/index/codecs/lucene3x/TestTermInfosReaderIndex.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/codecs/lucene3x/TestTermInfosReaderIndex.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/codecs/lucene3x/TestTermInfosReaderIndex.java (working copy)
@@ -181,7 +181,7 @@
}
writer.addDocument(document);
}
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
return config.getTermIndexInterval();
}
Index: lucene/src/test/org/apache/lucene/index/codecs/perfield/TestPerFieldPostingsFormat.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/codecs/perfield/TestPerFieldPostingsFormat.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/codecs/perfield/TestPerFieldPostingsFormat.java (working copy)
@@ -17,7 +17,6 @@
* limitations under the License.
*/
import java.io.IOException;
-import java.util.List;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
@@ -107,7 +106,7 @@
writer.commit();
assertEquals(30, writer.maxDoc());
_TestUtil.checkIndex(dir);
- writer.optimize();
+ writer.forceMerge(1);
assertEquals(30, writer.maxDoc());
writer.close();
dir.close();
@@ -158,8 +157,6 @@
addDocs2(writer, 10);
writer.commit();
codec = (Lucene40Codec)iwconf.getCodec();
- PostingsFormat origContentCodec = PostingsFormat.forName("MockSep");
- PostingsFormat newContentCodec = PostingsFormat.forName("Lucene40");
assertEquals(30, writer.maxDoc());
assertQuery(new Term("content", "bbb"), dir, 10);
assertQuery(new Term("content", "ccc"), dir, 10); ////
@@ -178,7 +175,7 @@
if (VERBOSE) {
System.out.println("TEST: now optimize");
}
- writer.optimize();
+ writer.forceMerge(1);
assertEquals(40, writer.maxDoc());
writer.close();
assertQuery(new Term("content", "ccc"), dir, 10);
@@ -260,7 +257,7 @@
writer.addDocument(doc);
}
if (random.nextBoolean()) {
- writer.optimize();
+ writer.forceMerge(1);
}
writer.commit();
assertEquals((i + 1) * docsPerRound, writer.maxDoc());
Index: lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java (working copy)
@@ -74,7 +74,7 @@
public void onCommit(List<? extends IndexCommit> commits) throws IOException {
IndexCommit lastCommit = commits.get(commits.size()-1);
IndexReader r = IndexReader.open(dir, true);
- assertEquals("lastCommit.isOptimized()=" + lastCommit.isOptimized() + " vs IndexReader.isOptimized=" + r.isOptimized(), r.isOptimized(), lastCommit.isOptimized());
+ assertEquals("lastCommit.segmentCount()=" + lastCommit.getSegmentCount() + " vs IndexReader.segmentCount=" + r.getSequentialSubReaders().length, r.getSequentialSubReaders().length, lastCommit.getSegmentCount());
r.close();
verifyCommitOrder(commits);
numOnCommit++;
@@ -317,13 +317,13 @@
}
writer.close();
- final boolean isOptimized;
+ final boolean needsMerging;
{
IndexReader r = IndexReader.open(dir);
- isOptimized = r.isOptimized();
+ needsMerging = r.getSequentialSubReaders().length != 1;
r.close();
}
- if (!isOptimized) {
+ if (needsMerging) {
conf = newIndexWriterConfig(TEST_VERSION_CURRENT,
new MockAnalyzer(random)).setOpenMode(
OpenMode.APPEND).setIndexDeletionPolicy(policy);
@@ -332,22 +332,22 @@
((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
}
if (VERBOSE) {
- System.out.println("TEST: open writer for optimize");
+ System.out.println("TEST: open writer for forceMerge");
}
writer = new IndexWriter(dir, conf);
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
}
- assertEquals(isOptimized ? 0:1, policy.numOnInit);
+ assertEquals(needsMerging ? 1:0, policy.numOnInit);
// If we are not auto committing then there should
// be exactly 2 commits (one per close above):
- assertEquals(1 + (isOptimized ? 0:1), policy.numOnCommit);
+ assertEquals(1 + (needsMerging ? 1:0), policy.numOnCommit);
// Test listCommits
Collection<IndexCommit> commits = IndexReader.listCommits(dir);
// 2 from closing writer
- assertEquals(1 + (isOptimized ? 0:1), commits.size());
+ assertEquals(1 + (needsMerging ? 1:0), commits.size());
// Make sure we can open a reader on each commit:
for (final IndexCommit commit : commits) {
@@ -418,16 +418,16 @@
}
assertTrue(lastCommit != null);
- // Now add 1 doc and optimize
+ // Now add 1 doc and merge
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy(policy));
addDoc(writer);
assertEquals(11, writer.numDocs());
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
assertEquals(6, IndexReader.listCommits(dir).size());
- // Now open writer on the commit just before optimize:
+ // Now open writer on the commit just before merge:
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
.setIndexDeletionPolicy(policy).setIndexCommit(lastCommit));
assertEquals(10, writer.numDocs());
@@ -436,8 +436,8 @@
writer.rollback();
IndexReader r = IndexReader.open(dir, true);
- // Still optimized, still 11 docs
- assertTrue(r.isOptimized());
+ // Still merged, still 11 docs
+ assertEquals(1, r.getSequentialSubReaders().length);
assertEquals(11, r.numDocs());
r.close();
@@ -451,39 +451,39 @@
assertEquals(7, IndexReader.listCommits(dir).size());
r = IndexReader.open(dir, true);
- // Not optimized because we rolled it back, and now only
+ // Not fully merged because we rolled it back, and now only
// 10 docs
- assertTrue(!r.isOptimized());
+ assertTrue(r.getSequentialSubReaders().length > 1);
assertEquals(10, r.numDocs());
r.close();
- // Reoptimize
+ // Re-merge
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexDeletionPolicy(policy));
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
r = IndexReader.open(dir, true);
- assertTrue(r.isOptimized());
+ assertEquals(1, r.getSequentialSubReaders().length);
assertEquals(10, r.numDocs());
r.close();
- // Now open writer on the commit just before optimize,
+ // Now open writer on the commit just before merging,
// but this time keeping only the last commit:
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setIndexCommit(lastCommit));
assertEquals(10, writer.numDocs());
- // Reader still sees optimized index, because writer
+ // Reader still sees fully merged index, because writer
// opened on the prior commit has not yet committed:
r = IndexReader.open(dir, true);
- assertTrue(r.isOptimized());
+ assertEquals(1, r.getSequentialSubReaders().length);
assertEquals(10, r.numDocs());
r.close();
writer.close();
- // Now reader sees unoptimized index:
+ // Now reader sees not-fully-merged index:
r = IndexReader.open(dir, true);
- assertTrue(!r.isOptimized());
+ assertTrue(r.getSequentialSubReaders().length > 1);
assertEquals(10, r.numDocs());
r.close();
@@ -525,7 +525,7 @@
((LogMergePolicy) mp).setUseCompoundFile(true);
}
writer = new IndexWriter(dir, conf);
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
assertEquals(1, policy.numOnInit);
@@ -569,7 +569,7 @@
for(int i=0;i<17;i++) {
addDoc(writer);
}
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
}
@@ -673,15 +673,15 @@
((LogMergePolicy) mp).setUseCompoundFile(useCompoundFile);
}
IndexReader r = IndexReader.open(dir);
- final boolean wasOptimized = r.isOptimized();
+ final boolean wasFullyMerged = r.getSequentialSubReaders().length == 1 && !r.hasDeletions();
r.close();
writer = new IndexWriter(dir, conf);
- writer.optimize();
+ writer.forceMerge(1);
// this is a commit
writer.close();
assertEquals(2*(N+1)+1, policy.numOnInit);
- assertEquals(2*(N+2) - (wasOptimized ? 1:0), policy.numOnCommit);
+ assertEquals(2*(N+2) - (wasFullyMerged ? 1:0), policy.numOnCommit);
IndexSearcher searcher = new IndexSearcher(dir, false);
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;
Index: lucene/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/TestParallelReaderEmptyIndex.java (working copy)
@@ -58,7 +58,7 @@
// When unpatched, Lucene crashes here with a NoSuchElementException (caused by ParallelTermEnum)
iwOut.addIndexes(pr);
- iwOut.optimize();
+ iwOut.forceMerge(1);
iwOut.close();
rdOut.close();
rd1.close();
@@ -88,7 +88,7 @@
ir.close();
iw = new IndexWriter(rd1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
- iw.optimize();
+ iw.forceMerge(1);
iw.close();
}
@@ -116,7 +116,7 @@
rd1.close();
rd2.close();
- iwOut.optimize();
+ iwOut.forceMerge(1);
iwOut.close();
rdOut.close();
Index: lucene/src/test/org/apache/lucene/index/TestOptimizeForever.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestOptimizeForever.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/TestOptimizeForever.java (working copy)
@@ -1,105 +0,0 @@
-package org.apache.lucene.index;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.LineFileDocs;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.util._TestUtil;
-
-
-public class TestOptimizeForever extends LuceneTestCase {
-
- // Just counts how many merges are done for optimize
- private static class MyIndexWriter extends IndexWriter {
-
- AtomicInteger optimizeMergeCount = new AtomicInteger();
- private boolean first;
-
- public MyIndexWriter(Directory dir, IndexWriterConfig conf) throws Exception {
- super(dir, conf);
- }
-
- @Override
- public void merge(MergePolicy.OneMerge merge) throws CorruptIndexException, IOException {
- if (merge.optimize && (first || merge.segments.size() == 1)) {
- first = false;
- if (VERBOSE) {
- System.out.println("TEST: optimized merge");
- }
- optimizeMergeCount.incrementAndGet();
- }
- super.merge(merge);
- }
- }
-
- public void test() throws Exception {
- final Directory d = newDirectory();
- final MyIndexWriter w = new MyIndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
-
- // Try to make an index that requires optimizing:
- w.getConfig().setMaxBufferedDocs(_TestUtil.nextInt(random, 2, 11));
- final int numStartDocs = atLeast(20);
- final LineFileDocs docs = new LineFileDocs(random);
- for(int docIDX=0;docIDX<numStartDocs;docIDX++) {
final Set<String> fieldNames = new HashSet<String>();
final int numFields = 2 + (TEST_NIGHTLY ? random.nextInt(200) : random.nextInt(20));
@@ -285,7 +285,7 @@
FieldNumberBiMap globalFieldMap = writer.segmentInfos
.getOrLoadGlobalFieldNumberMap(base);
Set<Entry<String,Integer>> entries = globalFieldMap.entries();
- writer.optimize();
+ writer.forceMerge(1);
writer.commit();
writer.close();
Set<Entry<String,Integer>> afterOptmize = globalFieldMap.entries();
@@ -352,7 +352,7 @@
IndexWriter w = new IndexWriter(base, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(
new LogByteSizeMergePolicy()));
- w.optimize();
+ w.forceMerge(1);
w.close();
SegmentInfos sis = new SegmentInfos();
sis.read(base);
Index: lucene/src/test/org/apache/lucene/index/TestSegmentTermDocs.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestSegmentTermDocs.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/TestSegmentTermDocs.java (working copy)
@@ -120,7 +120,7 @@
addDoc(writer, "ccc ccc ccc ccc");
// assure that we deal with a single segment
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
IndexReader reader = IndexReader.open(dir, null, true, indexDivisor);
Index: lucene/src/test/org/apache/lucene/index/values/TestTypePromotion.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/values/TestTypePromotion.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/values/TestTypePromotion.java (working copy)
@@ -87,7 +87,7 @@
if (random.nextInt(4) == 0) {
// once in a while use addIndexes
- writer.optimize();
+ writer.forceMerge(1);
Directory dir_2 = newDirectory() ;
IndexWriter writer_2 = new IndexWriter(dir_2,
@@ -110,7 +110,7 @@
randomValueType(types, random), values, num_1 + num_2, num_3);
}
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
assertValues(type, dir, values);
dir.close();
@@ -119,7 +119,7 @@
private void assertValues(TestType type, Directory dir, long[] values)
throws CorruptIndexException, IOException {
IndexReader reader = IndexReader.open(dir);
- assertTrue(reader.isOptimized());
+ assertEquals(1, reader.getSequentialSubReaders().length);
ReaderContext topReaderContext = reader.getTopReaderContext();
ReaderContext[] children = topReaderContext.children();
IndexDocValues docValues = children[0].reader.docValues("promote");
@@ -292,14 +292,14 @@
writer.close();
writerConfig = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
if (writerConfig.getMergePolicy() instanceof NoMergePolicy) {
- writerConfig.setMergePolicy(newLogMergePolicy()); // make sure we optimize to one segment (merge everything together)
+ writerConfig.setMergePolicy(newLogMergePolicy()); // make sure we merge to one segment (merge everything together)
}
writer = new IndexWriter(dir, writerConfig);
- // now optimize
- writer.optimize();
+ // now merge
+ writer.forceMerge(1);
writer.close();
IndexReader reader = IndexReader.open(dir);
- assertTrue(reader.isOptimized());
+ assertEquals(1, reader.getSequentialSubReaders().length);
ReaderContext topReaderContext = reader.getTopReaderContext();
ReaderContext[] children = topReaderContext.children();
IndexDocValues docValues = children[0].reader.docValues("promote");
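The assertion pattern above replaces the removed IndexReader.isOptimized() throughout this patch; a minimal caller-side sketch (class and method names are ours, and the Directory is assumed to hold an existing index):

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.store.Directory;

    class SingleSegmentCheck {
      // Replaces reader.isOptimized(): a fully merged index is one whose
      // top-level reader has exactly one sequential sub-reader.
      static boolean isSingleSegment(Directory dir) throws Exception {
        IndexReader reader = IndexReader.open(dir);
        try {
          return reader.getSequentialSubReaders().length == 1;
        } finally {
          reader.close();
        }
      }
    }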
Index: lucene/src/test/org/apache/lucene/index/values/TestDocValuesIndexing.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/values/TestDocValuesIndexing.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/values/TestDocValuesIndexing.java (working copy)
@@ -58,7 +58,7 @@
*/
public class TestDocValuesIndexing extends LuceneTestCase {
/*
- * - add test for unoptimized case with deletes
+ * - add test for multi-segment case with deletes
* - add multithreaded tests / integrate into stress indexing?
*/
@@ -83,12 +83,12 @@
writer.addDocument(doc);
}
writer.commit();
- writer.optimize(true);
+ writer.forceMerge(1, true);
writer.close(true);
IndexReader reader = IndexReader.open(dir, null, true, 1);
- assertTrue(reader.isOptimized());
+ assertEquals(1, reader.getSequentialSubReaders().length);
IndexSearcher searcher = new IndexSearcher(reader);
@@ -159,7 +159,7 @@
} else {
w.addIndexes(r_1, r_2);
}
- w.optimize(true);
+ w.forceMerge(1, true);
w.commit();
_TestUtil.checkIndex(target);
@@ -418,10 +418,10 @@
private IndexDocValues getDocValues(IndexReader reader, String field)
throws IOException {
- boolean optimized = reader.isOptimized();
- PerDocValues perDoc = optimized ? reader.getSequentialSubReaders()[0].perDocValues()
+ boolean singleSeg = reader.getSequentialSubReaders().length == 1;
+ PerDocValues perDoc = singleSeg ? reader.getSequentialSubReaders()[0].perDocValues()
: MultiPerDocValues.getPerDocs(reader);
- switch (random.nextInt(optimized ? 3 : 2)) { // case 2 only if optimized
+ switch (random.nextInt(singleSeg ? 3 : 2)) { // case 2 only if single seg
case 0:
return perDoc.docValues(field);
case 1:
@@ -430,7 +430,7 @@
return docValues;
}
throw new RuntimeException("no such field " + field);
- case 2:// this only works if we are on an optimized index!
+ case 2:// this only works if we are on a single seg index!
return reader.getSequentialSubReaders()[0].docValues(field);
}
throw new RuntimeException();
@@ -538,9 +538,9 @@
}
w.commit();
- // TODO test unoptimized with deletions
+ // TODO test multi seg with deletions
if (withDeletions || random.nextBoolean()) {
- w.optimize(true);
+ w.forceMerge(1, true);
}
return deleted;
}
@@ -565,7 +565,7 @@
doc = new Document();
doc.add(f);
w.addDocument(doc);
- w.optimize();
+ w.forceMerge(1);
IndexReader r = w.getReader();
w.close();
assertEquals(17, r.getSequentialSubReaders()[0].perDocValues().docValues("field").load().getInt(0));
@@ -595,7 +595,7 @@
doc = new Document();
doc.add(f);
w.addDocument(doc);
- w.optimize();
+ w.forceMerge(1);
IndexReader r = w.getReader();
w.close();
assertEquals(17, r.getSequentialSubReaders()[0].perDocValues().docValues("field").load().getInt(0));
Index: lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java (working copy)
@@ -80,7 +80,7 @@
/**
* Test that norms values are preserved as the index is maintained. Including
* separate norms. Including merging indexes with seprate norms. Including
- * optimize.
+ * full merge.
*/
public void testNorms() throws IOException {
// test with a single index: index1
@@ -112,7 +112,7 @@
createIndex(random, dir3);
if (VERBOSE) {
- System.out.println("TEST: now addIndexes/optimize");
+ System.out.println("TEST: now addIndexes/full merge");
}
IndexWriter iw = new IndexWriter(
dir3,
@@ -122,7 +122,7 @@
setMergePolicy(newLogMergePolicy(3))
);
iw.addIndexes(dir1, dir2);
- iw.optimize();
+ iw.forceMerge(1);
iw.close();
norms1.addAll(norms);
@@ -135,7 +135,7 @@
verifyIndex(dir3);
doTestNorms(random, dir3);
- // now with optimize
+ // now with full merge
iw = new IndexWriter(
dir3,
newIndexWriterConfig(TEST_VERSION_CURRENT, anlzr).
@@ -143,7 +143,7 @@
setMaxBufferedDocs(5).
setMergePolicy(newLogMergePolicy(3))
);
- iw.optimize();
+ iw.forceMerge(1);
iw.close();
verifyIndex(dir3);
Index: lucene/src/test/org/apache/lucene/index/TestOmitTf.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestOmitTf.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/TestOmitTf.java (working copy)
@@ -81,7 +81,7 @@
d.add(f2);
writer.addDocument(d);
- writer.optimize();
+ writer.forceMerge(1);
// now we add another document which has term freq for field f2 and not for f1 and verify if the SegmentMerger
// keep things constant
d = new Document();
@@ -96,7 +96,7 @@
writer.addDocument(d);
// force merge
- writer.optimize();
+ writer.forceMerge(1);
// flush
writer.close();
@@ -148,7 +148,7 @@
writer.addDocument(d);
// force merge
- writer.optimize();
+ writer.forceMerge(1);
// flush
writer.close();
@@ -190,7 +190,7 @@
writer.addDocument(d);
// force merge
- writer.optimize();
+ writer.forceMerge(1);
// flush
writer.close();
@@ -233,7 +233,8 @@
assertNoPrx(ram);
- // now add some documents with positions, and check there is no prox after optimization
+ // now add some documents with positions, and check
+ // there is no prox after full merge
d = new Document();
f1 = newField("f1", "This field has positions", TextField.TYPE_UNSTORED);
d.add(f1);
@@ -242,7 +243,7 @@
writer.addDocument(d);
// force merge
- writer.optimize();
+ writer.forceMerge(1);
// flush
writer.close();
@@ -278,7 +279,7 @@
//System.out.println(d);
}
- writer.optimize();
+ writer.forceMerge(1);
// flush
writer.close();
@@ -413,7 +414,7 @@
@Override
public void collect(int doc) throws IOException {
count++;
- sum += doc + docBase; // use it to avoid any possibility of being optimized away
+ sum += doc + docBase; // use it to avoid any possibility of being optimized away by the JIT
}
public static int getCount() { return count; }
Index: lucene/src/test/org/apache/lucene/index/TestIndexCommit.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestIndexCommit.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/TestIndexCommit.java (working copy)
@@ -42,7 +42,7 @@
@Override public long getTimestamp() throws IOException { return 1;}
@Override public Map<String,String> getUserData() throws IOException { return null; }
@Override public boolean isDeleted() { return false; }
- @Override public boolean isOptimized() { return false; }
+ @Override public int getSegmentCount() { return 2; }
};
IndexCommit ic2 = new IndexCommit() {
@@ -55,7 +55,7 @@
@Override public long getTimestamp() throws IOException { return 1;}
@Override public Map<String,String> getUserData() throws IOException { return null; }
@Override public boolean isDeleted() { return false; }
- @Override public boolean isOptimized() { return false; }
+ @Override public int getSegmentCount() { return 2; }
};
assertEquals(ic1, ic2);
Index: lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java (working copy)
@@ -92,7 +92,7 @@
assertFalse(reader.isCurrent());
reader.close();
}
- writer.optimize(); // make sure all merging is done etc.
+ writer.forceMerge(1); // make sure all merging is done etc.
IndexReader reader = writer.getReader();
writer.commit(); // no changes that are not visible to the reader
assertTrue(reader.isCurrent());
@@ -110,7 +110,7 @@
}
public void testUpdateDocument() throws Exception {
- boolean optimize = true;
+ boolean doFullMerge = true;
Directory dir1 = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
@@ -129,7 +129,7 @@
IndexWriter writer = new IndexWriter(dir1, iwc);
// create the index
- createIndexNoClose(!optimize, "index1", writer);
+ createIndexNoClose(!doFullMerge, "index1", writer);
// writer.flush(false, true, true);
@@ -199,7 +199,7 @@
assertTrue(nrtReader.isCurrent());
writer.addDocument(doc);
assertFalse(nrtReader.isCurrent()); // should see the changes
- writer.optimize(); // make sure we don't have a merge going on
+ writer.forceMerge(1); // make sure we don't have a merge going on
assertFalse(nrtReader.isCurrent());
nrtReader.close();
@@ -225,7 +225,7 @@
* @throws Exception
*/
public void testAddIndexes() throws Exception {
- boolean optimize = false;
+ boolean doFullMerge = false;
Directory dir1 = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
@@ -241,13 +241,13 @@
IndexWriter writer = new IndexWriter(dir1, iwc);
// create the index
- createIndexNoClose(!optimize, "index1", writer);
+ createIndexNoClose(!doFullMerge, "index1", writer);
writer.flush(false, true);
// create a 2nd index
Directory dir2 = newDirectory();
IndexWriter writer2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
- createIndexNoClose(!optimize, "index2", writer2);
+ createIndexNoClose(!doFullMerge, "index2", writer2);
writer2.close();
IndexReader r0 = writer.getReader();
@@ -280,7 +280,7 @@
}
public void testAddIndexes2() throws Exception {
- boolean optimize = false;
+ boolean doFullMerge = false;
Directory dir1 = newDirectory();
IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
@@ -288,7 +288,7 @@
// create a 2nd index
Directory dir2 = newDirectory();
IndexWriter writer2 = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
- createIndexNoClose(!optimize, "index2", writer2);
+ createIndexNoClose(!doFullMerge, "index2", writer2);
writer2.close();
writer.addIndexes(dir2);
@@ -312,12 +312,12 @@
* @throws Exception
*/
public void testDeleteFromIndexWriter() throws Exception {
- boolean optimize = true;
+ boolean doFullMerge = true;
Directory dir1 = newDirectory();
IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)).setReaderTermsIndexDivisor(2));
// create the index
- createIndexNoClose(!optimize, "index1", writer);
+ createIndexNoClose(!doFullMerge, "index1", writer);
writer.flush(false, true);
// get a reader
IndexReader r1 = writer.getReader();
@@ -487,7 +487,7 @@
switch (j % 4) {
case 0:
mainWriter.addIndexes(dirs);
- mainWriter.optimize();
+ mainWriter.forceMerge(1);
break;
case 1:
mainWriter.addIndexes(dirs);
@@ -503,7 +503,7 @@
}
}
- public void testIndexWriterReopenSegmentOptimize() throws Exception {
+ public void testIndexWriterReopenSegmentFullMerge() throws Exception {
doTestIndexWriterReopenSegment(true);
}
@@ -515,13 +515,13 @@
* Tests creating a segment, then checks to ensure the segment can be seen via
* IW.getReader
*/
- public void doTestIndexWriterReopenSegment(boolean optimize) throws Exception {
+ public void doTestIndexWriterReopenSegment(boolean doFullMerge) throws Exception {
Directory dir1 = newDirectory();
IndexWriter writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
IndexReader r1 = writer.getReader();
assertEquals(0, r1.maxDoc());
createIndexNoClose(false, "index1", writer);
- writer.flush(!optimize, true);
+ writer.flush(!doFullMerge, true);
IndexReader iwr1 = writer.getReader();
assertEquals(100, iwr1.maxDoc());
@@ -581,7 +581,7 @@
w.addDocument(DocHelper.createDocument(i, indexName, 4));
}
if (!multiSegment) {
- w.optimize();
+ w.forceMerge(1);
}
w.close();
}
@@ -592,7 +592,7 @@
w.addDocument(DocHelper.createDocument(i, indexName, 4));
}
if (!multiSegment) {
- w.optimize();
+ w.forceMerge(1);
}
}
@@ -636,7 +636,7 @@
final int count = warmer.warmCount;
writer.addDocument(DocHelper.createDocument(17, "test", 4));
- writer.optimize();
+ writer.forceMerge(1);
assertTrue(warmer.warmCount > count);
writer.close();
Index: lucene/src/test/org/apache/lucene/index/TestOmitNorms.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestOmitNorms.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/TestOmitNorms.java (working copy)
@@ -49,7 +49,7 @@
d.add(f2);
writer.addDocument(d);
- writer.optimize();
+ writer.forceMerge(1);
// now we add another document which has term freq for field f2 and not for f1 and verify if the SegmentMerger
// keep things constant
d = new Document();
@@ -62,7 +62,7 @@
writer.addDocument(d);
// force merge
- writer.optimize();
+ writer.forceMerge(1);
// flush
writer.close();
@@ -116,7 +116,7 @@
}
// force merge
- writer.optimize();
+ writer.forceMerge(1);
// flush
writer.close();
@@ -163,7 +163,7 @@
}
// force merge
- writer.optimize();
+ writer.forceMerge(1);
// flush
writer.close();
@@ -209,7 +209,7 @@
assertNoNrm(ram);
// force merge
- writer.optimize();
+ writer.forceMerge(1);
// flush
writer.close();
@@ -221,7 +221,7 @@
* Tests various combinations of omitNorms=true/false, the field not existing at all,
* ensuring that only omitNorms is 'viral'.
* Internally checks that MultiNorms.norms() is consistent (returns the same bytes)
- * as the optimized equivalent.
+ * as the fully merged equivalent.
*/
public void testOmitNormsCombos() throws IOException {
// indexed with norms
@@ -290,8 +290,8 @@
IndexReader ir1 = riw.getReader();
byte[] norms1 = MultiNorms.norms(ir1, field);
- // optimize and validate MultiNorms against single segment.
- riw.optimize();
+ // fully merge and validate MultiNorms against single segment.
+ riw.forceMerge(1);
IndexReader ir2 = riw.getReader();
byte[] norms2 = ir2.getSequentialSubReaders()[0].norms(field);
Index: lucene/src/test/org/apache/lucene/index/TestTieredMergePolicy.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestTieredMergePolicy.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/TestTieredMergePolicy.java (working copy)
@@ -65,7 +65,7 @@
dir.close();
}
- public void testPartialOptimize() throws Exception {
+ public void testPartialMerge() throws Exception {
int num = atLeast(10);
    for(int iter=0;iter<num;iter++) {
    final List<String> aDocIDs = new ArrayList<String>();
    final List<String> bDocIDs = new ArrayList<String>();
Index: lucene/src/test/org/apache/lucene/index/TestSizeBoundedForceMerge.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestSizeBoundedForceMerge.java (revision 0)
+++ lucene/src/test/org/apache/lucene/index/TestSizeBoundedForceMerge.java (working copy)
@@ -0,0 +1,368 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestSizeBoundedForceMerge extends LuceneTestCase {
+
+ private void addDocs(IndexWriter writer, int numDocs) throws IOException {
+ for (int i = 0; i < numDocs; i++) {
+ Document doc = new Document();
+ writer.addDocument(doc);
+ }
+ writer.commit();
+ }
+
+ private static IndexWriterConfig newWriterConfig() throws IOException {
+ IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, null);
+ conf.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
+ conf.setRAMBufferSizeMB(IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB);
+ // prevent any merges by default.
+ conf.setMergePolicy(NoMergePolicy.COMPOUND_FILES);
+ return conf;
+ }
+
+ public void testByteSizeLimit() throws Exception {
+ // tests that the max merge size constraint is applied during forceMerge.
+ Directory dir = new RAMDirectory();
+
+ // Prepare an index w/ several small segments and a large one.
+ IndexWriterConfig conf = newWriterConfig();
+ IndexWriter writer = new IndexWriter(dir, conf);
+ final int numSegments = 15;
+ for (int i = 0; i < numSegments; i++) {
+ int numDocs = i == 7 ? 30 : 1;
+ addDocs(writer, numDocs);
+ }
+ writer.close();
+
+ SegmentInfos sis = new SegmentInfos();
+ sis.read(dir);
+ double min = sis.info(0).sizeInBytes(true);
+
+ conf = newWriterConfig();
+ LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy();
+ lmp.setMaxMergeMBForForcedMerge((min + 1) / (1 << 20));
+ conf.setMergePolicy(lmp);
+
+ writer = new IndexWriter(dir, conf);
+ writer.forceMerge(1);
+ writer.close();
+
+ // Should only be 3 segments in the index, because one of them exceeds the size limit
+ sis = new SegmentInfos();
+ sis.read(dir);
+ assertEquals(3, sis.size());
+ }
+
+ public void testNumDocsLimit() throws Exception {
+ // tests that the max merge docs constraint is applied during forceMerge.
+ Directory dir = new RAMDirectory();
+
+ // Prepare an index w/ several small segments and a large one.
+ IndexWriterConfig conf = newWriterConfig();
+ IndexWriter writer = new IndexWriter(dir, conf);
+
+ addDocs(writer, 3);
+ addDocs(writer, 3);
+ addDocs(writer, 5);
+ addDocs(writer, 3);
+ addDocs(writer, 3);
+ addDocs(writer, 3);
+ addDocs(writer, 3);
+
+ writer.close();
+
+ conf = newWriterConfig();
+ LogMergePolicy lmp = new LogDocMergePolicy();
+ lmp.setMaxMergeDocs(3);
+ conf.setMergePolicy(lmp);
+
+ writer = new IndexWriter(dir, conf);
+ writer.forceMerge(1);
+ writer.close();
+
+    // Should only be 3 segments in the index, because one of them exceeds the doc limit
+ SegmentInfos sis = new SegmentInfos();
+ sis.read(dir);
+ assertEquals(3, sis.size());
+ }
+
+ public void testLastSegmentTooLarge() throws Exception {
+ Directory dir = new RAMDirectory();
+
+ IndexWriterConfig conf = newWriterConfig();
+ IndexWriter writer = new IndexWriter(dir, conf);
+
+ addDocs(writer, 3);
+ addDocs(writer, 3);
+ addDocs(writer, 3);
+ addDocs(writer, 5);
+
+ writer.close();
+
+ conf = newWriterConfig();
+ LogMergePolicy lmp = new LogDocMergePolicy();
+ lmp.setMaxMergeDocs(3);
+ conf.setMergePolicy(lmp);
+
+ writer = new IndexWriter(dir, conf);
+ writer.forceMerge(1);
+ writer.close();
+
+ SegmentInfos sis = new SegmentInfos();
+ sis.read(dir);
+ assertEquals(2, sis.size());
+ }
+
+ public void testFirstSegmentTooLarge() throws Exception {
+ Directory dir = new RAMDirectory();
+
+ IndexWriterConfig conf = newWriterConfig();
+ IndexWriter writer = new IndexWriter(dir, conf);
+
+ addDocs(writer, 5);
+ addDocs(writer, 3);
+ addDocs(writer, 3);
+ addDocs(writer, 3);
+
+ writer.close();
+
+ conf = newWriterConfig();
+ LogMergePolicy lmp = new LogDocMergePolicy();
+ lmp.setMaxMergeDocs(3);
+ conf.setMergePolicy(lmp);
+
+ writer = new IndexWriter(dir, conf);
+ writer.forceMerge(1);
+ writer.close();
+
+ SegmentInfos sis = new SegmentInfos();
+ sis.read(dir);
+ assertEquals(2, sis.size());
+ }
+
+ public void testAllSegmentsSmall() throws Exception {
+ Directory dir = new RAMDirectory();
+
+ IndexWriterConfig conf = newWriterConfig();
+ IndexWriter writer = new IndexWriter(dir, conf);
+
+ addDocs(writer, 3);
+ addDocs(writer, 3);
+ addDocs(writer, 3);
+ addDocs(writer, 3);
+
+ writer.close();
+
+ conf = newWriterConfig();
+ LogMergePolicy lmp = new LogDocMergePolicy();
+ lmp.setMaxMergeDocs(3);
+ conf.setMergePolicy(lmp);
+
+ writer = new IndexWriter(dir, conf);
+ writer.forceMerge(1);
+ writer.close();
+
+ SegmentInfos sis = new SegmentInfos();
+ sis.read(dir);
+ assertEquals(1, sis.size());
+ }
+
+ public void testAllSegmentsLarge() throws Exception {
+ Directory dir = new RAMDirectory();
+
+ IndexWriterConfig conf = newWriterConfig();
+ IndexWriter writer = new IndexWriter(dir, conf);
+
+ addDocs(writer, 3);
+ addDocs(writer, 3);
+ addDocs(writer, 3);
+
+ writer.close();
+
+ conf = newWriterConfig();
+ LogMergePolicy lmp = new LogDocMergePolicy();
+ lmp.setMaxMergeDocs(2);
+ conf.setMergePolicy(lmp);
+
+ writer = new IndexWriter(dir, conf);
+ writer.forceMerge(1);
+ writer.close();
+
+ SegmentInfos sis = new SegmentInfos();
+ sis.read(dir);
+ assertEquals(3, sis.size());
+ }
+
+ public void testOneLargeOneSmall() throws Exception {
+ Directory dir = new RAMDirectory();
+
+ IndexWriterConfig conf = newWriterConfig();
+ IndexWriter writer = new IndexWriter(dir, conf);
+
+ addDocs(writer, 3);
+ addDocs(writer, 5);
+ addDocs(writer, 3);
+ addDocs(writer, 5);
+
+ writer.close();
+
+ conf = newWriterConfig();
+ LogMergePolicy lmp = new LogDocMergePolicy();
+ lmp.setMaxMergeDocs(3);
+ conf.setMergePolicy(lmp);
+
+ writer = new IndexWriter(dir, conf);
+ writer.forceMerge(1);
+ writer.close();
+
+ SegmentInfos sis = new SegmentInfos();
+ sis.read(dir);
+ assertEquals(4, sis.size());
+ }
+
+ public void testMergeFactor() throws Exception {
+ Directory dir = new RAMDirectory();
+
+ IndexWriterConfig conf = newWriterConfig();
+ IndexWriter writer = new IndexWriter(dir, conf);
+
+ addDocs(writer, 3);
+ addDocs(writer, 3);
+ addDocs(writer, 3);
+ addDocs(writer, 3);
+ addDocs(writer, 5);
+ addDocs(writer, 3);
+ addDocs(writer, 3);
+
+ writer.close();
+
+ conf = newWriterConfig();
+ LogMergePolicy lmp = new LogDocMergePolicy();
+ lmp.setMaxMergeDocs(3);
+ lmp.setMergeFactor(2);
+ conf.setMergePolicy(lmp);
+
+ writer = new IndexWriter(dir, conf);
+ writer.forceMerge(1);
+ writer.close();
+
+ // Should only be 4 segments in the index, because of the merge factor and
+ // max merge docs settings.
+ SegmentInfos sis = new SegmentInfos();
+ sis.read(dir);
+ assertEquals(4, sis.size());
+ }
+
+ public void testSingleMergeableSegment() throws Exception {
+ Directory dir = new RAMDirectory();
+
+ IndexWriterConfig conf = newWriterConfig();
+ IndexWriter writer = new IndexWriter(dir, conf);
+
+ addDocs(writer, 3);
+ addDocs(writer, 5);
+ addDocs(writer, 3);
+
+ writer.close();
+
+ // delete the last document, so that the last segment is merged.
+ IndexReader r = IndexReader.open(dir, false);
+ r.deleteDocument(r.numDocs() - 1);
+ r.close();
+
+ conf = newWriterConfig();
+ LogMergePolicy lmp = new LogDocMergePolicy();
+ lmp.setMaxMergeDocs(3);
+ conf.setMergePolicy(lmp);
+
+ writer = new IndexWriter(dir, conf);
+ writer.forceMerge(1);
+ writer.close();
+
+ // Verify that the last segment does not have deletions.
+ SegmentInfos sis = new SegmentInfos();
+ sis.read(dir);
+ assertEquals(3, sis.size());
+ assertFalse(sis.info(2).hasDeletions());
+ }
+
+ public void testSingleNonMergeableSegment() throws Exception {
+ Directory dir = new RAMDirectory();
+
+ IndexWriterConfig conf = newWriterConfig();
+ IndexWriter writer = new IndexWriter(dir, conf);
+
+ addDocs(writer, 3);
+
+ writer.close();
+
+ conf = newWriterConfig();
+ LogMergePolicy lmp = new LogDocMergePolicy();
+ lmp.setMaxMergeDocs(3);
+ conf.setMergePolicy(lmp);
+
+ writer = new IndexWriter(dir, conf);
+ writer.forceMerge(1);
+ writer.close();
+
+ // Verify that the last segment does not have deletions.
+ SegmentInfos sis = new SegmentInfos();
+ sis.read(dir);
+ assertEquals(1, sis.size());
+ }
+
+ public void testSingleMergeableTooLargeSegment() throws Exception {
+ Directory dir = new RAMDirectory();
+
+ IndexWriterConfig conf = newWriterConfig();
+ IndexWriter writer = new IndexWriter(dir, conf);
+
+ addDocs(writer, 5);
+
+ writer.close();
+
+ // delete the last document
+ IndexReader r = IndexReader.open(dir, false);
+ r.deleteDocument(r.numDocs() - 1);
+ r.close();
+
+ conf = newWriterConfig();
+ LogMergePolicy lmp = new LogDocMergePolicy();
+ lmp.setMaxMergeDocs(2);
+ conf.setMergePolicy(lmp);
+
+ writer = new IndexWriter(dir, conf);
+ writer.forceMerge(1);
+ writer.close();
+
+ // Verify that the last segment does not have deletions.
+ SegmentInfos sis = new SegmentInfos();
+ sis.read(dir);
+ assertEquals(1, sis.size());
+ assertTrue(sis.info(0).hasDeletions());
+ }
+
+}
Property changes on: lucene/src/test/org/apache/lucene/index/TestSizeBoundedForceMerge.java
___________________________________________________________________
Added: svn:keywords
## -0,0 +1 ##
+Date Author Id Revision HeadURL
Added: svn:eol-style
## -0,0 +1 ##
+native
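The new TestSizeBoundedForceMerge above exercises the renamed size bound; a hedged sketch of the equivalent production-side configuration (the 1 MB bound, class, and method names are illustrative):

    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.LogByteSizeMergePolicy;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.util.Version;

    class SizeBoundedForceMergeSketch {
      static void mergeSmallSegmentsOnly(Directory dir) throws Exception {
        LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy();
        lmp.setMaxMergeMBForForcedMerge(1.0); // was setMaxMergeMBForOptimize
        IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_40, null)
            .setMergePolicy(lmp); // null analyzer: nothing is analyzed here
        IndexWriter writer = new IndexWriter(dir, conf);
        // Segments over the bound are skipped, so more than one segment
        // may legitimately remain after this call:
        writer.forceMerge(1);
        writer.close();
      }
    }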
Index: lucene/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/TestIndexWriterUnicode.java (working copy)
@@ -326,9 +326,9 @@
// Test multi segment
r.close();
- writer.optimize();
+ writer.forceMerge(1);
- // Test optimized single segment
+ // Test single segment
r = writer.getReader();
checkTermsOrder(r, allTerms, true);
r.close();
Index: lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/TestIndexReaderClone.java (working copy)
@@ -192,15 +192,15 @@
}
// open non-readOnly reader1 on multi-segment index, then
- // optimize the index, then clone to readOnly reader2
- public void testReadOnlyCloneAfterOptimize() throws Exception {
+ // fully merge the index, then clone to readOnly reader2
+ public void testReadOnlyCloneAfterFullMerge() throws Exception {
final Directory dir1 = newDirectory();
TestIndexReaderReopen.createIndex(random, dir1, true);
IndexReader reader1 = IndexReader.open(dir1, false);
IndexWriter w = new IndexWriter(dir1, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(random)));
- w.optimize();
+ w.forceMerge(1);
w.close();
IndexReader reader2 = reader1.clone(true);
assertTrue(isReadOnly(reader2));
Index: lucene/src/test/org/apache/lucene/index/TestNoMergePolicy.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestNoMergePolicy.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/TestNoMergePolicy.java (working copy)
@@ -31,7 +31,7 @@
public void testNoMergePolicy() throws Exception {
MergePolicy mp = NoMergePolicy.NO_COMPOUND_FILES;
assertNull(mp.findMerges(null));
- assertNull(mp.findMergesForOptimize(null, 0, null));
+ assertNull(mp.findForcedMerges(null, 0, null));
assertNull(mp.findMergesToExpungeDeletes(null));
assertFalse(mp.useCompoundFile(null, null));
mp.close();
Index: lucene/src/test/org/apache/lucene/index/TestDocCount.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestDocCount.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/TestDocCount.java (working copy)
@@ -42,7 +42,7 @@
IndexReader ir = iw.getReader();
verifyCount(ir);
ir.close();
- iw.optimize();
+ iw.forceMerge(1);
ir = iw.getReader();
verifyCount(ir);
ir.close();
Index: lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/TestStressIndexing2.java (working copy)
@@ -179,7 +179,7 @@
threads[i].join();
}
- // w.optimize();
+ // w.forceMerge(1);
//w.close();
      for (int i=0; i<threads.length; i++) {
    List<String> files = Arrays.asList(dir.listAll());
assertTrue(files.contains("_0.cfs"));
w.addDocument(doc);
- w.optimize();
+ w.forceMerge(1);
if (iter == 1) {
w.commit();
}
@@ -1450,10 +1446,10 @@
// NOTE: here we rely on "Windows" behavior, ie, even
// though IW wanted to delete _0.cfs since it was
- // optimized away, because we have a reader open
+ // merged away, because we have a reader open
// against this file, it should still be here:
assertTrue(files.contains("_0.cfs"));
- // optimize created this
+ // forceMerge created this
//assertTrue(files.contains("_2.cfs"));
w.deleteUnusedFiles();
@@ -1697,7 +1693,7 @@
}
s.close();
r.close();
- w.optimize();
+ w.forceMerge(1);
}
}
w.close();
Index: lucene/src/test/org/apache/lucene/index/TestTermVectorsWriter.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestTermVectorsWriter.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/TestTermVectorsWriter.java (working copy)
@@ -315,7 +315,7 @@
document.add(termVectorField);
writer.addDocument(document);
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
@@ -333,7 +333,7 @@
Directory[] indexDirs = {new MockDirectoryWrapper(random, new RAMDirectory(dir, newIOContext(random)))};
writer.addIndexes(indexDirs);
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
}
dir.close();
@@ -369,7 +369,7 @@
Field termVectorField = newField("termVector", "termVector", customType2);
document.add(termVectorField);
writer.addDocument(document);
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
@@ -414,7 +414,7 @@
for(int i=0;i<6;i++)
writer.addDocument(document);
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
@@ -452,7 +452,7 @@
// Make 2nd segment
iw.commit();
- iw.optimize();
+ iw.forceMerge(1);
iw.close();
dir.close();
}
@@ -476,7 +476,7 @@
// Make first segment
iw.commit();
- iw.optimize();
+ iw.forceMerge(1);
FieldType customType2 = new FieldType(StringField.TYPE_UNSTORED);
customType2.setStoreTermVectors(true);
@@ -484,7 +484,7 @@
iw.addDocument(document);
// Make 2nd segment
iw.commit();
- iw.optimize();
+ iw.forceMerge(1);
iw.close();
dir.close();
Index: lucene/src/test/org/apache/lucene/index/TestCodecs.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestCodecs.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/TestCodecs.java (working copy)
@@ -33,7 +33,6 @@
import org.apache.lucene.index.codecs.TermStats;
import org.apache.lucene.index.codecs.TermsConsumer;
import org.apache.lucene.index.codecs.lucene3x.Lucene3xCodec;
-import org.apache.lucene.index.codecs.lucene3x.Lucene3xPostingsFormat;
import org.apache.lucene.index.codecs.mocksep.MockSepPostingsFormat;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
@@ -371,7 +370,7 @@
assertEquals(2, results.length);
assertEquals(0, results[0].doc);
- writer.optimize();
+ writer.forceMerge(1);
-    // optimise to merge the segments.
+    // force merge the segments.
results = this.search(writer, pq, 5);
Index: lucene/src/test/org/apache/lucene/index/TestIndexWriterForceMerge.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestIndexWriterForceMerge.java (revision 0)
+++ lucene/src/test/org/apache/lucene/index/TestIndexWriterForceMerge.java (working copy)
@@ -0,0 +1,211 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util._TestUtil;
+
+public class TestIndexWriterForceMerge extends LuceneTestCase {
+ public void testPartialMerge() throws IOException {
+
+ MockDirectoryWrapper dir = newDirectory();
+
+ final Document doc = new Document();
+ doc.add(newField("content", "aaa", StringField.TYPE_UNSTORED));
+ final int incrMin = TEST_NIGHTLY ? 15 : 40;
+ for(int numDocs=10;numDocs<500;numDocs += _TestUtil.nextInt(random, incrMin, 5*incrMin)) {
+ LogDocMergePolicy ldmp = new LogDocMergePolicy();
+ ldmp.setMinMergeDocs(1);
+ ldmp.setMergeFactor(5);
+ IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
+ TEST_VERSION_CURRENT, new MockAnalyzer(random))
+ .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(2).setMergePolicy(
+ ldmp));
+      for(int j=0;j<numDocs;j++) {
+        writer.addDocument(doc);
+      }
+    assertTrue(reader.getSequentialSubReaders().length > 1);
+ reader.close();
+
+ SegmentInfos infos = new SegmentInfos();
+ infos.read(dir);
+ assertEquals(2, infos.size());
+ }
+ }
+
+ dir.close();
+ }
+}
Property changes on: lucene/src/test/org/apache/lucene/index/TestIndexWriterForceMerge.java
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
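testPartialMerge above also shows that the new name drops optimize()'s implicit target of a single segment; a hedged fragment of the caller-side idiom (writer construction elided, as in the surrounding tests):

    // "writer" is an assumed open IndexWriter, as in the tests above.
    writer.forceMerge(3); // blocks until at most 3 segments remain
    writer.close();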
Index: lucene/src/test/org/apache/lucene/index/TestFieldsReader.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestFieldsReader.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/TestFieldsReader.java (working copy)
@@ -203,7 +203,7 @@
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
for(int i=0;i<2;i++)
writer.addDocument(testDoc);
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
IndexReader reader = IndexReader.open(dir, true);
Index: lucene/src/test/org/apache/lucene/index/TestParallelReader.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestParallelReader.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/TestParallelReader.java (working copy)
@@ -144,64 +144,6 @@
dir2.close();
}
- public void testIsOptimized() throws IOException {
- Directory dir1 = getDir1(random);
- Directory dir2 = getDir2(random);
-
- // add another document to ensure that the indexes are not optimized
- IndexWriter modifier = new IndexWriter(
- dir1,
- newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
- setMergePolicy(newLogMergePolicy(10))
- );
- Document d = new Document();
- d.add(newField("f1", "v1", TextField.TYPE_STORED));
- modifier.addDocument(d);
- modifier.close();
-
- modifier = new IndexWriter(
- dir2,
- newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).
- setMergePolicy(newLogMergePolicy(10))
- );
- d = new Document();
- d.add(newField("f2", "v2", TextField.TYPE_STORED));
- modifier.addDocument(d);
- modifier.close();
-
-
- ParallelReader pr = new ParallelReader();
- pr.add(IndexReader.open(dir1, false));
- pr.add(IndexReader.open(dir2, false));
- assertFalse(pr.isOptimized());
- pr.close();
-
- modifier = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
- modifier.optimize();
- modifier.close();
-
- pr = new ParallelReader();
- pr.add(IndexReader.open(dir1, false));
- pr.add(IndexReader.open(dir2, false));
- // just one of the two indexes are optimized
- assertFalse(pr.isOptimized());
- pr.close();
-
-
- modifier = new IndexWriter(dir2, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
- modifier.optimize();
- modifier.close();
-
- pr = new ParallelReader();
- pr.add(IndexReader.open(dir1, false));
- pr.add(IndexReader.open(dir2, false));
- // now both indexes are optimized
- assertTrue(pr.isOptimized());
- pr.close();
- dir1.close();
- dir2.close();
- }
-
private void queryTest(Query query) throws IOException {
ScoreDoc[] parallelHits = parallel.search(query, null, 1000).scoreDocs;
ScoreDoc[] singleHits = single.search(query, null, 1000).scoreDocs;
Index: lucene/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/index/TestConsistentFieldNumbers.java (working copy)
@@ -77,7 +77,7 @@
assertEquals("f4", fis2.fieldInfo(3).name);
writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
sis = new SegmentInfos();
@@ -141,7 +141,7 @@
assertEquals("f4", fis2.fieldInfo(3).name);
writer = new IndexWriter(dir1, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random)));
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
sis = new SegmentInfos();
@@ -252,7 +252,7 @@
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setMergePolicy(
new LogByteSizeMergePolicy()).setInfoStream(new FailOnNonBulkMergesInfoStream()));
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
SegmentInfos sis = new SegmentInfos();
@@ -293,7 +293,7 @@
writer.addDocument(d);
}
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
SegmentInfos sis = new SegmentInfos();
Index: lucene/src/test/org/apache/lucene/util/fst/TestFSTs.java
===================================================================
--- lucene/src/test/org/apache/lucene/util/fst/TestFSTs.java (revision 1200448)
+++ lucene/src/test/org/apache/lucene/util/fst/TestFSTs.java (working copy)
@@ -1461,7 +1461,7 @@
w.addDocument(doc);
}
- //w.optimize();
+ //w.forceMerge(1);
// turn writer into reader:
final IndexReader r = w.getReader();
Index: lucene/src/java/org/apache/lucene/store/FlushInfo.java
===================================================================
--- lucene/src/java/org/apache/lucene/store/FlushInfo.java (revision 1200448)
+++ lucene/src/java/org/apache/lucene/store/FlushInfo.java (working copy)
@@ -18,7 +18,7 @@
*/
/**
- * A FlushInfo provides information required for a FLUSH context and other optimization operations.
+ * <p>A FlushInfo provides information required for a FLUSH context.
* It is used as part of an {@link IOContext} in case of FLUSH context.
*/
Index: lucene/src/java/org/apache/lucene/store/MergeInfo.java
===================================================================
--- lucene/src/java/org/apache/lucene/store/MergeInfo.java (revision 1200448)
+++ lucene/src/java/org/apache/lucene/store/MergeInfo.java (working copy)
@@ -17,7 +17,7 @@
*/
/**
- * A MergeInfo provides information required for a MERGE context and other optimization operations.
+ * <p>A MergeInfo provides information required for a MERGE context.
* It is used as part of an {@link IOContext} in case of MERGE context.
*/
@@ -29,7 +29,7 @@
public final boolean isExternal;
- public final boolean optimize;
+ public final int mergeMaxNumSegments;
/**
@@ -40,11 +40,11 @@
*
*/
- public MergeInfo(int totalDocCount, long estimatedMergeBytes, boolean isExternal, boolean optimize) {
+ public MergeInfo(int totalDocCount, long estimatedMergeBytes, boolean isExternal, int mergeMaxNumSegments) {
this.totalDocCount = totalDocCount;
this.estimatedMergeBytes = estimatedMergeBytes;
this.isExternal = isExternal;
- this.optimize = optimize;
+ this.mergeMaxNumSegments = mergeMaxNumSegments;
}
@@ -55,7 +55,7 @@
result = prime * result
+ (int) (estimatedMergeBytes ^ (estimatedMergeBytes >>> 32));
result = prime * result + (isExternal ? 1231 : 1237);
- result = prime * result + (optimize ? 1231 : 1237);
+ result = prime * result + mergeMaxNumSegments;
result = prime * result + totalDocCount;
return result;
}
@@ -73,7 +73,7 @@
return false;
if (isExternal != other.isExternal)
return false;
- if (optimize != other.optimize)
+ if (mergeMaxNumSegments != other.mergeMaxNumSegments)
return false;
if (totalDocCount != other.totalDocCount)
return false;
@@ -84,6 +84,6 @@
public String toString() {
return "MergeInfo [totalDocCount=" + totalDocCount
+ ", estimatedMergeBytes=" + estimatedMergeBytes + ", isExternal="
- + isExternal + ", optimize=" + optimize + "]";
+ + isExternal + ", mergeMaxNumSegments=" + mergeMaxNumSegments + "]";
}
-}
\ No newline at end of file
+}
Index: lucene/src/java/org/apache/lucene/search/MultiTermQuery.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/MultiTermQuery.java (revision 1200448)
+++ lucene/src/java/org/apache/lucene/search/MultiTermQuery.java (working copy)
@@ -273,14 +273,14 @@
/**
* Expert: Return the number of unique terms visited during execution of the query.
* If there are many of them, you may consider using another query type
- * or optimize your total term count in index.
+ * or reduce your total term count in the index.
* This method is not thread safe, be sure to only call it when no query is running!
* If you re-use the same query instance for another
* search, be sure to first reset the term counter
* with {@link #clearTotalNumberOfTerms}.
- * <p>On optimized indexes / no MultiReaders, you get the correct number of
+ * <p>On single-segment indexes / no MultiReaders, you get the correct number of
* unique terms for the whole index. Use this number to compare different queries.
- * For non-optimized indexes this number can also be achieved in
+ * For multi-segment indexes this number can also be achieved in
* non-constant-score mode. In constant-score mode you get the total number of
* terms seeked for all segments / sub-readers.
* @see #clearTotalNumberOfTerms
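A hedged sketch of how the counter documented above is typically read (field, term, and method names are illustrative; PrefixQuery is one MultiTermQuery subclass):

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.PrefixQuery;

    class TermCountSketch {
      // On a single-segment index the returned count is exact for the
      // whole index, per the javadoc above.
      static int visitedTerms(IndexSearcher searcher) throws Exception {
        PrefixQuery query = new PrefixQuery(new Term("body", "lucen"));
        query.clearTotalNumberOfTerms(); // reset before measuring
        searcher.search(query, 10);
        return query.getTotalNumberOfTerms();
      }
    }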
Index: lucene/src/java/org/apache/lucene/index/UpgradeIndexMergePolicy.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/UpgradeIndexMergePolicy.java (revision 1200448)
+++ lucene/src/java/org/apache/lucene/index/UpgradeIndexMergePolicy.java (working copy)
@@ -27,24 +27,24 @@
import java.util.HashMap;
/** This {@link MergePolicy} is used for upgrading all existing segments of
- * an index when calling {@link IndexWriter#optimize()}.
+ * an index when calling {@link IndexWriter#forceMerge(int)}.
* All other methods delegate to the base {@code MergePolicy} given to the constructor.
* This allows for an as-cheap-as possible upgrade of an older index by only upgrading segments that
- * are created by previous Lucene versions. Optimize does no longer really optimize
- * it is just used to "optimize" older segment versions away.
+ * are created by previous Lucene versions. forceMerge no longer really merges;
+ * it is just used to "forceMerge" older segment versions away.
* <p>In general one would use {@link IndexUpgrader}, but for a fully customizable upgrade,
- * you can use this like any other {@code MergePolicy} and call {@link IndexWriter#optimize()}:
+ * you can use this like any other {@code MergePolicy} and call {@link IndexWriter#forceMerge(int)}:
* <pre>
* IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_XX, new KeywordAnalyzer());
* iwc.setMergePolicy(new UpgradeIndexMergePolicy(iwc.getMergePolicy()));
* IndexWriter w = new IndexWriter(dir, iwc);
- * w.optimize();
+ * w.forceMerge(1);
* w.close();
* </pre>
* <p><b>Warning:</b> This merge policy may reorder documents if the index was partially
- * upgraded before calling optimize (e.g., documents were added). If your application relies
+ * upgraded before calling forceMerge (e.g., documents were added). If your application relies
* on "monotonicity" of doc IDs (which means that the order in which the documents
- * were added to the index is preserved), do a full optimize instead. Please note, the
+ * were added to the index is preserved), do a forceMerge(1) instead. Please note, the
* delegate {@code MergePolicy} may also reorder documents.
* @lucene.experimental
* @see IndexUpgrader
@@ -53,7 +53,7 @@
protected final MergePolicy base;
- /** Wrap the given {@link MergePolicy} and intercept optimize requests to
+ /** Wrap the given {@link MergePolicy} and intercept forceMerge requests to
* only upgrade segments written with previous Lucene versions. */
public UpgradeIndexMergePolicy(MergePolicy base) {
this.base = base;
@@ -80,22 +80,22 @@
}
@Override
- public MergeSpecification findMergesForOptimize(SegmentInfos segmentInfos, int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToOptimize) throws CorruptIndexException, IOException {
+ public MergeSpecification findForcedMerges(SegmentInfos segmentInfos, int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToMerge) throws CorruptIndexException, IOException {
// first find all old segments
final Map<SegmentInfo,Boolean> oldSegments = new HashMap<SegmentInfo,Boolean>();
for (final SegmentInfo si : segmentInfos) {
- final Boolean v =segmentsToOptimize.get(si);
+ final Boolean v = segmentsToMerge.get(si);
if (v != null && shouldUpgradeSegment(si)) {
oldSegments.put(si, v);
}
}
- if (verbose()) message("findMergesForOptimize: segmentsToUpgrade=" + oldSegments);
+ if (verbose()) message("findForcedMerges: segmentsToUpgrade=" + oldSegments);
if (oldSegments.isEmpty())
return null;
- MergeSpecification spec = base.findMergesForOptimize(segmentInfos, maxSegmentCount, oldSegments);
+ MergeSpecification spec = base.findForcedMerges(segmentInfos, maxSegmentCount, oldSegments);
if (spec != null) {
// remove all segments that are in merge specification from oldSegments,
@@ -108,7 +108,7 @@
if (!oldSegments.isEmpty()) {
if (verbose())
- message("findMergesForOptimize: " + base.getClass().getSimpleName() +
+ message("findForcedMerges: " + base.getClass().getSimpleName() +
" does not want to merge all old segments, merge remaining ones into new segment: " + oldSegments);
final List<SegmentInfo> newInfos = new ArrayList<SegmentInfo>();
for (final SegmentInfo si : segmentInfos) {
Index: lucene/src/java/org/apache/lucene/index/SnapshotDeletionPolicy.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/SnapshotDeletionPolicy.java (revision 1200448)
+++ lucene/src/java/org/apache/lucene/index/SnapshotDeletionPolicy.java (working copy)
@@ -136,8 +136,8 @@
}
@Override
- public boolean isOptimized() {
- return cp.isOptimized();
+ public int getSegmentCount() {
+ return cp.getSegmentCount();
}
}
@@ -340,7 +340,7 @@
* NOTE: while the snapshot is held, the files it references will not
* be deleted, which will consume additional disk space in your index. If you
* take a snapshot at a particularly bad time (say just before you call
- * optimize()) then in the worst case this could consume an extra 1X of your
+ * forceMerge) then in the worst case this could consume an extra 1X of your
* total index size, until you release the snapshot.
*
* @param id
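Commit-side callers migrate the same way as the delegating override above; a one-method sketch (class and method names ours):

    import org.apache.lucene.index.IndexCommit;

    class CommitCheckSketch {
      // Replaces the removed IndexCommit.isOptimized(): a commit is
      // "fully merged" exactly when it holds a single segment.
      static boolean isFullyMerged(IndexCommit commit) {
        return commit.getSegmentCount() == 1;
      }
    }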
Index: lucene/src/java/org/apache/lucene/index/MergePolicy.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/MergePolicy.java (revision 1200448)
+++ lucene/src/java/org/apache/lucene/index/MergePolicy.java (working copy)
@@ -30,8 +30,7 @@
/**
* Expert: a MergePolicy determines the sequence of
- * primitive merge operations to be used for overall merge
- * and optimize operations.
+ * primitive merge operations.
*
* Whenever the segments in an index have been altered by
* {@link IndexWriter}, either the addition of a newly
@@ -42,8 +41,8 @@
* merges that are now required. This method returns a
* {@link MergeSpecification} instance describing the set of
* merges that should be done, or null if no merges are
- * necessary. When IndexWriter.optimize is called, it calls
- * {@link #findMergesForOptimize} and the MergePolicy should
+ * necessary. When IndexWriter.forceMerge is called, it calls
+ * {@link #findForcedMerges(SegmentInfos,int,Map)} and the MergePolicy should
* then return the necessary merges.
*
* Note that the policy can return more than one merge at
@@ -69,11 +68,10 @@
public static class OneMerge {
SegmentInfo info; // used by IndexWriter
- boolean optimize; // used by IndexWriter
boolean registerDone; // used by IndexWriter
long mergeGen; // used by IndexWriter
boolean isExternal; // used by IndexWriter
- int maxNumSegmentsOptimize; // used by IndexWriter
+ int maxNumSegments = -1; // used by IndexWriter
public long estimatedMergeBytes; // used by IndexWriter
List<SegmentReader> readers; // used by IndexWriter
List<Bits> readerLiveDocs; // used by IndexWriter
@@ -160,8 +158,8 @@
}
if (info != null)
b.append(" into ").append(info.name);
- if (optimize)
- b.append(" [optimize]");
+ if (maxNumSegments != -1)
+ b.append(" [maxNumSegments=" + maxNumSegments + "]");
if (aborted) {
b.append(" [ABORTED]");
}
@@ -193,7 +191,7 @@
}
public MergeInfo getMergeInfo() {
- return new MergeInfo(totalDocCount, estimatedMergeBytes, isExternal, optimize);
+ return new MergeInfo(totalDocCount, estimatedMergeBytes, isExternal, maxNumSegments);
}
}
@@ -290,9 +288,9 @@
throws CorruptIndexException, IOException;
/**
- * Determine what set of merge operations is necessary in order to optimize
- * the index. {@link IndexWriter} calls this when its
- * {@link IndexWriter#optimize()} method is called. This call is always
+ * Determine what set of merge operations is necessary in
+ * order to merge to <= the specified segment count. {@link IndexWriter} calls this when its
+ * {@link IndexWriter#forceMerge} method is called. This call is always
* synchronized on the {@link IndexWriter} instance so only one thread at a
* time will call this method.
*
@@ -301,17 +299,17 @@
* @param maxSegmentCount
* requested maximum number of segments in the index (currently this
* is always 1)
- * @param segmentsToOptimize
+ * @param segmentsToMerge
* contains the specific SegmentInfo instances that must be merged
* away. This may be a subset of all
* SegmentInfos. If the value is True for a
* given SegmentInfo, that means this segment was
* an original segment present in the
- * to-be-optimized index; else, it was a segment
+ * to-be-merged index; else, it was a segment
* produced by a cascaded merge.
*/
- public abstract MergeSpecification findMergesForOptimize(
- SegmentInfos segmentInfos, int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToOptimize)
+ public abstract MergeSpecification findForcedMerges(
+ SegmentInfos segmentInfos, int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToMerge)
throws CorruptIndexException, IOException;
/**
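For custom merge policies, only the method name and parameter name change; a hedged fragment of a delegating override in the style of UpgradeIndexMergePolicy above (the wrapped "base" policy is assumed, not from the patch):

    // Inside a MergePolicy subclass wrapping another policy "base":
    @Override
    public MergeSpecification findForcedMerges(SegmentInfos segmentInfos,
        int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToMerge)
        throws CorruptIndexException, IOException {
      // Formerly findMergesForOptimize(...); the contract is unchanged.
      return base.findForcedMerges(segmentInfos, maxSegmentCount, segmentsToMerge);
    }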
Index: lucene/src/java/org/apache/lucene/index/LogMergePolicy.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/LogMergePolicy.java (revision 1200448)
+++ lucene/src/java/org/apache/lucene/index/LogMergePolicy.java (working copy)
@@ -70,7 +70,7 @@
protected long maxMergeSize;
// Although the core MPs set it explicitly, we must default in case someone
// out there wrote his own LMP ...
- protected long maxMergeSizeForOptimize = Long.MAX_VALUE;
+ protected long maxMergeSizeForForcedMerge = Long.MAX_VALUE;
protected int maxMergeDocs = DEFAULT_MAX_MERGE_DOCS;
protected double noCFSRatio = DEFAULT_NO_CFS_RATIO;
@@ -123,10 +123,10 @@
/** Determines how often segment indices are merged by
* addDocument(). With smaller values, less RAM is used
- * while indexing, and searches on unoptimized indices are
+ * while indexing, and searches are
* faster, but indexing speed is slower. With larger
* values, more RAM is used during indexing, and while
- * searches on unoptimized indices are slower, indexing is
+ * searches are slower, indexing is
* faster. Thus larger values (> 10) are best for batch
* index creation, and smaller values (< 10) for indices
* that are interactively maintained. */
@@ -207,29 +207,29 @@
}
}
- protected boolean isOptimized(SegmentInfos infos, int maxNumSegments, Map<SegmentInfo,Boolean> segmentsToOptimize) throws IOException {
+ protected boolean isMerged(SegmentInfos infos, int maxNumSegments, Map<SegmentInfo,Boolean> segmentsToMerge) throws IOException {
final int numSegments = infos.size();
- int numToOptimize = 0;
- SegmentInfo optimizeInfo = null;
+ int numToMerge = 0;
+ SegmentInfo mergeInfo = null;
boolean segmentIsOriginal = false;
-    for(int i=0;i<numSegments;i++) {
    final List<SegmentInfo> segments = infos.asList();
@@ -256,14 +256,15 @@
int start = last - 1;
while (start >= 0) {
SegmentInfo info = infos.info(start);
- if (size(info) > maxMergeSizeForOptimize || sizeDocs(info) > maxMergeDocs) {
+ if (size(info) > maxMergeSizeForForcedMerge || sizeDocs(info) > maxMergeDocs) {
if (verbose()) {
- message("optimize: skip segment=" + info + ": size is > maxMergeSize (" + maxMergeSizeForOptimize + ") or sizeDocs is > maxMergeDocs (" + maxMergeDocs + ")");
+ message("findForcedMergesSizeLimit: skip segment=" + info + ": size is > maxMergeSize (" + maxMergeSizeForForcedMerge + ") or sizeDocs is > maxMergeDocs (" + maxMergeDocs + ")");
}
// need to skip that segment + add a merge for the 'right' segments,
- // unless there is only 1 which is optimized.
- if (last - start - 1 > 1 || (start != last - 1 && !isOptimized(infos.info(start + 1)))) {
- // there is more than 1 segment to the right of this one, or an unoptimized single segment.
+ // unless there is only 1 which is merged.
+ if (last - start - 1 > 1 || (start != last - 1 && !isMerged(infos.info(start + 1)))) {
+ // there is more than 1 segment to the right of
+ // this one, or a mergeable single segment.
spec.add(new OneMerge(segments.subList(start + 1, last)));
}
last = start;
@@ -275,8 +276,9 @@
--start;
}
- // Add any left-over segments, unless there is just 1 already optimized.
- if (last > 0 && (++start + 1 < last || !isOptimized(infos.info(start)))) {
+ // Add any left-over segments, unless there is just 1
+ // already fully merged
+ if (last > 0 && (++start + 1 < last || !isMerged(infos.info(start)))) {
spec.add(new OneMerge(segments.subList(start, last)));
}
@@ -284,11 +286,11 @@
}
/**
- * Returns the merges necessary to optimize the index. This method constraints
+ * Returns the merges necessary to forceMerge the index. This method constraints
* the returned merges only by the {@code maxNumSegments} parameter, and
* guaranteed that exactly that number of segments will remain in the index.
*/
- private MergeSpecification findMergesForOptimizeMaxNumSegments(SegmentInfos infos, int maxNumSegments, int last) throws IOException {
+ private MergeSpecification findForcedMergesMaxNumSegments(SegmentInfos infos, int maxNumSegments, int last) throws IOException {
MergeSpecification spec = new MergeSpecification();
final List segments = infos.asList();
@@ -304,9 +306,9 @@
if (0 == spec.merges.size()) {
if (maxNumSegments == 1) {
- // Since we must optimize down to 1 segment, the
+ // Since we must merge down to 1 segment, the
// choice is simple:
- if (last > 1 || !isOptimized(infos.info(0))) {
+ if (last > 1 || !isMerged(infos.info(0))) {
spec.add(new OneMerge(segments.subList(0, last)));
}
} else if (last > maxNumSegments) {
@@ -319,7 +321,7 @@
// We must merge this many segments to leave
// maxNumSegments in the index (from when
- // optimize was first kicked off):
+ // forceMerge was first kicked off):
final int finalMergeSize = last - maxNumSegments + 1;
// Consider all possible starting points:
@@ -342,10 +344,9 @@
return spec.merges.size() == 0 ? null : spec;
}
- /** Returns the merges necessary to optimize the index.
- * This merge policy defines "optimized" to mean only the
- * requested number of segments is left in the index, and
- * respects the {@link #maxMergeSizeForOptimize} setting.
+ /** Returns the merges necessary to merge the index down
+ * to a specified number of segments.
+ * This respects the {@link #maxMergeSizeForForcedMerge} setting.
* By default, and assuming {@code maxNumSegments=1}, only
* one segment will be left in the index, where that segment
* has no deletions pending nor separate norms, and it is in
@@ -354,30 +355,30 @@
* (mergeFactor at a time) so the {@link MergeScheduler}
* in use may make use of concurrency. */
@Override
- public MergeSpecification findMergesForOptimize(SegmentInfos infos,
- int maxNumSegments, Map<SegmentInfo,Boolean> segmentsToOptimize) throws IOException {
+ public MergeSpecification findForcedMerges(SegmentInfos infos,
+ int maxNumSegments, Map<SegmentInfo,Boolean> segmentsToMerge) throws IOException {
assert maxNumSegments > 0;
if (verbose()) {
- message("findMergesForOptimize: maxNumSegs=" + maxNumSegments + " segsToOptimize="+ segmentsToOptimize);
+ message("findForcedMerges: maxNumSegs=" + maxNumSegments + " segsToMerge="+ segmentsToMerge);
}
- // If the segments are already optimized (e.g. there's only 1 segment), or
- // there are <maxNumSegments:
+ // If the segments are already merged (e.g. there's only 1 segment), or
+ // there are <maxNumSegments:
- if (isOptimized(infos, maxNumSegments, segmentsToOptimize)) {
+ if (isMerged(infos, maxNumSegments, segmentsToMerge)) {
if (verbose()) {
- message("already optimized; skip");
+ message("already merged; skip");
}
return null;
}
// Find the newest (rightmost) segment that needs to
- // be optimized (other segments may have been flushed
- // since optimize started):
+ // be merged (other segments may have been flushed
+ // since forceMerge started):
int last = infos.size();
while(last > 0) {
final SegmentInfo info = infos.info(--last);
- if (segmentsToOptimize.get(info) != null) {
+ if (segmentsToMerge.get(info) != null) {
last++;
break;
}
@@ -390,8 +391,8 @@
return null;
}
- // There is only one segment already, and it is optimized
- if (maxNumSegments == 1 && last == 1 && isOptimized(infos.info(0))) {
+ // There is only one segment already, and it is merged
+ if (maxNumSegments == 1 && last == 1 && isMerged(infos.info(0))) {
if (verbose()) {
message("already 1 seg; skip");
}
@@ -402,16 +403,16 @@
boolean anyTooLarge = false;
for (int i = 0; i < last; i++) {
SegmentInfo info = infos.info(i);
- if (size(info) > maxMergeSizeForOptimize || sizeDocs(info) > maxMergeDocs) {
+ if (size(info) > maxMergeSizeForForcedMerge || sizeDocs(info) > maxMergeDocs) {
anyTooLarge = true;
break;
}
}
if (anyTooLarge) {
- return findMergesForOptimizeSizeLimit(infos, maxNumSegments, last);
+ return findForcedMergesSizeLimit(infos, maxNumSegments, last);
} else {
- return findMergesForOptimizeMaxNumSegments(infos, maxNumSegments, last);
+ return findForcedMergesMaxNumSegments(infos, maxNumSegments, last);
}
}
@@ -661,7 +662,7 @@
sb.append("minMergeSize=").append(minMergeSize).append(", ");
sb.append("mergeFactor=").append(mergeFactor).append(", ");
sb.append("maxMergeSize=").append(maxMergeSize).append(", ");
- sb.append("maxMergeSizeForOptimize=").append(maxMergeSizeForOptimize).append(", ");
+ sb.append("maxMergeSizeForForcedMerge=").append(maxMergeSizeForForcedMerge).append(", ");
sb.append("calibrateSizeByDeletes=").append(calibrateSizeByDeletes).append(", ");
sb.append("maxMergeDocs=").append(maxMergeDocs).append(", ");
sb.append("useCompoundFile=").append(useCompoundFile).append(", ");
Index: lucene/src/java/org/apache/lucene/index/MultiReader.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/MultiReader.java (revision 1200448)
+++ lucene/src/java/org/apache/lucene/index/MultiReader.java (working copy)
@@ -234,12 +234,6 @@
}
@Override
- public boolean isOptimized() {
- ensureOpen();
- return false;
- }
-
- @Override
public int numDocs() {
// Don't call ensureOpen() here (it could affect performance)
// NOTE: multiple threads may wind up init'ing
Index: lucene/src/java/org/apache/lucene/index/IndexCommit.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/IndexCommit.java (revision 1200448)
+++ lucene/src/java/org/apache/lucene/index/IndexCommit.java (working copy)
@@ -75,8 +75,8 @@
public abstract boolean isDeleted();
- /** Returns true if this commit is an optimized index. */
- public abstract boolean isOptimized();
+ /** Returns number of segments referenced by this commit. */
+ public abstract int getSegmentCount();
/** Two IndexCommits are equal if both their Directory and versions are equal. */
@Override
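Callers that used to test IndexCommit.isOptimized() can get the closest equivalent from the new getSegmentCount() API. A minimal sketch, assuming a read-only reader over a caller-supplied Directory (the helper class and method names are illustrative, not part of this patch); note the old check additionally required no deletions:

    import java.io.IOException;

    import org.apache.lucene.index.IndexCommit;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.store.Directory;

    public class SegmentCountCheck {
      // Closest replacement for the removed IndexCommit.isOptimized():
      // a commit that references exactly one segment.
      public static boolean isSingleSegment(Directory dir) throws IOException {
        IndexReader reader = IndexReader.open(dir, true); // read-only
        try {
          return reader.getIndexCommit().getSegmentCount() == 1;
        } finally {
          reader.close();
        }
      }
    }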
Index: lucene/src/java/org/apache/lucene/index/FilterIndexReader.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/FilterIndexReader.java (revision 1200448)
+++ lucene/src/java/org/apache/lucene/index/FilterIndexReader.java (working copy)
@@ -431,12 +431,6 @@
}
@Override
- public boolean isOptimized() {
- ensureOpen();
- return in.isOptimized();
- }
-
- @Override
public IndexReader[] getSequentialSubReaders() {
return in.getSequentialSubReaders();
}
Index: lucene/src/java/org/apache/lucene/index/TieredMergePolicy.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/TieredMergePolicy.java (revision 1200448)
+++ lucene/src/java/org/apache/lucene/index/TieredMergePolicy.java (working copy)
@@ -62,7 +62,7 @@
* NOTE: This policy always merges by byte size
* of the segments, always pro-rates by percent deletes,
* and does not apply any maximum segment size during
- * optimize (unlike {@link LogByteSizeMergePolicy}).
+ * forceMerge (unlike {@link LogByteSizeMergePolicy}).
*
* @lucene.experimental
*/
@@ -88,7 +88,7 @@
/** Maximum number of segments to be merged at a time
* during "normal" merging. For explicit merging (eg,
- * optimize or expungeDeletes was called), see {@link
+ * forceMerge or expungeDeletes was called), see {@link
* #setMaxMergeAtOnceExplicit}. Default is 10. */
public TieredMergePolicy setMaxMergeAtOnce(int v) {
if (v < 2) {
@@ -107,7 +107,7 @@
// if user calls IW.maybeMerge "explicitly"
/** Maximum number of segments to be merged at a time,
- * during optimize or expungeDeletes. Default is 30. */
+ * during forceMerge or expungeDeletes. Default is 30. */
public TieredMergePolicy setMaxMergeAtOnceExplicit(int v) {
if (v < 2) {
throw new IllegalArgumentException("maxMergeAtOnceExplicit must be > 1 (got " + v + ")");
@@ -478,23 +478,23 @@
}
@Override
- public MergeSpecification findMergesForOptimize(SegmentInfos infos, int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToOptimize) throws IOException {
+ public MergeSpecification findForcedMerges(SegmentInfos infos, int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToMerge) throws IOException {
if (verbose()) {
- message("findMergesForOptimize maxSegmentCount=" + maxSegmentCount + " infos=" + writer.get().segString(infos) + " segmentsToOptimize=" + segmentsToOptimize);
+ message("findForcedMerges maxSegmentCount=" + maxSegmentCount + " infos=" + writer.get().segString(infos) + " segmentsToMerge=" + segmentsToMerge);
}
List<SegmentInfo> eligible = new ArrayList<SegmentInfo>();
- boolean optimizeMergeRunning = false;
+ boolean forceMergeRunning = false;
final Collection<SegmentInfo> merging = writer.get().getMergingSegments();
boolean segmentIsOriginal = false;
for(SegmentInfo info : infos) {
- final Boolean isOriginal = segmentsToOptimize.get(info);
+ final Boolean isOriginal = segmentsToMerge.get(info);
if (isOriginal != null) {
segmentIsOriginal = isOriginal;
if (!merging.contains(info)) {
eligible.add(info);
} else {
- optimizeMergeRunning = true;
+ forceMergeRunning = true;
}
}
}
@@ -504,9 +504,9 @@
}
if ((maxSegmentCount > 1 && eligible.size() <= maxSegmentCount) ||
- (maxSegmentCount == 1 && eligible.size() == 1 && (!segmentIsOriginal || isOptimized(eligible.get(0))))) {
+ (maxSegmentCount == 1 && eligible.size() == 1 && (!segmentIsOriginal || isMerged(eligible.get(0))))) {
if (verbose()) {
- message("already optimized");
+ message("already merged");
}
return null;
}
@@ -515,7 +515,7 @@
if (verbose()) {
message("eligible=" + eligible);
- message("optimizeMergeRunning=" + optimizeMergeRunning);
+ message("forceMergeRunning=" + forceMergeRunning);
}
int end = eligible.size();
@@ -535,7 +535,7 @@
end -= maxMergeAtOnceExplicit;
}
- if (spec == null && !optimizeMergeRunning) {
+ if (spec == null && !forceMergeRunning) {
// Do final merge
final int numToMerge = end - maxSegmentCount + 1;
final OneMerge merge = new OneMerge(eligible.subList(end-numToMerge, end));
@@ -580,7 +580,7 @@
while(start < eligible.size()) {
// Don't enforce max merged size here: app is explicitly
// calling expungeDeletes, and knows this may take a
- // long time / produce big segments (like optimize):
+ // long time / produce big segments (like forceMerge):
final int end = Math.min(start + maxMergeAtOnceExplicit, eligible.size());
if (spec == null) {
spec = new MergeSpecification();
@@ -619,7 +619,7 @@
public void close() {
}
- private boolean isOptimized(SegmentInfo info)
+ private boolean isMerged(SegmentInfo info)
throws IOException {
IndexWriter w = writer.get();
assert w != null;
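For reference, the renamed TieredMergePolicy knobs are wired up the same way as before. A minimal sketch, assuming the caller supplies the Version and Analyzer (the values shown are just the documented defaults, not recommendations):

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.TieredMergePolicy;
    import org.apache.lucene.util.Version;

    public class TieredConfigSketch {
      public static IndexWriterConfig newConfig(Version version, Analyzer analyzer) {
        TieredMergePolicy tmp = new TieredMergePolicy();
        tmp.setMaxMergeAtOnce(10);         // "normal" background merging
        tmp.setMaxMergeAtOnceExplicit(30); // forceMerge / expungeDeletes
        return new IndexWriterConfig(version, analyzer).setMergePolicy(tmp);
      }
    }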
Index: lucene/src/java/org/apache/lucene/index/NoMergePolicy.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/NoMergePolicy.java (revision 1200448)
+++ lucene/src/java/org/apache/lucene/index/NoMergePolicy.java (working copy)
@@ -58,8 +58,8 @@
throws CorruptIndexException, IOException { return null; }
@Override
- public MergeSpecification findMergesForOptimize(SegmentInfos segmentInfos,
- int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToOptimize)
+ public MergeSpecification findForcedMerges(SegmentInfos segmentInfos,
+ int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToMerge)
throws CorruptIndexException, IOException { return null; }
@Override
Index: lucene/src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/IndexWriter.java (revision 1200448)
+++ lucene/src/java/org/apache/lucene/index/IndexWriter.java (working copy)
@@ -101,11 +101,6 @@
addDocument calls (see below
for changing the {@link MergeScheduler}).
- If an index will not have more documents added for a while and optimal search
- performance is desired, then either the full {@link #optimize() optimize}
- method or partial {@link #optimize(int)} method should be
- called before the index is closed.
-
Opening an IndexWriter creates a lock file for the directory in use. Trying to open
another IndexWriter on the same directory will lead to a
{@link LockObtainFailedException}. The {@link LockObtainFailedException}
@@ -134,9 +129,8 @@
The {@link MergePolicy} is invoked whenever there are
changes to the segments in the index. Its role is to
select which merges to do, if any, and return a {@link
- MergePolicy.MergeSpecification} describing the merges. It
- also selects merges to do for optimize(). (The default is
- {@link LogByteSizeMergePolicy}. Then, the {@link
+ MergePolicy.MergeSpecification} describing the merges.
+ The default is {@link LogByteSizeMergePolicy}. Then, the {@link
MergeScheduler} is invoked with the requested merges and
it decides when and how to run the merges. The default is
{@link ConcurrentMergeScheduler}.
@@ -223,8 +217,9 @@
private DocumentsWriter docWriter;
final IndexFileDeleter deleter;
- private Map<SegmentInfo,Boolean> segmentsToOptimize = new HashMap<SegmentInfo,Boolean>(); // used by optimize to note those needing optimization
- private int optimizeMaxNumSegments;
+ // used by forceMerge to note those needing merging
+ private Map<SegmentInfo,Boolean> segmentsToMerge = new HashMap<SegmentInfo,Boolean>();
+ private int mergeMaxNumSegments;
private Lock writeLock;
@@ -1215,7 +1210,7 @@
* readers/searchers are open against the index, and up to
* 2X the size of all segments being merged when
* readers/searchers are open against the index (see
- * {@link #optimize()} for details). The sequence of
+ * {@link #forceMerge(int)} for details). The sequence of
* primitive merge operations performed is governed by the
* merge policy.
*
@@ -1565,55 +1560,52 @@
final InfoStream infoStream;
/**
- * Requests an "optimize" operation on an index, priming the index
- * for the fastest available search. Traditionally this has meant
- * merging all segments into a single segment as is done in the
- * default merge policy, but individual merge policies may implement
- * optimize in different ways.
+ * Forces merge policy to merge segments until there's <=
+ * maxNumSegments. The actual merges to be
+ * executed are determined by the {@link MergePolicy}.
*
- * Optimize is a very costly operation, so you
- * should only do it if your search performance really
- * requires it. Many search applications do fine never
- * calling optimize.
+ * This is a horribly costly operation, especially when
+ * you pass a small {@code maxNumSegments}; usually you
+ * should only call this if the index is static (will no
+ * longer be changed).
*
- * Note that optimize requires 2X the index size free
+ * Note that this requires up to 2X the index size free
* space in your Directory (3X if you're using compound
* file format). For example, if your index size is 10 MB
- * then you need 20 MB free for optimize to complete (30
+ * then you need up to 20 MB free for this to complete (30
* MB if you're using compound file format). Also,
- * it's best to call {@link #commit()} after the optimize
- * completes to allow IndexWriter to free up disk space.
+ * it's best to call {@link #commit()} afterwards,
+ * to allow IndexWriter to free up disk space.
*
- * If some but not all readers re-open while an
- * optimize is underway, this will cause > 2X temporary
+ *
If some but not all readers re-open while merging
+ * is underway, this will cause > 2X temporary
* space to be consumed as those new readers will then
- * hold open the partially optimized segments at that
- * time. It is best not to re-open readers while optimize
- * is running.
+ * hold open the temporary segments at that time. It is
+ * best not to re-open readers while merging is running.
*
* The actual temporary usage could be much less than
* these figures (it depends on many factors).
*
- * In general, once the optimize completes, the total size of the
+ * In general, once this completes, the total size of the
* index will be less than the size of the starting index.
* It could be quite a bit smaller (if there were many
* pending deletes) or just slightly smaller.
*
- * If an Exception is hit during optimize(), for example
+ * If an Exception is hit, for example
* due to disk full, the index will not be corrupt and no
* documents will have been lost. However, it may have
- * been partially optimized (some segments were merged but
+ * been partially merged (some segments were merged but
* not all), and it's possible that one of the segments in
* the index will be in non-compound format even when
* using compound file format. This will occur when the
* Exception is hit during conversion of the segment into
* compound format.
*
- * This call will optimize those segments present in
+ * This call will merge those segments present in
* the index when the call started. If other threads are
* still adding documents and flushing segments, those
- * newly created segments will not be optimized unless you
- * call optimize again.
+ * newly created segments will not be merged unless you
+ * call forceMerge again.
*
* NOTE: if this method hits an OutOfMemoryError
* you should immediately close the writer. See above for details.
- * you should immediately close the writer. See above for details.
- *
* @param maxNumSegments maximum number of segments left
- * in the index after optimization finishes
- */
- public void optimize(int maxNumSegments) throws CorruptIndexException, IOException {
- optimize(maxNumSegments, true);
+ * in the index after merging finishes
+ */
+ public void forceMerge(int maxNumSegments) throws CorruptIndexException, IOException {
+ forceMerge(maxNumSegments, true);
}
- /** Just like {@link #optimize()}, except you can specify
- * whether the call should block until the optimize
- * completes. This is only meaningful with a
+ /** Just like {@link #forceMerge(int)}, except you can
+ * specify whether the call should block until
+ * all merging completes. This is only meaningful with a
* {@link MergeScheduler} that is able to run merges in
* background threads.
*
- * NOTE: if this method hits an OutOfMemoryError
- * you should immediately close the writer. See above for details.
+ * NOTE: if this method hits an OutOfMemoryError
+ * you should immediately close the writer. See above for details.
*/
- public void optimize(boolean doWait) throws CorruptIndexException, IOException {
- optimize(1, doWait);
- }
-
- /** Just like {@link #optimize(int)}, except you can
- * specify whether the call should block until the
- * optimize completes. This is only meaningful with a
- * {@link MergeScheduler} that is able to run merges in
- * background threads.
- *
- * NOTE: if this method hits an OutOfMemoryError
- * you should immediately close the writer. See above for details.
- */
- public void optimize(int maxNumSegments, boolean doWait) throws CorruptIndexException, IOException {
+ public void forceMerge(int maxNumSegments, boolean doWait) throws CorruptIndexException, IOException {
ensureOpen();
if (maxNumSegments < 1)
throw new IllegalArgumentException("maxNumSegments must be >= 1; got " + maxNumSegments);
if (infoStream != null) {
- infoStream.message("IW", "optimize: index now " + segString());
- infoStream.message("IW", "now flush at optimize");
+ infoStream.message("IW", "forceMerge: index now " + segString());
+ infoStream.message("IW", "now flush at forceMerge");
}
flush(true, true);
synchronized(this) {
resetMergeExceptions();
- segmentsToOptimize.clear();
+ segmentsToMerge.clear();
for(SegmentInfo info : segmentInfos) {
- segmentsToOptimize.put(info, Boolean.TRUE);
+ segmentsToMerge.put(info, Boolean.TRUE);
}
- optimizeMaxNumSegments = maxNumSegments;
+ mergeMaxNumSegments = maxNumSegments;
- // Now mark all pending & running merges as optimize
- // merge:
+ // Now mark all pending & running merges as isMaxNumSegments:
for(final MergePolicy.OneMerge merge : pendingMerges) {
- merge.optimize = true;
- merge.maxNumSegmentsOptimize = maxNumSegments;
- segmentsToOptimize.put(merge.info, Boolean.TRUE);
+ merge.maxNumSegments = maxNumSegments;
+ segmentsToMerge.put(merge.info, Boolean.TRUE);
}
for ( final MergePolicy.OneMerge merge: runningMerges ) {
- merge.optimize = true;
- merge.maxNumSegmentsOptimize = maxNumSegments;
- segmentsToOptimize.put(merge.info, Boolean.TRUE);
+ merge.maxNumSegments = maxNumSegments;
+ segmentsToMerge.put(merge.info, Boolean.TRUE);
}
}
- maybeMerge(maxNumSegments, true);
+ maybeMerge(maxNumSegments);
if (doWait) {
synchronized(this) {
while(true) {
if (hitOOM) {
- throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot complete optimize");
+ throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot complete forceMerge");
}
if (mergeExceptions.size() > 0) {
@@ -1724,7 +1686,7 @@
final int size = mergeExceptions.size();
for(int i=0;i<size;i++) {
final MergePolicy.OneMerge merge = mergeExceptions.get(i);
- if (merge.optimize) {
+ if (merge.maxNumSegments != -1) {
* When an index
* has many document deletions (or updates to existing
- * documents), it's best to either call optimize or
+ * documents), it's best to either call forceMerge or
* expungeDeletes to remove all unused data in the index
* associated with the deleted documents. To see how
* many deletions you have pending in your index, call
* {@link IndexReader#numDeletedDocs}
* This saves disk space and memory usage while
* searching. expungeDeletes should be somewhat faster
- * than optimize since it does not insist on reducing the
+ * than forceMerge since it does not insist on reducing the
* index to a single segment (though, this depends on the
* {@link MergePolicy}; see {@link
* MergePolicy#findMergesToExpungeDeletes}.). Note that
@@ -1896,22 +1858,18 @@
* above for details.
*/
public final void maybeMerge() throws CorruptIndexException, IOException {
- maybeMerge(false);
+ maybeMerge(-1);
}
- private final void maybeMerge(boolean optimize) throws CorruptIndexException, IOException {
- maybeMerge(1, optimize);
- }
-
- private final void maybeMerge(int maxNumSegmentsOptimize, boolean optimize) throws CorruptIndexException, IOException {
+ private final void maybeMerge(int maxNumSegments) throws CorruptIndexException, IOException {
ensureOpen(false);
- updatePendingMerges(maxNumSegmentsOptimize, optimize);
+ updatePendingMerges(maxNumSegments);
mergeScheduler.merge(this);
}
- private synchronized void updatePendingMerges(int maxNumSegmentsOptimize, boolean optimize)
+ private synchronized void updatePendingMerges(int maxNumSegments)
throws CorruptIndexException, IOException {
- assert !optimize || maxNumSegmentsOptimize > 0;
+ assert maxNumSegments == -1 || maxNumSegments > 0;
if (stopMerges) {
return;
@@ -1923,14 +1881,13 @@
}
final MergePolicy.MergeSpecification spec;
- if (optimize) {
- spec = mergePolicy.findMergesForOptimize(segmentInfos, maxNumSegmentsOptimize, Collections.unmodifiableMap(segmentsToOptimize));
+ if (maxNumSegments != -1) {
+ spec = mergePolicy.findForcedMerges(segmentInfos, maxNumSegments, Collections.unmodifiableMap(segmentsToMerge));
if (spec != null) {
final int numMerges = spec.merges.size();
for(int i=0;i<numMerges;i++) {
final MergePolicy.OneMerge merge = spec.merges.get(i);
- merge.optimize = true;
- merge.maxNumSegmentsOptimize = maxNumSegmentsOptimize;
+ merge.maxNumSegments = maxNumSegments;
}
}
* NOTE: this method will forcefully abort all merges
* in progress. If other threads are running {@link
- * #optimize()}, {@link #addIndexes(IndexReader[])} or
+ * #forceMerge}, {@link #addIndexes(IndexReader[])} or
* {@link #expungeDeletes} methods, they may receive
* {@link MergePolicy.MergeAbortedException}s.
*/
@@ -2390,7 +2347,7 @@
* (including the starting index). If readers/searchers
* are open against the starting index, then temporary
* free space required will be higher by the size of the
- * starting index (see {@link #optimize()} for details).
+ * starting index (see {@link #forceMerge(int)} for details).
*
*
* NOTE: this method only copies the segments of the incoming indexes
@@ -2452,7 +2409,7 @@
&& versionComparator.compare(info.getVersion(), "3.1") >= 0;
}
- IOContext context = new IOContext(new MergeInfo(info.docCount, info.sizeInBytes(true), true, false));
+ IOContext context = new IOContext(new MergeInfo(info.docCount, info.sizeInBytes(true), true, -1));
if (createCFS) {
copySegmentIntoCFS(info, newSegName, context);
@@ -2476,7 +2433,6 @@
}
/** Merges the provided indexes into this index.
- * After this completes, the index is optimized.
* The provided IndexReaders are not closed.
*
* NOTE: while this is running, any attempts to
@@ -2512,7 +2468,7 @@
for (IndexReader indexReader : readers) {
numDocs += indexReader.numDocs();
}
- final IOContext context = new IOContext(new MergeInfo(numDocs, -1, true, false));
+ final IOContext context = new IOContext(new MergeInfo(numDocs, -1, true, -1));
// TODO: somehow we should fix this merge so it's
// abortable so that IW.close(false) is able to stop it
@@ -2789,7 +2745,7 @@
/**
* Commits all pending changes (added & deleted
- * documents, optimizations, segment merges, added
+ * documents, segment merges, added
* indexes, etc.) to the index, and syncs all referenced
* index files, such that a reader will see the changes
* and the index updates will survive an OS or machine
@@ -3199,10 +3155,10 @@
// disk, updating SegmentInfo, etc.:
readerPool.clear(merge.segments);
- if (merge.optimize) {
- // cascade the optimize:
- if (!segmentsToOptimize.containsKey(merge.info)) {
- segmentsToOptimize.put(merge.info, Boolean.FALSE);
+ if (merge.maxNumSegments != -1) {
+ // cascade the forceMerge:
+ if (!segmentsToMerge.containsKey(merge.info)) {
+ segmentsToMerge.put(merge.info, Boolean.FALSE);
}
}
@@ -3216,7 +3172,7 @@
}
// Set the exception on the merge, so if
- // optimize() is waiting on us it sees the root
+ // forceMerge is waiting on us it sees the root
// cause exception:
merge.setException(t);
addMergeException(merge);
@@ -3283,8 +3239,8 @@
// This merge (and, generally, any change to the
// segments) may now enable new merges, so we call
// merge policy & update pending merges.
- if (success && !merge.isAborted() && (merge.optimize || (!closed && !closing))) {
- updatePendingMerges(merge.maxNumSegmentsOptimize, merge.optimize);
+ if (success && !merge.isAborted() && (merge.maxNumSegments != -1 || (!closed && !closing))) {
+ updatePendingMerges(merge.maxNumSegments);
}
}
}
@@ -3328,9 +3284,8 @@
if (info.dir != directory) {
isExternal = true;
}
- if (segmentsToOptimize.containsKey(info)) {
- merge.optimize = true;
- merge.maxNumSegmentsOptimize = optimizeMaxNumSegments;
+ if (segmentsToMerge.containsKey(info)) {
+ merge.maxNumSegments = mergeMaxNumSegments;
}
}
@@ -3393,7 +3348,7 @@
assert testPoint("startMergeInit");
assert merge.registerDone;
- assert !merge.optimize || merge.maxNumSegmentsOptimize > 0;
+ assert merge.maxNumSegments == -1 || merge.maxNumSegments > 0;
if (hitOOM) {
throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot merge");
@@ -3443,7 +3398,7 @@
// Lock order: IW -> BD
bufferedDeletesStream.prune(segmentInfos);
Map<String,String> details = new HashMap<String,String>();
- details.put("optimize", Boolean.toString(merge.optimize));
+ details.put("mergeMaxNumSegments", ""+merge.maxNumSegments);
details.put("mergeFactor", Integer.toString(merge.segments.size()));
setDiagnostics(merge.info, "merge", details);
@@ -3495,7 +3450,7 @@
* the synchronized lock on IndexWriter instance. */
final synchronized void mergeFinish(MergePolicy.OneMerge merge) throws IOException {
- // Optimize, addIndexes or finishMerges may be waiting
+ // forceMerge, addIndexes or finishMerges may be waiting
// on merges to finish.
notifyAll();
@@ -4090,7 +4045,7 @@
* NOTE: the set {@link PayloadProcessorProvider} will be in effect
* immediately, potentially for already running merges too. If you want to be
* sure it is used for further operations only, such as {@link #addIndexes} or
- * {@link #optimize}, you can call {@link #waitForMerges()} before.
+ * {@link #forceMerge}, you can call {@link #waitForMerges()} before.
*/
public void setPayloadProcessorProvider(PayloadProcessorProvider pcp) {
ensureOpen();
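The resulting calling convention for a full merge is sketched below; per the new javadoc above, committing afterwards lets the writer free the disk space held by the pre-merge segments (the helper shape is illustrative):

    import java.io.IOException;

    import org.apache.lucene.index.IndexWriter;

    public class ForceMergeSketch {
      // Replaces the old writer.optimize() idiom for a static index.
      public static void mergeDownToOne(IndexWriter writer) throws IOException {
        writer.forceMerge(1); // blocks until merging completes
        writer.commit();      // allow old segment files to be deleted
        writer.close();
      }
    }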
Index: lucene/src/java/org/apache/lucene/index/LogDocMergePolicy.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/LogDocMergePolicy.java (revision 1200448)
+++ lucene/src/java/org/apache/lucene/index/LogDocMergePolicy.java (working copy)
@@ -31,10 +31,10 @@
public LogDocMergePolicy() {
minMergeSize = DEFAULT_MIN_MERGE_DOCS;
- // maxMergeSize(ForOptimize) are never used by LogDocMergePolicy; set
+ // maxMergeSize(ForForcedMerge) are never used by LogDocMergePolicy; set
// it to Long.MAX_VALUE to disable it
maxMergeSize = Long.MAX_VALUE;
- maxMergeSizeForOptimize = Long.MAX_VALUE;
+ maxMergeSizeForForcedMerge = Long.MAX_VALUE;
}
@Override
Index: lucene/src/java/org/apache/lucene/index/IndexUpgrader.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/IndexUpgrader.java (revision 1200448)
+++ lucene/src/java/org/apache/lucene/index/IndexUpgrader.java (working copy)
@@ -35,7 +35,7 @@
* java -cp lucene-core.jar org.apache.lucene.index.IndexUpgrader [-delete-prior-commits] [-verbose] indexDir
*
* Alternatively this class can be instantiated and {@link #upgrade} invoked. It uses {@link UpgradeIndexMergePolicy}
- * and triggers the upgrade via an optimize request to {@link IndexWriter}.
+ * and triggers the upgrade via a forceMerge request to {@link IndexWriter}.
* This tool keeps only the last commit in an index; for this
* reason, if the incoming index has more than one commit, the tool
* refuses to run by default. Specify {@code -delete-prior-commits}
@@ -45,7 +45,7 @@
* Warning: This tool may reorder documents if the index was partially
* upgraded before execution (e.g., documents were added). If your application relies
* on "monotonicity" of doc IDs (which means that the order in which the documents
- * were added to the index is preserved), do a full optimize instead.
+ * were added to the index is preserved), do a full forceMerge instead.
* The {@link MergePolicy} set by {@link IndexWriterConfig} may also reorder
* documents.
*/
@@ -134,7 +134,7 @@
if (infoStream != null) {
infoStream.message("IndexUpgrader", "Upgrading all pre-" + Constants.LUCENE_MAIN_VERSION + " segments of index directory '" + dir + "' to version " + Constants.LUCENE_MAIN_VERSION + "...");
}
- w.optimize();
+ w.forceMerge(1);
if (infoStream != null) {
infoStream.message("IndexUpgrader", "All segments upgraded to version " + Constants.LUCENE_MAIN_VERSION);
}
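The same upgrade can be spelled out by hand. A sketch under the assumption that UpgradeIndexMergePolicy wraps the config's current policy, as the javadoc above describes (the helper method and wiring are illustrative):

    import java.io.IOException;

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.UpgradeIndexMergePolicy;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.util.Version;

    public class ManualUpgradeSketch {
      public static void upgrade(Directory dir, Version version, Analyzer analyzer) throws IOException {
        IndexWriterConfig conf = new IndexWriterConfig(version, analyzer);
        // The wrapped policy only selects old-format segments for merging:
        conf.setMergePolicy(new UpgradeIndexMergePolicy(conf.getMergePolicy()));
        IndexWriter w = new IndexWriter(dir, conf);
        try {
          w.forceMerge(1); // triggers the upgrade, as IndexUpgrader does
        } finally {
          w.close();
        }
      }
    }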
Index: lucene/src/java/org/apache/lucene/index/ParallelReader.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/ParallelReader.java (revision 1200448)
+++ lucene/src/java/org/apache/lucene/index/ParallelReader.java (working copy)
@@ -477,23 +477,6 @@
return true;
}
- /**
- * Checks recursively if all subindexes are optimized
- */
- @Override
- public boolean isOptimized() {
- ensureOpen();
- for (final IndexReader reader : readers) {
- if (!reader.isOptimized()) {
- return false;
- }
- }
-
- // all subindexes are optimized
- return true;
- }
-
-
/** Not implemented.
* @throws UnsupportedOperationException
*/
Index: lucene/src/java/org/apache/lucene/index/DirectoryReader.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/DirectoryReader.java (revision 1200448)
+++ lucene/src/java/org/apache/lucene/index/DirectoryReader.java (working copy)
@@ -523,17 +523,7 @@
subReaders[i].getTermFreqVector(docNumber - starts[i], mapper);
}
- /**
- * Checks is the index is optimized (if it has a single segment and no deletions)
- * @return true if the index is optimized; false otherwise
- */
@Override
- public boolean isOptimized() {
- ensureOpen();
- return segmentInfos.size() == 1 && !hasDeletions();
- }
-
- @Override
public int numDocs() {
// Don't call ensureOpen() here (it could affect performance)
@@ -953,8 +943,8 @@
Directory dir;
long generation;
long version;
- final boolean isOptimized;
final Map<String,String> userData;
+ private final int segmentCount;
ReaderCommit(SegmentInfos infos, Directory dir) throws IOException {
segmentsFileName = infos.getCurrentSegmentFileName();
@@ -963,7 +953,7 @@
files = Collections.unmodifiableCollection(infos.files(dir, true));
version = infos.getVersion();
generation = infos.getGeneration();
- isOptimized = infos.size() == 1 && !infos.info(0).hasDeletions();
+ segmentCount = infos.size();
}
@Override
@@ -972,8 +962,8 @@
}
@Override
- public boolean isOptimized() {
- return isOptimized;
+ public int getSegmentCount() {
+ return segmentCount;
}
@Override
Index: lucene/src/java/org/apache/lucene/index/LogByteSizeMergePolicy.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/LogByteSizeMergePolicy.java (revision 1200448)
+++ lucene/src/java/org/apache/lucene/index/LogByteSizeMergePolicy.java (working copy)
@@ -31,13 +31,13 @@
public static final double DEFAULT_MAX_MERGE_MB = 2048;
/** Default maximum segment size. A segment of this size
- * or larger will never be merged during optimize. @see setMaxMergeMBForOptimize */
- public static final double DEFAULT_MAX_MERGE_MB_FOR_OPTIMIZE = Long.MAX_VALUE;
+ * or larger will never be merged during forceMerge. @see #setMaxMergeMBForForcedMerge */
+ public static final double DEFAULT_MAX_MERGE_MB_FOR_MERGE_IF_NEEDED = Long.MAX_VALUE;
public LogByteSizeMergePolicy() {
minMergeSize = (long) (DEFAULT_MIN_MERGE_MB*1024*1024);
maxMergeSize = (long) (DEFAULT_MAX_MERGE_MB*1024*1024);
- maxMergeSizeForOptimize = (long) (DEFAULT_MAX_MERGE_MB_FOR_OPTIMIZE*1024*1024);
+ maxMergeSizeForForcedMerge = (long) (DEFAULT_MAX_MERGE_MB_FOR_MERGE_IF_NEEDED*1024*1024);
}
@Override
@@ -70,19 +70,19 @@
/** Determines the largest segment (measured by total
* byte size of the segment's files, in MB) that may be
- * merged with other segments during optimize. Setting
+ * merged with other segments during forceMerge. Setting
* it low will leave the index with more than 1 segment,
- * even if {@link IndexWriter#optimize()} is called.*/
- public void setMaxMergeMBForOptimize(double mb) {
- maxMergeSizeForOptimize = (long) (mb*1024*1024);
+ * even if {@link IndexWriter#forceMerge} is called.*/
+ public void setMaxMergeMBForForcedMerge(double mb) {
+ maxMergeSizeForForcedMerge = (long) (mb*1024*1024);
}
/** Returns the largest segment (measured by total byte
* size of the segment's files, in MB) that may be merged
- * with other segments during optimize.
- * @see #setMaxMergeMBForOptimize */
- public double getMaxMergeMBForOptimize() {
- return ((double) maxMergeSizeForOptimize)/1024/1024;
+ * with other segments during forceMerge.
+ * @see #setMaxMergeMBForForcedMerge */
+ public double getMaxMergeMBForForcedMerge() {
+ return ((double) maxMergeSizeForForcedMerge)/1024/1024;
}
/** Sets the minimum size for the lowest level segments.
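The renamed setter is used exactly like its predecessor. A sketch with an illustrative (not recommended) 1 GB cap:

    import org.apache.lucene.index.LogByteSizeMergePolicy;

    public class ForcedMergeCapSketch {
      public static LogByteSizeMergePolicy newPolicy() {
        LogByteSizeMergePolicy mp = new LogByteSizeMergePolicy();
        // Segments larger than ~1 GB are skipped during forceMerge,
        // so more than maxNumSegments segments may legitimately remain.
        mp.setMaxMergeMBForForcedMerge(1024.0);
        return mp;
      }
    }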
Index: lucene/src/java/org/apache/lucene/index/IndexReader.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/IndexReader.java (revision 1200448)
+++ lucene/src/java/org/apache/lucene/index/IndexReader.java (working copy)
@@ -28,7 +28,6 @@
import org.apache.lucene.document.Document;
import org.apache.lucene.document.DocumentStoredFieldVisitor;
-import org.apache.lucene.index.codecs.PostingsFormat;
import org.apache.lucene.index.codecs.PerDocValues;
import org.apache.lucene.index.values.IndexDocValues;
import org.apache.lucene.search.FieldCache; // javadocs
@@ -805,16 +804,6 @@
}
/**
- * Checks is the index is optimized (if it has a single segment and
- * no deletions). Not implemented in the IndexReader base class.
- * @return true if the index is optimized; false otherwise
- * @throws UnsupportedOperationException unless overridden in subclass
- */
- public boolean isOptimized() {
- throw new UnsupportedOperationException("This reader does not support this method.");
- }
-
- /**
* Return an array of term frequency vectors for the specified document.
* The array contains a vector for each vectorized field in the document.
* Each vector contains terms and frequencies for all terms in a given vectorized field.
Index: lucene/src/java/org/apache/lucene/index/IndexFileDeleter.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/IndexFileDeleter.java (revision 1200448)
+++ lucene/src/java/org/apache/lucene/index/IndexFileDeleter.java (working copy)
@@ -19,11 +19,9 @@
import java.io.FileNotFoundException;
import java.io.IOException;
-import java.io.PrintStream;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
-import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -653,8 +651,8 @@
Collection<CommitPoint> commitsToDelete;
long version;
long generation;
- final boolean isOptimized;
final Map<String,String> userData;
+ private final int segmentCount;
public CommitPoint(Collection<CommitPoint> commitsToDelete, Directory directory, SegmentInfos segmentInfos) throws IOException {
this.directory = directory;
@@ -664,7 +662,7 @@
version = segmentInfos.getVersion();
generation = segmentInfos.getGeneration();
files = Collections.unmodifiableCollection(segmentInfos.files(directory, true));
- isOptimized = segmentInfos.size() == 1 && !segmentInfos.info(0).hasDeletions();
+ segmentCount = segmentInfos.size();
}
@Override
@@ -673,8 +671,8 @@
}
@Override
- public boolean isOptimized() {
- return isOptimized;
+ public int getSegmentCount() {
+ return segmentCount;
}
@Override
Index: lucene/src/java/org/apache/lucene/index/IndexWriterConfig.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/IndexWriterConfig.java (revision 1200448)
+++ lucene/src/java/org/apache/lucene/index/IndexWriterConfig.java (working copy)
@@ -518,7 +518,7 @@
* Expert: {@link MergePolicy} is invoked whenever there are changes to the
* segments in the index. Its role is to select which merges to do, if any,
* and return a {@link MergePolicy.MergeSpecification} describing the merges.
- * It also selects merges to do for optimize(). (The default is
+ * It also selects merges to do for forceMerge. (The default is
* {@link LogByteSizeMergePolicy}.
*
* Only takes effect when IndexWriter is first created. */
Index: lucene/src/test-framework/java/org/apache/lucene/analysis/CollationTestBase.java
===================================================================
--- lucene/src/test-framework/java/org/apache/lucene/analysis/CollationTestBase.java (revision 1200448)
+++ lucene/src/test-framework/java/org/apache/lucene/analysis/CollationTestBase.java (working copy)
@@ -216,7 +216,7 @@
doc.add(new TextField("Denmark", denmarkAnalyzer.tokenStream("Denmark", new StringReader(sortData[i][5]))));
writer.addDocument(doc);
}
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
IndexSearcher searcher = new IndexSearcher(indexStore, true);
Index: lucene/src/test-framework/java/org/apache/lucene/index/RandomIndexWriter.java
===================================================================
--- lucene/src/test-framework/java/org/apache/lucene/index/RandomIndexWriter.java (revision 1200448)
+++ lucene/src/test-framework/java/org/apache/lucene/index/RandomIndexWriter.java (working copy)
@@ -38,7 +38,7 @@
/** Silly class that randomizes the indexing experience. EG
* it may swap in a different merge policy/scheduler; may
- * commit periodically; may or may not optimize in the end,
+ * commit periodically; may or may not forceMerge in the end,
* may flush by doc count instead of RAM, etc.
*/
@@ -323,8 +323,8 @@
return getReader(true);
}
- private boolean doRandomOptimize = true;
- private boolean doRandomOptimizeAssert = true;
+ private boolean doRandomForceMerge = true;
+ private boolean doRandomForceMergeAssert = true;
public void expungeDeletes(boolean doWait) throws IOException {
w.expungeDeletes(doWait);
@@ -334,25 +334,25 @@
w.expungeDeletes();
}
- public void setDoRandomOptimize(boolean v) {
- doRandomOptimize = v;
+ public void setDoRandomForceMerge(boolean v) {
+ doRandomForceMerge = v;
}
- public void setDoRandomOptimizeAssert(boolean v) {
- doRandomOptimizeAssert = v;
+ public void setDoRandomForceMergeAssert(boolean v) {
+ doRandomForceMergeAssert = v;
}
- private void doRandomOptimize() throws IOException {
- if (doRandomOptimize) {
+ private void doRandomForceMerge() throws IOException {
+ if (doRandomForceMerge) {
final int segCount = w.getSegmentCount();
if (r.nextBoolean() || segCount == 0) {
- // full optimize
- w.optimize();
+ // full forceMerge
+ w.forceMerge(1);
} else {
- // partial optimize
+ // partial forceMerge
final int limit = _TestUtil.nextInt(r, 1, segCount);
- w.optimize(limit);
- assert !doRandomOptimizeAssert || w.getSegmentCount() <= limit: "limit=" + limit + " actual=" + w.getSegmentCount();
+ w.forceMerge(limit);
+ assert !doRandomForceMergeAssert || w.getSegmentCount() <= limit: "limit=" + limit + " actual=" + w.getSegmentCount();
}
}
switchDoDocValues();
@@ -361,7 +361,7 @@
public IndexReader getReader(boolean applyDeletions) throws IOException {
getReaderCalled = true;
if (r.nextInt(4) == 2) {
- doRandomOptimize();
+ doRandomForceMerge();
}
// If we are writing with PreFlexRW, force a full
// IndexReader.open so terms are sorted in codepoint
@@ -394,21 +394,21 @@
*/
public void close() throws IOException {
// if someone isn't using getReader() API, we want to be sure to
- // maybeOptimize since presumably they might open a reader on the dir.
+ // forceMerge since presumably they might open a reader on the dir.
if (getReaderCalled == false && r.nextInt(8) == 2) {
- doRandomOptimize();
+ doRandomForceMerge();
}
w.close();
}
/**
- * Forces an optimize.
+ * Forces a forceMerge.
*
* NOTE: this should be avoided in tests unless absolutely necessary,
* as it will result in less test coverage.
- * @see IndexWriter#optimize()
+ * @see IndexWriter#forceMerge(int)
*/
- public void optimize() throws IOException {
- w.optimize();
+ public void forceMerge(int maxSegmentCount) throws IOException {
+ w.forceMerge(maxSegmentCount);
}
}
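In a test, the renamed hooks are typically used as sketched below (the helper shape is illustrative; random and dir ordinarily come from LuceneTestCase):

    import java.io.IOException;
    import java.util.Random;

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.RandomIndexWriter;
    import org.apache.lucene.store.Directory;

    public class RandomWriterSketch {
      public static IndexReader writeAndOpen(Random random, Directory dir) throws IOException {
        RandomIndexWriter w = new RandomIndexWriter(random, dir);
        w.setDoRandomForceMerge(false); // deterministic segment layout
        // ... add documents here ...
        w.forceMerge(1);                // explicit full merge where a test needs one
        IndexReader r = w.getReader();
        w.close();
        return r;
      }
    }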
Index: lucene/src/test-framework/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java
===================================================================
--- lucene/src/test-framework/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java (revision 1200448)
+++ lucene/src/test-framework/java/org/apache/lucene/index/ThreadedIndexingAndSearchingTestCase.java (working copy)
@@ -54,7 +54,7 @@
import org.apache.lucene.util._TestUtil;
// TODO
-// - mix in optimize, addIndexes
+// - mix in forceMerge, addIndexes
// - randomly mix in non-congruent docs
/** Utility class that spawns multiple indexing and
Index: lucene/src/test-framework/java/org/apache/lucene/index/MockRandomMergePolicy.java
===================================================================
--- lucene/src/test-framework/java/org/apache/lucene/index/MockRandomMergePolicy.java (revision 1200448)
+++ lucene/src/test-framework/java/org/apache/lucene/index/MockRandomMergePolicy.java (working copy)
@@ -55,18 +55,18 @@
}
@Override
- public MergeSpecification findMergesForOptimize(
- SegmentInfos segmentInfos, int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToOptimize)
+ public MergeSpecification findForcedMerges(
+ SegmentInfos segmentInfos, int maxSegmentCount, Map<SegmentInfo,Boolean> segmentsToMerge)
throws CorruptIndexException, IOException {
final List<SegmentInfo> eligibleSegments = new ArrayList<SegmentInfo>();
for(SegmentInfo info : segmentInfos) {
- if (segmentsToOptimize.containsKey(info)) {
+ if (segmentsToMerge.containsKey(info)) {
eligibleSegments.add(info);
}
}
- //System.out.println("MRMP: findMergesForOptimize sis=" + segmentInfos + " eligible=" + eligibleSegments);
+ //System.out.println("MRMP: findForcedMerges sis=" + segmentInfos + " eligible=" + eligibleSegments);
MergeSpecification mergeSpec = null;
if (eligibleSegments.size() > 1 || (eligibleSegments.size() == 1 && eligibleSegments.get(0).hasDeletions())) {
mergeSpec = new MergeSpecification();
@@ -85,7 +85,7 @@
if (mergeSpec != null) {
for(OneMerge merge : mergeSpec.merges) {
for(SegmentInfo info : merge.segments) {
- assert segmentsToOptimize.containsKey(info);
+ assert segmentsToMerge.containsKey(info);
}
}
}
Index: lucene/src/test-framework/java/org/apache/lucene/util/LuceneTestCase.java
===================================================================
--- lucene/src/test-framework/java/org/apache/lucene/util/LuceneTestCase.java (revision 1200448)
+++ lucene/src/test-framework/java/org/apache/lucene/util/LuceneTestCase.java (working copy)
@@ -32,7 +32,6 @@
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
-import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.lucene.analysis.Analyzer;
@@ -41,17 +40,8 @@
import org.apache.lucene.index.*;
import org.apache.lucene.index.codecs.Codec;
import org.apache.lucene.index.codecs.PostingsFormat;
-import org.apache.lucene.index.codecs.lucene3x.Lucene3xCodec;
-import org.apache.lucene.index.codecs.lucene3x.Lucene3xPostingsFormat;
import org.apache.lucene.index.codecs.lucene40.Lucene40Codec;
-import org.apache.lucene.index.codecs.mockintblock.MockFixedIntBlockPostingsFormat;
-import org.apache.lucene.index.codecs.mockintblock.MockVariableIntBlockPostingsFormat;
-import org.apache.lucene.index.codecs.mocksep.MockSepPostingsFormat;
-import org.apache.lucene.index.codecs.mockrandom.MockRandomPostingsFormat;
-import org.apache.lucene.index.codecs.perfield.PerFieldPostingsFormat;
import org.apache.lucene.index.codecs.preflexrw.PreFlexRWCodec;
-import org.apache.lucene.index.codecs.preflexrw.PreFlexRWPostingsFormat;
-import org.apache.lucene.index.codecs.pulsing.PulsingPostingsFormat;
import org.apache.lucene.index.codecs.simpletext.SimpleTextCodec;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.FieldCache;
@@ -1329,7 +1319,7 @@
context = IOContext.READONCE;
break;
case 3:
- context = new IOContext(new MergeInfo(randomNumDocs, size, true, false));
+ context = new IOContext(new MergeInfo(randomNumDocs, size, true, -1));
break;
case 4:
context = new IOContext(new FlushInfo(randomNumDocs, size));
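The MergeInfo change replaces the old boolean optimize flag with the requested segment count. A sketch of both cases (the doc counts and byte sizes are illustrative):

    import org.apache.lucene.store.IOContext;
    import org.apache.lucene.store.MergeInfo;

    public class MergeContextSketch {
      // A normal background merge: -1 means "not a forced merge".
      static final IOContext BACKGROUND =
          new IOContext(new MergeInfo(1000, 4096L, true, -1));

      // A forceMerge(1)-driven merge carries the requested segment count.
      static final IOContext FORCED =
          new IOContext(new MergeInfo(1000, 4096L, true, 1));
    }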
Index: lucene/contrib/demo/src/java/org/apache/lucene/demo/IndexFiles.java
===================================================================
--- lucene/contrib/demo/src/java/org/apache/lucene/demo/IndexFiles.java (revision 1200448)
+++ lucene/contrib/demo/src/java/org/apache/lucene/demo/IndexFiles.java (working copy)
@@ -109,12 +109,12 @@
indexDocs(writer, docDir);
// NOTE: if you want to maximize search performance,
- // you can optionally call optimize here. This can be
- // a costly operation, so generally it's only worth
- // it when your index is relatively static (ie you're
- // done adding documents to it):
+ // you can optionally call forceMerge here. This can be
+ // a terribly costly operation, so generally it's only
+ // worth it when your index is relatively static (ie
+ // you're done adding documents to it):
//
- // writer.optimize();
+ // writer.forceMerge(1);
writer.close();
Index: lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestMultiSegmentReaderOnConstructor.java
===================================================================
--- lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestMultiSegmentReaderOnConstructor.java (revision 0)
+++ lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestMultiSegmentReaderOnConstructor.java (working copy)
@@ -0,0 +1,72 @@
+package org.apache.lucene.store.instantiated;
+
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.TextField;
+
+/**
+ * @since 2009-mar-30 13:15:49
+ */
+public class TestMultiSegmentReaderOnConstructor extends LuceneTestCase {
+
+ public void test() throws Exception {
+ Directory dir = newDirectory();
+ IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
+ addDocument(iw, "Hello, world!");
+ addDocument(iw, "All work and no play makes jack a dull boy");
+ iw.close();
+
+ iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+ addDocument(iw, "Hello, tellus!");
+ addDocument(iw, "All work and no play makes danny a dull boy");
+ iw.close();
+
+ iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
+ addDocument(iw, "Hello, earth!");
+ addDocument(iw, "All work and no play makes wendy a dull girl");
+ iw.close();
+
+ IndexReader multiSegReader = IndexReader.open(dir, false);
+ multiSegReader.deleteDocument(2);
+
+ try {
+ new InstantiatedIndex(multiSegReader);
+ } catch (Exception e) {
+ e.printStackTrace(System.out);
+ fail("No exceptions when loading a multi-seg reader!");
+ }
+
+ // todo: add some assertions.
+ multiSegReader.close();
+ dir.close();
+ }
+
+ private void addDocument(IndexWriter iw, String text) throws IOException {
+ Document doc = new Document();
+ doc.add(new TextField("field", text));
+ iw.addDocument(doc);
+ }
+}
Property changes on: lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestMultiSegmentReaderOnConstructor.java
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
Index: lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestUnoptimizedReaderOnConstructor.java
===================================================================
--- lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestUnoptimizedReaderOnConstructor.java (revision 1200448)
+++ lucene/contrib/instantiated/src/test/org/apache/lucene/store/instantiated/TestUnoptimizedReaderOnConstructor.java (working copy)
@@ -1,72 +0,0 @@
-package org.apache.lucene.store.instantiated;
-
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-import java.io.IOException;
-
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.IndexWriterConfig.OpenMode;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.TextField;
-
-/**
- * @since 2009-mar-30 13:15:49
- */
-public class TestUnoptimizedReaderOnConstructor extends LuceneTestCase {
-
- public void test() throws Exception {
- Directory dir = newDirectory();
- IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
- addDocument(iw, "Hello, world!");
- addDocument(iw, "All work and no play makes jack a dull boy");
- iw.close();
-
- iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
- addDocument(iw, "Hello, tellus!");
- addDocument(iw, "All work and no play makes danny a dull boy");
- iw.close();
-
- iw = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.APPEND));
- addDocument(iw, "Hello, earth!");
- addDocument(iw, "All work and no play makes wendy a dull girl");
- iw.close();
-
- IndexReader unoptimizedReader = IndexReader.open(dir, false);
- unoptimizedReader.deleteDocument(2);
-
- try {
- new InstantiatedIndex(unoptimizedReader);
- } catch (Exception e) {
- e.printStackTrace(System.out);
- fail("No exceptions when loading an unoptimized reader!");
- }
-
- // todo some assertations.
- unoptimizedReader.close();
- dir.close();
- }
-
- private void addDocument(IndexWriter iw, String text) throws IOException {
- Document doc = new Document();
- doc.add(new TextField("field", text));
- iw.addDocument(doc);
- }
-}
Index: lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java
===================================================================
--- lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java (revision 1200448)
+++ lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndexReader.java (working copy)
@@ -56,14 +56,6 @@
}
/**
- * @return always true.
- */
- @Override
- public boolean isOptimized() {
- return true;
- }
-
- /**
* An InstantiatedIndexReader is not a snapshot in time, it is completely in
* sync with the latest commit to the store!
*
Index: lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java
===================================================================
--- lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java (revision 1200448)
+++ lucene/contrib/instantiated/src/java/org/apache/lucene/store/instantiated/InstantiatedIndex.java (working copy)
@@ -96,7 +96,7 @@
* Creates a new instantiated index that looks just like the index in a specific state as represented by a reader.
*
* @param sourceIndexReader the source index this new instantiated index will be copied from.
- * @throws IOException if the source index is not optimized, or when accessing the source.
+ * @throws IOException if the source index is not single-segment, or when accessing the source.
*/
public InstantiatedIndex(IndexReader sourceIndexReader) throws IOException {
this(sourceIndexReader, null);
@@ -109,13 +109,13 @@
*
* @param sourceIndexReader the source index this new instantiated index will be copied from.
* @param fields fields to be added, or null for all
- * @throws IOException if the source index is not optimized, or when accessing the source.
+ * @throws IOException if the source index is not single-segment, or when accessing the source.
*/
public InstantiatedIndex(IndexReader sourceIndexReader, Set<String> fields) throws IOException {
- if (!sourceIndexReader.isOptimized()) {
- System.out.println(("Source index is not optimized."));
- //throw new IOException("Source index is not optimized.");
+ if (sourceIndexReader.getSequentialSubReaders().length != 1) {
+ System.out.println(("Source index has more than one segment."));
+ //throw new IOException("Source index has more than one segment.");
}
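To satisfy the single-segment expectation, a caller can force-merge the source index before constructing the InstantiatedIndex. A sketch, assuming the caller supplies the Directory and IndexWriterConfig:

    import java.io.IOException;

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.instantiated.InstantiatedIndex;

    public class InstantiatedFromSingleSegment {
      public static InstantiatedIndex load(Directory dir, IndexWriterConfig conf) throws IOException {
        IndexWriter w = new IndexWriter(dir, conf);
        w.forceMerge(1); // guarantee a single segment
        w.close();
        IndexReader reader = IndexReader.open(dir, true);
        try {
          return new InstantiatedIndex(reader); // data is copied; reader can close
        } finally {
          reader.close();
        }
      }
    }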
Index: lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java
===================================================================
--- lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java (revision 1200448)
+++ lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java (working copy)
@@ -121,7 +121,7 @@
writer.addDocument(doc);
writer.commit();
writer.addDocument(doc);
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
IndexReader reader = IndexReader.open(dir, null, true, 1);
assertEquals(2, reader.numDocs());
Index: lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java
===================================================================
--- lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java (revision 1200448)
+++ lucene/contrib/misc/src/test/org/apache/lucene/index/TestIndexSplitter.java (working copy)
@@ -94,7 +94,7 @@
fsDir.close();
}
- public void testDeleteThenOptimize() throws Exception {
+ public void testDeleteThenFullMerge() throws Exception {
// Create directories where the indexes will reside
File indexPath = new File(TEMP_DIR, "testfilesplitter");
_TestUtil.rmDir(indexPath);
@@ -134,7 +134,7 @@
indexReader.close();
fsDirDest.close();
- // Optimize the split index
+ // Fully merge the split index
mergePolicy = new LogByteSizeMergePolicy();
mergePolicy.setNoCFSRatio(1);
iwConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
@@ -142,7 +142,7 @@
.setMergePolicy(mergePolicy);
fsDirDest = newFSDirectory(indexSplitPath);
indexWriter = new IndexWriter(fsDirDest, iwConfig);
- indexWriter.optimize();
+ indexWriter.forceMerge(1);
indexWriter.close();
fsDirDest.close();
Index: lucene/contrib/misc/src/java/org/apache/lucene/index/BalancedSegmentMergePolicy.java
===================================================================
--- lucene/contrib/misc/src/java/org/apache/lucene/index/BalancedSegmentMergePolicy.java (revision 1200448)
+++ lucene/contrib/misc/src/java/org/apache/lucene/index/BalancedSegmentMergePolicy.java (working copy)
@@ -105,22 +105,22 @@
}
@Override
- public MergeSpecification findMergesForOptimize(SegmentInfos infos, int maxNumSegments, Map<SegmentInfo,Boolean> segmentsToOptimize) throws IOException {
+ public MergeSpecification findForcedMerges(SegmentInfos infos, int maxNumSegments, Map<SegmentInfo,Boolean> segmentsToMerge) throws IOException {
assert maxNumSegments > 0;
MergeSpecification spec = null;
- if (!isOptimized(infos, maxNumSegments, segmentsToOptimize)) {
+ if (!isMerged(infos, maxNumSegments, segmentsToMerge)) {
// Find the newest (rightmost) segment that needs to
- // be optimized (other segments may have been flushed
- // since optimize started):
+ // be merged (other segments may have been flushed
+ // since the merge started):
int last = infos.size();
while(last > 0) {
final SegmentInfo info = infos.info(--last);
- if (segmentsToOptimize.containsKey(info)) {
+ if (segmentsToMerge.containsKey(info)) {
last++;
break;
}
@@ -130,9 +130,9 @@
if (maxNumSegments == 1) {
- // Since we must optimize down to 1 segment, the
+ // Since we must merge down to 1 segment, the
// choice is simple:
- if (last > 1 || !isOptimized(infos.info(0))) {
+ if (last > 1 || !isMerged(infos.info(0))) {
spec = new MergeSpecification();
spec.add(new OneMerge(infos.asList().subList(0, last)));
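After this rename, IndexWriter.forceMerge(int) is the entry point that reaches this method. A wiring sketch under the same assumptions as the previous example (placeholder dir, null analyzer, and a no-arg policy constructor assumed):

    // forceMerge(n) consults the configured MergePolicy, via findForcedMerges,
    // for merges to run until at most n segments remain.
    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_40, null)
        .setMergePolicy(new BalancedSegmentMergePolicy());
    IndexWriter w = new IndexWriter(dir, conf);
    w.forceMerge(1);  // ends up calling findForcedMerges(infos, 1, segmentsToMerge)
    w.close();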
Index: lucene/contrib/misc/src/java/org/apache/lucene/index/IndexSplitter.java
===================================================================
--- lucene/contrib/misc/src/java/org/apache/lucene/index/IndexSplitter.java (revision 1200448)
+++ lucene/contrib/misc/src/java/org/apache/lucene/index/IndexSplitter.java (working copy)
@@ -36,8 +36,8 @@
*
* This tool does file-level copying of segments files.
* This means it's unable to split apart a single segment
- * into multiple segments. For example if your index is
- * optimized, this tool won't help. Also, it does basic
+ * into multiple segments. For example if your index is a
+ * single segment, this tool won't help. Also, it does basic
* file-level copying (using simple
* File{In,Out}putStream) so it will not work with non
* FSDirectory Directory impls.
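A usage sketch for the tool described above; the IndexSplitter(File) constructor, listSegments(), and split(File, String[]) signatures are assumed from the contrib sources of this era:

    import java.io.File;
    import org.apache.lucene.index.IndexSplitter;

    // Whole segments are copied at the file level; a fully merged
    // (single-segment) index therefore cannot be split.
    IndexSplitter splitter = new IndexSplitter(new File("path/to/index"));
    splitter.listSegments();  // prints the segment names to choose from
    splitter.split(new File("path/to/dest"), new String[] { "_0", "_2" });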
Index: lucene/contrib/misc/src/java/org/apache/lucene/misc/IndexMergeTool.java
===================================================================
--- lucene/contrib/misc/src/java/org/apache/lucene/misc/IndexMergeTool.java (revision 1200448)
+++ lucene/contrib/misc/src/java/org/apache/lucene/misc/IndexMergeTool.java (working copy)
@@ -50,8 +50,8 @@
System.out.println("Merging...");
writer.addIndexes(indexes);
- System.out.println("Optimizing...");
- writer.optimize();
+ System.out.println("Full merge...");
+ writer.forceMerge(1);
writer.close();
System.out.println("Done.");
}
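The tool's command line is unchanged; its body now reduces to roughly the following sketch (placeholder paths; a null analyzer is assumed acceptable since nothing is tokenized during a merge):

    import java.io.File;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.IndexWriterConfig.OpenMode;
    import org.apache.lucene.store.FSDirectory;
    import org.apache.lucene.util.Version;

    IndexWriter writer = new IndexWriter(FSDirectory.open(new File("merged")),
        new IndexWriterConfig(Version.LUCENE_40, null).setOpenMode(OpenMode.CREATE));
    writer.addIndexes(FSDirectory.open(new File("part1")),
                      FSDirectory.open(new File("part2")));
    writer.forceMerge(1);   // was writer.optimize()
    writer.close();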
Index: lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java
===================================================================
--- lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java (revision 1200448)
+++ lucene/contrib/spatial/src/test/org/apache/lucene/spatial/tier/TestCartesian.java (working copy)
@@ -141,7 +141,7 @@
writer.commit();
// TODO: fix CustomScoreQuery usage in testRange/testGeoHashRange so we don't need this.
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
}
Index: lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
===================================================================
--- lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (revision 1200448)
+++ lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java (working copy)
@@ -1635,7 +1635,7 @@
writer.addDocument( doc( "t_text1", "more random words for second field del" ) );
writer.addDocument( doc( "t_text1", "random words for highlighting tests del" ) );
writer.addDocument( doc( "t_text1", "more random words for second field" ) );
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
}
@@ -1643,7 +1643,7 @@
IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)).setOpenMode(OpenMode.APPEND));
writer.deleteDocuments( new Term( "t_text1", "del" ) );
// To see negative idf, keep the following line commented out
- //writer.optimize();
+ //writer.forceMerge(1);
writer.close();
}
@@ -1759,7 +1759,7 @@
doc = new Document();
doc.add(nfield);
writer.addDocument(doc, analyzer);
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
reader = IndexReader.open(ramDir, true);
numHighlights = 0;
Index: lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java
===================================================================
--- lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java (revision 1200448)
+++ lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/DuplicateFilterTest.java (working copy)
@@ -57,7 +57,7 @@
// Until we fix LUCENE-2348, the index must
// have only 1 segment:
- writer.optimize();
+ writer.forceMerge(1);
reader = writer.getReader();
writer.close();
Index: lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestSpanRegexQuery.java
===================================================================
--- lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestSpanRegexQuery.java (revision 1200448)
+++ lucene/contrib/sandbox/src/test/org/apache/lucene/sandbox/queries/regex/TestSpanRegexQuery.java (working copy)
@@ -68,7 +68,7 @@
doc = new Document();
doc.add(newField("field", "first auto update", TextField.TYPE_UNSTORED));
writer.addDocument(doc);
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
IndexSearcher searcher = new IndexSearcher(directory, true);
@@ -98,14 +98,14 @@
IndexWriter writerA = new IndexWriter(indexStoreA, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
writerA.addDocument(lDoc);
- writerA.optimize();
+ writerA.forceMerge(1);
writerA.close();
// creating second index writer
IndexWriter writerB = new IndexWriter(indexStoreB, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(random)).setOpenMode(OpenMode.CREATE));
writerB.addDocument(lDoc2);
- writerB.optimize();
+ writerB.forceMerge(1);
writerB.close();
}
}
Index: solr/core/src/test/org/apache/solr/core/TestSolrDeletionPolicy1.java
===================================================================
--- solr/core/src/test/org/apache/solr/core/TestSolrDeletionPolicy1.java (revision 1200448)
+++ solr/core/src/test/org/apache/solr/core/TestSolrDeletionPolicy1.java (working copy)
@@ -95,7 +95,7 @@
for (Long version : commits.keySet()) {
if (commits.get(version) == latest)
continue;
- assertTrue(commits.get(version).isOptimized());
+ assertEquals(1, commits.get(version).getSegmentCount());
}
}
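The substitution used in this and the following Solr hunks: with IndexCommit.isOptimized() gone, a commit counts as fully merged exactly when it holds a single segment. The idiom, sketched on an arbitrary IndexCommit named commit:

    boolean fullyMerged = commit.getSegmentCount() == 1;  // replaces commit.isOptimized()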
Index: solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java
===================================================================
--- solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java (revision 1200448)
+++ solr/core/src/test/org/apache/solr/search/TestRealTimeGet.java (working copy)
@@ -31,7 +31,6 @@
import org.apache.solr.request.SolrQueryRequest;
import org.junit.BeforeClass;
import org.junit.Test;
-import org.junit.Ignore;
import java.io.IOException;
import java.util.*;
@@ -448,7 +447,7 @@
Directory dir = newDirectory();
final RandomIndexWriter writer = new RandomIndexWriter(random, dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
- writer.setDoRandomOptimizeAssert(false);
+ writer.setDoRandomForceMergeAssert(false);
// writer.commit();
// reader = IndexReader.open(dir);
Index: solr/core/src/test/org/apache/solr/spelling/IndexBasedSpellCheckerTest.java
===================================================================
--- solr/core/src/test/org/apache/solr/spelling/IndexBasedSpellCheckerTest.java (revision 1200448)
+++ solr/core/src/test/org/apache/solr/spelling/IndexBasedSpellCheckerTest.java (working copy)
@@ -293,7 +293,7 @@
doc.add(new Field("title", ALT_DOCS[i], TextField.TYPE_STORED));
iw.addDocument(doc);
}
- iw.optimize();
+ iw.forceMerge(1);
iw.close();
dir.close();
indexDir.mkdirs();
Index: solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
===================================================================
--- solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java (revision 1200448)
+++ solr/core/src/java/org/apache/solr/update/DirectUpdateHandler2.java (working copy)
@@ -24,7 +24,6 @@
import java.net.URL;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
-import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
@@ -312,7 +311,7 @@
log.info("start "+cmd);
if (cmd.optimize) {
- writer.optimize(cmd.maxOptimizeSegments);
+ writer.forceMerge(cmd.maxOptimizeSegments);
} else if (cmd.expungeDeletes) {
writer.expungeDeletes();
}
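forceMerge takes the maximum number of segments to leave, so the old optimize(maxOptimizeSegments) maps across without a semantic change. Illustrated on a placeholder writer:

    writer.forceMerge(1);  // old writer.optimize(): merge down to a single segment
    writer.forceMerge(5);  // old writer.optimize(5): merge until at most five segments remain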
Index: solr/core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java
===================================================================
--- solr/core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java (revision 1200448)
+++ solr/core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java (working copy)
@@ -173,8 +173,8 @@
}
@Override
- public boolean isOptimized() {
- return delegate.isOptimized();
+ public int getSegmentCount() {
+ return delegate.getSegmentCount();
}
@Override
Index: solr/core/src/java/org/apache/solr/core/SolrDeletionPolicy.java
===================================================================
--- solr/core/src/java/org/apache/solr/core/SolrDeletionPolicy.java (revision 1200448)
+++ solr/core/src/java/org/apache/solr/core/SolrDeletionPolicy.java (working copy)
@@ -135,7 +135,7 @@
IndexCommit newest = commits.get(commits.size() - 1);
log.info("newest commit = " + newest.getVersion());
- int optimizedKept = newest.isOptimized() ? 1 : 0;
+ int singleSegKept = (newest.getSegmentCount() == 1) ? 1 : 0;
int totalKept = 1;
// work our way from newest to oldest, skipping the first since we always want to keep it.
@@ -158,9 +158,9 @@
log.warn("Exception while checking commit point's age for deletion", e);
}
- if (optimizedKept < maxOptimizedCommitsToKeep && commit.isOptimized()) {
+ if (singleSegKept < maxOptimizedCommitsToKeep && commit.getSegmentCount() == 1) {
totalKept++;
- optimizedKept++;
+ singleSegKept++;
continue;
}
Index: solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
===================================================================
--- solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java (revision 1200448)
+++ solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java (working copy)
@@ -758,7 +758,7 @@
void refreshCommitpoint() {
IndexCommit commitPoint = core.getDeletionPolicy().getLatestCommit();
- if(replicateOnCommit || (replicateOnOptimize && commitPoint.isOptimized())) {
+ if(replicateOnCommit || (replicateOnOptimize && commitPoint.getSegmentCount() == 1)) {
indexCommitPoint = commitPoint;
}
}
@@ -827,7 +827,7 @@
if(replicateOnOptimize){
Collection<IndexCommit> commits = IndexReader.listCommits(reader.directory());
for (IndexCommit ic : commits) {
- if(ic.isOptimized()){
+ if(ic.getSegmentCount() == 1){
if(indexCommitPoint == null || indexCommitPoint.getVersion() < ic.getVersion()) indexCommitPoint = ic;
}
}
Index: solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
===================================================================
--- solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java (revision 1200448)
+++ solr/core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java (working copy)
@@ -492,7 +492,7 @@
}
indexInfo.add("version", reader.getVersion()); // TODO? Is this different then: IndexReader.getCurrentVersion( dir )?
- indexInfo.add("optimized", reader.isOptimized() );
+ indexInfo.add("segmentCount", reader.getSequentialSubReaders().length);
indexInfo.add("current", reader.isCurrent() );
indexInfo.add("hasDeletions", reader.hasDeletions() );
indexInfo.add("directory", dir );
Index: solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java
===================================================================
--- solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java (revision 1200448)
+++ solr/core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java (working copy)
@@ -22,12 +22,7 @@
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
-import org.apache.lucene.search.spell.DirectSpellChecker;
-import org.apache.lucene.search.spell.JaroWinklerDistance;
-import org.apache.lucene.search.spell.LevensteinDistance;
-import org.apache.lucene.search.spell.StringDistance;
import org.apache.lucene.search.spell.SuggestWord;
-import org.apache.lucene.search.spell.SuggestWordQueue;
import org.apache.solr.client.solrj.response.SpellCheckResponse;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.slf4j.Logger;
@@ -618,7 +613,7 @@
if (buildOnCommit) {
buildSpellIndex(newSearcher);
} else if (buildOnOptimize) {
- if (newSearcher.getIndexReader().isOptimized()) {
+ if (newSearcher.getIndexReader().getSequentialSubReaders().length == 1) {
buildSpellIndex(newSearcher);
} else {
LOG.info("Index is not optimized therefore skipping building spell check index for: " + checker.getDictionaryName());
Index: solr/core/src/java/org/apache/solr/spelling/FileBasedSpellChecker.java
===================================================================
--- solr/core/src/java/org/apache/solr/spelling/FileBasedSpellChecker.java (revision 1200448)
+++ solr/core/src/java/org/apache/solr/spelling/FileBasedSpellChecker.java (working copy)
@@ -108,7 +108,7 @@
d.add(new TextField(WORD_FIELD_NAME, s));
writer.addDocument(d);
}
- writer.optimize();
+ writer.forceMerge(1);
writer.close();
dictionary = new HighFrequencyDictionary(IndexReader.open(ramDir, true),