Index: src/test/org/apache/lucene/index/TestStressIndexing2.java
===================================================================
--- src/test/org/apache/lucene/index/TestStressIndexing2.java	(revision 0)
+++ src/test/org/apache/lucene/index/TestStressIndexing2.java	(revision 0)
@@ -0,0 +1,385 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.store.*;
+import org.apache.lucene.document.*;
+import org.apache.lucene.analysis.*;
+
+import org.apache.lucene.util.LuceneTestCase;
+
+import java.util.*;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+public class TestStressIndexing2 extends LuceneTestCase {
+  static int maxFields=4;
+  static int bigFieldSize=10;
+  static boolean sameFieldOrder=false;
+  static boolean autoCommit=false;
+  static int mergeFactor=3;
+  static int maxBufferedDocs=3;
+
+  static Random r = new Random(0);
+
+  public void testRandom() throws Exception {
+    Directory dir1 = new RAMDirectory();
+    // dir1 = FSDirectory.getDirectory("foofoofoo");
+    Directory dir2 = new RAMDirectory();
+    // mergeFactor=2; maxBufferedDocs=2; Map docs = indexRandom(1, 3, 2, dir1);
+    Map docs = indexRandom(10, 100, 100, dir1);
+    indexSerial(docs, dir2);
+
+    // sanity check: verifyEquals must pass when an index is compared to itself
+    // verifyEquals(dir1, dir1, "id");
+    // verifyEquals(dir2, dir2, "id");
+
+    verifyEquals(dir1, dir2, "id");
+  }
+
+  public void testMultiConfig() throws Exception {
+    // test many combinations of smaller parameter settings together
+    for (int i=0; i<100; i++) {  // increase iterations for better testing
+      sameFieldOrder=r.nextBoolean();
+      autoCommit=r.nextBoolean();
+      mergeFactor=r.nextInt(3)+2;
+      maxBufferedDocs=r.nextInt(3)+2;
+
+      int nThreads=r.nextInt(5)+1;
+      int iter=r.nextInt(10)+1;
+      int range=r.nextInt(20)+1;
+
+      Directory dir1 = new RAMDirectory();
+      Directory dir2 = new RAMDirectory();
+      Map docs = indexRandom(nThreads, iter, range, dir1);
+      indexSerial(docs, dir2);
+      verifyEquals(dir1, dir2, "id");
+    }
+  }
+
+  static Term idTerm = new Term("id","");
+  IndexingThread[] threads;
+  static Comparator fieldNameComparator = new Comparator() {
+    public int compare(Object o1, Object o2) {
+      return ((Fieldable)o1).name().compareTo(((Fieldable)o2).name());
+    }
+  };
+
+  // This test avoids any extra synchronization in the multiple indexing
+  // threads, to verify that IndexWriter itself correctly synchronizes
+  // everything.
+
+  public Map indexRandom(int nThreads, int iterations, int range, Directory dir) throws IOException, InterruptedException {
+    IndexWriter w = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true);
+    w.setUseCompoundFile(false);
+    /***
+    w.setMaxMergeDocs(Integer.MAX_VALUE);
+    w.setMaxFieldLength(10000);
+    w.setRAMBufferSizeMB(1);
+    w.setMergeFactor(10);
+    ***/
+
+    // force many merges
+    w.setMergeFactor(mergeFactor);
+    w.setRAMBufferSizeMB(.1);
+    w.setMaxBufferedDocs(maxBufferedDocs);
+
+    threads = new IndexingThread[nThreads];
+    for (int i=0; i<threads.length; i++) {

[... a long stretch of the patch is missing from this copy: the rest of
indexRandom, the indexSerial method called from the tests above, and the
opening of verifyEquals, including the declarations of term1/term2,
termEnum1/termEnum2, termDocs1/termDocs2, info1/info2, r2r1, and hasDeletes
used below. Restored-from-context lines below are limited to the obvious loop
bounds, the len1 loop mirrored from the intact len2 loop, and the element-wise
assertEquals in the compare loop ...]

+      // iterate until we get some docs
+      int len1;
+      for(;;) {
+        len1=0;
+        term1 = termEnum1.term();
+        if (term1==null) break;
+        termDocs1.seek(termEnum1);
+        while (termDocs1.next()) {
+          int d1 = termDocs1.doc();
+          int f1 = termDocs1.freq();
+          info1[len1] = (((long)d1)<<32) | f1;
+          len1++;
+        }
+        if (len1>0) break;
+        if (!termEnum1.next()) break;
+      }
+
+      // iterate until we get some docs
+      int len2;
+      for(;;) {
+        len2=0;
+        term2 = termEnum2.term();
+        if (term2==null) break;
+        termDocs2.seek(termEnum2);
+        while (termDocs2.next()) {
+          int d2 = termDocs2.doc();
+          int f2 = termDocs2.freq();
+          info2[len2] = (((long)r2r1[d2])<<32) | f2;
+          len2++;
+        }
+        if (len2>0) break;
+        if (!termEnum2.next()) break;
+      }
+
+      if (!hasDeletes)
+        assertEquals(termEnum1.docFreq(), termEnum2.docFreq());
+
+      assertEquals(len1, len2);
+      if (len1==0) break;  // no more terms
+
+      assertEquals(term1, term2);
+
+      // sort info2 to get it into ascending docid order
+      Arrays.sort(info2, 0, len2);
+
+      // now compare the postings, docid by docid
+      for (int i=0; i<len1; i++) {
+        assertEquals(info1[i], info2[i]);
+      }

[... another missing stretch: the remainder of verifyEquals (the outer loop
must also advance termEnum1/termEnum2 before repeating) and the start of the
IndexingThread helper class, from which the next fragment survives ...]

+    Random r;
+
+    public int nextInt(int lim) {
+      return r.nextInt(lim);
+    }
+
+    public String getString(int nTokens) {
+      nTokens = nTokens!=0 ? nTokens : r.nextInt(4)+1;
+      // avoid StringBuffer because it adds extra synchronization.
+      char[] arr = new char[nTokens*2];
+      for (int i=0; i

[... the remainder of the patch (the rest of getString, the rest of
IndexingThread, and everything through the end of the 385-line file) was
truncated in this copy ...]
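
A note for readers skimming the patch: the pattern under test is several
threads sharing a single IndexWriter with no external locking, after which the
resulting index must be equivalent to one built serially from the same
documents. Below is a minimal, self-contained sketch of that pattern against
the same Lucene 2.x API the patch uses. It is not part of the patch; the class
name, thread and document counts, and field layout are invented for
illustration.

import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class SharedWriterSketch {
  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory();
    final IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), true);

    Thread[] threads = new Thread[4];
    for (int i = 0; i < threads.length; i++) {
      final int base = 1000000 * i;  // disjoint per-thread id space (illustrative choice)
      threads[i] = new Thread() {
        public void run() {
          try {
            for (int j = 0; j < 100; j++) {
              Document d = new Document();
              d.add(new Field("id", Integer.toString(base + j),
                              Field.Store.YES, Field.Index.UN_TOKENIZED));
              w.addDocument(d);  // no locking here: IndexWriter must synchronize internally
            }
          } catch (Exception e) {
            throw new RuntimeException(e);
          }
        }
      };
    }
    for (int i = 0; i < threads.length; i++) threads[i].start();
    for (int i = 0; i < threads.length; i++) threads[i].join();
    w.close();

    IndexReader r = IndexReader.open(dir);
    try {
      System.out.println("docs indexed: " + r.numDocs());  // expect 400
    } finally {
      r.close();
    }
  }
}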
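
The postings comparison in verifyEquals leans on one trick: a document id and
a term frequency are packed into a single long, the (possibly remapped) docid
in the high 32 bits and the frequency in the low 32, so that a plain
Arrays.sort orders postings by docid while carrying each frequency along. A
self-contained demonstration (class name and sample values are mine, not the
patch's):

import java.util.Arrays;

public class PackSortSketch {
  // Same packing expression as the patch: docid in the high word, freq in the low word.
  static long pack(int doc, int freq) {
    return (((long) doc) << 32) | freq;
  }

  public static void main(String[] args) {
    long[] info = { pack(7, 2), pack(3, 5), pack(5, 1) };
    Arrays.sort(info);  // sorts by docid, since all values are nonnegative
    for (int i = 0; i < info.length; i++) {
      int doc = (int) (info[i] >>> 32);  // recover the high word
      int freq = (int) info[i];          // truncation keeps the low 32 bits
      System.out.println("doc=" + doc + " freq=" + freq);
    }
    // prints doc=3 freq=5, then doc=5 freq=1, then doc=7 freq=2
  }
}

This is sound because docids and frequencies are nonnegative and a frequency
fits in 31 bits, so the packed longs stay nonnegative and signed comparison
orders them by the high word first.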
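
Finally, IndexingThread.getString is cut off mid-loop in this copy. From what
survives (the nTokens default, the comment about avoiding StringBuffer's
synchronization, and the char[nTokens*2] buffer), the loop presumably writes
one token character plus a trailing space per token, yielding
whitespace-separated terms for the WhitespaceAnalyzer the patch indexes with.
The completion below is a guess at that shape, not the patch's actual code;
the loop bound and the 'A' + r.nextInt(10) alphabet in particular are
assumptions.

public String getString(int nTokens) {
  nTokens = nTokens != 0 ? nTokens : r.nextInt(4) + 1;
  // avoid StringBuffer because it adds extra synchronization.
  char[] arr = new char[nTokens * 2];  // one char per token plus its separator
  for (int i = 0; i < nTokens; i++) {  // loop bound and body are guessed
    arr[2 * i] = (char) ('A' + r.nextInt(10));  // hypothetical token alphabet
    arr[2 * i + 1] = ' ';  // whitespace so WhitespaceAnalyzer sees separate tokens
  }
  return new String(arr);
}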