diff -r 14ed99c667de lucene/contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java --- a/lucene/contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java Thu Dec 16 08:43:43 2010 -0500 +++ b/lucene/contrib/ant/src/java/org/apache/lucene/ant/IndexTask.java Thu Dec 16 11:51:57 2010 -0500 @@ -288,7 +288,6 @@ create ? OpenMode.CREATE : OpenMode.APPEND); LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy(); lmp.setUseCompoundFile(useCompoundIndex); - lmp.setUseCompoundDocStore(useCompoundIndex); lmp.setMergeFactor(mergeFactor); IndexWriter writer = new IndexWriter(dir, conf); int totalFiles = 0; diff -r 14ed99c667de lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java --- a/lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java Thu Dec 16 08:43:43 2010 -0500 +++ b/lucene/contrib/misc/src/test/org/apache/lucene/index/codecs/appending/TestAppendingCodec.java Thu Dec 16 11:51:57 2010 -0500 @@ -138,7 +138,6 @@ cfg.setCodecProvider(new AppendingCodecProvider()); ((LogMergePolicy)cfg.getMergePolicy()).setUseCompoundFile(false); - ((LogMergePolicy)cfg.getMergePolicy()).setUseCompoundDocStore(false); IndexWriter writer = new IndexWriter(dir, cfg); Document doc = new Document(); doc.add(newField("f", text, Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS)); diff -r 14ed99c667de lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/Syns2Index.java --- a/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/Syns2Index.java Thu Dec 16 08:43:43 2010 -0500 +++ b/lucene/contrib/wordnet/src/java/org/apache/lucene/wordnet/Syns2Index.java Thu Dec 16 11:51:57 2010 -0500 @@ -251,7 +251,6 @@ IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig( Version.LUCENE_CURRENT, ana).setOpenMode(OpenMode.CREATE)); ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundFile(true); // why? - ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundDocStore(true); // why? 
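The three contrib changes above all make the same adjustment: with the shared doc store gone, LogMergePolicy exposes only a single compound-file flag, so the companion setUseCompoundDocStore(...) call is simply dropped (the MergePolicy/LogMergePolicy hunks further down remove the method itself). A minimal sketch of the post-patch configuration, assuming a caller that already has a Directory and an Analyzer (the helper class and method names are illustrative, not part of the patch):

import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LogMergePolicy;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Version;

class CompoundFileSetup {
  // Hypothetical helper mirroring IndexTask/Syns2Index after this patch:
  // one flag now covers both regular segment files and the stored
  // fields / term vectors files.
  static IndexWriter openWriter(Directory dir, Analyzer analyzer, boolean compound)
      throws IOException {
    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_CURRENT, analyzer);
    LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy();
    lmp.setUseCompoundFile(compound); // formerly paired with setUseCompoundDocStore(compound)
    return new IndexWriter(dir, conf);
  }
}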
Iterator i1 = word2Nums.keySet().iterator(); while (i1.hasNext()) // for each word { diff -r 14ed99c667de lucene/src/java/org/apache/lucene/index/DocInverter.java --- a/lucene/src/java/org/apache/lucene/index/DocInverter.java Thu Dec 16 08:43:43 2010 -0500 +++ b/lucene/src/java/org/apache/lucene/index/DocInverter.java Thu Dec 16 11:51:57 2010 -0500 @@ -76,7 +76,6 @@ @Override public void closeDocStore(SegmentWriteState state) throws IOException { consumer.closeDocStore(state); - endConsumer.closeDocStore(state); } @Override diff -r 14ed99c667de lucene/src/java/org/apache/lucene/index/DocumentsWriter.java --- a/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java Thu Dec 16 08:43:43 2010 -0500 +++ b/lucene/src/java/org/apache/lucene/index/DocumentsWriter.java Thu Dec 16 11:51:57 2010 -0500 @@ -114,12 +114,12 @@ Directory directory; String segment; // Current segment we are working on - private String docStoreSegment; // Current doc-store segment we are writing - private int docStoreOffset; // Current starting doc-store offset of current segment + boolean docStoreEnabled; // nocommit private int nextDocID; // Next docID to be added private int numDocsInRAM; // # docs buffered in RAM - int numDocsInStore; // # docs written to doc stores + + // nocommits everywhere // Max # ThreadState instances; if there are more threads // than this they share ThreadStates @@ -139,8 +139,6 @@ // this, they wait for others to finish first private final int maxThreadStates; - List newFiles; - // Deletes for our still-in-RAM (to be flushed next) segment private SegmentDeletes pendingDeletes = new SegmentDeletes(); @@ -428,92 +426,6 @@ return numDocsInRAM; } - /** Returns the current doc store segment we are writing - * to. */ - synchronized String getDocStoreSegment() { - return docStoreSegment; - } - - /** Returns the doc offset into the shared doc store for - * the current buffered docs. */ - synchronized int getDocStoreOffset() { - return docStoreOffset; - } - - /** Closes the current open doc stores an sets the - * docStoreSegment and docStoreUseCFS on the provided - * SegmentInfo. 
*/ - synchronized void closeDocStore(SegmentWriteState flushState, IndexWriter writer, IndexFileDeleter deleter, SegmentInfo newSegment, MergePolicy mergePolicy, SegmentInfos segmentInfos) throws IOException { - - final boolean isSeparate = numDocsInRAM == 0 || !segment.equals(docStoreSegment); - - assert docStoreSegment != null; - - if (infoStream != null) { - message("closeDocStore: openFiles=" + openFiles + "; segment=" + docStoreSegment + "; docStoreOffset=" + docStoreOffset + "; numDocsInStore=" + numDocsInStore + "; isSeparate=" + isSeparate); - } - - closedFiles.clear(); - consumer.closeDocStore(flushState); - flushState.numDocsInStore = 0; - assert 0 == openFiles.size(); - - if (isSeparate) { - flushState.flushedFiles.clear(); - - if (mergePolicy.useCompoundDocStore(segmentInfos)) { - - final String compoundFileName = IndexFileNames.segmentFileName(docStoreSegment, "", IndexFileNames.COMPOUND_FILE_STORE_EXTENSION); - - if (infoStream != null) { - message("closeDocStore: create compound file " + compoundFileName); - } - - boolean success = false; - try { - - CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, compoundFileName); - for (final String file : closedFiles) { - cfsWriter.addFile(file); - } - - // Perform the merge - cfsWriter.close(); - - success = true; - } finally { - if (!success) { - deleter.deleteFile(compoundFileName); - } - } - - // In case the files we just merged into a CFS were - // not registered w/ IFD: - deleter.deleteNewFiles(closedFiles); - - final int numSegments = segmentInfos.size(); - for(int i=0;i DW -> BD pushDeletes(newSegment, segmentInfos); - docStoreOffset = numDocsInStore; - - return newSegment; + return newSegment; // nocommit } synchronized void close() { @@ -832,17 +738,6 @@ notifyAll(); } - synchronized void initSegmentName(boolean onlyDocStore) { - if (segment == null && (!onlyDocStore || docStoreSegment == null)) { - segment = writer.newSegmentName(); - assert numDocsInRAM == 0; - } - if (docStoreSegment == null) { - docStoreSegment = segment; - assert numDocsInStore == 0; - } - } - /** Returns a free (idle) ThreadState that may be used for * indexing this one document. This call also pauses if a * flush is pending. 
If delTerm is non-null then we @@ -888,7 +783,11 @@ // Allocate segment name if this is the first doc since // last flush: - initSegmentName(false); + if (segment == null) { + segment = writer.newSegmentName(); + assert numDocsInRAM == 0; + docStoreEnabled = true; + } state.docState.docID = nextDocID++; @@ -896,7 +795,7 @@ pendingDeletes.addTerm(delTerm, state.docState.docID); } - numDocsInRAM++; + // numDocsInRAM++; // nocommit state.isIdle = false; return state; } @@ -1267,7 +1166,7 @@ try { doc.finish(); nextWriteDocID++; - numDocsInStore++; + numDocsInRAM++; // nocommit nextWriteLoc++; assert nextWriteLoc <= waiting.length; if (nextWriteLoc == waiting.length) diff -r 14ed99c667de lucene/src/java/org/apache/lucene/index/IndexWriter.java --- a/lucene/src/java/org/apache/lucene/index/IndexWriter.java Thu Dec 16 08:43:43 2010 -0500 +++ b/lucene/src/java/org/apache/lucene/index/IndexWriter.java Thu Dec 16 11:51:57 2010 -0500 @@ -346,7 +346,7 @@ // just like we do when loading segments_N IndexReader r; synchronized(this) { - flush(false, true, true); + flush(false, true); r = new DirectoryReader(this, segmentInfos, config.getReaderTermsIndexDivisor(), codecs); if (infoStream != null) { message("return reader version=" + r.getVersion() + " reader=" + r); @@ -1037,7 +1037,7 @@ // Only allow a new merge to be triggered if we are // going to wait for merges: if (!hitOOM) { - flush(waitForMerges, true, true); + flush(waitForMerges, true); } if (waitForMerges) @@ -1264,7 +1264,7 @@ } } if (doFlush) - flush(true, false, false); + flush(true, false); } catch (OutOfMemoryError oom) { handleOOM(oom, "addDocument"); } @@ -1285,7 +1285,7 @@ ensureOpen(); try { if (docWriter.deleteTerm(term, false)) { - flush(true, false, false); + flush(true, false); } } catch (OutOfMemoryError oom) { handleOOM(oom, "deleteDocuments(Term)"); @@ -1309,7 +1309,7 @@ ensureOpen(); try { if (docWriter.deleteTerms(terms)) { - flush(true, false, false); + flush(true, false); } } catch (OutOfMemoryError oom) { handleOOM(oom, "deleteDocuments(Term..)"); @@ -1331,7 +1331,7 @@ ensureOpen(); try { if (docWriter.deleteQuery(query)) { - flush(true, false, false); + flush(true, false); } } catch (OutOfMemoryError oom) { handleOOM(oom, "deleteDocuments(Query)"); @@ -1355,7 +1355,7 @@ ensureOpen(); try { if (docWriter.deleteQueries(queries)) { - flush(true, false, false); + flush(true, false); } } catch (OutOfMemoryError oom) { handleOOM(oom, "deleteDocuments(Query..)"); @@ -1431,7 +1431,7 @@ } } if (doFlush) { - flush(true, false, false); + flush(true, false); } } catch (OutOfMemoryError oom) { handleOOM(oom, "updateDocument"); @@ -1601,7 +1601,7 @@ message("now flush at optimize"); } - flush(true, false, true); + flush(true, true); synchronized(this) { resetMergeExceptions(); @@ -2141,7 +2141,7 @@ try { if (infoStream != null) message("flush at addIndexes(Directory...)"); - flush(false, false, true); + flush(false, true); int docCount = 0; List infos = new ArrayList(); @@ -2337,7 +2337,7 @@ if (infoStream != null) message("prepareCommit: flush"); - flush(true, true, true); + flush(true, true); startCommit(commitUserData); } @@ -2454,12 +2454,9 @@ * to the Directory. 
* @param triggerMerge if true, we may merge segments (if * deletes or docs were flushed) if necessary - * @param flushDocStores if false we are allowed to keep - * doc stores open to share with the next segment * @param flushDeletes whether pending deletes should also - * be flushed */ - protected final void flush(boolean triggerMerge, boolean flushDocStores, boolean flushDeletes) throws CorruptIndexException, IOException { + protected final void flush(boolean triggerMerge, boolean flushDeletes) throws CorruptIndexException, IOException { // NOTE: this method cannot be sync'd because // maybeMerge() in turn calls mergeScheduler.merge which @@ -2470,7 +2467,7 @@ // We can be called during close, when closing==true, so we must pass false to ensureOpen: ensureOpen(false); - if (doFlush(flushDocStores, flushDeletes) && triggerMerge) { + if (doFlush(flushDeletes) && triggerMerge) { maybeMerge(); } } @@ -2478,7 +2475,7 @@ // TODO: this method should not have to be entirely // synchronized, ie, merges should be allowed to commit // even while a flush is happening - private synchronized final boolean doFlush(boolean closeDocStores, boolean applyAllDeletes) throws CorruptIndexException, IOException { + private synchronized boolean doFlush(boolean applyAllDeletes) throws CorruptIndexException, IOException { if (hitOOM) { throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot flush"); @@ -2501,11 +2498,11 @@ try { if (infoStream != null) { - message(" start flush: applyAllDeletes=" + applyAllDeletes + " closeDocStores=" + closeDocStores); + message(" start flush: applyAllDeletes=" + applyAllDeletes); message(" index before flush " + segString()); } - final SegmentInfo newSegment = docWriter.flush(this, closeDocStores, deleter, mergePolicy, segmentInfos); + final SegmentInfo newSegment = docWriter.flush(this, deleter, mergePolicy, segmentInfos); if (newSegment != null) { setDiagnostics(newSegment, "flush"); segmentInfos.add(newSegment); @@ -2916,7 +2913,7 @@ } } - final synchronized private void _mergeInit(MergePolicy.OneMerge merge) throws IOException { + synchronized private void _mergeInit(MergePolicy.OneMerge merge) throws IOException { assert testPoint("startMergeInit"); @@ -2933,133 +2930,33 @@ if (merge.isAborted()) return; - - final SegmentInfos sourceSegments = merge.segments; - final int end = sourceSegments.size(); - - // Check whether this merge will allow us to skip - // merging the doc stores (stored field & vectors). - // This is a very substantial optimization (saves tons - // of IO). 
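With the doc-store flush removed, IndexWriter.flush loses its middle flushDocStores argument, as the signature change above and the call-site updates throughout this file show. A hedged sketch of what a caller looks like after the change (the subclass and its method names are hypothetical; flush itself is the protected method from the patch):

import java.io.IOException;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;

class FlushingWriter extends IndexWriter {
  FlushingWriter(Directory dir, IndexWriterConfig conf) throws IOException {
    super(dir, conf);
  }

  // old: flush(true, false, false) -- trigger merges, keep doc stores open, no deletes
  void flushBufferedDocs() throws IOException {
    flush(true, false);   // triggerMerge=true, flushDeletes=false
  }

  // old: flush(true, true, true)
  void flushDocsAndDeletes() throws IOException {
    flush(true, true);    // triggerMerge=true, flushDeletes=true
  }
}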
- - Directory lastDir = directory; - String lastDocStoreSegment = null; - int next = -1; - - boolean mergeDocStores = false; - boolean doFlushDocStore = false; + boolean hasVectors = false; - final String currentDocStoreSegment = docWriter.getDocStoreSegment(); - - // Test each segment to be merged: check if we need to - // flush/merge doc stores - for (int i = 0; i < end; i++) { - SegmentInfo si = sourceSegments.info(i); - - // If it has deletions we must merge the doc stores - if (si.hasDeletions()) - mergeDocStores = true; - - if (si.getHasVectors()) { + + for (SegmentInfo sourceSegment : merge.segments) { + if (sourceSegment.getHasVectors()) { hasVectors = true; } - - // If it has its own (private) doc stores we must - // merge the doc stores - if (-1 == si.getDocStoreOffset()) - mergeDocStores = true; - - // If it has a different doc store segment than - // previous segments, we must merge the doc stores - String docStoreSegment = si.getDocStoreSegment(); - if (docStoreSegment == null) - mergeDocStores = true; - else if (lastDocStoreSegment == null) - lastDocStoreSegment = docStoreSegment; - else if (!lastDocStoreSegment.equals(docStoreSegment)) - mergeDocStores = true; - - // Segments' docScoreOffsets must be in-order, - // contiguous. For the default merge policy now - // this will always be the case but for an arbitrary - // merge policy this may not be the case - if (-1 == next) - next = si.getDocStoreOffset() + si.docCount; - else if (next != si.getDocStoreOffset()) - mergeDocStores = true; - else - next = si.getDocStoreOffset() + si.docCount; - - // If the segment comes from a different directory - // we must merge - if (lastDir != si.dir) - mergeDocStores = true; - - // If the segment is referencing the current "live" - // doc store outputs then we must merge - if (si.getDocStoreOffset() != -1 && currentDocStoreSegment != null && si.getDocStoreSegment().equals(currentDocStoreSegment)) { - doFlushDocStore = true; - } } - // if a mergedSegmentWarmer is installed, we must merge - // the doc stores because we will open a full - // SegmentReader on the merged segment: - if (!mergeDocStores && mergedSegmentWarmer != null && currentDocStoreSegment != null && lastDocStoreSegment != null && lastDocStoreSegment.equals(currentDocStoreSegment)) { - mergeDocStores = true; - } - - final int docStoreOffset; - final String docStoreSegment; - final boolean docStoreIsCompoundFile; - - if (mergeDocStores) { - docStoreOffset = -1; - docStoreSegment = null; - docStoreIsCompoundFile = false; - } else { - SegmentInfo si = sourceSegments.info(0); - docStoreOffset = si.getDocStoreOffset(); - docStoreSegment = si.getDocStoreSegment(); - docStoreIsCompoundFile = si.getDocStoreIsCompoundFile(); - } - - if (mergeDocStores && doFlushDocStore) { - // SegmentMerger intends to merge the doc stores - // (stored fields, vectors), and at least one of the - // segments to be merged refers to the currently - // live doc stores. - - // TODO: if we know we are about to merge away these - // newly flushed doc store files then we should not - // make compound file out of them... - if (infoStream != null) - message("now flush at merge"); - doFlush(true, false); - updatePendingMerges(1, false); - } - - merge.mergeDocStores = mergeDocStores; - // Bind a new segment name here so even with // ConcurrentMergePolicy we keep deterministic segment // names. 
merge.info = new SegmentInfo(newSegmentName(), 0, - directory, false, docStoreOffset, - docStoreSegment, - docStoreIsCompoundFile, + directory, false, -1, + null, + false, false, null, hasVectors); Map details = new HashMap(); details.put("optimize", Boolean.toString(merge.optimize)); - details.put("mergeFactor", Integer.toString(end)); - details.put("mergeDocStores", Boolean.toString(mergeDocStores)); + details.put("mergeFactor", Integer.toString(merge.segments.size())); setDiagnostics(merge.info, "merge", details); if (infoStream != null) { - message("merge seg=" + merge.info.name + " mergeDocStores=" + mergeDocStores); + message("merge seg=" + merge.info.name); } // Also enroll the merged segment into mergingSegments; @@ -3183,8 +3080,6 @@ final String mergedName = merge.info.name; - SegmentMerger merger = null; - int mergedDocCount = 0; SegmentInfos sourceSegments = merge.segments; @@ -3193,20 +3088,11 @@ if (infoStream != null) message("merging " + merge.segString(directory)); - merger = new SegmentMerger(directory, termIndexInterval, mergedName, merge, codecs, payloadProcessorProvider); + SegmentMerger merger = new SegmentMerger(directory, termIndexInterval, mergedName, merge, codecs, payloadProcessorProvider); merge.readers = new SegmentReader[numSegments]; merge.readersClone = new SegmentReader[numSegments]; - boolean mergeDocStores = false; - - final String currentDocStoreSegment; - synchronized(this) { - currentDocStoreSegment = docWriter.getDocStoreSegment(); - } - - boolean currentDSSMerged = false; - // This is try/finally to make sure merger's readers are // closed: boolean success = false; @@ -3218,7 +3104,7 @@ // Hold onto the "live" reader; we will use this to // commit merged deletes - SegmentReader reader = merge.readers[i] = readerPool.get(info, merge.mergeDocStores, + SegmentReader reader = merge.readers[i] = readerPool.get(info, true, // nocommit MERGE_READ_BUFFER_SIZE, -config.getReaderTermsIndexDivisor()); @@ -3228,14 +3114,6 @@ SegmentReader clone = merge.readersClone[i] = (SegmentReader) reader.clone(true); merger.add(clone); - if (clone.hasDeletions()) { - mergeDocStores = true; - } - - if (info.getDocStoreOffset() != -1 && currentDocStoreSegment != null) { - currentDSSMerged |= currentDocStoreSegment.equals(info.getDocStoreSegment()); - } - totDocCount += clone.numDocs(); } @@ -3245,39 +3123,8 @@ merge.checkAborted(directory); - // If deletions have arrived and it has now become - // necessary to merge doc stores, go and open them: - if (mergeDocStores && !merge.mergeDocStores) { - merge.mergeDocStores = true; - synchronized(this) { - - // If 1) we must now merge doc stores, and 2) at - // least one of the segments we are merging uses - // the doc store we are now writing to, we must at - // this point force this doc store closed (by - // calling flush). 
If we didn't do this then the - // readers will attempt to open an IndexInput - // on files that have still-open IndexOutputs - // against them: - if (currentDSSMerged) { - if (infoStream != null) { - message("now flush at mergeMiddle"); - } - doFlush(true, false); - updatePendingMerges(1, false); - } - } - - for(int i=0;i> threadsAndFields, SegmentWriteState state) throws IOException; - abstract void closeDocStore(SegmentWriteState state) throws IOException; abstract void abort(); abstract void setFieldInfos(FieldInfos fieldInfos); } diff -r 14ed99c667de lucene/src/java/org/apache/lucene/index/LogMergePolicy.java --- a/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java Thu Dec 16 08:43:43 2010 -0500 +++ b/lucene/src/java/org/apache/lucene/index/LogMergePolicy.java Thu Dec 16 11:51:57 2010 -0500 @@ -70,7 +70,6 @@ protected boolean calibrateSizeByDeletes = true; protected boolean useCompoundFile = true; - protected boolean useCompoundDocStore = true; public LogMergePolicy() { super(); @@ -157,27 +156,6 @@ return useCompoundFile; } - // Javadoc inherited - @Override - public boolean useCompoundDocStore(SegmentInfos infos) { - return useCompoundDocStore; - } - - /** Sets whether compound file format should be used for - * newly flushed and newly merged doc store - * segment files (term vectors and stored fields). */ - public void setUseCompoundDocStore(boolean useCompoundDocStore) { - this.useCompoundDocStore = useCompoundDocStore; - } - - /** Returns true if newly flushed and newly merge doc - * store segment files (term vectors and stored fields) - * are written in compound file format. @see - * #setUseCompoundDocStore */ - public boolean getUseCompoundDocStore() { - return useCompoundDocStore; - } - /** Sets whether the segment size should be calibrated by * the number of deletes when choosing segments for merge. */ public void setCalibrateSizeByDeletes(boolean calibrateSizeByDeletes) { @@ -591,8 +569,7 @@ sb.append("maxMergeSize=").append(maxMergeSize).append(", "); sb.append("calibrateSizeByDeletes=").append(calibrateSizeByDeletes).append(", "); sb.append("maxMergeDocs=").append(maxMergeDocs).append(", "); - sb.append("useCompoundFile=").append(useCompoundFile).append(", "); - sb.append("useCompoundDocStore=").append(useCompoundDocStore); + sb.append("useCompoundFile=").append(useCompoundFile); sb.append("]"); return sb.toString(); } diff -r 14ed99c667de lucene/src/java/org/apache/lucene/index/MergePolicy.java --- a/lucene/src/java/org/apache/lucene/index/MergePolicy.java Thu Dec 16 08:43:43 2010 -0500 +++ b/lucene/src/java/org/apache/lucene/index/MergePolicy.java Thu Dec 16 11:51:57 2010 -0500 @@ -67,7 +67,6 @@ public static class OneMerge { SegmentInfo info; // used by IndexWriter - boolean mergeDocStores; // used by IndexWriter boolean optimize; // used by IndexWriter boolean registerDone; // used by IndexWriter long mergeGen; // used by IndexWriter @@ -153,9 +152,6 @@ b.append(" into ").append(info.name); if (optimize) b.append(" [optimize]"); - if (mergeDocStores) { - b.append(" [mergeDocStores]"); - } if (aborted) { b.append(" [ABORTED]"); } @@ -318,10 +314,4 @@ * Returns true if a new segment (regardless of its origin) should use the compound file format. */ public abstract boolean useCompoundFile(SegmentInfos segments, SegmentInfo newSegment) throws IOException; - - /** - * Returns true if the doc store files should use the - * compound file format. 
- */ - public abstract boolean useCompoundDocStore(SegmentInfos segments); } diff -r 14ed99c667de lucene/src/java/org/apache/lucene/index/NoMergePolicy.java --- a/lucene/src/java/org/apache/lucene/index/NoMergePolicy.java Thu Dec 16 08:43:43 2010 -0500 +++ b/lucene/src/java/org/apache/lucene/index/NoMergePolicy.java Thu Dec 16 11:51:57 2010 -0500 @@ -67,9 +67,6 @@ throws CorruptIndexException, IOException { return null; } @Override - public boolean useCompoundDocStore(SegmentInfos segments) { return useCompoundFile; } - - @Override public boolean useCompoundFile(SegmentInfos segments, SegmentInfo newSegment) { return useCompoundFile; } @Override diff -r 14ed99c667de lucene/src/java/org/apache/lucene/index/NormsWriter.java --- a/lucene/src/java/org/apache/lucene/index/NormsWriter.java Thu Dec 16 08:43:43 2010 -0500 +++ b/lucene/src/java/org/apache/lucene/index/NormsWriter.java Thu Dec 16 11:51:57 2010 -0500 @@ -171,7 +171,4 @@ normsOut.close(); } } - - @Override - void closeDocStore(SegmentWriteState state) {} } diff -r 14ed99c667de lucene/src/java/org/apache/lucene/index/SegmentMerger.java --- a/lucene/src/java/org/apache/lucene/index/SegmentMerger.java Thu Dec 16 08:43:43 2010 -0500 +++ b/lucene/src/java/org/apache/lucene/index/SegmentMerger.java Thu Dec 16 11:51:57 2010 -0500 @@ -62,12 +62,6 @@ private final CheckAbort checkAbort; - // Whether we should merge doc stores (stored fields and - // vectors files). When all segments we are merging - // already share the same doc store files, we don't need - // to merge the doc stores. - private boolean mergeDocStores; - /** Maximum number of contiguous documents to bulk-copy when merging stored fields */ private final static int MAX_RAW_MERGE_DOCS = 4192; @@ -115,22 +109,6 @@ * @throws IOException if there is a low-level IO error */ final int merge() throws CorruptIndexException, IOException { - return merge(true); - } - - /** - * Merges the readers specified by the {@link #add} method - * into the directory passed to the constructor. - * @param mergeDocStores if false, we will not merge the - * stored fields nor vectors files - * @return The number of documents that were merged - * @throws CorruptIndexException if the index is corrupt - * @throws IOException if there is a low-level IO error - */ - final int merge(boolean mergeDocStores) throws CorruptIndexException, IOException { - - this.mergeDocStores = mergeDocStores; - // NOTE: it's important to add calls to // checkAbort.work(...) if you make any changes to this // method that will spend alot of time. 
The frequency @@ -142,7 +120,7 @@ mergeTerms(); mergeNorms(); - if (mergeDocStores && fieldInfos.hasVectors()) { + if (fieldInfos.hasVectors()) { mergeVectors(); } @@ -154,9 +132,7 @@ // Basic files for (String ext : IndexFileNames.COMPOUND_EXTENSIONS_NOT_CODEC) { - if (mergeDocStores || (!ext.equals(IndexFileNames.FIELDS_EXTENSION) && - !ext.equals(IndexFileNames.FIELDS_INDEX_EXTENSION))) - fileSet.add(IndexFileNames.segmentFileName(segment, "", ext)); + fileSet.add(IndexFileNames.segmentFileName(segment, "", ext)); } segmentWriteState.segmentCodecs.files(directory, info, fileSet); @@ -172,7 +148,7 @@ } // Vector files - if (fieldInfos.hasVectors() && mergeDocStores) { + if (fieldInfos.hasVectors()) { for (String ext : IndexFileNames.VECTOR_EXTENSIONS) { fileSet.add(IndexFileNames.segmentFileName(segment, "", ext)); } @@ -251,18 +227,8 @@ * @throws CorruptIndexException if the index is corrupt * @throws IOException if there is a low-level IO error */ - private final int mergeFields() throws CorruptIndexException, IOException { - - if (!mergeDocStores) { - // When we are not merging by doc stores, their field - // name -> number mapping are the same. So, we start - // with the fieldInfos of the last segment in this - // case, to keep that numbering. - final SegmentReader sr = (SegmentReader) readers.get(readers.size()-1); - fieldInfos = (FieldInfos) sr.core.fieldInfos.clone(); - } else { - fieldInfos = new FieldInfos();// merge field names - } + private int mergeFields() throws CorruptIndexException, IOException { + fieldInfos = new FieldInfos();// merge field names for (IndexReader reader : readers) { if (reader instanceof SegmentReader) { @@ -294,54 +260,43 @@ setMatchingSegmentReaders(); - if (mergeDocStores) { - // merge field values - final FieldsWriter fieldsWriter = new FieldsWriter(directory, segment, fieldInfos); + final FieldsWriter fieldsWriter = new FieldsWriter(directory, segment, fieldInfos); - try { - int idx = 0; - for (IndexReader reader : readers) { - final SegmentReader matchingSegmentReader = matchingSegmentReaders[idx++]; - FieldsReader matchingFieldsReader = null; - if (matchingSegmentReader != null) { - final FieldsReader fieldsReader = matchingSegmentReader.getFieldsReader(); - if (fieldsReader != null) { - matchingFieldsReader = fieldsReader; - } - } - if (reader.hasDeletions()) { - docCount += copyFieldsWithDeletions(fieldsWriter, - reader, matchingFieldsReader); - } else { - docCount += copyFieldsNoDeletions(fieldsWriter, - reader, matchingFieldsReader); + try { + int idx = 0; + for (IndexReader reader : readers) { + final SegmentReader matchingSegmentReader = matchingSegmentReaders[idx++]; + FieldsReader matchingFieldsReader = null; + if (matchingSegmentReader != null) { + final FieldsReader fieldsReader = matchingSegmentReader.getFieldsReader(); + if (fieldsReader != null) { + matchingFieldsReader = fieldsReader; } } - } finally { - fieldsWriter.close(); + if (reader.hasDeletions()) { + docCount += copyFieldsWithDeletions(fieldsWriter, + reader, matchingFieldsReader); + } else { + docCount += copyFieldsNoDeletions(fieldsWriter, + reader, matchingFieldsReader); + } } - - final String fileName = IndexFileNames.segmentFileName(segment, "", IndexFileNames.FIELDS_INDEX_EXTENSION); - final long fdxFileLength = directory.fileLength(fileName); - - if (4+((long) docCount)*8 != fdxFileLength) - // This is most likely a bug in Sun JRE 1.6.0_04/_05; - // we detect that the bug has struck, here, and - // throw an exception to prevent the corruption from - // entering the 
index. See LUCENE-1282 for - // details. - throw new RuntimeException("mergeFields produced an invalid result: docCount is " + docCount + " but fdx file size is " + fdxFileLength + " file=" + fileName + " file exists?=" + directory.fileExists(fileName) + "; now aborting this merge to prevent index corruption"); - - } else { - // If we are skipping the doc stores, that means there - // are no deletions in any of these segments, so we - // just sum numDocs() of each segment to get total docCount - for (final IndexReader reader : readers) { - docCount += reader.numDocs(); - } + } finally { + fieldsWriter.close(); } - segmentWriteState = new SegmentWriteState(null, directory, segment, fieldInfos, null, docCount, 0, termIndexInterval, codecInfo); + final String fileName = IndexFileNames.segmentFileName(segment, "", IndexFileNames.FIELDS_INDEX_EXTENSION); + final long fdxFileLength = directory.fileLength(fileName); + + if (4+((long) docCount)*8 != fdxFileLength) + // This is most likely a bug in Sun JRE 1.6.0_04/_05; + // we detect that the bug has struck, here, and + // throw an exception to prevent the corruption from + // entering the index. See LUCENE-1282 for + // details. + throw new RuntimeException("mergeFields produced an invalid result: docCount is " + docCount + " but fdx file size is " + fdxFileLength + " file=" + fileName + " file exists?=" + directory.fileExists(fileName) + "; now aborting this merge to prevent index corruption"); + + segmentWriteState = new SegmentWriteState(null, directory, segment, fieldInfos, docCount, termIndexInterval, codecInfo); return docCount; } diff -r 14ed99c667de lucene/src/java/org/apache/lucene/index/SegmentWriteState.java --- a/lucene/src/java/org/apache/lucene/index/SegmentWriteState.java Thu Dec 16 08:43:43 2010 -0500 +++ b/lucene/src/java/org/apache/lucene/index/SegmentWriteState.java Thu Dec 16 11:51:57 2010 -0500 @@ -31,9 +31,7 @@ public final Directory directory; public final String segmentName; public final FieldInfos fieldInfos; - public final String docStoreSegmentName; public final int numDocs; - public int numDocsInStore; public boolean hasVectors; public final Collection flushedFiles; @@ -62,15 +60,12 @@ public SegmentWriteState(PrintStream infoStream, Directory directory, String segmentName, FieldInfos fieldInfos, - String docStoreSegmentName, int numDocs, - int numDocsInStore, int termIndexInterval, SegmentCodecs segmentCodecs) { + int numDocs, int termIndexInterval, SegmentCodecs segmentCodecs) { this.infoStream = infoStream; this.directory = directory; this.segmentName = segmentName; this.fieldInfos = fieldInfos; - this.docStoreSegmentName = docStoreSegmentName; this.numDocs = numDocs; - this.numDocsInStore = numDocsInStore; this.termIndexInterval = termIndexInterval; this.segmentCodecs = segmentCodecs; flushedFiles = new HashSet(); @@ -85,9 +80,7 @@ directory = state.directory; segmentName = state.segmentName; fieldInfos = state.fieldInfos; - docStoreSegmentName = state.docStoreSegmentName; numDocs = state.numDocs; - numDocsInStore = state.numDocsInStore; termIndexInterval = state.termIndexInterval; segmentCodecs = state.segmentCodecs; flushedFiles = state.flushedFiles; diff -r 14ed99c667de lucene/src/java/org/apache/lucene/index/StoredFieldsWriter.java --- a/lucene/src/java/org/apache/lucene/index/StoredFieldsWriter.java Thu Dec 16 08:43:43 2010 -0500 +++ b/lucene/src/java/org/apache/lucene/index/StoredFieldsWriter.java Thu Dec 16 11:51:57 2010 -0500 @@ -29,7 +29,7 @@ final DocumentsWriter docWriter; final FieldInfos fieldInfos; 
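SegmentWriteState correspondingly loses its docStoreSegmentName and numDocsInStore constructor arguments (compare the TestCodecs hunk below). A small hypothetical sketch of the new call, written as if it lived in the org.apache.lucene.index package like that test; class and method names are illustrative only:

package org.apache.lucene.index;

import org.apache.lucene.store.Directory;

class WriteStateFactory {
  // old: new SegmentWriteState(infoStream, dir, segment, fieldInfos,
  //          docStoreSegmentName, numDocs, numDocsInStore, termIndexInterval, codecInfo)
  static SegmentWriteState newState(Directory dir, String segment, FieldInfos fieldInfos,
                                    int numDocs, int termIndexInterval,
                                    SegmentCodecs codecInfo) {
    return new SegmentWriteState(null, dir, segment, fieldInfos,
                                 numDocs, termIndexInterval, codecInfo);
  }
}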
int lastDocID; - private String docStoreSegment; + private String segment; PerDoc[] docFreeList = new PerDoc[1]; int freeCount; @@ -45,7 +45,8 @@ synchronized public void flush(SegmentWriteState state) throws IOException { - if (state.numDocsInStore > 0) { + /* nocommit: numDocs (former numDocsInStore should be zero by now (due to DW.closeDocStore) + if (state.numDocs > 0) { // It's possible that all documents seen in this segment // hit non-aborting exceptions, in which case we will // not have yet init'd the FieldsWriter: @@ -53,51 +54,44 @@ // Fill fdx file to include any final docs that we // skipped because they hit non-aborting exceptions - fill(state.numDocsInStore - docWriter.getDocStoreOffset()); + fill(state.numDocs); } - + */ if (fieldsWriter != null) fieldsWriter.flush(); } private synchronized void initFieldsWriter() throws IOException { - if (fieldsWriter == null) { - docStoreSegment = docWriter.getDocStoreSegment(); - if (docStoreSegment != null) { - fieldsWriter = new FieldsWriter(docWriter.directory, - docStoreSegment, - fieldInfos); - docWriter.addOpenFile(IndexFileNames.segmentFileName(docStoreSegment, "", IndexFileNames.FIELDS_EXTENSION)); - docWriter.addOpenFile(IndexFileNames.segmentFileName(docStoreSegment, "", IndexFileNames.FIELDS_INDEX_EXTENSION)); - lastDocID = 0; - } + if (fieldsWriter == null && docWriter.docStoreEnabled) { + segment = docWriter.getSegment(); + fieldsWriter = new FieldsWriter(docWriter.directory, segment, fieldInfos); // nocommit + docWriter.addOpenFile(IndexFileNames.segmentFileName(segment, "", IndexFileNames.FIELDS_EXTENSION)); + docWriter.addOpenFile(IndexFileNames.segmentFileName(segment, "", IndexFileNames.FIELDS_INDEX_EXTENSION)); + lastDocID = 0; } } synchronized public void closeDocStore(SegmentWriteState state) throws IOException { - final int inc = state.numDocsInStore - lastDocID; + final int inc = state.numDocs - lastDocID; if (inc > 0) { initFieldsWriter(); - fill(state.numDocsInStore - docWriter.getDocStoreOffset()); + fill(state.numDocs); } if (fieldsWriter != null) { fieldsWriter.close(); fieldsWriter = null; - assert docStoreSegment != null; - assert state.docStoreSegmentName != null; - assert docStoreSegment.equals(state.docStoreSegmentName): "fieldsWriter wrote to segment=" + docStoreSegment + " vs SegmentWriteState segment=" + state.docStoreSegmentName; lastDocID = 0; - String fieldsName = IndexFileNames.segmentFileName(state.docStoreSegmentName, "", IndexFileNames.FIELDS_EXTENSION); - String fieldsIdxName = IndexFileNames.segmentFileName(state.docStoreSegmentName, "", IndexFileNames.FIELDS_INDEX_EXTENSION); + String fieldsName = IndexFileNames.segmentFileName(segment, "", IndexFileNames.FIELDS_EXTENSION); + String fieldsIdxName = IndexFileNames.segmentFileName(segment, "", IndexFileNames.FIELDS_INDEX_EXTENSION); state.flushedFiles.add(fieldsName); state.flushedFiles.add(fieldsIdxName); docWriter.removeOpenFile(fieldsName); docWriter.removeOpenFile(fieldsIdxName); - if (4+((long) state.numDocsInStore)*8 != state.directory.fileLength(fieldsIdxName)) - throw new RuntimeException("after flush: fdx size mismatch: " + state.numDocsInStore + " docs vs " + state.directory.fileLength(fieldsIdxName) + " length in bytes of " + fieldsIdxName + " file exists?=" + state.directory.fileExists(fieldsIdxName)); + if (4+((long) state.numDocs)*8 != state.directory.fileLength(fieldsIdxName)) + throw new RuntimeException("after flush: fdx size mismatch: " + state.numDocs + " docs vs " + state.directory.fileLength(fieldsIdxName) + " length in 
bytes of " + fieldsIdxName + " file exists?=" + state.directory.fileExists(fieldsIdxName)); } } @@ -131,12 +125,9 @@ /** Fills in any hole in the docIDs */ void fill(int docID) throws IOException { - final int docStoreOffset = docWriter.getDocStoreOffset(); - // We must "catch up" for all docs before us // that had no stored fields: - final int end = docID+docStoreOffset; - while(lastDocID < end) { + while(lastDocID < docID) { fieldsWriter.skipDocument(); lastDocID++; } @@ -156,10 +147,6 @@ assert docWriter.writer.testPoint("StoredFieldsWriter.finishDocument end"); } - public boolean freeRAM() { - return false; - } - synchronized void free(PerDoc perDoc) { assert freeCount < docFreeList.length; assert 0 == perDoc.numStoredFields; diff -r 14ed99c667de lucene/src/java/org/apache/lucene/index/TermVectorsTermsWriter.java --- a/lucene/src/java/org/apache/lucene/index/TermVectorsTermsWriter.java Thu Dec 16 08:43:43 2010 -0500 +++ b/lucene/src/java/org/apache/lucene/index/TermVectorsTermsWriter.java Thu Dec 16 11:51:57 2010 -0500 @@ -30,7 +30,6 @@ final class TermVectorsTermsWriter extends TermsHashConsumer { final DocumentsWriter docWriter; - TermVectorsWriter termVectorsWriter; PerDoc[] docFreeList = new PerDoc[1]; int freeCount; IndexOutput tvx; @@ -62,10 +61,10 @@ if (tvx != null) { - if (state.numDocsInStore > 0) + if (state.numDocs > 0) // In case there are some final documents that we // didn't see (because they hit a non-aborting exception): - fill(state.numDocsInStore - docWriter.getDocStoreOffset()); + fill(state.numDocs); tvx.flush(); tvd.flush(); @@ -89,18 +88,18 @@ if (tvx != null) { // At least one doc in this run had term vectors // enabled - fill(state.numDocsInStore - docWriter.getDocStoreOffset()); + fill(state.numDocs); tvx.close(); tvf.close(); tvd.close(); tvx = null; - assert state.docStoreSegmentName != null; - String idxName = IndexFileNames.segmentFileName(state.docStoreSegmentName, "", IndexFileNames.VECTORS_INDEX_EXTENSION); - if (4+((long) state.numDocsInStore)*16 != state.directory.fileLength(idxName)) - throw new RuntimeException("after flush: tvx size mismatch: " + state.numDocsInStore + " docs vs " + state.directory.fileLength(idxName) + " length in bytes of " + idxName + " file exists?=" + state.directory.fileExists(idxName)); + assert state.segmentName != null; + String idxName = IndexFileNames.segmentFileName(state.segmentName, "", IndexFileNames.VECTORS_INDEX_EXTENSION); + if (4+((long) state.numDocs)*16 != state.directory.fileLength(idxName)) + throw new RuntimeException("after flush: tvx size mismatch: " + state.numDocs + " docs vs " + state.directory.fileLength(idxName) + " length in bytes of " + idxName + " file exists?=" + state.directory.fileExists(idxName)); - String fldName = IndexFileNames.segmentFileName(state.docStoreSegmentName, "", IndexFileNames.VECTORS_FIELDS_EXTENSION); - String docName = IndexFileNames.segmentFileName(state.docStoreSegmentName, "", IndexFileNames.VECTORS_DOCUMENTS_EXTENSION); + String fldName = IndexFileNames.segmentFileName(state.segmentName, "", IndexFileNames.VECTORS_FIELDS_EXTENSION); + String docName = IndexFileNames.segmentFileName(state.segmentName, "", IndexFileNames.VECTORS_DOCUMENTS_EXTENSION); state.flushedFiles.add(idxName); state.flushedFiles.add(fldName); state.flushedFiles.add(docName); @@ -135,11 +134,9 @@ /** Fills in no-term-vectors for all docs we haven't seen * since the last doc that had term vectors. 
*/ void fill(int docID) throws IOException { - final int docStoreOffset = docWriter.getDocStoreOffset(); - final int end = docID+docStoreOffset; - if (lastDocID < end) { + if (lastDocID < docID) { final long tvfPosition = tvf.getFilePointer(); - while(lastDocID < end) { + while(lastDocID < docID) { tvx.writeLong(tvd.getFilePointer()); tvd.writeVInt(0); tvx.writeLong(tvfPosition); @@ -149,20 +146,18 @@ } synchronized void initTermVectorsWriter() throws IOException { - if (tvx == null) { + if (tvx == null && docWriter.docStoreEnabled) { + String segment = docWriter.getSegment(); - final String docStoreSegment = docWriter.getDocStoreSegment(); - - if (docStoreSegment == null) - return; + // nocommit // If we hit an exception while init'ing the term // vector output files, we must abort this segment // because those files will be in an unknown // state: - String idxName = IndexFileNames.segmentFileName(docStoreSegment, "", IndexFileNames.VECTORS_INDEX_EXTENSION); - String docName = IndexFileNames.segmentFileName(docStoreSegment, "", IndexFileNames.VECTORS_DOCUMENTS_EXTENSION); - String fldName = IndexFileNames.segmentFileName(docStoreSegment, "", IndexFileNames.VECTORS_FIELDS_EXTENSION); + String idxName = IndexFileNames.segmentFileName(segment, "", IndexFileNames.VECTORS_INDEX_EXTENSION); + String docName = IndexFileNames.segmentFileName(segment, "", IndexFileNames.VECTORS_DOCUMENTS_EXTENSION); + String fldName = IndexFileNames.segmentFileName(segment, "", IndexFileNames.VECTORS_FIELDS_EXTENSION); hasVectors = true; tvx = docWriter.directory.createOutput(idxName); tvd = docWriter.directory.createOutput(docName); @@ -206,7 +201,7 @@ perDoc.numVectorFields = 0; } - assert lastDocID == perDoc.docID + docWriter.getDocStoreOffset(); + assert lastDocID == perDoc.docID; lastDocID++; @@ -215,12 +210,6 @@ assert docWriter.writer.testPoint("TermVectorsTermsWriter.finishDocument end"); } - public boolean freeRAM() { - // We don't hold any state beyond one doc, so we don't - // free persistent RAM here - return false; - } - @Override public void abort() { hasVectors = false; diff -r 14ed99c667de lucene/src/test/org/apache/lucene/TestSearch.java --- a/lucene/src/test/org/apache/lucene/TestSearch.java Thu Dec 16 08:43:43 2010 -0500 +++ b/lucene/src/test/org/apache/lucene/TestSearch.java Thu Dec 16 11:51:57 2010 -0500 @@ -77,7 +77,6 @@ IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer); LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy(); lmp.setUseCompoundFile(useCompoundFile); - lmp.setUseCompoundDocStore(useCompoundFile); IndexWriter writer = new IndexWriter(directory, conf); String[] docs = { diff -r 14ed99c667de lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java --- a/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java Thu Dec 16 08:43:43 2010 -0500 +++ b/lucene/src/test/org/apache/lucene/TestSearchForDuplicates.java Thu Dec 16 11:51:57 2010 -0500 @@ -82,7 +82,6 @@ IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer); LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy(); lmp.setUseCompoundFile(useCompoundFiles); - lmp.setUseCompoundDocStore(useCompoundFiles); IndexWriter writer = new IndexWriter(directory, conf); if (VERBOSE) { System.out.println("TEST: now build index"); diff -r 14ed99c667de lucene/src/test/org/apache/lucene/index/TestAddIndexes.java --- a/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java Thu Dec 16 08:43:43 2010 -0500 +++ b/lucene/src/test/org/apache/lucene/index/TestAddIndexes.java 
Thu Dec 16 11:51:57 2010 -0500 @@ -570,7 +570,6 @@ Directory dir = newDirectory(); LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy(); lmp.setUseCompoundFile(false); - lmp.setUseCompoundDocStore(false); lmp.setMergeFactor(100); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()) @@ -599,7 +598,6 @@ lmp = new LogByteSizeMergePolicy(); lmp.setMinMergeMB(0.0001); lmp.setUseCompoundFile(false); - lmp.setUseCompoundDocStore(false); lmp.setMergeFactor(4); writer = new IndexWriter(dir2, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()) diff -r 14ed99c667de lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java --- a/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java Thu Dec 16 08:43:43 2010 -0500 +++ b/lucene/src/test/org/apache/lucene/index/TestBackwardsCompatibility.java Thu Dec 16 11:51:57 2010 -0500 @@ -496,7 +496,6 @@ Directory dir = newFSDirectory(new File(dirName)); IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10); ((LogMergePolicy) conf.getMergePolicy()).setUseCompoundFile(doCFS); - ((LogMergePolicy) conf.getMergePolicy()).setUseCompoundDocStore(doCFS); IndexWriter writer = new IndexWriter(dir, conf); for(int i=0;i<35;i++) { @@ -508,7 +507,6 @@ // open fresh writer so we get no prx file in the added segment conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10); ((LogMergePolicy) conf.getMergePolicy()).setUseCompoundFile(doCFS); - ((LogMergePolicy) conf.getMergePolicy()).setUseCompoundDocStore(doCFS); writer = new IndexWriter(dir, conf); addNoProxDoc(writer); writer.close(); diff -r 14ed99c667de lucene/src/test/org/apache/lucene/index/TestCodecs.java --- a/lucene/src/test/org/apache/lucene/index/TestCodecs.java Thu Dec 16 08:43:43 2010 -0500 +++ b/lucene/src/test/org/apache/lucene/index/TestCodecs.java Thu Dec 16 11:51:57 2010 -0500 @@ -607,7 +607,7 @@ final int termIndexInterval = this.nextInt(13, 27); final SegmentCodecs codecInfo = SegmentCodecs.build(fieldInfos, CodecProvider.getDefault()); - final SegmentWriteState state = new SegmentWriteState(null, dir, SEGMENT, fieldInfos, null, 10000, 10000, termIndexInterval, codecInfo); + final SegmentWriteState state = new SegmentWriteState(null, dir, SEGMENT, fieldInfos, 10000, termIndexInterval, codecInfo); final FieldsConsumer consumer = state.segmentCodecs.codec().fieldsConsumer(state); Arrays.sort(fields); diff -r 14ed99c667de lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java --- a/lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java Thu Dec 16 08:43:43 2010 -0500 +++ b/lucene/src/test/org/apache/lucene/index/TestConcurrentMergeScheduler.java Thu Dec 16 11:51:57 2010 -0500 @@ -96,7 +96,7 @@ writer.addDocument(doc); failure.setDoFail(); try { - writer.flush(true, false, true); + writer.flush(true, true); if (failure.hitExc) { fail("failed to hit IOException"); } diff -r 14ed99c667de lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java --- a/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java Thu Dec 16 08:43:43 2010 -0500 +++ b/lucene/src/test/org/apache/lucene/index/TestDeletionPolicy.java Thu Dec 16 11:51:57 2010 -0500 @@ -195,16 +195,13 @@ final double SECONDS = 2.0; - boolean useCompoundFile = true; - Directory dir = newDirectory(); ExpirationTimeDeletionPolicy policy = new ExpirationTimeDeletionPolicy(dir, SECONDS); IndexWriterConfig conf = 
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()) .setIndexDeletionPolicy(policy); LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy(); - lmp.setUseCompoundFile(useCompoundFile); - lmp.setUseCompoundDocStore(useCompoundFile); + lmp.setUseCompoundFile(true); IndexWriter writer = new IndexWriter(dir, conf); writer.close(); @@ -219,8 +216,7 @@ new MockAnalyzer()).setOpenMode( OpenMode.APPEND).setIndexDeletionPolicy(policy); lmp = (LogMergePolicy) conf.getMergePolicy(); - lmp.setUseCompoundFile(useCompoundFile); - lmp.setUseCompoundDocStore(useCompoundFile); + lmp.setUseCompoundFile(true); writer = new IndexWriter(dir, conf); for(int j=0;j<17;j++) { addDoc(writer); @@ -298,7 +294,6 @@ .setMergeScheduler(new SerialMergeScheduler()); LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy(); lmp.setUseCompoundFile(useCompoundFile); - lmp.setUseCompoundDocStore(useCompoundFile); lmp.setMergeFactor(10); IndexWriter writer = new IndexWriter(dir, conf); for(int i=0;i<107;i++) { @@ -311,7 +306,6 @@ OpenMode.APPEND).setIndexDeletionPolicy(policy); lmp = (LogMergePolicy) conf.getMergePolicy(); lmp.setUseCompoundFile(useCompoundFile); - lmp.setUseCompoundDocStore(useCompoundFile); writer = new IndexWriter(dir, conf); writer.optimize(); writer.close(); @@ -488,7 +482,6 @@ .setMaxBufferedDocs(10); LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy(); lmp.setUseCompoundFile(useCompoundFile); - lmp.setUseCompoundDocStore(useCompoundFile); IndexWriter writer = new IndexWriter(dir, conf); for(int i=0;i<107;i++) { addDoc(writer); @@ -499,7 +492,6 @@ .setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy); lmp = (LogMergePolicy) conf.getMergePolicy(); lmp.setUseCompoundFile(useCompoundFile); - lmp.setUseCompoundDocStore(useCompoundFile); writer = new IndexWriter(dir, conf); writer.optimize(); writer.close(); @@ -539,7 +531,6 @@ .setMaxBufferedDocs(10); LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy(); lmp.setUseCompoundFile(useCompoundFile); - lmp.setUseCompoundDocStore(useCompoundFile); IndexWriter writer = new IndexWriter(dir, conf); for(int i=0;i<17;i++) { addDoc(writer); @@ -597,7 +588,6 @@ .setOpenMode(OpenMode.CREATE).setIndexDeletionPolicy(policy); LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy(); lmp.setUseCompoundFile(useCompoundFile); - lmp.setUseCompoundDocStore(useCompoundFile); IndexWriter writer = new IndexWriter(dir, conf); writer.close(); Term searchTerm = new Term("content", "aaa"); @@ -609,7 +599,6 @@ .setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy); lmp = (LogMergePolicy) conf.getMergePolicy(); lmp.setUseCompoundFile(useCompoundFile); - lmp.setUseCompoundDocStore(useCompoundFile); writer = new IndexWriter(dir, conf); for(int j=0;j<17;j++) { addDoc(writer); @@ -630,7 +619,6 @@ .setOpenMode(OpenMode.APPEND).setIndexDeletionPolicy(policy); lmp = (LogMergePolicy) conf.getMergePolicy(); lmp.setUseCompoundFile(useCompoundFile); - lmp.setUseCompoundDocStore(useCompoundFile); writer = new IndexWriter(dir, conf); writer.optimize(); // this is a commit @@ -706,7 +694,6 @@ .setMaxBufferedDocs(10); LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy(); lmp.setUseCompoundFile(useCompoundFile); - lmp.setUseCompoundDocStore(useCompoundFile); IndexWriter writer = new IndexWriter(dir, conf); writer.close(); Term searchTerm = new Term("content", "aaa"); @@ -720,7 +707,6 @@ .setMaxBufferedDocs(10); lmp = (LogMergePolicy) conf.getMergePolicy(); lmp.setUseCompoundFile(useCompoundFile); - lmp.setUseCompoundDocStore(useCompoundFile); writer 
= new IndexWriter(dir, conf); for(int j=0;j<17;j++) { addDoc(writer); diff -r 14ed99c667de lucene/src/test/org/apache/lucene/index/TestFieldsReader.java --- a/lucene/src/test/org/apache/lucene/index/TestFieldsReader.java Thu Dec 16 08:43:43 2010 -0500 +++ b/lucene/src/test/org/apache/lucene/index/TestFieldsReader.java Thu Dec 16 11:51:57 2010 -0500 @@ -53,7 +53,6 @@ dir = newDirectory(); IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()); ((LogMergePolicy) conf.getMergePolicy()).setUseCompoundFile(false); - ((LogMergePolicy) conf.getMergePolicy()).setUseCompoundDocStore(false); IndexWriter writer = new IndexWriter(dir, conf); writer.addDocument(testDoc); writer.close(); diff -r 14ed99c667de lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java --- a/lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java Thu Dec 16 08:43:43 2010 -0500 +++ b/lucene/src/test/org/apache/lucene/index/TestIndexFileDeleter.java Thu Dec 16 11:51:57 2010 -0500 @@ -56,7 +56,6 @@ addDoc(writer, i); } mergePolicy.setUseCompoundFile(false); - mergePolicy.setUseCompoundDocStore(false); for(;i<45;i++) { addDoc(writer, i); } diff -r 14ed99c667de lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java --- a/lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java Thu Dec 16 08:43:43 2010 -0500 +++ b/lucene/src/test/org/apache/lucene/index/TestIndexReaderCloneNorms.java Thu Dec 16 11:51:57 2010 -0500 @@ -240,7 +240,6 @@ LogMergePolicy lmp = (LogMergePolicy) iw.getConfig().getMergePolicy(); lmp.setMergeFactor(3); lmp.setUseCompoundFile(true); - lmp.setUseCompoundDocStore(true); iw.close(); } @@ -293,7 +292,6 @@ LogMergePolicy lmp = (LogMergePolicy) conf.getMergePolicy(); lmp.setMergeFactor(3); lmp.setUseCompoundFile(compound); - lmp.setUseCompoundDocStore(compound); IndexWriter iw = new IndexWriter(dir, conf); for (int i = 0; i < ndocs; i++) { iw.addDocument(newDoc()); diff -r 14ed99c667de lucene/src/test/org/apache/lucene/index/TestIndexWriter.java --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java Thu Dec 16 08:43:43 2010 -0500 +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriter.java Thu Dec 16 11:51:57 2010 -0500 @@ -866,7 +866,7 @@ doc.add(newField("field", "aaa", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS)); for(int i=0;i<19;i++) writer.addDocument(doc); - writer.flush(false, true, true); + writer.flush(false, true); writer.close(); SegmentInfos sis = new SegmentInfos(); sis.read(dir); @@ -1039,7 +1039,6 @@ LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy(); lmp.setMergeFactor(2); lmp.setUseCompoundFile(false); - lmp.setUseCompoundDocStore(false); Document doc = new Document(); String contents = "aa bb cc dd ee ff gg hh ii jj kk"; @@ -1075,7 +1074,6 @@ writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())); LogMergePolicy lmp2 = (LogMergePolicy) writer.getConfig().getMergePolicy(); lmp2.setUseCompoundFile(false); - lmp2.setUseCompoundDocStore(false); writer.optimize(); writer.close(); } @@ -2257,7 +2255,6 @@ TEST_VERSION_CURRENT, new MockAnalyzer())); LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy(); lmp.setUseCompoundFile(false); - lmp.setUseCompoundDocStore(false); ByteArrayOutputStream bos = new ByteArrayOutputStream(1024); writer.setInfoStream(new PrintStream(bos)); writer.addDocument(new Document()); diff -r 14ed99c667de lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java --- 
a/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java Thu Dec 16 08:43:43 2010 -0500 +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterDelete.java Thu Dec 16 11:51:57 2010 -0500 @@ -689,7 +689,6 @@ LogMergePolicy lmp = (LogMergePolicy) modifier.getConfig().getMergePolicy(); lmp.setUseCompoundFile(true); - lmp.setUseCompoundDocStore(true); dir.failOn(failure.reset()); diff -r 14ed99c667de lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java --- a/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java Thu Dec 16 08:43:43 2010 -0500 +++ b/lucene/src/test/org/apache/lucene/index/TestIndexWriterReader.java Thu Dec 16 11:51:57 2010 -0500 @@ -237,7 +237,7 @@ writer.setInfoStream(infoStream); // create the index createIndexNoClose(!optimize, "index1", writer); - writer.flush(false, true, true); + writer.flush(false, true); // create a 2nd index Directory dir2 = newDirectory(); @@ -317,7 +317,7 @@ writer.setInfoStream(infoStream); // create the index createIndexNoClose(!optimize, "index1", writer); - writer.flush(false, true, true); + writer.flush(false, true); // get a reader IndexReader r1 = writer.getReader(); @@ -524,7 +524,7 @@ IndexReader r1 = writer.getReader(); assertEquals(0, r1.maxDoc()); createIndexNoClose(false, "index1", writer); - writer.flush(!optimize, true, true); + writer.flush(!optimize, true); IndexReader iwr1 = writer.getReader(); assertEquals(100, iwr1.maxDoc()); @@ -536,7 +536,7 @@ Document d = createDocument(x, "index1", 5); writer.addDocument(d); } - writer.flush(false, true, true); + writer.flush(false, true); // verify the reader was reopened internally IndexReader iwr2 = writer.getReader(); assertTrue(iwr2 != r1); diff -r 14ed99c667de lucene/src/test/org/apache/lucene/index/TestLazyBug.java --- a/lucene/src/test/org/apache/lucene/index/TestLazyBug.java Thu Dec 16 08:43:43 2010 -0500 +++ b/lucene/src/test/org/apache/lucene/index/TestLazyBug.java Thu Dec 16 11:51:57 2010 -0500 @@ -66,8 +66,7 @@ TEST_VERSION_CURRENT, new MockAnalyzer())); LogMergePolicy lmp = (LogMergePolicy) writer.getConfig().getMergePolicy(); lmp.setUseCompoundFile(false); - lmp.setUseCompoundDocStore(false); - + for (int d = 1; d <= NUM_DOCS; d++) { Document doc = new Document(); for (int f = 1; f <= NUM_FIELDS; f++ ) { diff -r 14ed99c667de lucene/src/test/org/apache/lucene/index/TestNRTThreads.java --- a/lucene/src/test/org/apache/lucene/index/TestNRTThreads.java Thu Dec 16 08:43:43 2010 -0500 +++ b/lucene/src/test/org/apache/lucene/index/TestNRTThreads.java Thu Dec 16 11:51:57 2010 -0500 @@ -108,11 +108,13 @@ final int NUM_INDEX_THREADS = 2; final int NUM_SEARCH_THREADS = 3; - final int RUN_TIME_SEC = LuceneTestCase.TEST_NIGHTLY ? 300 : 5; + // nocommit + final int RUN_TIME_SEC = LuceneTestCase.TEST_NIGHTLY ? 300 : 1; final AtomicBoolean failed = new AtomicBoolean(); final AtomicInteger addCount = new AtomicInteger(); final AtomicInteger delCount = new AtomicInteger(); + // nocommit final long stopTime = System.currentTimeMillis() + RUN_TIME_SEC*1000; Thread[] threads = new Thread[NUM_INDEX_THREADS]; for(int thread=0;thread
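For reference, the size sanity checks in the StoredFieldsWriter and TermVectorsTermsWriter hunks above now key directly off state.numDocs rather than numDocsInStore minus a doc-store offset. A tiny illustrative calculation (not part of the patch) of the expected index-file lengths those checks compare against:

class StoreSizeCheck {
  // 4-byte header plus one 8-byte pointer per doc in the .fdx file,
  // and two 8-byte pointers per doc in the .tvx file.
  static void printExpectedSizes(long numDocs) {
    long fdxBytes = 4 + numDocs * 8;
    long tvxBytes = 4 + numDocs * 16;
    System.out.println("fdx=" + fdxBytes + " bytes, tvx=" + tvxBytes + " bytes");
  }

  public static void main(String[] args) {
    printExpectedSizes(10000);  // fdx=80004 bytes, tvx=160004 bytes
  }
}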