Index: src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java =================================================================== --- src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java (revision 724107) +++ src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java (working copy) @@ -18,6 +18,8 @@ */ import org.apache.lucene.store.Directory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.List; @@ -34,6 +36,8 @@ public class ConcurrentMergeScheduler extends MergeScheduler { + private static final Logger logger = LoggerFactory.getLogger(ConcurrentMergeScheduler.class); + private int mergeThreadPriority = -1; protected List mergeThreads = new ArrayList(); @@ -94,15 +98,6 @@ } } - private boolean verbose() { - return writer != null && writer.verbose(); - } - - private void message(String message) { - if (verbose()) - writer.message("CMS: " + message); - } - private synchronized void initMergeThreadPriority() { if (mergeThreadPriority == -1) { // Default to slightly higher priority than our @@ -119,12 +114,13 @@ public synchronized void sync() { while(mergeThreadCount() > 0) { - if (verbose()) - message("now wait for threads; currently " + mergeThreads.size() + " still running"); + if (logger.isDebugEnabled()) { + logger.debug("now wait for threads; currently " + mergeThreads.size() + " still running"); + } final int count = mergeThreads.size(); - if (verbose()) { + if (logger.isDebugEnabled()) { for(int i=0;i= maxThreadCount) { - if (verbose()) - message(" too many merge threads running; stalling..."); + if (logger.isDebugEnabled()) { + logger.debug(" too many merge threads running; stalling..."); + } try { wait(); } catch (InterruptedException ie) { @@ -196,8 +194,9 @@ } } - if (verbose()) - message(" consider merge " + merge.segString(dir)); + if (logger.isDebugEnabled()) { + logger.debug(" consider merge " + merge.segString(dir)); + } assert mergeThreadCount() < maxThreadCount; @@ -205,8 +204,9 @@ // merge: final MergeThread merger = getMergeThread(writer, merge); mergeThreads.add(merger); - if (verbose()) - message(" launch new thread [" + merger.getName() + "]"); + if (logger.isDebugEnabled()) { + logger.debug(" launch new thread [" + merger.getName() + "]"); + } merger.start(); } } @@ -266,8 +266,9 @@ try { - if (verbose()) - message(" merge thread: start"); + if (logger.isDebugEnabled()) { + logger.debug(" merge thread: start"); + } while(true) { setRunningMerge(merge); @@ -278,14 +279,16 @@ merge = writer.getNextMerge(); if (merge != null) { writer.mergeInit(merge); - if (verbose()) - message(" merge thread: do another merge " + merge.segString(dir)); + if (logger.isDebugEnabled()) { + logger.debug(" merge thread: do another merge " + merge.segString(dir)); + } } else break; } - if (verbose()) - message(" merge thread: done"); + if (logger.isDebugEnabled()) { + logger.debug(" merge thread: done"); + } } catch (Throwable exc) { Index: src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java =================================================================== --- src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java (revision 724107) +++ src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java (working copy) @@ -23,6 +23,8 @@ import java.io.IOException; import org.apache.lucene.document.Document; import org.apache.lucene.document.Fieldable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Gathers all Fieldables for a document under the same @@ -35,6 +37,8 @@ final 
class DocFieldProcessorPerThread extends DocConsumerPerThread { + private static final Logger logger = LoggerFactory.getLogger(DocFieldProcessorPerThread.class); + float docBoost; int fieldGen; final DocFieldProcessor docFieldProcessor; @@ -106,8 +110,9 @@ else lastPerField.next = perField.next; - if (state.docWriter.infoStream != null) - state.docWriter.infoStream.println(" purge field=" + perField.fieldInfo.name); + if (logger.isDebugEnabled()) { + logger.debug(" purge field=" + perField.fieldInfo.name); + } totalFieldCount--; @@ -233,8 +238,9 @@ for(int i=0;i= maxFieldLength) { - if (docState.infoStream != null) - docState.infoStream.println("maxFieldLength " +maxFieldLength+ " reached for field " + fieldInfo.name + ", ignoring following tokens"); + if (logger.isDebugEnabled()) { + logger.debug("maxFieldLength " +maxFieldLength+ " reached for field " + fieldInfo.name + ", ignoring following tokens"); + } break; } } Index: src/java/org/apache/lucene/index/DocumentsWriter.java =================================================================== --- src/java/org/apache/lucene/index/DocumentsWriter.java (revision 724107) +++ src/java/org/apache/lucene/index/DocumentsWriter.java (working copy) @@ -17,28 +17,29 @@ * limitations under the License. */ +import java.io.IOException; +import java.text.NumberFormat; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map.Entry; + import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.document.Document; -import org.apache.lucene.search.Similarity; -import org.apache.lucene.search.Query; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Similarity; import org.apache.lucene.search.Weight; -import org.apache.lucene.store.Directory; import org.apache.lucene.store.AlreadyClosedException; +import org.apache.lucene.store.Directory; import org.apache.lucene.util.ArrayUtil; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.io.PrintStream; -import java.util.Collection; -import java.util.Iterator; -import java.util.List; -import java.util.HashMap; -import java.util.HashSet; -import java.util.ArrayList; -import java.util.Map.Entry; -import java.text.NumberFormat; - /** * This class accepts multiple added documents and directly * writes a single segment file. It does this more @@ -109,6 +110,8 @@ final class DocumentsWriter { + private static final Logger logger = LoggerFactory.getLogger(DocumentsWriter.class); + IndexWriter writer; Directory directory; @@ -134,7 +137,6 @@ private DocFieldProcessor docFieldProcessor; - PrintStream infoStream; int maxFieldLength = IndexWriter.DEFAULT_MAX_FIELD_LENGTH; Similarity similarity; @@ -144,7 +146,6 @@ DocumentsWriter docWriter; Analyzer analyzer; int maxFieldLength; - PrintStream infoStream; Similarity similarity; int docID; Document doc; @@ -169,7 +170,7 @@ void setNext(DocWriter next) { this.next = next; } - }; + } /** * The IndexingChain must define the {@link #getChain(DocumentsWriter)} method @@ -278,14 +279,6 @@ : true; } - /** If non-null, various details of indexing are printed - * here. 
*/ - synchronized void setInfoStream(PrintStream infoStream) { - this.infoStream = infoStream; - for(int i=0;i freeTrigger) { - if (infoStream != null) - message(" RAM: now balance allocations: usedMB=" + toMB(numBytesUsed) + + if (logger.isDebugEnabled()) { + logger.debug(" RAM: now balance allocations: usedMB=" + toMB(numBytesUsed) + " vs trigger=" + toMB(flushTrigger) + " allocMB=" + toMB(numBytesAlloc) + " vs trigger=" + toMB(freeTrigger) + " byteBlockFree=" + toMB(byteBlockAllocator.freeByteBlocks.size()*BYTE_BLOCK_SIZE) + " charBlockFree=" + toMB(freeCharBlocks.size()*CHAR_BLOCK_SIZE*CHAR_NUM_BYTE)); - + } + final long startBytesAlloc = numBytesAlloc; int iter = 0; @@ -1299,11 +1291,11 @@ if (0 == byteBlockAllocator.freeByteBlocks.size() && 0 == freeCharBlocks.size() && 0 == freeIntBlocks.size() && !any) { // Nothing else to free -- must flush now. bufferIsFull = numBytesUsed > flushTrigger; - if (infoStream != null) { + if (logger.isDebugEnabled()) { if (numBytesUsed > flushTrigger) - message(" nothing to free; now set bufferIsFull"); + logger.debug(" nothing to free; now set bufferIsFull"); else - message(" nothing to free"); + logger.debug(" nothing to free"); } assert numBytesUsed <= numBytesAlloc; break; @@ -1332,9 +1324,9 @@ iter++; } - if (infoStream != null) - message(" after free: freedMB=" + nf.format((startBytesAlloc-numBytesAlloc)/1024./1024.) + " usedMB=" + nf.format(numBytesUsed/1024./1024.) + " allocMB=" + nf.format(numBytesAlloc/1024./1024.)); - + if (logger.isDebugEnabled()) { + logger.debug(" after free: freedMB=" + nf.format((startBytesAlloc-numBytesAlloc)/1024./1024.) + " usedMB=" + nf.format(numBytesUsed/1024./1024.) + " allocMB=" + nf.format(numBytesAlloc/1024./1024.)); + } } else { // If we have not crossed the 100% mark, but have // crossed the 95% mark of RAM we are actually @@ -1344,11 +1336,11 @@ synchronized(this) { if (numBytesUsed > flushTrigger) { - if (infoStream != null) - message(" RAM: now flush @ usedMB=" + nf.format(numBytesUsed/1024./1024.) + + if (logger.isDebugEnabled()) { + logger.debug(" RAM: now flush @ usedMB=" + nf.format(numBytesUsed/1024./1024.) + " allocMB=" + nf.format(numBytesAlloc/1024./1024.) 
+ " triggerMB=" + nf.format(flushTrigger/1024./1024.)); - + } bufferIsFull = true; } } Index: src/java/org/apache/lucene/index/DocumentsWriterThreadState.java =================================================================== --- src/java/org/apache/lucene/index/DocumentsWriterThreadState.java (revision 724107) +++ src/java/org/apache/lucene/index/DocumentsWriterThreadState.java (working copy) @@ -37,7 +37,6 @@ this.docWriter = docWriter; docState = new DocumentsWriter.DocState(); docState.maxFieldLength = docWriter.maxFieldLength; - docState.infoStream = docWriter.infoStream; docState.similarity = docWriter.similarity; docState.docWriter = docWriter; consumer = docWriter.consumer.addThread(this); Index: src/java/org/apache/lucene/index/IndexFileDeleter.java =================================================================== --- src/java/org/apache/lucene/index/IndexFileDeleter.java (revision 724107) +++ src/java/org/apache/lucene/index/IndexFileDeleter.java (working copy) @@ -18,6 +18,8 @@ */ import org.apache.lucene.store.Directory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; import java.io.FileNotFoundException; @@ -76,6 +78,8 @@ final class IndexFileDeleter { + private static final Logger logger = LoggerFactory.getLogger(IndexFileDeleter.class); + /* Files that we tried to delete but failed (likely * because they are open and we are running on Windows), * so we will retry them again later: */ @@ -100,7 +104,6 @@ /* Commits that the IndexDeletionPolicy have decided to delete: */ private List commitsToDelete = new ArrayList(); - private PrintStream infoStream; private Directory directory; private IndexDeletionPolicy policy; private DocumentsWriter docWriter; @@ -111,17 +114,29 @@ * infoStream != null */ public static boolean VERBOSE_REF_COUNTS = false; - void setInfoStream(PrintStream infoStream) { - this.infoStream = infoStream; - if (infoStream != null) - message("setInfoStream deletionPolicy=" + policy); - } - private void message(String message) { - infoStream.println("IFD [" + Thread.currentThread().getName() + "]: " + message); + logger.debug("IFD [" + Thread.currentThread().getName() + "]: " + message); } /** + * Initialize the deleter: find all previous commits in the Directory, incref + * the files they reference, call the policy to let it delete commits. This + * will remove any files not referenced by any of the commits. + * + * @throws CorruptIndexException + * if the index is corrupt + * @throws IOException + * if there is a low-level IO error + * @deprecated use + * {@link #IndexFileDeleter(Directory, IndexDeletionPolicy, SegmentInfos, DocumentsWriter)} + * instead since infoStream is no longer in use. + */ + public IndexFileDeleter(Directory directory, IndexDeletionPolicy policy, SegmentInfos segmentInfos, PrintStream infoStream, DocumentsWriter docWriter) + throws CorruptIndexException, IOException { + this(directory, policy, segmentInfos, docWriter); + } + + /** * Initialize the deleter: find all previous commits in * the Directory, incref the files they reference, call * the policy to let it delete commits. 
This will remove @@ -129,15 +144,15 @@ * @throws CorruptIndexException if the index is corrupt * @throws IOException if there is a low-level IO error */ - public IndexFileDeleter(Directory directory, IndexDeletionPolicy policy, SegmentInfos segmentInfos, PrintStream infoStream, DocumentsWriter docWriter) + public IndexFileDeleter(Directory directory, IndexDeletionPolicy policy, SegmentInfos segmentInfos, DocumentsWriter docWriter) throws CorruptIndexException, IOException { this.docWriter = docWriter; - this.infoStream = infoStream; - if (infoStream != null) + if (logger.isDebugEnabled()) { message("init: current segments file is \"" + segmentInfos.getCurrentSegmentFileName() + "\"; deletionPolicy=" + policy); - + } + this.policy = policy; this.directory = directory; @@ -165,7 +180,7 @@ // it's valid (<= the max gen). Load it, then // incref all files it refers to: if (SegmentInfos.generationFromSegmentsFileName(fileName) <= currentGen) { - if (infoStream != null) { + if (logger.isDebugEnabled()) { message("init: load commit \"" + fileName + "\""); } SegmentInfos sis = new SegmentInfos(); @@ -179,7 +194,7 @@ // file segments_X exists when in fact it // doesn't. So, we catch this and handle it // as if the file does not exist - if (infoStream != null) { + if (logger.isDebugEnabled()) { message("init: hit FileNotFoundException when loading commit \"" + fileName + "\"; skipping this commit point"); } sis = null; @@ -211,8 +226,9 @@ } catch (IOException e) { throw new CorruptIndexException("failed to locate current segments_N file"); } - if (infoStream != null) + if (logger.isDebugEnabled()) { message("forced open of current segments file " + segmentInfos.getCurrentSegmentFileName()); + } currentCommitPoint = new CommitPoint(commitsToDelete, directory, sis); commits.add(currentCommitPoint); incRef(sis, true); @@ -229,7 +245,7 @@ String fileName = (String) it.next(); RefCount rc = (RefCount) refCounts.get(fileName); if (0 == rc.count) { - if (infoStream != null) { + if (logger.isDebugEnabled()) { message("init: removing unreferenced file \"" + fileName + "\""); } deleteFile(fileName); @@ -263,7 +279,7 @@ // the now-deleted commits: for(int i=0;i[Note that as of 2.1, all but one of the * methods in this class are available via {@link @@ -100,7 +99,6 @@ protected boolean open = false; // Lucene defaults: - protected PrintStream infoStream = null; protected boolean useCompoundFile = true; protected int maxBufferedDocs = IndexWriter.DEFAULT_MAX_BUFFERED_DOCS; protected int maxFieldLength = IndexWriter.DEFAULT_MAX_FIELD_LENGTH; @@ -205,7 +203,6 @@ // because it synchronizes on the directory which can // cause deadlock indexWriter.setMergeScheduler(new SerialMergeScheduler()); - indexWriter.setInfoStream(infoStream); indexWriter.setUseCompoundFile(useCompoundFile); if (maxBufferedDocs != IndexWriter.DISABLE_AUTO_FLUSH) indexWriter.setMaxBufferedDocs(maxBufferedDocs); @@ -382,15 +379,10 @@ * @see IndexWriter#setInfoStream(PrintStream) * @throws IllegalStateException if the index is closed */ - public void setInfoStream(PrintStream infoStream) { - synchronized(directory) { - assureOpen(); - if (indexWriter != null) { - indexWriter.setInfoStream(infoStream); - } - this.infoStream = infoStream; - } - } + // TODO: reinstate and deprecate +// public void setInfoStream(PrintStream infoStream) { +// // Do nothing. This method is deprecated; callers should use a logging framework instead.
+// } /** * @see IndexModifier#setInfoStream(PrintStream) @@ -400,13 +392,11 @@ * be obtained) * @throws IOException if there is a low-level IO error */ - public PrintStream getInfoStream() throws CorruptIndexException, LockObtainFailedException, IOException { - synchronized(directory) { - assureOpen(); - createIndexWriter(); - return indexWriter.getInfoStream(); - } - } + // TODO: reenstate and deprecate +// public PrintStream getInfoStream() throws CorruptIndexException, LockObtainFailedException, IOException { +// // Method is deprecated and should use a logging framework. +// return null; +// } /** * Setting to turn on usage of a compound file. When on, multiple files Index: src/java/org/apache/lucene/index/IndexWriter.java =================================================================== --- src/java/org/apache/lucene/index/IndexWriter.java (revision 724107) +++ src/java/org/apache/lucene/index/IndexWriter.java (working copy) @@ -29,6 +29,8 @@ import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.BitVector; import org.apache.lucene.util.Constants; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.File; import java.io.IOException; @@ -220,6 +222,8 @@ */ public class IndexWriter { + private static final Logger logger = LoggerFactory.getLogger(IndexWriter.class); + /** * Default value for the write lock timeout (1,000). * @see #setDefaultWriteLockTimeout @@ -308,9 +312,6 @@ private final static int MERGE_READ_BUFFER_SIZE = 4096; // Used for printing messages - private static Object MESSAGE_ID_LOCK = new Object(); - private static int MESSAGE_ID = 0; - private int messageID = -1; volatile private boolean hitOOM; private Directory directory; // where this index resides @@ -416,24 +417,21 @@ } /** - * Prints a message to the infoStream (if non-null), - * prefixed with the identifying information for this - * writer and the thread that's calling it. + * Prints a message to the infoStream (if non-null), prefixed with the + * identifying information for this writer and the thread that's calling it. + * + * @deprecated we use SLF4J to output logging messages. You can use Java's + * built-in logging by enabling logging for this class, or bind + * another package. Read more in http://www.slf4j.org/docs.html. */ public void message(String message) { - if (infoStream != null) - infoStream.println("IW " + messageID + " [" + Thread.currentThread().getName() + "]: " + message); } - private synchronized void setMessageID(PrintStream infoStream) { - if (infoStream != null && messageID == -1) { - synchronized(MESSAGE_ID_LOCK) { - messageID = MESSAGE_ID++; - } - } - this.infoStream = infoStream; + // Assumes logger.isDebugEnabled() is checked by the calling code. + protected void safeDebugMsg(String message) { + logger.debug("IW [" + Thread.currentThread().getName() + "]: " + message); } - + /** * Casts current mergePolicy to LogMergePolicy, and throws * an exception if the mergePolicy is not a LogMergePolicy. 
@@ -1137,7 +1135,6 @@ this.closeDir = closeDir; directory = d; analyzer = a; - setMessageID(defaultInfoStream); this.maxFieldLength = maxFieldLength; if (indexingChain == null) @@ -1181,8 +1178,9 @@ oldInfos.read(directory, commit.getSegmentsFileName()); segmentInfos.replace(oldInfos); changeCount++; - if (infoStream != null) - message("init: loaded commit \"" + commit.getSegmentsFileName() + "\""); + if (logger.isDebugEnabled()) { + safeDebugMsg("init: loaded commit \"" + commit.getSegmentsFileName() + "\""); + } } // We assume that this segments_N was previously @@ -1199,14 +1197,13 @@ setRollbackSegmentInfos(segmentInfos); docWriter = new DocumentsWriter(directory, this, indexingChain); - docWriter.setInfoStream(infoStream); docWriter.setMaxFieldLength(maxFieldLength); // Default deleter (for backwards compatibility) is // KeepOnlyLastCommitDeleter: deleter = new IndexFileDeleter(directory, deletionPolicy == null ? new KeepOnlyLastCommitDeletionPolicy() : deletionPolicy, - segmentInfos, infoStream, docWriter); + segmentInfos, docWriter); if (deleter.startingCommitDeleted) // Deletion policy deleted the "head" commit point. @@ -1217,9 +1214,9 @@ pushMaxBufferedDocs(); - if (infoStream != null) { - message("init: create=" + create); - messageState(); + if (logger.isDebugEnabled()) { + safeDebugMsg("init: create=" + create); + safeDebugMsg(messageState()); } } catch (IOException e) { @@ -1250,8 +1247,9 @@ mergePolicy.close(); mergePolicy = mp; pushMaxBufferedDocs(); - if (infoStream != null) - message("setMergePolicy " + mp); + if (logger.isDebugEnabled()) { + safeDebugMsg("setMergePolicy " + mp); + } } /** @@ -1276,8 +1274,9 @@ this.mergeScheduler.close(); } this.mergeScheduler = mergeScheduler; - if (infoStream != null) - message("setMergeScheduler " + mergeScheduler); + if (logger.isDebugEnabled()) { + safeDebugMsg("setMergeScheduler " + mergeScheduler); + } } /** @@ -1347,8 +1346,9 @@ ensureOpen(); this.maxFieldLength = maxFieldLength; docWriter.setMaxFieldLength(maxFieldLength); - if (infoStream != null) - message("setMaxFieldLength " + maxFieldLength); + if (logger.isDebugEnabled()) { + safeDebugMsg("setMaxFieldLength " + maxFieldLength); + } } /** @@ -1391,8 +1391,9 @@ "at least one of ramBufferSize and maxBufferedDocs must be enabled"); docWriter.setMaxBufferedDocs(maxBufferedDocs); pushMaxBufferedDocs(); - if (infoStream != null) - message("setMaxBufferedDocs " + maxBufferedDocs); + if (logger.isDebugEnabled()) { + safeDebugMsg("setMaxBufferedDocs " + maxBufferedDocs); + } } /** @@ -1407,8 +1408,9 @@ LogDocMergePolicy lmp = (LogDocMergePolicy) mp; final int maxBufferedDocs = docWriter.getMaxBufferedDocs(); if (lmp.getMinMergeDocs() != maxBufferedDocs) { - if (infoStream != null) - message("now push maxBufferedDocs " + maxBufferedDocs + " to LogDocMergePolicy"); + if (logger.isDebugEnabled()) { + safeDebugMsg("now push maxBufferedDocs " + maxBufferedDocs + " to LogDocMergePolicy"); + } lmp.setMinMergeDocs(maxBufferedDocs); } } @@ -1452,8 +1454,9 @@ throw new IllegalArgumentException( "at least one of ramBufferSize and maxBufferedDocs must be enabled"); docWriter.setRAMBufferSizeMB(mb); - if (infoStream != null) - message("setRAMBufferSizeMB " + mb); + if (logger.isDebugEnabled()) { + safeDebugMsg("setRAMBufferSizeMB " + mb); + } } /** @@ -1482,8 +1485,9 @@ throw new IllegalArgumentException( "maxBufferedDeleteTerms must at least be 1 when enabled"); docWriter.setMaxBufferedDeleteTerms(maxBufferedDeleteTerms); - if (infoStream != null) - message("setMaxBufferedDeleteTerms " + 
maxBufferedDeleteTerms); + if (logger.isDebugEnabled()) { + safeDebugMsg("setMaxBufferedDeleteTerms " + maxBufferedDeleteTerms); + } } /** @@ -1556,38 +1560,36 @@ maxSyncPauseSeconds = seconds; } - /** If non-null, this will be the default infoStream used - * by a newly instantiated IndexWriter. - * @see #setInfoStream + /** + * If non-null, this will be the default infoStream used by a newly + * instantiated IndexWriter. + * + * @deprecated this method does nothing. We use SLF4J for logging messages. */ public static void setDefaultInfoStream(PrintStream infoStream) { - IndexWriter.defaultInfoStream = infoStream; + // Do nothing. } /** * Returns the current default infoStream for newly * instantiated IndexWriters. - * @see #setDefaultInfoStream + * @deprecated always returns null. We now use SLF4J for logging messages. */ public static PrintStream getDefaultInfoStream() { - return IndexWriter.defaultInfoStream; + return null; } /** If non-null, information about merges, deletes and a * message when maxFieldLength is reached will be printed * to this. + * @deprecated this method does nothing. We use SLF4J for logging messages. */ public void setInfoStream(PrintStream infoStream) { - ensureOpen(); - setMessageID(infoStream); - docWriter.setInfoStream(infoStream); - deleter.setInfoStream(infoStream); - if (infoStream != null) - messageState(); + // Do nothing } - private void messageState() { - message("setInfoStream: dir=" + directory + + private String messageState() { + return "setInfoStream: dir=" + directory + " autoCommit=" + autoCommit + " mergePolicy=" + mergePolicy + " mergeScheduler=" + mergeScheduler + @@ -1595,23 +1597,17 @@ " maxBufferedDocs=" + docWriter.getMaxBufferedDocs() + " maxBuffereDeleteTerms=" + docWriter.getMaxBufferedDeleteTerms() + " maxFieldLength=" + maxFieldLength + - " index=" + segString()); + " index=" + segString(); } /** * Returns the current infoStream in use by this writer. - * @see #setInfoStream + * @deprecated always returns null. We now use SLF4J for logging messages. */ public PrintStream getInfoStream() { - ensureOpen(); - return infoStream; + return null; } - /** Returns true if verbosing is enabled (i.e., infoStream != null). */ - public boolean verbose() { - return infoStream != null; - } - /** * Sets the maximum time to wait for a write lock (in milliseconds) for this instance of IndexWriter. @see * @see #setDefaultWriteLockTimeout to change the default value for all instances of IndexWriter. @@ -1748,9 +1744,13 @@ docWriter.pauseAllThreads(); try { - if (infoStream != null) - message("now flush at close"); - + if (logger.isDebugEnabled()) { + safeDebugMsg("now flush at close"); + } + + // Required for TestIndexWriter.testOutOfMemoryErrorCausesCloseToFail(). 
+ testPoint("now flush at close"); + docWriter.close(); // Only allow a new merge to be triggered if we are @@ -1768,13 +1768,15 @@ mergeScheduler.close(); - if (infoStream != null) - message("now call final commit()"); + if (logger.isDebugEnabled()) { + safeDebugMsg("now call final commit()"); + } commit(0); - if (infoStream != null) - message("at close: " + segString()); + if (logger.isDebugEnabled()) { + safeDebugMsg("at close: " + segString()); + } synchronized(this) { docWriter = null; @@ -1801,8 +1803,9 @@ if (!closed) { if (docWriter != null) docWriter.resumeAllThreads(); - if (infoStream != null) - message("hit exception while closing"); + if (logger.isDebugEnabled()) { + safeDebugMsg("hit exception while closing"); + } } } } @@ -1823,8 +1826,8 @@ docStoreSegment = docWriter.closeDocStore(); success = true; } finally { - if (!success && infoStream != null) { - message("hit exception closing doc store segment"); + if (!success && logger.isDebugEnabled()) { + safeDebugMsg("hit exception closing doc store segment"); } } @@ -1850,8 +1853,9 @@ } finally { if (!success) { - if (infoStream != null) - message("hit exception building compound file doc store for segment " + docStoreSegment); + if (logger.isDebugEnabled()) { + safeDebugMsg("hit exception building compound file doc store for segment " + docStoreSegment); + } deleter.deleteFile(compoundFileName); } } @@ -2046,8 +2050,9 @@ } finally { if (!success) { - if (infoStream != null) - message("hit exception adding document"); + if (logger.isDebugEnabled()) { + safeDebugMsg("hit exception adding document"); + } synchronized (this) { // If docWriter has some aborted files that were @@ -2206,8 +2211,9 @@ } finally { if (!success) { - if (infoStream != null) - message("hit exception updating document"); + if (logger.isDebugEnabled()) { + safeDebugMsg("hit exception updating document"); + } synchronized (this) { // If docWriter has some aborted files that were @@ -2269,11 +2275,6 @@ } } - /** If non-null, information about merges will be printed to this. - */ - private PrintStream infoStream = null; - private static PrintStream defaultInfoStream = null; - /** * Requests an "optimize" operation on an index, priming the index * for the fastest available search. 
Traditionally this has meant @@ -2404,8 +2405,9 @@ if (maxNumSegments < 1) throw new IllegalArgumentException("maxNumSegments must be >= 1; got " + maxNumSegments); - if (infoStream != null) - message("optimize: index now " + segString()); + if (logger.isDebugEnabled()) { + safeDebugMsg("optimize: index now " + segString()); + } flush(true, false, true); @@ -2503,8 +2505,9 @@ throws CorruptIndexException, IOException { ensureOpen(); - if (infoStream != null) - message("expungeDeletes: index now " + segString()); + if (logger.isDebugEnabled()) { + safeDebugMsg("expungeDeletes: index now " + segString()); + } MergePolicy.MergeSpecification spec; @@ -2689,8 +2692,9 @@ boolean success = false; try { - if (infoStream != null) - message("now start transaction"); + if (logger.isDebugEnabled()) { + safeDebugMsg("now start transaction"); + } assert docWriter.getNumBufferedDeleteTerms() == 0 : "calling startTransaction with buffered delete terms not supported: numBufferedDeleteTerms=" + docWriter.getNumBufferedDeleteTerms(); @@ -2728,8 +2732,9 @@ if (localAutoCommit) { - if (infoStream != null) - message("flush at startTransaction"); + if (logger.isDebugEnabled()) { + safeDebugMsg("flush at startTransaction"); + } flush(true, false, false); @@ -2753,8 +2758,9 @@ */ private synchronized void rollbackTransaction() throws IOException { - if (infoStream != null) - message("now rollback transaction"); + if (logger.isDebugEnabled()) { + safeDebugMsg("now rollback transaction"); + } // First restore autoCommit in case we hit an exception below: autoCommit = localAutoCommit; @@ -2806,8 +2812,9 @@ */ private synchronized void commitTransaction() throws IOException { - if (infoStream != null) - message("now commit transaction"); + if (logger.isDebugEnabled()) { + safeDebugMsg("now commit transaction"); + } // First restore autoCommit in case we hit an exception below: autoCommit = localAutoCommit; @@ -2822,8 +2829,9 @@ success = true; } finally { if (!success) { - if (infoStream != null) - message("hit exception committing transaction"); + if (logger.isDebugEnabled()) { + safeDebugMsg("hit exception committing transaction"); + } rollbackTransaction(); } } @@ -2924,8 +2932,9 @@ docWriter.resumeAllThreads(); closing = false; notifyAll(); - if (infoStream != null) - message("hit exception during rollback"); + if (logger.isDebugEnabled()) { + safeDebugMsg("hit exception during rollback"); + } } } } @@ -2942,8 +2951,9 @@ Iterator it = pendingMerges.iterator(); while(it.hasNext()) { final MergePolicy.OneMerge merge = (MergePolicy.OneMerge) it.next(); - if (infoStream != null) - message("now abort pending merge " + merge.segString(directory)); + if (logger.isDebugEnabled()) { + safeDebugMsg("now abort pending merge " + merge.segString(directory)); + } merge.abort(); mergeFinish(merge); } @@ -2952,8 +2962,9 @@ it = runningMerges.iterator(); while(it.hasNext()) { final MergePolicy.OneMerge merge = (MergePolicy.OneMerge) it.next(); - if (infoStream != null) - message("now abort running merge " + merge.segString(directory)); + if (logger.isDebugEnabled()) { + safeDebugMsg("now abort running merge " + merge.segString(directory)); + } merge.abort(); } @@ -2969,8 +2980,9 @@ // because the merge threads periodically check if // they are aborted. 
while(runningMerges.size() > 0) { - if (infoStream != null) - message("now wait for " + runningMerges.size() + " running merge to abort"); + if (logger.isDebugEnabled()) { + safeDebugMsg("now wait for " + runningMerges.size() + " running merge to abort"); + } doWait(); } @@ -2979,9 +2991,9 @@ assert 0 == mergingSegments.size(); - if (infoStream != null) - message("all running merges have aborted"); - + if (logger.isDebugEnabled()) { + safeDebugMsg("all running merges have aborted"); + } } else { // Ensure any running addIndexes finishes. It's fine // if a new one attempts to start because from our @@ -3056,8 +3068,9 @@ try { - if (infoStream != null) - message("flush at addIndexes"); + if (logger.isDebugEnabled()) { + safeDebugMsg("flush at addIndexes"); + } flush(true, false, true); boolean success = false; @@ -3179,8 +3192,9 @@ docWriter.pauseAllThreads(); try { - if (infoStream != null) - message("flush at addIndexesNoOptimize"); + if (logger.isDebugEnabled()) { + safeDebugMsg("flush at addIndexesNoOptimize"); + } flush(true, false, true); boolean success = false; @@ -3438,8 +3452,9 @@ } } finally { if (!success) { - if (infoStream != null) - message("hit exception in addIndexes during merge"); + if (logger.isDebugEnabled()) { + safeDebugMsg("hit exception in addIndexes during merge"); + } rollbackTransaction(); } else { commitTransaction(); @@ -3480,8 +3495,9 @@ deleter.decRef(files); if (!success) { - if (infoStream != null) - message("hit exception building compound file in addIndexes during merge"); + if (logger.isDebugEnabled()) { + safeDebugMsg("hit exception building compound file in addIndexes during merge"); + } rollbackTransaction(); } else { @@ -3587,8 +3603,9 @@ if (!autoCommit && pendingCommit != null) throw new IllegalStateException("prepareCommit was already called with no corresponding call to commit"); - if (infoStream != null) - message("prepareCommit: flush"); + if (logger.isDebugEnabled()) { + safeDebugMsg("prepareCommit: flush"); + } flush(true, true, true); @@ -3664,15 +3681,18 @@ waitForCommit(); try { - if (infoStream != null) - message("commit: start"); + if (logger.isDebugEnabled()) { + safeDebugMsg("commit: start"); + } if (autoCommit || pendingCommit == null) { - if (infoStream != null) - message("commit: now prepare"); + if (logger.isDebugEnabled()) { + safeDebugMsg("commit: now prepare"); + } prepareCommit(commitUserData, true); - } else if (infoStream != null) - message("commit: already prepared"); + } else if (logger.isDebugEnabled()) { + safeDebugMsg("commit: already prepared"); + } finishCommit(); } finally { @@ -3684,11 +3704,13 @@ if (pendingCommit != null) { try { - if (infoStream != null) - message("commit: pendingCommit != null"); + if (logger.isDebugEnabled()) { + safeDebugMsg("commit: pendingCommit != null"); + } pendingCommit.finishCommit(directory); - if (infoStream != null) - message("commit: wrote segments file \"" + pendingCommit.getCurrentSegmentFileName() + "\""); + if (logger.isDebugEnabled()) { + safeDebugMsg("commit: wrote segments file \"" + pendingCommit.getCurrentSegmentFileName() + "\""); + } lastCommitChangeCount = pendingCommitChangeCount; segmentInfos.updateGeneration(pendingCommit); segmentInfos.setUserData(pendingCommit.getUserData()); @@ -3700,11 +3722,13 @@ notifyAll(); } - } else if (infoStream != null) - message("commit: pendingCommit == null; skip"); + } else if (logger.isDebugEnabled()) { + safeDebugMsg("commit: pendingCommit == null; skip"); + } - if (infoStream != null) - message("commit: done"); + if 
(logger.isDebugEnabled()) { + safeDebugMsg("commit: done"); + } } /** @@ -3776,8 +3800,8 @@ boolean docStoreIsCompoundFile = false; - if (infoStream != null) { - message(" flush: segment=" + docWriter.getSegment() + + if (logger.isDebugEnabled()) { + safeDebugMsg(" flush: segment=" + docWriter.getSegment() + " docStoreSegment=" + docWriter.getDocStoreSegment() + " docStoreOffset=" + docStoreOffset + " flushDocs=" + flushDocs + @@ -3785,7 +3809,7 @@ " flushDocStores=" + flushDocStores + " numDocs=" + numDocs + " numBufDelTerms=" + docWriter.getNumBufferedDeleteTerms()); - message(" index before flush " + segString()); + safeDebugMsg(" index before flush " + segString()); } // Check if the doc stores must be separately flushed @@ -3793,8 +3817,9 @@ // to flush, reference it if (flushDocStores && (!flushDocs || !docWriter.getSegment().equals(docWriter.getDocStoreSegment()))) { // We must separately flush the doc store - if (infoStream != null) - message(" flush shared docStore segment " + docStoreSegment); + if (logger.isDebugEnabled()) { + safeDebugMsg(" flush shared docStore segment " + docStoreSegment); + } docStoreIsCompoundFile = flushDocStores(); flushDocStores = false; @@ -3815,8 +3840,9 @@ success = true; } finally { if (!success) { - if (infoStream != null) - message("hit exception flushing segment " + segment); + if (logger.isDebugEnabled()) { + safeDebugMsg("hit exception flushing segment " + segment); + } deleter.refresh(segment); } } @@ -3866,8 +3892,9 @@ success = true; } finally { if (!success) { - if (infoStream != null) - message("hit exception creating compound file for newly flushed segment " + segment); + if (logger.isDebugEnabled()) { + safeDebugMsg("hit exception creating compound file for newly flushed segment " + segment); + } deleter.deleteFile(segment + "." 
+ IndexFileNames.COMPOUND_FILE_EXTENSION); } } @@ -3942,8 +3969,9 @@ final SegmentInfos sourceSegmentsClone = merge.segmentsClone; final SegmentInfos sourceSegments = merge.segments; - if (infoStream != null) - message("commitMergeDeletes " + merge.segString(directory)); + if (logger.isDebugEnabled()) { + safeDebugMsg("commitMergeDeletes " + merge.segString(directory)); + } // Carefully merge deletes that occurred after we // started merging: @@ -4019,8 +4047,9 @@ if (deletes != null) { merge.info.advanceDelGen(); - if (infoStream != null) - message("commit merge deletes to " + merge.info.getDelFileName()); + if (logger.isDebugEnabled()) { + safeDebugMsg("commit merge deletes to " + merge.info.getDelFileName()); + } deletes.write(directory, merge.info.getDelFileName()); merge.info.setDelCount(delCount); assert delCount == deletes.count(); @@ -4035,8 +4064,9 @@ if (hitOOM) return false; - if (infoStream != null) - message("commitMerge: " + merge.segString(directory) + " index=" + segString()); + if (logger.isDebugEnabled()) { + safeDebugMsg("commitMerge: " + merge.segString(directory) + " index=" + segString()); + } assert merge.registerDone; @@ -4047,8 +4077,9 @@ // file that current segments does not reference), we // abort this merge if (merge.isAborted()) { - if (infoStream != null) - message("commitMerge: skipping merge " + merge.segString(directory) + ": it was aborted"); + if (logger.isDebugEnabled()) { + safeDebugMsg("commitMerge: skipping merge " + merge.segString(directory) + ": it was aborted"); + } deleter.refresh(merge.info.name); return false; @@ -4153,8 +4184,9 @@ try { mergeInit(merge); - if (infoStream != null) - message("now merge\n merge=" + merge.segString(directory) + "\n merge=" + merge + "\n index=" + segString()); + if (logger.isDebugEnabled()) { + safeDebugMsg("now merge\n merge=" + merge.segString(directory) + "\n merge=" + merge + "\n index=" + segString()); + } mergeMiddle(merge); success = true; @@ -4168,8 +4200,9 @@ mergeFinish(merge); if (!success) { - if (infoStream != null) - message("hit exception during merge"); + if (logger.isDebugEnabled()) { + safeDebugMsg("hit exception during merge"); + } if (merge.info != null && !segmentInfos.contains(merge.info)) deleter.refresh(merge.info.name); } @@ -4222,8 +4255,9 @@ pendingMerges.add(merge); - if (infoStream != null) - message("add merge to pendingMerges: " + merge.segString(directory) + " [total " + pendingMerges.size() + " pending]"); + if (logger.isDebugEnabled()) { + safeDebugMsg("add merge to pendingMerges: " + merge.segString(directory) + " [total " + pendingMerges.size() + " pending]"); + } merge.mergeGen = mergeGen; merge.isExternal = isExternal; @@ -4363,8 +4397,9 @@ // TODO: if we know we are about to merge away these // newly flushed doc store files then we should not // make compound file out of them... 
- if (infoStream != null) - message("now flush at merge"); + if (logger.isDebugEnabled()) { + safeDebugMsg("now flush at merge"); + } doFlush(true, false); //flush(false, true, false); } @@ -4480,8 +4515,9 @@ SegmentInfos sourceSegmentsClone = merge.segmentsClone; final int numSegments = sourceSegments.size(); - if (infoStream != null) - message("merging " + merge.segString(directory)); + if (logger.isDebugEnabled()) { + safeDebugMsg("merging " + merge.segString(directory)); + } merger = new SegmentMerger(this, mergedName, merge); @@ -4498,8 +4534,8 @@ merger.add(reader); totDocCount += reader.numDocs(); } - if (infoStream != null) { - message("merge: total "+totDocCount+" docs"); + if (logger.isDebugEnabled()) { + safeDebugMsg("merge: total "+totDocCount+" docs"); } merge.checkAborted(directory); @@ -4555,8 +4591,9 @@ handleMergeException(t, merge); } finally { if (!success) { - if (infoStream != null) - message("hit exception creating compound file during merge"); + if (logger.isDebugEnabled()) { + safeDebugMsg("hit exception creating compound file during merge"); + } synchronized(this) { deleter.deleteFile(compoundFileName); } @@ -4564,8 +4601,9 @@ } if (merge.isAborted()) { - if (infoStream != null) - message("abort merge after building CFS"); + if (logger.isDebugEnabled()) { + safeDebugMsg("abort merge after building CFS"); + } deleter.deleteFile(compoundFileName); return 0; } @@ -4619,8 +4657,9 @@ success = true; } finally { if (!success) { - if (infoStream != null) - message("hit exception flushing deletes"); + if (logger.isDebugEnabled()) { + safeDebugMsg("hit exception flushing deletes"); + } // Carefully remove any partially written .del // files @@ -4790,8 +4829,9 @@ try { - if (infoStream != null) - message("startCommit(): start sizeInBytes=" + sizeInBytes); + if (logger.isDebugEnabled()) { + safeDebugMsg("startCommit(): start sizeInBytes=" + sizeInBytes); + } if (sizeInBytes > 0) syncPause(sizeInBytes); @@ -4821,8 +4861,9 @@ assert lastCommitChangeCount <= changeCount; if (changeCount == lastCommitChangeCount) { - if (infoStream != null) - message(" skip startCommit(): no changes pending"); + if (logger.isDebugEnabled()) { + safeDebugMsg(" skip startCommit(): no changes pending"); + } return; } @@ -4832,8 +4873,9 @@ // threads can be doing this at once, if say a large // merge and a small merge finish at the same time: - if (infoStream != null) - message("startCommit index=" + segString(segmentInfos) + " changeCount=" + changeCount); + if (logger.isDebugEnabled()) { + safeDebugMsg("startCommit index=" + segString(segmentInfos) + " changeCount=" + changeCount); + } toSync = (SegmentInfos) segmentInfos.clone(); @@ -4869,8 +4911,9 @@ // Because we incRef'd this commit point, above, // the file had better exist: assert directory.fileExists(fileName): "file '" + fileName + "' does not exist dir=" + directory; - if (infoStream != null) - message("now sync " + fileName); + if (logger.isDebugEnabled()) { + safeDebugMsg("now sync " + fileName); + } directory.sync(fileName); success = true; } finally { @@ -4901,8 +4944,9 @@ // Wait now for any current pending commit to complete: while(pendingCommit != null) { - if (infoStream != null) - message("wait for existing pendingCommit to finish..."); + if (logger.isDebugEnabled()) { + safeDebugMsg("wait for existing pendingCommit to finish..."); + } doWait(); } @@ -4931,15 +4975,18 @@ pendingCommitChangeCount = myChangeCount; success = true; } finally { - if (!success && infoStream != null) - message("hit exception committing segments file"); + 
if (!success && logger.isDebugEnabled()) { + safeDebugMsg("hit exception committing segments file"); + } } - } else if (infoStream != null) - message("sync superseded by newer infos"); + } else if (logger.isDebugEnabled()) { + safeDebugMsg("sync superseded by newer infos"); + } } - if (infoStream != null) - message("done all syncs"); + if (logger.isDebugEnabled()) { + safeDebugMsg("done all syncs"); + } assert testPoint("midStartCommitSuccess"); @@ -5042,6 +5089,7 @@ public static final MaxFieldLength LIMITED = new MaxFieldLength("LIMITED", DEFAULT_MAX_FIELD_LENGTH); } + // Used only by assert for testing. Current points: // startDoFlush Index: src/java/org/apache/lucene/index/LogMergePolicy.java =================================================================== --- src/java/org/apache/lucene/index/LogMergePolicy.java (revision 724107) +++ src/java/org/apache/lucene/index/LogMergePolicy.java (working copy) @@ -20,7 +20,8 @@ import java.io.IOException; import java.util.Set; -import org.apache.lucene.store.Directory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /**

This class implements a {@link MergePolicy} that tries * to merge segments into levels of exponentially @@ -39,6 +40,8 @@ public abstract class LogMergePolicy extends MergePolicy { + private static final Logger logger = LoggerFactory.getLogger(LogMergePolicy.class); + /** Defines the allowed range of log(size) for each * level. A level is computed by taking the max segment * log size, minus LEVEL_LOG_SPAN, and finding all @@ -61,17 +64,7 @@ private boolean useCompoundFile = true; private boolean useCompoundDocStore = true; - private IndexWriter writer; - protected boolean verbose() { - return writer != null && writer.verbose(); - } - - private void message(String message) { - if (verbose()) - writer.message("LMP: " + message); - } - /**

Returns the number of segments that are merged at * once and also controls the total number of segments * allowed to accumulate in the index.

*/ @@ -256,29 +249,29 @@ */ public MergeSpecification findMergesToExpungeDeletes(SegmentInfos segmentInfos, IndexWriter writer) - throws CorruptIndexException, IOException - { - this.writer = writer; - + throws CorruptIndexException, IOException { final int numSegments = segmentInfos.size(); - if (verbose()) - message("findMergesToExpungeDeletes: " + numSegments + " segments"); + if (logger.isDebugEnabled()) { + logger.debug("findMergesToExpungeDeletes: " + numSegments + " segments"); + } MergeSpecification spec = new MergeSpecification(); int firstSegmentWithDeletions = -1; for(int i=0;i= 0 */ public static final int FORMAT = -1; @@ -91,12 +95,6 @@ private String userData; // Opaque String that user can specify during IndexWriter.commit - /** - * If non-null, information about loading segments_N files - * will be printed here. @see #setInfoStream. - */ - private static PrintStream infoStream; - public final SegmentInfo info(int i) { return (SegmentInfo) get(i); } @@ -429,9 +427,10 @@ /** If non-null, information about retries when loading * the segments file will be printed to this. + * @deprecated this method does nothing. We use SLF4J for logging messages. */ public static void setInfoStream(PrintStream infoStream) { - SegmentInfos.infoStream = infoStream; + // Do nothing } /* Advanced configuration of retry logic in loading @@ -491,15 +490,14 @@ /** * @see #setInfoStream + * @deprecated this method always returns null. We use SLF4J for logging messages. */ public static PrintStream getInfoStream() { - return infoStream; + return null; } private static void message(String message) { - if (infoStream != null) { - infoStream.println("SIS [" + Thread.currentThread().getName() + "]: " + message); - } + logger.debug("SIS [" + Thread.currentThread().getName() + "]: " + message); } /** @@ -570,7 +568,9 @@ if (files != null) genA = getCurrentSegmentGeneration(files); - message("directory listing genA=" + genA); + if (logger.isDebugEnabled()) { + message("directory listing genA=" + genA); + } // Method 2: open segments.gen and read its // contents. Then we take the larger of the two @@ -584,10 +584,14 @@ try { genInput = directory.openInput(IndexFileNames.SEGMENTS_GEN); } catch (FileNotFoundException e) { - message("segments.gen open: FileNotFoundException " + e); + if (logger.isDebugEnabled()) { + message("segments.gen open: FileNotFoundException " + e); + } break; } catch (IOException e) { - message("segments.gen open: IOException " + e); + if (logger.isDebugEnabled()) { + message("segments.gen open: IOException " + e); + } } if (genInput != null) { @@ -596,7 +600,9 @@ if (version == FORMAT_LOCKLESS) { long gen0 = genInput.readLong(); long gen1 = genInput.readLong(); - message("fallback check: " + gen0 + "; " + gen1); + if (logger.isDebugEnabled()) { + message("fallback check: " + gen0 + "; " + gen1); + } if (gen0 == gen1) { // The file is consistent. 
genB = gen0; @@ -617,7 +623,9 @@ } } - message(IndexFileNames.SEGMENTS_GEN + " check: genB=" + genB); + if (logger.isDebugEnabled()) { + message(IndexFileNames.SEGMENTS_GEN + " check: genB=" + genB); + } // Pick the larger of the two gen's: if (genA > genB) @@ -649,7 +657,9 @@ if (genLookaheadCount < defaultGenLookaheadCount) { gen++; genLookaheadCount++; - message("look ahead increment gen to " + gen); + if (logger.isDebugEnabled()) { + message("look ahead increment gen to " + gen); + } } } @@ -684,7 +694,7 @@ try { Object v = doBody(segmentFileName); - if (exc != null) { + if (exc != null && logger.isDebugEnabled()) { message("success on " + segmentFileName); } return v; @@ -695,7 +705,9 @@ exc = err; } - message("primary Exception on '" + segmentFileName + "': " + err + "'; will retry: retry=" + retry + "; gen = " + gen); + if (logger.isDebugEnabled()) { + message("primary Exception on '" + segmentFileName + "': " + err + "'; will retry: retry=" + retry + "; gen = " + gen); + } if (!retry && gen > 1) { @@ -715,15 +727,19 @@ prevExists = new File(fileDirectory, prevSegmentFileName).exists(); if (prevExists) { - message("fallback to prior segment file '" + prevSegmentFileName + "'"); + if (logger.isDebugEnabled()) { + message("fallback to prior segment file '" + prevSegmentFileName + "'"); + } try { Object v = doBody(prevSegmentFileName); - if (exc != null) { + if (exc != null && logger.isDebugEnabled()) { message("success on fallback " + prevSegmentFileName); } return v; } catch (IOException err2) { - message("secondary Exception on '" + prevSegmentFileName + "': " + err2 + "'; will retry"); + if (logger.isDebugEnabled()) { + message("secondary Exception on '" + prevSegmentFileName + "': " + err2 + "'; will retry"); + } } } } Index: src/test/org/apache/lucene/index/TestIndexModifier.java =================================================================== --- src/test/org/apache/lucene/index/TestIndexModifier.java (revision 724107) +++ src/test/org/apache/lucene/index/TestIndexModifier.java (working copy) @@ -17,24 +17,22 @@ * limitations under the License. */ -import org.apache.lucene.util.LuceneTestCase; +import java.io.File; +import java.io.IOException; +import java.util.EmptyStackException; +import java.util.Random; +import java.util.Stack; + import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.SimpleAnalyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; -import org.apache.lucene.document.Field.Index; -import org.apache.lucene.document.Field.Store; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FSDirectory; import org.apache.lucene.store.RAMDirectory; +import org.apache.lucene.util.LuceneTestCase; -import java.io.File; -import java.io.IOException; -import java.util.EmptyStackException; -import java.util.Random; -import java.util.Stack; - /** * Tests for the "IndexModifier" class, including accesses from two threads at the * same time. 
@@ -72,7 +70,6 @@ assertEquals(0, i.docCount()); // Lucene defaults: - assertNull(i.getInfoStream()); assertTrue(i.getUseCompoundFile()); assertEquals(IndexWriter.DISABLE_AUTO_FLUSH, i.getMaxBufferedDocs()); assertEquals(10000, i.getMaxFieldLength()); Index: src/test/org/apache/lucene/index/TestIndexWriter.java =================================================================== --- src/test/org/apache/lucene/index/TestIndexWriter.java (revision 724107) +++ src/test/org/apache/lucene/index/TestIndexWriter.java (working copy) @@ -17,10 +17,8 @@ * limitations under the License. */ -import java.io.ByteArrayOutputStream; import java.io.File; import java.io.IOException; -import java.io.PrintStream; import java.io.Reader; import java.util.ArrayList; import java.util.Arrays; @@ -39,6 +37,7 @@ import org.apache.lucene.analysis.tokenattributes.TermAttribute; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; +import org.apache.lucene.index.IndexWriter.MaxFieldLength; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; @@ -4212,17 +4211,17 @@ final List thrown = new ArrayList(); - final IndexWriter writer = new IndexWriter(new MockRAMDirectory(), new StandardAnalyzer()) { - public void message(final String message) { - if (message.startsWith("now flush at close") && 0 == thrown.size()) { + final IndexWriter writer = new IndexWriter(new MockRAMDirectory(), new StandardAnalyzer(), MaxFieldLength.UNLIMITED) { + boolean testPoint(String name) { + if (name.startsWith("now flush at close") && 0 == thrown.size()) { thrown.add(null); - throw new OutOfMemoryError("fake OOME at " + message); + throw new OutOfMemoryError("fake OOME at " + name); } + + return super.testPoint(name); } }; - // need to set an info stream so message is called - writer.setInfoStream(new PrintStream(new ByteArrayOutputStream())); try { writer.close(); fail("OutOfMemoryError expected"); Index: src/test/org/apache/lucene/index/TestIndexWriterExceptions.java =================================================================== --- src/test/org/apache/lucene/index/TestIndexWriterExceptions.java (revision 724107) +++ src/test/org/apache/lucene/index/TestIndexWriterExceptions.java (working copy) @@ -137,9 +137,6 @@ //writer.setMaxBufferedDocs(10); writer.setRAMBufferSizeMB(0.1); - if (DEBUG) - writer.setInfoStream(System.out); - IndexerThread thread = new IndexerThread(0, writer); thread.run(); if (thread.failure != null) { @@ -175,9 +172,6 @@ //writer.setMaxBufferedDocs(10); writer.setRAMBufferSizeMB(0.2); - if (DEBUG) - writer.setInfoStream(System.out); - final int NUM_THREADS = 4; final IndexerThread[] threads = new IndexerThread[NUM_THREADS];
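
Note on seeing the old infoStream output after this patch: the diagnostics are now emitted through SLF4J at DEBUG level under the org.apache.lucene.index loggers, so some SLF4J binding must be on the classpath. A minimal sketch, assuming the slf4j-jdk14 binding; the class name EnableLuceneDebugLogging and the use of java.util.logging here are illustrative, not part of the patch:

import java.util.logging.ConsoleHandler;
import java.util.logging.Level;
import java.util.logging.Logger;

public class EnableLuceneDebugLogging {
  public static void main(String[] args) {
    // With slf4j-jdk14 bound, the logger.debug(...) calls added by this
    // patch surface as java.util.logging records at Level.FINE.
    Logger lucene = Logger.getLogger("org.apache.lucene.index");
    lucene.setLevel(Level.FINE);
    // The default ConsoleHandler passes only INFO and above, so attach
    // a handler that lets FINE records through.
    ConsoleHandler handler = new ConsoleHandler();
    handler.setLevel(Level.FINE);
    lucene.addHandler(handler);
    // ... open an IndexWriter and index documents as usual; the merge,
    // flush and commit messages that previously required
    // writer.setInfoStream(...) now appear as FINE log records.
  }
}

Equivalent configuration should be possible with any other binding (for example log4j) by enabling DEBUG for the org.apache.lucene.index category.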
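A note on the guard style used throughout the patch: the if (logger.isDebugEnabled()) checks mirror the old if (infoStream != null) checks and avoid building log strings when DEBUG is off. SLF4J's parameterized messages achieve the same effect without a guard at each call site; a sketch of that alternative for comparison only, not part of this patch (LoggingGuardSketch is a hypothetical class):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class LoggingGuardSketch {
  private static final Logger logger = LoggerFactory.getLogger(LoggingGuardSketch.class);

  void sync(String fileName) {
    // Guarded form, as used in this patch:
    if (logger.isDebugEnabled()) {
      logger.debug("now sync " + fileName);
    }
    // Parameterized form: the message is only assembled when DEBUG is
    // enabled for this logger, so no explicit guard is needed.
    logger.debug("now sync {}", fileName);
  }
}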