Index: lucene/core/src/java/org/apache/lucene/store/BufferedIndexOutput.java =================================================================== --- lucene/core/src/java/org/apache/lucene/store/BufferedIndexOutput.java (revision 1512797) +++ lucene/core/src/java/org/apache/lucene/store/BufferedIndexOutput.java (working copy) @@ -21,8 +21,11 @@ /** Base implementation class for buffered {@link IndexOutput}. */ public abstract class BufferedIndexOutput extends IndexOutput { - /** The default buffer size in bytes ({@value #DEFAULT_BUFFER_SIZE}). */ - public static final int DEFAULT_BUFFER_SIZE = 16384; + /** + * Default buffer size: 8192 bytes (this is the size up to which the JDK + does not allocate additional arrays while reading/writing) + */ + public static final int DEFAULT_BUFFER_SIZE = 8192; private final int bufferSize; private final byte[] buffer; Index: lucene/core/src/java/org/apache/lucene/store/FSDirectory.java =================================================================== --- lucene/core/src/java/org/apache/lucene/store/FSDirectory.java (revision 1512797) +++ lucene/core/src/java/org/apache/lucene/store/FSDirectory.java (working copy) @@ -31,6 +31,7 @@ import org.apache.lucene.util.ThreadInterruptedException; import org.apache.lucene.util.Constants; +import org.apache.lucene.util.IOUtils; /** * Base class for Directory implementations that store index @@ -112,9 +113,10 @@ public abstract class FSDirectory extends Directory { /** - * Default read chunk size: 2*{@link BufferedIndexInput#MERGE_BUFFER_SIZE}. 
+ * Default read chunk size: 8192 bytes (this is the size up to which the JDK + does not allocate additional arrays while reading/writing) */ - public static final int DEFAULT_READ_CHUNK_SIZE = BufferedIndexInput.MERGE_BUFFER_SIZE * 2; + public static final int DEFAULT_READ_CHUNK_SIZE = 8192; protected final File directory; // The underlying filesystem directory protected final Set staleFiles = synchronizedSet(new HashSet()); // Files written, but not yet sync'ed @@ -358,16 +360,19 @@ * *

This was introduced due to Sun - * JVM Bug 6478546, which throws an incorrect - * OutOfMemoryError when attempting to read too many bytes - * at once. It only happens on 32bit JVMs with a large - * maximum heap size.

+ * JVM Bug 6478546, which throws an {@code OutOfMemoryError} when + * attempting to read too many bytes at once.

* *

Changes to this value will not impact any * already-opened {@link IndexInput}s. You should call * this before attempting to open an index on the * directory.

+ * + * @deprecated It is no longer needed to raise this value + * from the default or lower this value on OOMs, + * because Lucene's defaults use sensible values. */ + @Deprecated public final void setReadChunkSize(int chunkSize) { // LUCENE-1566 if (chunkSize <= 0) { @@ -379,10 +384,14 @@ /** * The maximum number of bytes to read at once from the * underlying file during {@link IndexInput#readBytes}. + * + * @deprecated It is no longer needed to raise this value + * from the default or lower this value on OOMs, + * because Lucene's defaults use sensible values. * @see #setReadChunkSize */ + @Deprecated public final int getReadChunkSize() { - // LUCENE-1566 return chunkSize; } @@ -404,7 +413,7 @@ /** output methods: */ @Override - public void flushBuffer(byte[] b, int offset, int size) throws IOException { + protected void flushBuffer(byte[] b, int offset, int size) throws IOException { assert isOpen; file.write(b, offset, size); } @@ -414,21 +423,14 @@ parent.onIndexOutputClosed(name); // only close the file if it has not been closed yet if (isOpen) { - boolean success = false; + IOException priorE = null; try { super.close(); - success = true; + } catch (IOException ioe) { + priorE = ioe; } finally { isOpen = false; - if (!success) { - try { - file.close(); - } catch (Throwable t) { - // Suppress so we don't mask original exception - } - } else { - file.close(); - } + IOUtils.closeWhileHandlingException(priorE, file); } } } Index: lucene/core/src/java/org/apache/lucene/store/NIOFSDirectory.java =================================================================== --- lucene/core/src/java/org/apache/lucene/store/NIOFSDirectory.java (revision 1512797) +++ lucene/core/src/java/org/apache/lucene/store/NIOFSDirectory.java (working copy) @@ -164,7 +164,6 @@ @Override protected void readInternal(byte[] b, int offset, int len) throws IOException { - final ByteBuffer bb; // Determine the ByteBuffer we should use @@ -179,8 +178,8 @@ } int readOffset = bb.position(); - 
int readLength = bb.limit() - readOffset; - assert readLength == len; + int readLength = len; + assert bb.limit() - readOffset == len; long pos = getFilePointer() + off; @@ -190,32 +189,15 @@ try { while (readLength > 0) { - final int limit; - if (readLength > chunkSize) { - // LUCENE-1566 - work around JVM Bug by breaking - // very large reads into chunks - limit = readOffset + chunkSize; - } else { - limit = readOffset + readLength; + bb.limit(readOffset + Math.min(chunkSize, readLength)); + final int i = channel.read(bb, pos); + if (i < 0) { // be defensive here, even though we checked before hand, something could have changed + throw new EOFException("read past EOF: " + this + " off: " + offset + " len: " + len + " pos: " + pos + " readLen: " + readLength + " end: " + end); } - bb.limit(limit); - int i = channel.read(bb, pos); - if (i < 0){//be defensive here, even though we checked before hand, something could have changed - throw new EOFException("read past EOF: " + this + " off: " + offset + " len: " + len + " pos: " + pos + " limit: " + limit + " end: " + end); - } pos += i; readOffset += i; readLength -= i; } - } catch (OutOfMemoryError e) { - // propagate OOM up and add a hint for 32bit VM Users hitting the bug - // with a large chunk size in the fast path. 
- final OutOfMemoryError outOfMemoryError = new OutOfMemoryError( - "OutOfMemoryError likely caused by the Sun VM Bug described in " - + "https://issues.apache.org/jira/browse/LUCENE-1566; try calling FSDirectory.setReadChunkSize " - + "with a value smaller than the current chunk size (" + chunkSize + ")"); - outOfMemoryError.initCause(e); - throw outOfMemoryError; } catch (IOException ioe) { throw new IOException(ioe.getMessage() + ": " + this, ioe); } Index: lucene/core/src/java/org/apache/lucene/store/SimpleFSDirectory.java =================================================================== --- lucene/core/src/java/org/apache/lucene/store/SimpleFSDirectory.java (revision 1512797) +++ lucene/core/src/java/org/apache/lucene/store/SimpleFSDirectory.java (working copy) @@ -146,29 +146,14 @@ } try { - do { - final int readLength; - if (total + chunkSize > len) { - readLength = len - total; - } else { - // LUCENE-1566 - work around JVM Bug by breaking very large reads into chunks - readLength = chunkSize; - } + while (total < len) { + final int readLength = Math.min(chunkSize, len - total); final int i = file.read(b, offset + total, readLength); - if (i < 0){//be defensive here, even though we checked before hand, something could have changed + if (i < 0) { // be defensive here, even though we checked before hand, something could have changed throw new EOFException("read past EOF: " + this + " off: " + offset + " len: " + len + " total: " + total + " readLen: " + readLength + " end: " + end); } total += i; - } while (total < len); - } catch (OutOfMemoryError e) { - // propagate OOM up and add a hint for 32bit VM Users hitting the bug - // with a large chunk size in the fast path. 
- final OutOfMemoryError outOfMemoryError = new OutOfMemoryError( - "OutOfMemoryError likely caused by the Sun VM Bug described in " - + "https://issues.apache.org/jira/browse/LUCENE-1566; try calling FSDirectory.setReadChunkSize " - + "with a value smaller than the current chunk size (" + chunkSize + ")"); - outOfMemoryError.initCause(e); - throw outOfMemoryError; + } } catch (IOException ioe) { throw new IOException(ioe.getMessage() + ": " + this, ioe); }