Index: src/test/org/apache/lucene/store/TestHugeRamFile.java
===================================================================
--- src/test/org/apache/lucene/store/TestHugeRamFile.java	(revision 916299)
+++ src/test/org/apache/lucene/store/TestHugeRamFile.java	(working copy)
@@ -33,7 +33,7 @@
     private long capacity = 0;
     private HashMap<Integer,byte[]> singleBuffers = new HashMap<Integer,byte[]>();
     @Override
-    byte[] newBuffer(int size) {
+    protected byte[] newBuffer(int size) {
       capacity += size;
       if (capacity <= MAX_VALUE) {
         // below maxint we reuse buffers
Index: src/java/org/apache/lucene/index/StoredFieldsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/StoredFieldsWriter.java	(revision 916299)
+++ src/java/org/apache/lucene/index/StoredFieldsWriter.java	(working copy)
@@ -21,12 +21,15 @@
 import org.apache.lucene.store.RAMOutputStream;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.RamUsageEstimator;
+import org.apache.lucene.store.RAMFile;
 
 /** This is a DocFieldConsumer that writes stored fields. */
 final class StoredFieldsWriter {
 
   FieldsWriter fieldsWriter;
   final DocumentsWriter docWriter;
+  final ByteBlockPool.Allocator allocator;
+
   final FieldInfos fieldInfos;
   int lastDocID;
 
@@ -35,6 +38,7 @@
 
   public StoredFieldsWriter(DocumentsWriter docWriter, FieldInfos fieldInfos) {
     this.docWriter = docWriter;
+    this.allocator = docWriter.storedFieldsAllocator;
     this.fieldInfos = fieldInfos;
   }
 
@@ -163,16 +167,24 @@
     assert 0 == perDoc.numStoredFields;
     assert 0 == perDoc.fdt.length();
     assert 0 == perDoc.fdt.getFilePointer();
+    perDoc.recycle();
     docFreeList[freeCount++] = perDoc;
   }
 
   class PerDoc extends DocumentsWriter.DocWriter {
-
-    // TODO: use something more memory efficient; for small
-    // docs the 1024 buffer size of RAMOutputStream wastes alot
-    RAMOutputStream fdt = new RAMOutputStream();
+    final PerDocBuffer buffer = new PerDocBuffer();
+    RAMOutputStream fdt = new RAMOutputStream(buffer);
     int numStoredFields;
 
+    void recycle() {
+      try {
+        fdt.close();
+      } catch (IOException e) {
+        throw new RuntimeException(e); // should never happen
+      }
+      buffer.recycle();
+    }
+
     void reset() {
       fdt.reset();
       numStoredFields = 0;
@@ -186,7 +198,7 @@
 
     @Override
     public long sizeInBytes() {
-      return fdt.sizeInBytes();
+      return buffer.getSizeInBytes();
    }
 
    @Override
@@ -194,4 +206,37 @@
       finishDocument(this);
     }
   }
+
+  class PerDocBuffer extends RAMFile {
+
+    /**
+     * Allocate bytes used from shared pool.
+     */
+    protected byte[] newBuffer(int size) {
+      assert size == DocumentsWriter.STORED_FIELDS_BLOCK_SIZE;
+      return allocator.getByteBlock(true);
+    }
+
+    /**
+     * Recycle the bytes used.
+     */
+    synchronized void recycle() {
+      if (buffers.size() > 0) {
+        setLength(0);
+
+        // Recycle the blocks
+        final int blockCount = buffers.size();
+
+        final byte[][] blocks = buffers.toArray( new byte[blockCount][] );
+        allocator.recycleByteBlocks(blocks, 0, blockCount);
+        buffers.clear();
+        sizeInBytes = 0;
+
+        assert numBuffers() == 0;
+
+        // Tell doc writer the bytes were reclaimed
+        docWriter.bytesUsed(-blockCount * DocumentsWriter.STORED_FIELDS_BLOCK_SIZE);
+      }
+    }
+  }
 }
Index: src/java/org/apache/lucene/index/DocumentsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/DocumentsWriter.java	(revision 916299)
+++ src/java/org/apache/lucene/index/DocumentsWriter.java	(working copy)
@@ -1199,7 +1199,12 @@
   final static int BYTE_BLOCK_NOT_MASK = ~BYTE_BLOCK_MASK;
 
   private class ByteBlockAllocator extends ByteBlockPool.Allocator {
+    final int blockSize;
+    ByteBlockAllocator(int blockSize) {
+      this.blockSize = blockSize;
+    }
+
     ArrayList<byte[]> freeByteBlocks = new ArrayList<byte[]>();
 
     /* Allocate another byte[] from the shared pool */
@@ -1215,12 +1220,12 @@
           // things that don't track allocations (term
           // vectors) and things that do (freq/prox
           // postings).
-          numBytesAlloc += BYTE_BLOCK_SIZE;
-          b = new byte[BYTE_BLOCK_SIZE];
+          numBytesAlloc += blockSize;
+          b = new byte[blockSize];
         } else
           b = freeByteBlocks.remove(size-1);
         if (trackAllocations)
-          numBytesUsed += BYTE_BLOCK_SIZE;
+          numBytesUsed += blockSize;
         assert numBytesUsed <= numBytesAlloc;
         return b;
       }
@@ -1281,8 +1286,13 @@
         freeIntBlocks.add(blocks[i]);
     }
   }
 
-  ByteBlockAllocator byteBlockAllocator = new ByteBlockAllocator();
+  ByteBlockAllocator byteBlockAllocator = new ByteBlockAllocator(BYTE_BLOCK_SIZE);
 
+  final static int STORED_FIELDS_BLOCK_SIZE = 1024;
+
+  ByteBlockAllocator storedFieldsAllocator = new ByteBlockAllocator(STORED_FIELDS_BLOCK_SIZE);
+
   /* Initial chunk size of the shared char[] blocks used to
      store term text */
   final static int CHAR_BLOCK_SHIFT = 14;
@@ -1321,10 +1331,12 @@
     return nf.format(v/1024./1024.);
   }
 
-  /* We have three pools of RAM: Postings, byte blocks
-   * (holds freq/prox posting data) and char blocks (holds
-   * characters in the term).  Different docs require
-   * varying amount of storage from these three classes.
+  /* We have four pools of RAM: Postings, byte blocks
+   * (holds freq/prox posting data), char blocks (holds
+   * characters in the term) and stored fields buffers.
+   * Different docs require varying amount of storage from
+   * these four classes.
+   *
    * For example, docs with many unique single-occurrence
    * short terms will use up the Postings RAM and hardly any
    * of the other two.  Whereas docs with very large terms
@@ -1348,6 +1360,7 @@
                 " deletesMB=" + toMB(deletesRAMUsed) +
                 " vs trigger=" + toMB(freeTrigger) +
                 " byteBlockFree=" + toMB(byteBlockAllocator.freeByteBlocks.size()*BYTE_BLOCK_SIZE) +
+                " storedFieldsFree=" + toMB(storedFieldsAllocator.freeByteBlocks.size()*STORED_FIELDS_BLOCK_SIZE) +
                 " charBlockFree=" + toMB(freeCharBlocks.size()*CHAR_BLOCK_SIZE*CHAR_NUM_BYTE));
 
     final long startBytesAlloc = numBytesAlloc + deletesRAMUsed;
@@ -1363,7 +1376,11 @@
     while(numBytesAlloc+deletesRAMUsed > freeLevel) {
     
       synchronized(this) {
-        if (0 == byteBlockAllocator.freeByteBlocks.size() && 0 == freeCharBlocks.size() && 0 == freeIntBlocks.size() && !any) {
+        if (0 == storedFieldsAllocator.freeByteBlocks.size()
+            && 0 == byteBlockAllocator.freeByteBlocks.size()
+            && 0 == freeCharBlocks.size()
+            && 0 == freeIntBlocks.size()
+            && !any) {
           // Nothing else to free -- must flush now.
           bufferIsFull = numBytesUsed+deletesRAMUsed > flushTrigger;
           if (infoStream != null) {
@@ -1376,23 +1393,34 @@
           break;
         }
 
-        if ((0 == iter % 4) && byteBlockAllocator.freeByteBlocks.size() > 0) {
+        if ((0 == iter % 5) && byteBlockAllocator.freeByteBlocks.size() > 0) {
           byteBlockAllocator.freeByteBlocks.remove(byteBlockAllocator.freeByteBlocks.size()-1);
           numBytesAlloc -= BYTE_BLOCK_SIZE;
         }
 
-        if ((1 == iter % 4) && freeCharBlocks.size() > 0) {
+        if ((1 == iter % 5) && freeCharBlocks.size() > 0) {
           freeCharBlocks.remove(freeCharBlocks.size()-1);
           numBytesAlloc -= CHAR_BLOCK_SIZE * CHAR_NUM_BYTE;
         }
 
-        if ((2 == iter % 4) && freeIntBlocks.size() > 0) {
+        if ((2 == iter % 5) && freeIntBlocks.size() > 0) {
           freeIntBlocks.remove(freeIntBlocks.size()-1);
           numBytesAlloc -= INT_BLOCK_SIZE * INT_NUM_BYTE;
         }
+
+        if ((3 == iter % 5) && storedFieldsAllocator.freeByteBlocks.size() > 0) {
+          // Remove upwards of 32 blocks (each block is 1K)
+          for (int i = 0; i < 32; ++i) {
+            storedFieldsAllocator.freeByteBlocks.remove(storedFieldsAllocator.freeByteBlocks.size() - 1);
+            numBytesAlloc -= STORED_FIELDS_BLOCK_SIZE;
+            if (storedFieldsAllocator.freeByteBlocks.size() == 0) {
+              break;
+            }
+          }
+        }
       }
 
-      if ((3 == iter % 4) && any)
+      if ((4 == iter % 5) && any)
         // Ask consumer to free any recycled state
         any = consumer.freeRAM();
Index: src/java/org/apache/lucene/store/RAMFile.java
===================================================================
--- src/java/org/apache/lucene/store/RAMFile.java	(revision 916299)
+++ src/java/org/apache/lucene/store/RAMFile.java	(working copy)
@@ -20,44 +20,44 @@
 import java.util.ArrayList;
 import java.io.Serializable;
 
-class RAMFile implements Serializable {
+public class RAMFile implements Serializable {
 
   private static final long serialVersionUID = 1l;
 
-  private ArrayList<byte[]> buffers = new ArrayList<byte[]>();
+  protected ArrayList<byte[]> buffers = new ArrayList<byte[]>();
   long length;
   RAMDirectory directory;
-  long sizeInBytes;
+  protected long sizeInBytes;
 
   // This is publicly modifiable via Directory.touchFile(), so direct access not supported
   private long lastModified = System.currentTimeMillis();
 
   // File used as buffer, in no RAMDirectory
-  RAMFile() {}
+  protected RAMFile() {}
 
   RAMFile(RAMDirectory directory) {
     this.directory = directory;
   }
 
   // For non-stream access from thread that might be concurrent with writing
-  synchronized long getLength() {
+  public synchronized long getLength() {
     return length;
   }
 
-  synchronized void setLength(long length) {
+  protected synchronized void setLength(long length) {
     this.length = length;
   }
 
   // For non-stream access from thread that might be concurrent with writing
-  synchronized long getLastModified() {
+  public synchronized long getLastModified() {
     return lastModified;
   }
 
-  synchronized void setLastModified(long lastModified) {
+  protected synchronized void setLastModified(long lastModified) {
     this.lastModified = lastModified;
   }
 
-  final byte[] addBuffer(int size) {
+  protected final byte[] addBuffer(int size) {
     byte[] buffer = newBuffer(size);
     synchronized(this) {
       buffers.add(buffer);
@@ -70,11 +70,11 @@
     return buffer;
   }
 
-  final synchronized byte[] getBuffer(int index) {
+  protected final synchronized byte[] getBuffer(int index) {
     return buffers.get(index);
   }
 
-  final synchronized int numBuffers() {
+  protected final synchronized int numBuffers() {
     return buffers.size();
   }
 
@@ -84,11 +84,11 @@
    * @param size size of allocated buffer.
    * @return allocated buffer.
    */
-  byte[] newBuffer(int size) {
+  protected byte[] newBuffer(int size) {
     return new byte[size];
   }
 
-  synchronized long getSizeInBytes() {
+  public synchronized long getSizeInBytes() {
     return sizeInBytes;
   }
 
Index: src/java/org/apache/lucene/store/RAMOutputStream.java
===================================================================
--- src/java/org/apache/lucene/store/RAMOutputStream.java	(revision 916299)
+++ src/java/org/apache/lucene/store/RAMOutputStream.java	(working copy)
@@ -40,7 +40,7 @@
     this(new RAMFile());
   }
 
-  RAMOutputStream(RAMFile f) {
+  public RAMOutputStream(RAMFile f) {
     file = f;
 
     // make sure that we switch to the
@@ -80,6 +80,11 @@
   @Override
   public void close() throws IOException {
     flush();
+    currentBuffer = null;
+    currentBufferIndex = -1;
+    bufferPosition = 0;
+    bufferStart = 0;
+    bufferLength = 0;
   }
 
   @Override
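
Note: the pattern this patch relies on is a RAMOutputStream writing into a RAMFile subclass whose newBuffer() draws fixed-size blocks from a shared free list, plus a recycle() that hands the blocks back when the per-document buffer is done. The following is a minimal standalone sketch of that pattern only; PooledFile, its pool, BLOCK_SIZE and the demo class are hypothetical names invented for illustration and are not part of the patch. The real PerDocBuffer recycles through DocumentsWriter's ByteBlockAllocator and also reports the reclaimed bytes back to the DocumentsWriter.

    import java.io.IOException;
    import java.util.ArrayList;

    import org.apache.lucene.store.RAMFile;
    import org.apache.lucene.store.RAMOutputStream;

    // Hypothetical stand-in for the patch's PerDocBuffer: a RAMFile whose
    // 1 KB buffers come from a shared free list and can be handed back.
    class PooledFile extends RAMFile {
      static final int BLOCK_SIZE = 1024; // matches RAMOutputStream's buffer size
      private final ArrayList<byte[]> pool;

      PooledFile(ArrayList<byte[]> pool) {
        this.pool = pool;
      }

      @Override
      protected byte[] newBuffer(int size) {
        assert size == BLOCK_SIZE;
        // Reuse a free block if one is available, otherwise allocate a fresh one
        return pool.isEmpty() ? new byte[size] : pool.remove(pool.size() - 1);
      }

      // Return all blocks to the shared free list and reset the file to empty.
      synchronized void recycle() {
        setLength(0);
        for (int i = 0; i < numBuffers(); i++) {
          pool.add(getBuffer(i));
        }
        buffers.clear();
        sizeInBytes = 0;
      }
    }

    class PooledFileDemo {
      public static void main(String[] args) throws IOException {
        ArrayList<byte[]> pool = new ArrayList<byte[]>();
        PooledFile file = new PooledFile(pool);

        RAMOutputStream out = new RAMOutputStream(file); // constructor is public after the patch
        out.writeString("stored field value");
        out.close();     // with the patch, close() also drops the stream's buffer references
        file.recycle();  // blocks go back to the pool for the next document
      }
    }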