diff --git a/lucene/core/src/java/org/apache/lucene/index/DocFieldProcessor.java b/lucene/core/src/java/org/apache/lucene/index/DocFieldProcessor.java
index 302c63d..aef29df 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocFieldProcessor.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocFieldProcessor.java
@@ -59,7 +59,7 @@ final class DocFieldProcessor extends DocConsumer {
   int hashMask = 1;
   int totalFieldCount;
 
-  float docBoost;
+  float docBoost; // nocommit - this field seems unused?
   int fieldGen;
   final DocumentsWriterPerThread.DocState docState;
 
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
index 705a42c..f2e8b6f 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
@@ -186,6 +186,7 @@ class DocumentsWriterPerThread {
   DeleteSlice deleteSlice;
   private final NumberFormat nf = NumberFormat.getInstance(Locale.ROOT);
   final Allocator byteBlockAllocator;
+  final IntBlockPool.Allocator intBlockAllocator;
 
   
   public DocumentsWriterPerThread(Directory directory, DocumentsWriter parent,
@@ -201,9 +202,12 @@ class DocumentsWriterPerThread {
     this.docState.similarity = parent.indexWriter.getConfig().getSimilarity();
     bytesUsed = Counter.newCounter();
     byteBlockAllocator = new DirectTrackingAllocator(bytesUsed);
-    consumer = indexingChain.getChain(this);
     pendingDeletes = new BufferedDeletes();
+    intBlockAllocator = new IntBlockAllocator(bytesUsed);
     initialize();
+    // this should be the last call in the ctor 
+    // it really sucks that we need to pull this within the ctor and pass this ref to the chain!
+    consumer = indexingChain.getChain(this);
   }
   
   public DocumentsWriterPerThread(DocumentsWriterPerThread other, FieldInfos.Builder fieldInfos) {
@@ -619,23 +623,28 @@ class DocumentsWriterPerThread {
    * getTerms/getTermsIndex requires <= 32768 */
   final static int MAX_TERM_LENGTH_UTF8 = BYTE_BLOCK_SIZE-2;
 
-  /* Initial chunks size of the shared int[] blocks used to
-     store postings data */
-  final static int INT_BLOCK_SHIFT = 13;
-  final static int INT_BLOCK_SIZE = 1 << INT_BLOCK_SHIFT;
-  final static int INT_BLOCK_MASK = INT_BLOCK_SIZE - 1;
-
-  /* Allocate another int[] from the shared pool */
-  int[] getIntBlock() {
-    int[] b = new int[INT_BLOCK_SIZE];
-    bytesUsed.addAndGet(INT_BLOCK_SIZE*RamUsageEstimator.NUM_BYTES_INT);
-    return b;
-  }
-  
-  void recycleIntBlocks(int[][] blocks, int offset, int length) {
-    bytesUsed.addAndGet(-(length *(INT_BLOCK_SIZE*RamUsageEstimator.NUM_BYTES_INT)));
-  }
 
+  private static class IntBlockAllocator extends IntBlockPool.Allocator {
+    private final Counter bytesUsed;
+    
+    public IntBlockAllocator(Counter bytesUsed) {
+      super(IntBlockPool.INT_BLOCK_SIZE);
+      this.bytesUsed = bytesUsed;
+    }
+    
+    /* Allocate another int[] from the shared pool */
+    public int[] getIntBlock() {
+      int[] b = new int[IntBlockPool.INT_BLOCK_SIZE];
+      bytesUsed.addAndGet(IntBlockPool.INT_BLOCK_SIZE
+          * RamUsageEstimator.NUM_BYTES_INT);
+      return b;
+    }
+    
+    public void recycleIntBlocks(int[][] blocks, int offset, int length) {
+      bytesUsed.addAndGet(-(length * (IntBlockPool.INT_BLOCK_SIZE * RamUsageEstimator.NUM_BYTES_INT)));
+    }
+    
+  }
   PerDocWriteState newPerDocWriteState(String segmentSuffix) {
     assert segmentInfo != null;
     return new PerDocWriteState(infoStream, directory, segmentInfo, bytesUsed, segmentSuffix, IOContext.DEFAULT);
diff --git a/lucene/core/src/java/org/apache/lucene/index/IntBlockPool.java b/lucene/core/src/java/org/apache/lucene/index/IntBlockPool.java
index 531b287..aa9069c 100644
--- a/lucene/core/src/java/org/apache/lucene/index/IntBlockPool.java
+++ b/lucene/core/src/java/org/apache/lucene/index/IntBlockPool.java
@@ -1,7 +1,5 @@
 package org.apache.lucene.index;
 
-import java.util.Arrays;
-
 /*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with
@@ -19,47 +17,343 @@ import java.util.Arrays;
  * limitations under the License.
  */
 
-final class IntBlockPool {
+import java.util.Arrays;
+
+
+/**
+ * @lucene.internal
+ */
+public final class IntBlockPool {
+//TODO move to o.a.l.utils
+  public final static int INT_BLOCK_SHIFT = 13;
+  public final static int INT_BLOCK_SIZE = 1 << INT_BLOCK_SHIFT;
+  public final static int INT_BLOCK_MASK = INT_BLOCK_SIZE - 1;
+  
+  /** Abstract class for allocating and freeing int
+   *  blocks. */
+  public abstract static class Allocator {
+    protected final int blockSize;
+
+    public Allocator(int blockSize) {
+      this.blockSize = blockSize;
+    }
+
+    public abstract void recycleIntBlocks(int[][] blocks, int start, int end);
+
+    public int[] getIntBlock() {
+      return new int[blockSize];
+    }
+  }
+  
+  /** A simple {@link Allocator} that never recycles. */
+  public static final class DirectAllocator extends Allocator {
+
+    /**
+     * Creates a new {@link DirectAllocator} with a default block size
+     */
+    public DirectAllocator() {
+      super(INT_BLOCK_SIZE);
+    }
 
+    @Override
+    public void recycleIntBlocks(int[][] blocks, int start, int end) {
+    }
+  }
+  
   public int[][] buffers = new int[10][];
 
   int bufferUpto = -1;                        // Which buffer we are upto
-  public int intUpto = DocumentsWriterPerThread.INT_BLOCK_SIZE;             // Where we are in head buffer
-
+  public int intUpto = INT_BLOCK_SIZE;             // Where we are in head buffer
   public int[] buffer;                              // Current head buffer
-  public int intOffset = -DocumentsWriterPerThread.INT_BLOCK_SIZE;          // Current head offset
+  public int intOffset = -INT_BLOCK_SIZE;          // Current head offset
 
-  final private DocumentsWriterPerThread docWriter;
+  private final Allocator allocator;
 
-  public IntBlockPool(DocumentsWriterPerThread docWriter) {
-    this.docWriter = docWriter;
+  /**
+   * Creates a new {@link IntBlockPool} with a default {@link Allocator}.
+   * @see IntBlockPool#nextBuffer()
+   */
+  public IntBlockPool() {
+    this(new DirectAllocator());
   }
-
+  
+  /**
+   * Creates a new {@link IntBlockPool} with the given {@link Allocator}.
+   * @see IntBlockPool#nextBuffer()
+   */
+  public IntBlockPool(Allocator allocator) {
+    this.allocator = allocator;
+  }
+  
+  /**
+   * Resets the pool to its initial state reusing the first buffer. Calling
+   * {@link IntBlockPool#nextBuffer()} is not needed after reset.
+   */
   public void reset() {
+    this.reset(false);
+  }
+  
+  /**
+   * Resets the pool to its initial state reusing the first buffer. Calling
+   * {@link IntBlockPool#nextBuffer()} is not needed after reset. 
+   * @param clearBuffers if <code>true</code> the buffers are filled with <tt>0</tt>. 
+   *        This should be set to <code>true</code> if this pool is used with 
+   *        {@link SliceWriter}.
+   */
+  public void reset(boolean clearBuffers) {
     if (bufferUpto != -1) {
       // Reuse first buffer
       if (bufferUpto > 0) {
-        docWriter.recycleIntBlocks(buffers, 1, bufferUpto-1);
+        // TODO we need to make sure that if we hold on to the buffers in the allocator that we clear them as well - maybe 
+        // add this to the allocator interface?
+        allocator.recycleIntBlocks(buffers, 1, bufferUpto-1);
         Arrays.fill(buffers, 1, bufferUpto, null);
       }
       bufferUpto = 0;
       intUpto = 0;
       intOffset = 0;
       buffer = buffers[0];
+      if (clearBuffers) { // for slices we need to fill them all with 0
+        Arrays.fill(buffer, 0);
+      }
     }
   }
-
+  
+  /**
+   * Advances the pool to its next buffer. This method should be called once
+   * after the constructor to initialize the pool. In contrast to the
+   * constructor a {@link IntBlockPool#reset()} call will advance the pool to
+   * its first buffer immediately.
+   */
   public void nextBuffer() {
     if (1+bufferUpto == buffers.length) {
       int[][] newBuffers = new int[(int) (buffers.length*1.5)][];
       System.arraycopy(buffers, 0, newBuffers, 0, buffers.length);
       buffers = newBuffers;
     }
-    buffer = buffers[1+bufferUpto] = docWriter.getIntBlock();
+    buffer = buffers[1+bufferUpto] = allocator.getIntBlock();
     bufferUpto++;
 
     intUpto = 0;
-    intOffset += DocumentsWriterPerThread.INT_BLOCK_SIZE;
+    intOffset += INT_BLOCK_SIZE;
+  }
+  
+  /**
+   * Creates a new int slice with the given starting size and returns the slices offset in the pool.
+   * @see SliceReader
+   */
+  private int newSlice(final int size) {
+    if (intUpto > INT_BLOCK_SIZE-size) {
+      nextBuffer();
+      assert assertSliceBuffer(buffer);
+    }
+      
+    final int upto = intUpto;
+    intUpto += size;
+    buffer[intUpto-1] = 1;
+    return upto;
+  }
+  
+  private static final boolean assertSliceBuffer(int[] buffer) {
+    int count = 0;
+    for (int i = 0; i < buffer.length; i++) {
+      count += buffer[i]; // for slices the buffer must only have 0 values
+    }
+    return count == 0;
+  }
+  
+  
+  // no need to make this public unless we support different sizes
+  // TODO make the levels and the sizes configurable
+  /**
+   * An array holding the offset into the {@link IntBlockPool#LEVEL_SIZE_ARRAY}
+   * to quickly navigate to the next slice level.
+   */
+  private final static int[] NEXT_LEVEL_ARRAY = {1, 2, 3, 4, 5, 6, 7, 8, 9, 9};
+  /**
+   * An array holding the level sizes for int slices.
+   */
+  private final static int[] LEVEL_SIZE_ARRAY = {2, 4, 8, 16, 32, 64, 128, 256, 512, 1024};
+  
+  /**
+   * The first level size for new slices
+   */
+  private final static int FIRST_LEVEL_SIZE = LEVEL_SIZE_ARRAY[0];
+
+  /**
+   * Allocates a new slice from the given offset
+   */
+  private int allocSlice(final int[] slice, final int sliceOffset) {
+    final int level = slice[sliceOffset];
+    final int newLevel = NEXT_LEVEL_ARRAY[level-1];
+    final int newSize = LEVEL_SIZE_ARRAY[newLevel];
+    // Maybe allocate another block
+    if (intUpto > INT_BLOCK_SIZE-newSize) {
+      nextBuffer();
+      assert assertSliceBuffer(buffer);
+    }
+
+    final int newUpto = intUpto;
+    final int offset = newUpto + intOffset;
+    intUpto += newSize;
+    // Write forwarding address at end of last slice:
+    slice[sliceOffset] = offset;
+        
+    // Write new level:
+    buffer[intUpto-1] = newLevel;
+
+    return newUpto;
+  }
+  
+  /**
+   * A {@link SliceWriter} that allows to write multiple integer slices into a given {@link IntBlockPool}.
+   * 
+   *  @see SliceReader
+   *  @lucene.internal
+   */
+  public static class SliceWriter {
+    
+    private int offset;
+    private final IntBlockPool pool;
+    
+    
+    public SliceWriter(IntBlockPool pool) {
+      this.pool = pool;
+    }
+    /**
+     * Resets this writer to the given offset of a previously written slice.
+     */
+    public void reset(int sliceOffset) {
+      this.offset = sliceOffset;
+    }
+    
+    /**
+     * Writes the given value into the slice and resizes the slice if needed
+     */
+    public void writeInt(int value) {
+      int[] ints = pool.buffers[offset >> INT_BLOCK_SHIFT];
+      assert ints != null;
+      int relativeOffset = offset & INT_BLOCK_MASK;
+      if (ints[relativeOffset] != 0) {
+        // End of slice; allocate a new one
+          relativeOffset = pool.allocSlice(ints, relativeOffset);
+        ints = pool.buffer;
+        offset = relativeOffset + pool.intOffset;
+      }
+      ints[relativeOffset] = value;
+      offset++; 
+    }
+    
+    /**
+     * Starts a new slice and returns the start offset. The returned value
+     * should be used as the start offset to initialize a {@link SliceReader}.
+     */
+    public int startNewSlice() {
+      return offset = pool.newSlice(FIRST_LEVEL_SIZE) + pool.intOffset;
+      
+    }
+    
+    /**
+     * Returns the offset of the currently written slice. The returned value
+     * should be used as the end offset to initialize a {@link SliceReader} once
+     * this slice is fully written or to reset this writer if another slice
+     * needs to be written.
+     */
+    public int getCurrentOffset() {
+      return offset;
+    }
+  }
+  
+  /**
+   * A {@link SliceReader} that can read int slices written by a {@link SliceWriter}
+   * @lucene.internal
+   */
+  public static final class SliceReader {
+    
+    private final IntBlockPool pool;
+    private int upto;
+    private int bufferUpto;
+    private int bufferOffset;
+    private int[] buffer;
+    private int limit;
+    private int level;
+    private int end;
+    
+    /**
+     * Creates a new {@link SliceReader} on the given pool
+     */
+    public SliceReader(IntBlockPool pool) {
+      this.pool = pool;
+    }
+
+    /**
+     * Resets the reader to a slice given the slice's absolute start and end offsets in the pool
+     */
+    public void reset(int startOffset, int endOffset) {
+      bufferUpto = startOffset / INT_BLOCK_SIZE;
+      bufferOffset = bufferUpto * INT_BLOCK_SIZE;
+      this.end = endOffset;
+      upto = startOffset;
+      level = 1;
+      
+      buffer = pool.buffers[bufferUpto];
+      upto = startOffset & INT_BLOCK_MASK;
+
+      final int firstSize = IntBlockPool.LEVEL_SIZE_ARRAY[0];
+      if (startOffset+firstSize >= endOffset) {
+        // There is only this one slice to read
+        limit = endOffset & INT_BLOCK_MASK;
+      } else {
+        limit = upto+firstSize-1;
+      }
+
+    }
+    
+    /**
+     * Returns <code>true</code> iff the current slice is fully read. If this
+     * method returns <code>true</code> {@link SliceReader#readInt()} should not
+     * be called again on this slice.
+     */
+    public boolean endOfSlice() {
+      assert upto + bufferOffset <= end;
+      return upto + bufferOffset == end;
+    }
+    
+    /**
+     * Reads the next int from the current slice and returns it.
+     * @see SliceReader#endOfSlice()
+     */
+    public int readInt() {
+      assert !endOfSlice();
+      assert upto <= limit;
+      if (upto == limit)
+        nextSlice();
+      return buffer[upto++];
+    }
+    
+    private void nextSlice() {
+      // Skip to our next slice
+      final int nextIndex = buffer[limit];
+      level = NEXT_LEVEL_ARRAY[level-1];
+      final int newSize = LEVEL_SIZE_ARRAY[level];
+
+      bufferUpto = nextIndex / INT_BLOCK_SIZE;
+      bufferOffset = bufferUpto * INT_BLOCK_SIZE;
+
+      buffer = pool.buffers[bufferUpto];
+      upto = nextIndex & INT_BLOCK_MASK;
+
+      if (nextIndex + newSize >= end) {
+        // We are advancing to the final slice
+        assert end - nextIndex > 0;
+        limit = end - bufferOffset;
+      } else {
+        // This is not the final slice (subtract 1 for the
+        // single forwarding-address int at the end of this new slice)
+        limit = upto+newSize-1;
+      }
+    }
+    
   }
 }
 
diff --git a/lucene/core/src/java/org/apache/lucene/index/TermsHash.java b/lucene/core/src/java/org/apache/lucene/index/TermsHash.java
index 090540d..52511e9 100644
--- a/lucene/core/src/java/org/apache/lucene/index/TermsHash.java
+++ b/lucene/core/src/java/org/apache/lucene/index/TermsHash.java
@@ -23,6 +23,7 @@ import java.util.Map;
 
 import org.apache.lucene.util.ByteBlockPool;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.Counter;
 
 /** This class implements {@link InvertedDocConsumer}, which
  *  is passed each token produced by the analyzer on each
@@ -36,11 +37,11 @@ final class TermsHash extends InvertedDocConsumer {
 
   final TermsHashConsumer consumer;
   final TermsHash nextTermsHash;
-  final DocumentsWriterPerThread docWriter;
 
   final IntBlockPool intPool;
   final ByteBlockPool bytePool;
   ByteBlockPool termBytePool;
+  final Counter bytesUsed;
 
   final boolean primary;
   final DocumentsWriterPerThread.DocState docState;
@@ -56,11 +57,11 @@ final class TermsHash extends InvertedDocConsumer {
 
   public TermsHash(final DocumentsWriterPerThread docWriter, final TermsHashConsumer consumer, boolean trackAllocations, final TermsHash nextTermsHash) {
     this.docState = docWriter.docState;
-    this.docWriter = docWriter;
     this.consumer = consumer;
     this.trackAllocations = trackAllocations; 
     this.nextTermsHash = nextTermsHash;
-    intPool = new IntBlockPool(docWriter);
+    this.bytesUsed = trackAllocations ? docWriter.bytesUsed : Counter.newCounter();
+    intPool = new IntBlockPool(docWriter.intBlockAllocator);
     bytePool = new ByteBlockPool(docWriter.byteBlockAllocator);
 
     if (nextTermsHash != null) {
diff --git a/lucene/core/src/java/org/apache/lucene/index/TermsHashPerField.java b/lucene/core/src/java/org/apache/lucene/index/TermsHashPerField.java
index a3b56bf..7a3c6a6 100644
--- a/lucene/core/src/java/org/apache/lucene/index/TermsHashPerField.java
+++ b/lucene/core/src/java/org/apache/lucene/index/TermsHashPerField.java
@@ -62,8 +62,7 @@ final class TermsHashPerField extends InvertedDocConsumerPerField {
     termBytePool = termsHash.termBytePool;
     docState = termsHash.docState;
     this.termsHash = termsHash;
-    bytesUsed = termsHash.trackAllocations ? termsHash.docWriter.bytesUsed
-        : Counter.newCounter();
+    bytesUsed = termsHash.bytesUsed;
     fieldState = docInverterPerField.fieldState;
     this.consumer = termsHash.consumer.addField(this, fieldInfo);
     PostingsBytesStartArray byteStarts = new PostingsBytesStartArray(this, bytesUsed);
@@ -99,8 +98,8 @@ final class TermsHashPerField extends InvertedDocConsumerPerField {
   public void initReader(ByteSliceReader reader, int termID, int stream) {
     assert stream < streamCount;
     int intStart = postingsArray.intStarts[termID];
-    final int[] ints = intPool.buffers[intStart >> DocumentsWriterPerThread.INT_BLOCK_SHIFT];
-    final int upto = intStart & DocumentsWriterPerThread.INT_BLOCK_MASK;
+    final int[] ints = intPool.buffers[intStart >> IntBlockPool.INT_BLOCK_SHIFT];
+    final int upto = intStart & IntBlockPool.INT_BLOCK_MASK;
     reader.init(bytePool,
                 postingsArray.byteStarts[termID]+stream*ByteBlockPool.FIRST_LEVEL_SIZE,
                 ints[upto+stream]);
@@ -143,7 +142,7 @@ final class TermsHashPerField extends InvertedDocConsumerPerField {
       // First time we are seeing this token since we last
       // flushed the hash.
       // Init stream slices
-      if (numPostingInt + intPool.intUpto > DocumentsWriterPerThread.INT_BLOCK_SIZE)
+      if (numPostingInt + intPool.intUpto > IntBlockPool.INT_BLOCK_SIZE)
         intPool.nextBuffer();
 
       if (ByteBlockPool.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE) {
@@ -167,8 +166,8 @@ final class TermsHashPerField extends InvertedDocConsumerPerField {
     } else {
       termID = (-termID)-1;
       int intStart = postingsArray.intStarts[termID];
-      intUptos = intPool.buffers[intStart >> DocumentsWriterPerThread.INT_BLOCK_SHIFT];
-      intUptoStart = intStart & DocumentsWriterPerThread.INT_BLOCK_MASK;
+      intUptos = intPool.buffers[intStart >> IntBlockPool.INT_BLOCK_SHIFT];
+      intUptoStart = intStart & IntBlockPool.INT_BLOCK_MASK;
       consumer.addTerm(termID);
     }
   }
@@ -205,7 +204,7 @@ final class TermsHashPerField extends InvertedDocConsumerPerField {
     if (termID >= 0) {// New posting
       bytesHash.byteStart(termID);
       // Init stream slices
-      if (numPostingInt + intPool.intUpto > DocumentsWriterPerThread.INT_BLOCK_SIZE) {
+      if (numPostingInt + intPool.intUpto > IntBlockPool.INT_BLOCK_SIZE) {
         intPool.nextBuffer();
       }
 
@@ -230,8 +229,8 @@ final class TermsHashPerField extends InvertedDocConsumerPerField {
     } else {
       termID = (-termID)-1;
       final int intStart = postingsArray.intStarts[termID];
-      intUptos = intPool.buffers[intStart >> DocumentsWriterPerThread.INT_BLOCK_SHIFT];
-      intUptoStart = intStart & DocumentsWriterPerThread.INT_BLOCK_MASK;
+      intUptos = intPool.buffers[intStart >> IntBlockPool.INT_BLOCK_SHIFT];
+      intUptoStart = intStart & IntBlockPool.INT_BLOCK_MASK;
       consumer.addTerm(termID);
     }
 
diff --git a/lucene/core/src/java/org/apache/lucene/util/ByteBlockPool.java b/lucene/core/src/java/org/apache/lucene/util/ByteBlockPool.java
index 9a863ff..44ae694 100644
--- a/lucene/core/src/java/org/apache/lucene/util/ByteBlockPool.java
+++ b/lucene/core/src/java/org/apache/lucene/util/ByteBlockPool.java
@@ -67,6 +67,30 @@ public final class ByteBlockPool {
     public byte[] getByteBlock() {
       return new byte[blockSize];
     }
+    
+    /**
+     * Returns a synchronized allocator delegating all calls to the given delegate.
+     */
+    public static Allocator synchronizedAllocator(final Allocator delegate) {
+      return new Allocator(delegate.blockSize) {
+        
+        @Override
+        public synchronized void recycleByteBlocks(List<byte[]> blocks) {
+          delegate.recycleByteBlocks(blocks);
+        }
+
+        @Override
+        public synchronized byte[] getByteBlock() {
+          return delegate.getByteBlock();
+        }
+
+        @Override
+        public synchronized void recycleByteBlocks(byte[][] blocks, int start, int end) {
+          delegate.recycleByteBlocks(blocks, start, end);
+        }
+      };
+    }
+    
   }
   
   /** A simple {@link Allocator} that never recycles. */
diff --git a/lucene/core/src/java/org/apache/lucene/util/RecyclingByteBlockAllocator.java b/lucene/core/src/java/org/apache/lucene/util/RecyclingByteBlockAllocator.java
index 6fd2b79..ee8c72b 100644
--- a/lucene/core/src/java/org/apache/lucene/util/RecyclingByteBlockAllocator.java
+++ b/lucene/core/src/java/org/apache/lucene/util/RecyclingByteBlockAllocator.java
@@ -1,7 +1,5 @@
 package org.apache.lucene.util;
 
-import java.util.concurrent.atomic.AtomicLong;
-
 import org.apache.lucene.util.ByteBlockPool.Allocator;
 
 /*
@@ -22,7 +20,7 @@ import org.apache.lucene.util.ByteBlockPool.Allocator;
  */
 
 /**
- * A threadsafe {@link ByteBlockPool.Allocator} implementation that recycles unused byte
+ * A {@link ByteBlockPool.Allocator} implementation that recycles unused byte
  * blocks in a buffer and reuses them in subsequent calls to
  * {@link #getByteBlock()}.
  * 
@@ -32,7 +30,7 @@ public final class RecyclingByteBlockAllocator extends ByteBlockPool.Allocator {
   private byte[][] freeByteBlocks;
   private final int maxBufferedBlocks;
   private int freeBlocks = 0;
-  private final AtomicLong bytesUsed;
+  private final Counter bytesUsed;
   public static final int DEFAULT_BUFFERED_BLOCKS = 64;
 
   /**
@@ -43,10 +41,10 @@ public final class RecyclingByteBlockAllocator extends ByteBlockPool.Allocator {
    * @param maxBufferedBlocks
    *          maximum number of buffered byte block
    * @param bytesUsed
-   *          {@link AtomicLong} reference counting internally allocated bytes
+   *          {@link Counter} reference counting internally allocated bytes
    */
   public RecyclingByteBlockAllocator(int blockSize, int maxBufferedBlocks,
-      AtomicLong bytesUsed) {
+      Counter bytesUsed) {
     super(blockSize);
     freeByteBlocks = new byte[Math.min(10, maxBufferedBlocks)][];
     this.maxBufferedBlocks = maxBufferedBlocks;
@@ -62,7 +60,7 @@ public final class RecyclingByteBlockAllocator extends ByteBlockPool.Allocator {
    *          maximum number of buffered byte block
    */
   public RecyclingByteBlockAllocator(int blockSize, int maxBufferedBlocks) {
-    this(blockSize, maxBufferedBlocks, new AtomicLong());
+    this(blockSize, maxBufferedBlocks, Counter.newCounter(false));
   }
 
   /**
@@ -72,11 +70,11 @@ public final class RecyclingByteBlockAllocator extends ByteBlockPool.Allocator {
    * 
    */
   public RecyclingByteBlockAllocator() {
-    this(ByteBlockPool.BYTE_BLOCK_SIZE, 64, new AtomicLong());
+    this(ByteBlockPool.BYTE_BLOCK_SIZE, 64, Counter.newCounter(false));
   }
 
   @Override
-  public synchronized byte[] getByteBlock() {
+  public byte[] getByteBlock() {
     if (freeBlocks == 0) {
       bytesUsed.addAndGet(blockSize);
       return new byte[blockSize];
@@ -87,7 +85,7 @@ public final class RecyclingByteBlockAllocator extends ByteBlockPool.Allocator {
   }
 
   @Override
-  public synchronized void recycleByteBlocks(byte[][] blocks, int start, int end) {
+  public void recycleByteBlocks(byte[][] blocks, int start, int end) {
     final int numBlocks = Math.min(maxBufferedBlocks - freeBlocks, end - start);
     final int size = freeBlocks + numBlocks;
     if (size >= freeByteBlocks.length) {
@@ -111,14 +109,14 @@ public final class RecyclingByteBlockAllocator extends ByteBlockPool.Allocator {
   /**
    * @return the number of currently buffered blocks
    */
-  public synchronized int numBufferedBlocks() {
+  public int numBufferedBlocks() {
     return freeBlocks;
   }
 
   /**
    * @return the number of bytes currently allocated by this {@link Allocator}
    */
-  public synchronized long bytesUsed() {
+  public long bytesUsed() {
     return bytesUsed.get();
   }
 
@@ -136,8 +134,8 @@ public final class RecyclingByteBlockAllocator extends ByteBlockPool.Allocator {
    *          the number of byte blocks to remove
    * @return the number of actually removed buffers
    */
-  public synchronized int freeBlocks(int num) {
-    assert num >= 0;
+  public int freeBlocks(int num) {
+    assert num >= 0 : "free blocks must be >= 0 but was: "+ num;
     final int stop;
     final int count;
     if (num > freeBlocks) {
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIntBlockPool.java b/lucene/core/src/test/org/apache/lucene/index/TestIntBlockPool.java
new file mode 100644
index 0000000..72ad438
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIntBlockPool.java
@@ -0,0 +1,111 @@
+package org.apache.lucene.index;
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.lucene.util.LuceneTestCase;
+
+/**
+ * tests basic {@link IntBlockPool} functionality
+ */
+public class TestIntBlockPool extends LuceneTestCase {
+  
+  public void testSingleWriterReader() {
+    IntBlockPool pool = new IntBlockPool();
+    for (int j = 0; j < 2; j++) {
+      IntBlockPool.SliceWriter writer = new IntBlockPool.SliceWriter(pool);
+      int start = writer.startNewSlice();
+      int num = atLeast(100);
+      for (int i = 0; i < num; i++) {
+        writer.writeInt(i);
+      }
+      
+      int upto = writer.getCurrentOffset();
+      IntBlockPool.SliceReader reader = new IntBlockPool.SliceReader(pool);
+      reader.reset(start, upto);
+      for (int i = 0; i < num; i++) {
+        assertEquals(i, reader.readInt());
+      }
+      assertTrue(reader.endOfSlice());
+      pool.reset(true);
+    }
+  }
+  
+  public void testMultipleWriterReader() {
+    IntBlockPool pool = new IntBlockPool();
+    for (int j = 0; j < 2; j++) {
+      List<StartEndAndValues> holders = new ArrayList<TestIntBlockPool.StartEndAndValues>();
+      int num = atLeast(4);
+      for (int i = 0; i < num; i++) {
+        holders.add(new StartEndAndValues(random().nextInt(1000)));
+      }
+      IntBlockPool.SliceWriter writer = new IntBlockPool.SliceWriter(pool);
+      IntBlockPool.SliceReader reader = new IntBlockPool.SliceReader(pool);
+      
+      int numValuesToWrite = atLeast(10000);
+      for (int i = 0; i < numValuesToWrite; i++) {
+        StartEndAndValues values = holders
+            .get(random().nextInt(holders.size()));
+        if (values.valueCount == 0) {
+          values.start = writer.startNewSlice();
+        } else {
+          writer.reset(values.end);
+        }
+        writer.writeInt(values.nextValue());
+        values.end = writer.getCurrentOffset();
+        if (random().nextInt(5) == 0) {
+          // pick one and read the ints
+          assertReader(reader, holders.get(random().nextInt(holders.size())));
+        }
+      }
+      
+      while (!holders.isEmpty()) {
+        StartEndAndValues values = holders.remove(random().nextInt(
+            holders.size()));
+        assertReader(reader, values);
+    }
+     pool.reset(true);
+    }
+  }
+
+  private void assertReader(IntBlockPool.SliceReader reader,
+      StartEndAndValues values) {
+    reader.reset(values.start, values.end);
+    for (int i = 0; i < values.valueCount; i++) {
+      assertEquals(values.valueOffset + i, reader.readInt());        
+    }
+    assertTrue(reader.endOfSlice());
+  }
+  
+  private static class StartEndAndValues {
+    int valueOffset;
+    int valueCount;
+    int start;
+    int end;
+    
+    public StartEndAndValues(int valueOffset) {
+      this.valueOffset = valueOffset;
+    }
+    
+    public int nextValue() {
+      return valueOffset + valueCount++;
+    }
+    
+  }
+  
+}
diff --git a/lucene/core/src/test/org/apache/lucene/util/TestRecyclingByteBlockAllocator.java b/lucene/core/src/test/org/apache/lucene/util/TestRecyclingByteBlockAllocator.java
index 504cba6..8d81f65 100644
--- a/lucene/core/src/test/org/apache/lucene/util/TestRecyclingByteBlockAllocator.java
+++ b/lucene/core/src/test/org/apache/lucene/util/TestRecyclingByteBlockAllocator.java
@@ -39,7 +39,7 @@ public class TestRecyclingByteBlockAllocator extends LuceneTestCase {
 
   private RecyclingByteBlockAllocator newAllocator() {
     return new RecyclingByteBlockAllocator(1 << (2 + random().nextInt(15)),
-        random().nextInt(97), new AtomicLong());
+        random().nextInt(97), Counter.newCounter());
   }
 
   @Test
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java
index 4412738..21c0fd8 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java
@@ -44,6 +44,8 @@ import org.apache.lucene.search.spans.SpanQuery;
 import org.apache.lucene.search.spans.SpanTermQuery;
 import org.apache.lucene.search.spans.Spans;
 import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.ByteBlockPool;
+import org.apache.lucene.util.RecyclingByteBlockAllocator;
 
 /**
  * Class used to extract {@link WeightedSpanTerm}s from a {@link Query} based on whether 
@@ -53,7 +55,9 @@ public class WeightedSpanTermExtractor {
 
   private String fieldName;
   private TokenStream tokenStream;
-  private Map<String,AtomicReaderContext> readers = new HashMap<String,AtomicReaderContext>(10); 
+  private Map<String,AtomicReaderContext> readers = new HashMap<String,AtomicReaderContext>(10);
+  private List<MemoryIndex> createdMemIndices = new ArrayList<MemoryIndex>();
+  private ByteBlockPool.Allocator allocator = new RecyclingByteBlockAllocator(); //TODO make this configurable
   private String defaultField;
   private boolean expandMultiTermQuery;
   private boolean cachedTokenStream;
@@ -79,6 +83,10 @@ public class WeightedSpanTermExtractor {
         // alert?
       }
     }
+    
+    for (MemoryIndex memIdx : createdMemIndices) {
+      memIdx.reset(); // releases the memory to the pool
+    }
   }
 
   /**
@@ -345,12 +353,13 @@ public class WeightedSpanTermExtractor {
     }
     AtomicReaderContext context = readers.get(field);
     if (context == null) {
-      MemoryIndex indexer = new MemoryIndex();
+      MemoryIndex indexer = new MemoryIndex(false, this.allocator);
       indexer.addField(field, new OffsetLimitTokenFilter(tokenStream, maxDocCharsToAnalyze));
       tokenStream.reset();
       IndexSearcher searcher = indexer.createSearcher();
       // MEM index has only atomic ctx
       context = (AtomicReaderContext) searcher.getTopReaderContext();
+      createdMemIndices.add(indexer);
       readers.put(field, context);
     }
 
diff --git a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
index de10a04..d9455ec 100644
--- a/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
+++ b/lucene/memory/src/java/org/apache/lucene/index/memory/MemoryIndex.java
@@ -36,6 +36,9 @@ import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
 import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.IntBlockPool;
+import org.apache.lucene.index.IntBlockPool.SliceReader;
+import org.apache.lucene.index.IntBlockPool.SliceWriter;
 import org.apache.lucene.index.Norm;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.DocsAndPositionsEnum;
@@ -58,7 +61,10 @@ import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.store.RAMDirectory; // for javadocs
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.ByteBlockPool;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefHash;
+import org.apache.lucene.util.BytesRefHash.DirectBytesStartArray;
 import org.apache.lucene.util.Constants; // for javadocs
 import org.apache.lucene.util.RamUsageEstimator;
 
@@ -191,14 +197,15 @@ public class MemoryIndex {
   /** fields sorted ascending by fieldName; lazily computed on demand */
   private transient Map.Entry<String,Info>[] sortedFields; 
   
-  /** pos: positions[3*i], startOffset: positions[3*i +1], endOffset: positions[3*i +2] */
-  private final int stride;
+  private final boolean storeOffsets;
   
-  /** Could be made configurable; */
-  private static final float docBoost = 1.0f;
-
   private static final boolean DEBUG = false;
 
+  private final ByteBlockPool byteBlockPool;
+  private final IntBlockPool intBlockPool;
+//  private final IntBlockPool.SliceReader postingsReader;
+  private final IntBlockPool.SliceWriter postingsWriter;
+  
   private HashMap<String,FieldInfo> fieldInfos = new HashMap<String,FieldInfo>();
   
   /**
@@ -233,8 +240,17 @@ public class MemoryIndex {
    *            whether or not to store the start and end character offset of
    *            each token term in the text
    */
-  protected MemoryIndex(boolean storeOffsets) {
-    this.stride = storeOffsets ? 3 : 1;
+  public MemoryIndex(boolean storeOffsets) {
+    this(storeOffsets, new ByteBlockPool.DirectAllocator());
+    //nocommit why was this protected
+    
+  }
+  
+  public MemoryIndex(boolean storeOffsets, ByteBlockPool.Allocator allocator) {
+    this.storeOffsets = storeOffsets;
+    byteBlockPool = new ByteBlockPool(allocator);
+    intBlockPool = new IntBlockPool(); // nocommit expose allocator and impl a recycling one
+    postingsWriter = new SliceWriter(intBlockPool);
   }
   
   /**
@@ -265,7 +281,7 @@ public class MemoryIndex {
       throw new RuntimeException(ex);
     }
 
-    addField(fieldName, stream);
+    addField(fieldName, stream, 1.0f, analyzer.getPositionIncrementGap(fieldName));
   }
   
   /**
@@ -319,7 +335,7 @@ public class MemoryIndex {
   public void addField(String fieldName, TokenStream stream) {
     addField(fieldName, stream, 1.0f);
   }
-
+  
   /**
    * Iterates over the given token stream and adds the resulting terms to the index;
    * Equivalent to adding a tokenized, indexed, termVectorStored, unstored,
@@ -333,9 +349,14 @@ public class MemoryIndex {
    *            the token stream to retrieve tokens from.
    * @param boost
    *            the boost factor for hits for this field
+   *  
    * @see org.apache.lucene.document.Field#setBoost(float)
    */
+  
   public void addField(String fieldName, TokenStream stream, float boost) {
+    addField(fieldName, stream, boost, 0);
+  }
+  public void addField(String fieldName, TokenStream stream, float boost, int positionIncrementGap) {
     try {
       if (fieldName == null)
         throw new IllegalArgumentException("fieldName must not be null");
@@ -343,24 +364,34 @@ public class MemoryIndex {
           throw new IllegalArgumentException("token stream must not be null");
       if (boost <= 0.0f)
           throw new IllegalArgumentException("boost factor must be greater than 0.0");
-      if (fields.get(fieldName) != null)
-        throw new IllegalArgumentException("field must not be added more than once");
-      
-      HashMap<BytesRef,ArrayIntList> terms = new HashMap<BytesRef,ArrayIntList>();
       int numTokens = 0;
       int numOverlapTokens = 0;
       int pos = -1;
+      final BytesRefHash terms;
+      final SliceByteStartArray sliceArray;
+      Info info = null;
+      if ((info = fields.get(fieldName)) != null) {
+        numTokens = info.numTokens;
+        numOverlapTokens = info.numOverlapTokens;
+        pos = info.lastPosition + positionIncrementGap;
+        terms = info.terms;
+        boost *= info.boost;
+        sliceArray = info.sliceArray;
+      } else {
+        sliceArray = new SliceByteStartArray(BytesRefHash.DEFAULT_CAPACITY);
+        terms = new BytesRefHash(byteBlockPool, BytesRefHash.DEFAULT_CAPACITY, sliceArray);
+      }
 
       if (!fieldInfos.containsKey(fieldName)) {
         fieldInfos.put(fieldName, 
             new FieldInfo(fieldName, true, fieldInfos.size(), false, false, false, IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, null, null, null));
       }
-      
       TermToBytesRefAttribute termAtt = stream.getAttribute(TermToBytesRefAttribute.class);
       PositionIncrementAttribute posIncrAttribute = stream.addAttribute(PositionIncrementAttribute.class);
       OffsetAttribute offsetAtt = stream.addAttribute(OffsetAttribute.class);
       BytesRef ref = termAtt.getBytesRef();
       stream.reset();
+      
       while (stream.incrementToken()) {
         termAtt.fillBytesRef();
         if (ref.length == 0) continue; // nothing to do
@@ -370,24 +401,28 @@ public class MemoryIndex {
         if (posIncr == 0)
           numOverlapTokens++;
         pos += posIncr;
-        
-        ArrayIntList positions = terms.get(ref);
-        if (positions == null) { // term not seen before
-          positions = new ArrayIntList(stride);
-          terms.put(BytesRef.deepCopyOf(ref), positions);
+        int ord = terms.add(ref);
+        if (ord < 0) {
+          ord = (-ord) - 1;
+          postingsWriter.reset(sliceArray.end[ord]);
+        } else {
+          sliceArray.start[ord] = postingsWriter.startNewSlice();
         }
-        if (stride == 1) {
-          positions.add(pos);
+        sliceArray.freq[ord]++;
+        if (!storeOffsets) {
+          postingsWriter.writeInt(pos);
         } else {
-          positions.add(pos, offsetAtt.startOffset(), offsetAtt.endOffset());
+          postingsWriter.writeInt(pos);
+          postingsWriter.writeInt(offsetAtt.startOffset());
+          postingsWriter.writeInt(offsetAtt.endOffset());
         }
+        sliceArray.end[ord] = postingsWriter.getCurrentOffset();
       }
       stream.end();
 
       // ensure infos.numTokens > 0 invariant; needed for correct operation of terms()
       if (numTokens > 0) {
-        boost = boost * docBoost; // see DocumentWriter.addDocument(...)
-        fields.put(fieldName, new Info(terms, numTokens, numOverlapTokens, boost));
+        fields.put(fieldName, new Info(terms, sliceArray, numTokens, numOverlapTokens, boost, pos));
         sortedFields = null;    // invalidate sorted view, if any
       }
     } catch (IOException e) { // can never happen
@@ -484,10 +519,6 @@ public class MemoryIndex {
     return RamUsageEstimator.sizeOf(this);
   }
 
-  private int numPositions(ArrayIntList positions) {
-    return positions.size() / stride;
-  }
-  
   /** sorts into ascending order (on demand), reusing memory along the way */
   private void sortFields() {
     if (sortedFields == null) sortedFields = sort(fields);
@@ -519,31 +550,50 @@ public class MemoryIndex {
     sortFields();   
     int sumPositions = 0;
     int sumTerms = 0;
-    
+    final BytesRef spare = new BytesRef();
     for (int i=0; i < sortedFields.length; i++) {
       Map.Entry<String,Info> entry = sortedFields[i];
       String fieldName = entry.getKey();
       Info info = entry.getValue();
       info.sortTerms();
       result.append(fieldName + ":\n");
-      
+      SliceByteStartArray sliceArray = info.sliceArray;
       int numPositions = 0;
-      for (int j=0; j < info.sortedTerms.length; j++) {
-        Map.Entry<BytesRef,ArrayIntList> e = info.sortedTerms[j];
-        BytesRef term = e.getKey();
-        ArrayIntList positions = e.getValue();
-        result.append("\t'" + term + "':" + numPositions(positions) + ":");
-        result.append(positions.toString(stride)); // ignore offsets
+      SliceReader postingsReader = new SliceReader(intBlockPool);
+      for (int j=0; j < info.terms.size(); j++) {
+        int ord = info.sortedTerms[j];
+        info.terms.get(ord, spare);
+        int freq = sliceArray.freq[ord];
+        result.append("\t'" + spare + "':" + freq + ":");
+        postingsReader.reset(sliceArray.start[ord], sliceArray.end[ord]);
+        result.append(" [");
+        final int iters = storeOffsets ? 3 : 1; 
+        while(!postingsReader.endOfSlice()) {
+          result.append("(");
+          
+          for (int k = 0; k < iters; k++) {
+            result.append(postingsReader.readInt());
+            if (k < iters-1) {
+              result.append(", ");
+            }
+          }
+          result.append(")");
+          if (!postingsReader.endOfSlice()) {
+            result.append(",");
+          }
+          
+        }
+        result.append("]");
         result.append("\n");
-        numPositions += numPositions(positions);
+        numPositions += freq;
       }
       
-      result.append("\tterms=" + info.sortedTerms.length);
+      result.append("\tterms=" + info.terms.size());
       result.append(", positions=" + numPositions);
       result.append(", memory=" + RamUsageEstimator.humanReadableUnits(RamUsageEstimator.sizeOf(info)));
       result.append("\n");
       sumPositions += numPositions;
-      sumTerms += info.sortedTerms.length;
+      sumTerms += info.terms.size();
     }
     
     result.append("\nfields=" + sortedFields.length);
@@ -563,10 +613,12 @@ public class MemoryIndex {
      * Term strings and their positions for this field: Map <String
      * termText, ArrayIntList positions>
      */
-    private final HashMap<BytesRef,ArrayIntList> terms; 
+    private final BytesRefHash terms; 
+    
+    private final SliceByteStartArray sliceArray;
     
     /** Terms sorted ascending by term text; computed on demand */
-    private transient Map.Entry<BytesRef,ArrayIntList>[] sortedTerms;
+    private transient int[] sortedTerms;
     
     /** Number of added tokens for this field */
     private final int numTokens;
@@ -579,16 +631,21 @@ public class MemoryIndex {
 
     private final long sumTotalTermFreq;
 
-    public Info(HashMap<BytesRef,ArrayIntList> terms, int numTokens, int numOverlapTokens, float boost) {
+    /** The last position encountered in this field for multi-field support. */
+    private int lastPosition;
+
+    public Info(BytesRefHash terms, SliceByteStartArray sliceArray, int numTokens, int numOverlapTokens, float boost, int lastPosition) {
       this.terms = terms;
+      this.sliceArray = sliceArray; 
       this.numTokens = numTokens;
       this.numOverlapTokens = numOverlapTokens;
       this.boost = boost;
       long sum = 0;
-      for(Map.Entry<BytesRef,ArrayIntList> ent : terms.entrySet()) {
-        sum += ent.getValue().size();
+      for (int i = 0; i < terms.size(); i++) {
+        sum+=sliceArray.freq[i];
       }
       sumTotalTermFreq = sum;
+      this.lastPosition = lastPosition;
     }
 
     public long getSumTotalTermFreq() {
@@ -604,83 +661,15 @@ public class MemoryIndex {
      * apart from more sophisticated Tries / prefix trees).
      */
     public void sortTerms() {
-      if (sortedTerms == null) sortedTerms = sort(terms);
+      if (sortedTerms == null) 
+        sortedTerms = terms.sort(BytesRef.getUTF8SortedAsUnicodeComparator());
     }
         
     public float getBoost() {
       return boost;
     }
-    
-  }
-  
-  
-  ///////////////////////////////////////////////////////////////////////////////
-  // Nested classes:
-  ///////////////////////////////////////////////////////////////////////////////
-  /**
-   * Efficient resizable auto-expanding list holding <code>int</code> elements;
-   * implemented with arrays.
-   */
-  private static final class ArrayIntList {
-
-    private int[] elements;
-    private int size = 0;
-      
-    public ArrayIntList(int initialCapacity) {
-      elements = new int[initialCapacity];
-    }
-
-    public void add(int elem) {
-      if (size == elements.length) ensureCapacity(size + 1);
-      elements[size++] = elem;
-    }
-
-    public void add(int pos, int start, int end) {
-      if (size + 3 > elements.length) ensureCapacity(size + 3);
-      elements[size] = pos;
-      elements[size+1] = start;
-      elements[size+2] = end;
-      size += 3;
-    }
-
-    public int get(int index) {
-      if (index >= size) throwIndex(index);
-      return elements[index];
-    }
-    
-    public int size() {
-      return size;
-    }
-    
-    private void ensureCapacity(int minCapacity) {
-      int newCapacity = Math.max(minCapacity, (elements.length * 3) / 2 + 1);
-      int[] newElements = new int[newCapacity];
-      System.arraycopy(elements, 0, newElements, 0, size);
-      elements = newElements;
-    }
-
-    private void throwIndex(int index) {
-      throw new IndexOutOfBoundsException("index: " + index
-            + ", size: " + size);
-    }
-    
-    /** returns the first few positions (without offsets); debug only */
-    public String toString(int stride) {
-      int s = size() / stride;
-      int len = Math.min(10, s); // avoid printing huge lists
-      StringBuilder buf = new StringBuilder(4*len);
-      buf.append("[");
-      for (int i = 0; i < len; i++) {
-        buf.append(get(i*stride));
-        if (i < len-1) buf.append(", ");
-      }
-      if (len != s) buf.append(", ..."); // and some more...
-      buf.append("]");
-      return buf.toString();
-    }   
   }
   
-  
   ///////////////////////////////////////////////////////////////////////////////
   // Nested classes:
   ///////////////////////////////////////////////////////////////////////////////
@@ -764,7 +753,7 @@ public class MemoryIndex {
 
             @Override
             public long size() {
-              return info.sortedTerms.length;
+              return info.terms.size();
             }
 
             @Override
@@ -775,17 +764,17 @@ public class MemoryIndex {
             @Override
             public long getSumDocFreq() {
               // each term has df=1
-              return info.sortedTerms.length;
+              return info.terms.size();
             }
 
             @Override
             public int getDocCount() {
-              return info.sortedTerms.length > 0 ? 1 : 0;
+              return info.terms.size() > 0 ? 1 : 0;
             }
 
             @Override
             public boolean hasOffsets() {
-              return stride == 3;
+              return storeOffsets;
             }
 
             @Override
@@ -822,48 +811,62 @@ public class MemoryIndex {
         this.info = info;
         info.sortTerms();
       }
+      
+      private final int binarySearch(BytesRef b, BytesRef bytesRef, int low,
+          int high, BytesRefHash hash, int[] ords, Comparator<BytesRef> comparator) {
+        int mid = 0;
+        while (low <= high) {
+          mid = (low + high) >>> 1;
+          hash.get(ords[mid], bytesRef);
+          final int cmp = comparator.compare(bytesRef, b);
+          if (cmp < 0) {
+            low = mid + 1;
+          } else if (cmp > 0) {
+            high = mid - 1;
+          } else {
+            return mid;
+          }
+        }
+        assert comparator.compare(bytesRef, b) != 0;
+        return -(low + 1);
+      }
+    
 
       @Override
       public boolean seekExact(BytesRef text, boolean useCache) {
-        termUpto = Arrays.binarySearch(info.sortedTerms, text, termComparator);
-        if (termUpto >= 0) {
-          br.copyBytes(info.sortedTerms[termUpto].getKey());
-          return true;
-        } else {
-          return false;
-        }
+        termUpto = binarySearch(text, br, 0, info.terms.size()-1, info.terms, info.sortedTerms, BytesRef.getUTF8SortedAsUnicodeComparator());
+        return termUpto >= 0;
       }
 
       @Override
       public SeekStatus seekCeil(BytesRef text, boolean useCache) {
-        termUpto = Arrays.binarySearch(info.sortedTerms, text, termComparator);
+        termUpto = binarySearch(text, br, 0, info.terms.size()-1, info.terms, info.sortedTerms, BytesRef.getUTF8SortedAsUnicodeComparator());
         if (termUpto < 0) { // not found; choose successor
-          termUpto = -termUpto -1;
-          if (termUpto >= info.sortedTerms.length) {
+          termUpto = -termUpto-1;
+          if (termUpto >= info.terms.size()) {
             return SeekStatus.END;
           } else {
-            br.copyBytes(info.sortedTerms[termUpto].getKey());
+            info.terms.get(info.sortedTerms[termUpto], br);
             return SeekStatus.NOT_FOUND;
           }
         } else {
-          br.copyBytes(info.sortedTerms[termUpto].getKey());
           return SeekStatus.FOUND;
         }
       }
 
       @Override
       public void seekExact(long ord) {
-        assert ord < info.sortedTerms.length;
+        assert ord < info.terms.size();
         termUpto = (int) ord;
       }
       
       @Override
       public BytesRef next() {
         termUpto++;
-        if (termUpto >= info.sortedTerms.length) {
+        if (termUpto >= info.terms.size()) {
           return null;
         } else {
-          br.copyBytes(info.sortedTerms[termUpto].getKey());
+          info.terms.get(info.sortedTerms[termUpto], br);
           return br;
         }
       }
@@ -885,7 +888,7 @@ public class MemoryIndex {
 
       @Override
       public long totalTermFreq() {
-        return info.sortedTerms[termUpto].getValue().size();
+        return info.sliceArray.freq[info.sortedTerms[termUpto]];
       }
 
       @Override
@@ -893,7 +896,7 @@ public class MemoryIndex {
         if (reuse == null || !(reuse instanceof MemoryDocsEnum)) {
           reuse = new MemoryDocsEnum();
         }
-        return ((MemoryDocsEnum) reuse).reset(liveDocs, info.sortedTerms[termUpto].getValue());
+        return ((MemoryDocsEnum) reuse).reset(liveDocs, info.sliceArray.freq[info.sortedTerms[termUpto]]);
       }
 
       @Override
@@ -901,7 +904,8 @@ public class MemoryIndex {
         if (reuse == null || !(reuse instanceof MemoryDocsAndPositionsEnum)) {
           reuse = new MemoryDocsAndPositionsEnum();
         }
-        return ((MemoryDocsAndPositionsEnum) reuse).reset(liveDocs, info.sortedTerms[termUpto].getValue());
+        final int ord = info.sortedTerms[termUpto];
+        return ((MemoryDocsAndPositionsEnum) reuse).reset(liveDocs, info.sliceArray.start[ord], info.sliceArray.end[ord], info.sliceArray.freq[ord]);
       }
 
       @Override
@@ -924,16 +928,16 @@ public class MemoryIndex {
     }
     
     private class MemoryDocsEnum extends DocsEnum {
-      private ArrayIntList positions;
       private boolean hasNext;
       private Bits liveDocs;
       private int doc = -1;
+      private int freq;
 
-      public DocsEnum reset(Bits liveDocs, ArrayIntList positions) {
+      public DocsEnum reset(Bits liveDocs, int freq) {
         this.liveDocs = liveDocs;
-        this.positions = positions;
         hasNext = true;
         doc = -1;
+        this.freq = freq;
         return this;
       }
 
@@ -959,26 +963,35 @@ public class MemoryIndex {
 
       @Override
       public int freq() throws IOException {
-        return positions.size();
+        return freq;
       }
     }
     
     private class MemoryDocsAndPositionsEnum extends DocsAndPositionsEnum {
-      private ArrayIntList positions;
       private int posUpto;
       private boolean hasNext;
       private Bits liveDocs;
       private int doc = -1;
+      private SliceReader sliceReader;
+      private int freq;
+      private int startOffset;
+      private int endOffset;
+      
+      public MemoryDocsAndPositionsEnum() {
+        this.sliceReader = new SliceReader(intBlockPool);
+      }
 
-      public DocsAndPositionsEnum reset(Bits liveDocs, ArrayIntList positions) {
+      public DocsAndPositionsEnum reset(Bits liveDocs, int start, int end, int freq) {
         this.liveDocs = liveDocs;
-        this.positions = positions;
+        this.sliceReader.reset(start, end);
         posUpto = 0;
         hasNext = true;
         doc = -1;
+        this.freq = freq;
         return this;
       }
 
+
       @Override
       public int docID() {
         return doc;
@@ -1001,22 +1014,30 @@ public class MemoryIndex {
 
       @Override
       public int freq() throws IOException {
-        return positions.size() / stride;
+        return freq;
       }
 
       @Override
       public int nextPosition() {
-        return positions.get(posUpto++ * stride);
+        assert !sliceReader.endOfSlice() : " stores offsets : " + startOffset;
+        if (storeOffsets) {
+          int pos = sliceReader.readInt();
+          startOffset = sliceReader.readInt();
+          endOffset = sliceReader.readInt();
+          return pos;
+        } else {
+          return sliceReader.readInt();
+        }
       }
 
       @Override
       public int startOffset() {
-        return stride == 1 ? -1 : positions.get((posUpto - 1) * stride + 1);
+        return startOffset;
       }
 
       @Override
       public int endOffset() {
-        return stride == 1 ? -1 : positions.get((posUpto - 1) * stride + 2);
+        return endOffset;
       }
 
       @Override
@@ -1105,4 +1126,58 @@ public class MemoryIndex {
       return norms;
     }
   }
+  
+  /**
+   * Resets the {@link MemoryIndex} to its initial state and recycles all internal buffers.
+   */
+  public void reset() {
+    this.fieldInfos.clear();
+    this.fields.clear();
+    this.sortedFields = null;
+    byteBlockPool.dropBuffersAndReset();
+    intBlockPool.reset(true);
+  }
+  
+  private static final class SliceByteStartArray extends DirectBytesStartArray {
+    int[] start; // nocommit maybe we can save the end array and just check freq - need to change the SliceReader for this
+    int[] end;
+    int[] freq;
+    
+    public SliceByteStartArray(int initSize) {
+      super(initSize);
+    }
+    
+    @Override
+    public int[] init() {
+      final int[] ord = super.init();
+      start = new int[ArrayUtil.oversize(ord.length, RamUsageEstimator.NUM_BYTES_INT)];
+      end = new int[ArrayUtil.oversize(ord.length, RamUsageEstimator.NUM_BYTES_INT)];
+      freq = new int[ArrayUtil.oversize(ord.length, RamUsageEstimator.NUM_BYTES_INT)];
+      assert start.length >= ord.length;
+      assert end.length >= ord.length;
+      assert freq.length >= ord.length;
+      return ord;
+    }
+
+    @Override
+    public int[] grow() {
+      final int[] ord = super.grow();
+      if (start.length < ord.length) {
+        start = ArrayUtil.grow(start, ord.length);
+        end = ArrayUtil.grow(end, ord.length);
+        freq = ArrayUtil.grow(freq, ord.length);
+      }      
+      assert start.length >= ord.length;
+      assert end.length >= ord.length;
+      assert freq.length >= ord.length;
+      return ord;
+    }
+
+    @Override
+    public int[] clear() {
+     start = end = null;
+     return super.clear();
+    }
+    
+  }
 }
diff --git a/lucene/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java b/lucene/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
index 77dcedf..e7a5052 100644
--- a/lucene/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
+++ b/lucene/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
@@ -45,6 +45,7 @@ import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.queryparser.classic.QueryParser;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.RegexpQuery;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper;
@@ -52,8 +53,11 @@ import org.apache.lucene.search.spans.SpanOrQuery;
 import org.apache.lucene.search.spans.SpanQuery;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
+import org.apache.lucene.util.ByteBlockPool.Allocator;
+import org.apache.lucene.util.ByteBlockPool;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.RamUsageEstimator;
+import org.apache.lucene.util.RecyclingByteBlockAllocator;
 import org.apache.lucene.util._TestUtil;
 
 /**
@@ -94,15 +98,18 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
    * runs random tests, up to ITERATIONS times.
    */
   public void testRandomQueries() throws Exception {
-    for (int i = 0; i < ITERATIONS; i++)
-      assertAgainstRAMDirectory();
+    MemoryIndex index =  new MemoryIndex(random().nextBoolean(), randomByteBlockAllocator());
+    for (int i = 0; i < ITERATIONS; i++) {
+      assertAgainstRAMDirectory(index);
+    }
   }
-
+  
   /**
    * Build a randomish document for both RAMDirectory and MemoryIndex,
    * and run all the queries against it.
    */
-  public void assertAgainstRAMDirectory() throws Exception {
+  public void assertAgainstRAMDirectory(MemoryIndex memory) throws Exception {
+    memory.reset();
     StringBuilder fooField = new StringBuilder();
     StringBuilder termField = new StringBuilder();
  
@@ -132,7 +139,6 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
     writer.addDocument(doc);
     writer.close();
     
-    MemoryIndex memory = new MemoryIndex();
     memory.addField("foo", fooField.toString(), analyzer);
     memory.addField("term", termField.toString(), analyzer);
     
@@ -160,7 +166,7 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
     for (String query : queries) {
       TopDocs ramDocs = ram.search(qp.parse(query), 1);
       TopDocs memDocs = mem.search(qp.parse(query), 1);
-      assertEquals(ramDocs.totalHits, memDocs.totalHits);
+      assertEquals(query, ramDocs.totalHits, memDocs.totalHits);
     }
     reader.close();
   }
@@ -202,7 +208,7 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
   
   public void testDocsEnumStart() throws Exception {
     Analyzer analyzer = new MockAnalyzer(random());
-    MemoryIndex memory = new MemoryIndex();
+    MemoryIndex memory = new MemoryIndex(random().nextBoolean(), randomByteBlockAllocator());
     memory.addField("foo", "bar", analyzer);
     AtomicReader reader = (AtomicReader) memory.createSearcher().getIndexReader();
     DocsEnum disi = _TestUtil.docs(random(), reader, "foo", new BytesRef("bar"), null, null, 0);
@@ -220,27 +226,39 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
     reader.close();
   }
   
+  private Allocator randomByteBlockAllocator() {
+    if (random().nextBoolean()) {
+      return new RecyclingByteBlockAllocator();
+    } else {
+      return new ByteBlockPool.DirectAllocator();
+    }
+  }
+  
   public void testDocsAndPositionsEnumStart() throws Exception {
     Analyzer analyzer = new MockAnalyzer(random());
-    MemoryIndex memory = new MemoryIndex(true);
-    memory.addField("foo", "bar", analyzer);
-    AtomicReader reader = (AtomicReader) memory.createSearcher().getIndexReader();
-    DocsAndPositionsEnum disi = reader.termPositionsEnum(new Term("foo", "bar"));
-    int docid = disi.docID();
-    assertTrue(docid == -1 || docid == DocIdSetIterator.NO_MORE_DOCS);
-    assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
-    assertEquals(0, disi.nextPosition());
-    assertEquals(0, disi.startOffset());
-    assertEquals(3, disi.endOffset());
-    
-    // now reuse and check again
-    TermsEnum te = reader.terms("foo").iterator(null);
-    assertTrue(te.seekExact(new BytesRef("bar"), true));
-    disi = te.docsAndPositions(null, disi);
-    docid = disi.docID();
-    assertTrue(docid == -1 || docid == DocIdSetIterator.NO_MORE_DOCS);
-    assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
-    reader.close();
+    int numIters = atLeast(3);
+    MemoryIndex memory = new MemoryIndex(true, randomByteBlockAllocator());
+    for (int i = 0; i < numIters; i++) { // check reuse
+      memory.addField("foo", "bar", analyzer);
+      AtomicReader reader = (AtomicReader) memory.createSearcher().getIndexReader();
+      DocsAndPositionsEnum disi = reader.termPositionsEnum(new Term("foo", "bar"));
+      int docid = disi.docID();
+      assertTrue(docid == -1 || docid == DocIdSetIterator.NO_MORE_DOCS);
+      assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+      assertEquals(0, disi.nextPosition());
+      assertEquals(0, disi.startOffset());
+      assertEquals(3, disi.endOffset());
+      
+      // now reuse and check again
+      TermsEnum te = reader.terms("foo").iterator(null);
+      assertTrue(te.seekExact(new BytesRef("bar"), true));
+      disi = te.docsAndPositions(null, disi);
+      docid = disi.docID();
+      assertTrue(docid == -1 || docid == DocIdSetIterator.NO_MORE_DOCS);
+      assertTrue(disi.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
+      reader.close();
+      memory.reset();
+    }
   }
 
   // LUCENE-3831
@@ -248,7 +266,7 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
     RegexpQuery regex = new RegexpQuery(new Term("field", "worl."));
     SpanQuery wrappedquery = new SpanMultiTermQueryWrapper<RegexpQuery>(regex);
         
-    MemoryIndex mindex = new MemoryIndex();
+    MemoryIndex mindex = new MemoryIndex(random().nextBoolean(), randomByteBlockAllocator());
     mindex.addField("field", new MockAnalyzer(random()).tokenStream("field", new StringReader("hello there")));
 
     // This throws an NPE
@@ -260,10 +278,28 @@ public class MemoryIndexTest extends BaseTokenStreamTestCase {
     RegexpQuery regex = new RegexpQuery(new Term("field", "worl."));
     SpanQuery wrappedquery = new SpanOrQuery(new SpanMultiTermQueryWrapper<RegexpQuery>(regex));
 
-    MemoryIndex mindex = new MemoryIndex();
+    MemoryIndex mindex = new MemoryIndex(random().nextBoolean(), randomByteBlockAllocator());
     mindex.addField("field", new MockAnalyzer(random()).tokenStream("field", new StringReader("hello there")));
 
     // This passes though
     assertEquals(0, mindex.search(wrappedquery), 0.00001f);
   }
+  
+  public void testSameFieldAddedMultipleTimes() throws IOException {
+    MemoryIndex mindex = new MemoryIndex(random().nextBoolean(), randomByteBlockAllocator());
+    MockAnalyzer mockAnalyzer = new MockAnalyzer(random());
+    mindex.addField("field", "the quick brown fox", mockAnalyzer);
+    mindex.addField("field", "jumps over the", mockAnalyzer);
+    PhraseQuery query = new PhraseQuery();
+    query.add(new Term("field", "fox"));
+    query.add(new Term("field", "jumps"));
+    assertTrue(mindex.search(query) > 0.1);
+    mindex.reset();
+    mockAnalyzer.setPositionIncrementGap(1 + random().nextInt(10));
+    mindex.addField("field", "the quick brown fox", mockAnalyzer);
+    mindex.addField("field", "jumps over the", mockAnalyzer);
+    assertEquals(0, mindex.search(query), 0.00001f);
+    query.setSlop(10);
+    assertTrue("posGap" + mockAnalyzer.getPositionIncrementGap("field") , mindex.search(query) > 0.0001);
+  }
 }
