Index: src/test/org/apache/lucene/index/TestRAMReader.java
===================================================================
--- src/test/org/apache/lucene/index/TestRAMReader.java	(revision 0)
+++ src/test/org/apache/lucene/index/TestRAMReader.java	(revision 0)
@@ -0,0 +1,229 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.RAMReaderManager.RAMReader;
+import org.apache.lucene.index.TermsEnum.SeekStatus;
+import org.apache.lucene.index.TestPayloads.PayloadAnalyzer;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockRAMDirectory;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestRAMReader extends LuceneTestCase {
+  static String field1 = "text";
+  
+  static Document createDoc1(int id) {
+    Document doc = new Document();
+    doc.add(new Field("id", id+"", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
+    doc.add(new Field(field1, "motorola hp", Field.Store.YES,
+        Field.Index.ANALYZED));
+    return doc;
+  }
+  
+  static Document createDoc2(int id) {
+    Document doc = new Document();
+    doc.add(new Field("id", id+"", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
+    doc.add(new Field(field1, "apple beos", Field.Store.YES,
+        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    return doc;
+  }
+  
+  static Document createDoc3(int id) {
+    Document doc = new Document();
+    doc.add(new Field("id", id+"", Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
+    doc.add(new Field(field1, "ipad iphone", Field.Store.YES,
+        Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    return doc;
+  }
+  
+  public void test() throws Exception {
+    AtomicInteger seq = new AtomicInteger(0);
+    
+    Directory directory = new MockRAMDirectory();
+    PayloadAnalyzer payloadAnalyzer = new PayloadAnalyzer();
+    byte[] payloadBytes = new byte[] {1, 2, 89};
+    payloadAnalyzer.setPayloadData(field1, payloadBytes, 0, payloadBytes.length);
+    IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT,
+        payloadAnalyzer);
+    iwc.setRAMBufferSizeMB(32.0);
+    IndexWriter writer = new IndexWriter(directory, iwc);
+    for (int i = 0; i < 5; i++) {
+      writer.addDocument(createDoc1(seq.getAndIncrement()));
+      writer.addDocument(createDoc2(seq.getAndIncrement()));
+    }
+    RAMReader[] ramReaders = writer.getRAMReaders();
+    RAMReader reader = ramReaders[0];
+    
+    Terms terms = reader.terms(field1);
+    String[] conTermsArr = toArray(terms.iterator());
+    assertTrue(Arrays.equals(conTermsArr, new String[] {"apple", "beos", "hp",
+        "motorola"}));
+    
+    // test docfreq and seeking
+    Terms cterms = reader.terms(field1);    
+    TermsEnum ctermsEnum = cterms.iterator();
+    SeekStatus ss = ctermsEnum.seek(new BytesRef("beos"), false);
+    assertEquals(SeekStatus.FOUND, ss);
+    int docFreq = ctermsEnum.docFreq();
+    assertEquals(5, docFreq);
+    
+    // assert postings are correct
+    DocsEnum docsEnum = ctermsEnum.docs(null, null);
+    int[] docs = toArray(docsEnum);
+    assertTrue(Arrays.equals(docs, new int[] {1, 3, 5, 7, 9}));
+    
+    // add more docs
+    for (int i = 0; i < 6; i++) {
+      writer.addDocument(createDoc3(seq.getAndIncrement()));
+    }
+    
+    Term delTerm1 = new Term("id", "2");
+    writer.getDocWriter().addDeleteTerm(delTerm1);
+    
+    RAMReader reader2 = writer.getRAMReaders()[0];
+    
+    // assert the document is shown as deleted only
+    // in reader2
+    assertFalse(reader.getDeletedDocs().get(2));
+    assertTrue(reader2.getDeletedDocs().get(2));
+    
+    Term motTerm = new Term(field1, "motorola");
+    
+    int[] delTerm1Docs1 = toDocsArray(motTerm, reader.getDeletedDocs(), reader);
+    //System.out.println("delterms1:"+Arrays.toString(delTerm1Docs1));
+    assertEquals(5, delTerm1Docs1.length);
+    int[] delTerm1Docs12 = toDocsArray(motTerm, null, reader);
+    
+    assertTrue(Arrays.equals(delTerm1Docs1, delTerm1Docs12));
+    
+    int[] delTerm1Docs2 = toDocsArray(motTerm, reader2.getDeletedDocs(), reader2);
+    
+    //System.out.println("delterms12:"+Arrays.toString(delTerm1Docs12));
+    //System.out.println("delterms2:"+Arrays.toString(delTerm1Docs2));
+    
+    assertTrue(arrayContains(2, delTerm1Docs1));
+    assertTrue(arrayContains(2, delTerm1Docs12));
+    assertFalse(arrayContains(2, delTerm1Docs2));
+    
+    // assert the new terms are present
+    Terms terms2 = reader2.terms(field1);
+    String[] conTermsArr2 = toArray(terms2.iterator());
+    assertTrue(Arrays.equals(conTermsArr2, new String[] {"apple", "beos", "hp",
+        "ipad", "iphone", "motorola"}));
+    
+    // assert that the previous reader is returning the same terms
+    terms = reader.terms(field1);
+    conTermsArr = toArray(terms.iterator());
+    assertTrue(Arrays.equals(conTermsArr, new String[] {"apple", "beos", "hp",
+        "motorola"}));
+    
+    // assert that seeking that a new term isn't found
+    // in the first reader
+    ss = ctermsEnum.seek(new BytesRef("iphone"), false);
+    assertEquals(SeekStatus.NOT_FOUND, ss);
+    
+    // test reading a document
+    assertEquals("apple beos", ramReaders[0].document(3).getField("text").stringValue());
+
+    // delete by query
+    Query dq = new TermQuery(new Term(field1, "apple"));
+    writer.getDocWriter().addDeleteQuery(dq);
+    RAMReader reader3 = writer.getRAMReaders()[0];
+    int[] appleDelDocs = toDocsArray(new Term(field1, "apple"), reader3.getDeletedDocs(), reader3);
+    assertEquals(0, appleDelDocs.length);
+    //System.out.println("appleDelDocs:"+Arrays.toString(appleDelDocs));
+    
+    TermFreqVector tfv = ramReaders[0].getTermFreqVector(1, field1);
+    BytesRef[] tfvTerms = tfv.getTerms();
+    assertTrue(terms.getComparator().compare(new BytesRef("apple"), tfvTerms[0]) == 0);
+    
+    // test the term freq and postings length matches
+    // later we can check per indexreader to insure we're not going over the max doc id
+    
+    // assert that payloads work
+    ss = ctermsEnum.seek(new BytesRef("apple"), false);
+    DocsAndPositionsEnum docsPos = ctermsEnum.docsAndPositions(null, null);
+    PayloadAttribute payload = null;
+    BytesRef payloadMatch = new BytesRef(payloadBytes);
+    while (docsPos.nextDoc() != DocsEnum.NO_MORE_DOCS) {
+      int docID = docsPos.docID();
+      assertEquals(1, docsPos.freq());
+      int pos = docsPos.nextPosition();
+      assertEquals(0, pos);
+      assertTrue(docsPos.hasPayload());
+      assertEquals(payloadMatch, docsPos.getPayload());
+    }
+    reader.close();
+    reader2.close();
+    reader3.close();
+    
+    writer.close();
+    directory.close();
+  }
+  
+  private static boolean arrayContains(int v, int[] arr) {
+    for (int x=0; x < arr.length; x++) {
+      if (arr[x] == v) return true;
+    }
+    return false;
+  }
+  
+  public static int[] toDocsArray(Term term, Bits bits, RAMReader reader) throws IOException {
+    Terms cterms = reader.terms(term.field);    
+    TermsEnum ctermsEnum = cterms.iterator();
+    SeekStatus ss = ctermsEnum.seek(new BytesRef(term.text()), false);
+    if (ss == SeekStatus.FOUND) {
+      DocsEnum docsEnum = ctermsEnum.docs(bits, null);
+      return toArray(docsEnum);
+    }
+    return null;
+  }
+  
+  public static int[] toArray(DocsEnum docsEnum)
+      throws IOException {
+    List<Integer> docs = new ArrayList<Integer>();
+    while (docsEnum.nextDoc() != DocsEnum.NO_MORE_DOCS) {
+      int docID = docsEnum.docID();
+      docs.add(docID);
+    }
+    return ArrayUtil.toIntArray(docs);
+  }
+  
+  public static String[] toArray(TermsEnum termsEnum) throws IOException {
+    BytesRef term = null;
+    List<String> list = new ArrayList<String>();
+    while ((term = termsEnum.next()) != null) {
+      list.add(term.utf8ToString());
+    }
+    return list.toArray(new String[0]);
+  }
+}
Index: src/test/org/apache/lucene/index/TestByteSlices.java
===================================================================
--- src/test/org/apache/lucene/index/TestByteSlices.java	(revision 1021235)
+++ src/test/org/apache/lucene/index/TestByteSlices.java	(working copy)
@@ -21,7 +21,7 @@
 
 public class TestByteSlices extends LuceneTestCase {
 
-  private static class ByteBlockAllocator extends ByteBlockPool.Allocator {
+  public static class ByteBlockAllocator extends ByteBlockPool.Allocator {
     ArrayList<byte[]> freeByteBlocks = new ArrayList<byte[]>();
     
     /* Allocate another byte[] from the shared pool */
@@ -80,7 +80,7 @@
           System.out.println("write stream=" + stream);
 
         if (starts[stream] == -1) {
-          final int spot = pool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
+          final int spot = pool.newSliceByLevel(0);
           starts[stream] = uptos[stream] = spot + pool.byteOffset;
           if (VERBOSE)
             System.out.println("  init to " + starts[stream]);
Index: src/test/org/apache/lucene/index/TestPayloads.java
===================================================================
--- src/test/org/apache/lucene/index/TestPayloads.java	(revision 1021235)
+++ src/test/org/apache/lucene/index/TestPayloads.java	(working copy)
@@ -406,7 +406,7 @@
     /**
      * This Analyzer uses an WhitespaceTokenizer and PayloadFilter.
      */
-    private static class PayloadAnalyzer extends Analyzer {
+    public static final class PayloadAnalyzer extends Analyzer {
         Map<String,PayloadData> fieldToData = new HashMap<String,PayloadData>();
         
         void setPayloadData(String field, byte[] data, int offset, int length) {
Index: src/java/org/apache/lucene/index/DocsAndPositionsEnum.java
===================================================================
--- src/java/org/apache/lucene/index/DocsAndPositionsEnum.java	(revision 1021235)
+++ src/java/org/apache/lucene/index/DocsAndPositionsEnum.java	(working copy)
@@ -37,7 +37,7 @@
   public abstract boolean hasPayload();
 
   @Override
-  public final int read() {
+  public int read() throws IOException {
     throw new UnsupportedOperationException();
   }
 
Index: src/java/org/apache/lucene/index/ByteBlockPool.java
===================================================================
--- src/java/org/apache/lucene/index/ByteBlockPool.java	(revision 1021235)
+++ src/java/org/apache/lucene/index/ByteBlockPool.java	(working copy)
@@ -39,7 +39,7 @@
 import static org.apache.lucene.util.RamUsageEstimator.NUM_BYTES_OBJECT_REF;
 import org.apache.lucene.util.ArrayUtil;
 
-final class ByteBlockPool {
+final class ByteBlockPool extends ByteBlockRead {
 
   abstract static class Allocator {
     abstract void recycleByteBlocks(byte[][] blocks, int start, int end);
@@ -47,17 +47,18 @@
     abstract byte[] getByteBlock();
   }
 
-  public byte[][] buffers = new byte[10][];
-
   int bufferUpto = -1;                        // Which buffer we are upto
   public int byteUpto = DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE;             // Where we are in head buffer
 
+  public int byteSliceUpto = DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE;
+  
   public byte[] buffer;                              // Current head buffer
   public int byteOffset = -DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE;          // Current head offset
 
   private final Allocator allocator;
 
   public ByteBlockPool(Allocator allocator) {
+    super(new byte[10][]);
     this.allocator = allocator;
   }
 
@@ -80,6 +81,7 @@
       bufferUpto = 0;
       byteUpto = 0;
       byteOffset = 0;
+      byteSliceUpto = 0;
       buffer = buffers[0];
     }
   }
@@ -95,15 +97,21 @@
     bufferUpto++;
 
     byteUpto = 0;
+    byteSliceUpto = 0;
     byteOffset += DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE;
   }
 
-  public int newSlice(final int size) {
+  public int newSliceByLevel(final int level) {
+    int size = levelSizeArray[level];
     if (byteUpto > DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE-size)
       nextBuffer();
     final int upto = byteUpto;
+    
     byteUpto += size;
-    buffer[byteUpto-1] = 16;
+    
+    byteSliceUpto = byteUpto - 3;
+    
+    buffer[byteSliceUpto-1] = 16;
     return upto;
   }
 
@@ -113,9 +121,9 @@
   // array is the length of each slice, ie first slice is 5
   // bytes, next slice is 14 bytes, etc.
   final static int[] nextLevelArray = {1, 2, 3, 4, 5, 6, 7, 8, 9, 9};
-  final static int[] levelSizeArray = {5, 14, 20, 30, 40, 40, 80, 80, 120, 200};
+  final static int[] levelSizeArray = {9, 14, 20, 30, 40, 40, 80, 80, 120, 200};
   final static int FIRST_LEVEL_SIZE = levelSizeArray[0];
-
+  
   public int allocSlice(final byte[] slice, final int upto) {
 
     final int level = slice[upto] & 15;
@@ -127,43 +135,21 @@
       nextBuffer();
 
     final int newUpto = byteUpto;
-    final int offset = newUpto + byteOffset;
+    final int offset = byteUpto + byteOffset;
+    
     byteUpto += newSize;
-
-    // Copy forward the past 3 bytes (which we are about
-    // to overwrite with the forwarding address):
-    buffer[newUpto] = slice[upto-3];
-    buffer[newUpto+1] = slice[upto-2];
-    buffer[newUpto+2] = slice[upto-1];
-
-    // Write forwarding address at end of last slice:
-    slice[upto-3] = (byte) (offset >>> 24);
-    slice[upto-2] = (byte) (offset >>> 16);
-    slice[upto-1] = (byte) (offset >>> 8);
-    slice[upto] = (byte) offset;
-        
+    
+    slice[upto] = (byte) (offset >>> 24);
+    slice[upto+1] = (byte) (offset >>> 16);
+    slice[upto+2] = (byte) (offset >>> 8);
+    slice[upto+3] = (byte) offset;
+    
+    byteSliceUpto = byteUpto - 3;
+    
     // Write new level:
-    buffer[byteUpto-1] = (byte) (16|newLevel);
+    buffer[byteSliceUpto-1] = (byte) (16|newLevel);
 
-    return newUpto+3;
+    return newUpto;
   }
-
-  // Fill in a BytesRef from term's length & bytes encoded in
-  // byte block
-  final BytesRef setBytesRef(BytesRef term, int textStart) {
-    final byte[] bytes = term.bytes = buffers[textStart >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
-    int pos = textStart & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
-    if ((bytes[pos] & 0x80) == 0) {
-      // length is 1 byte
-      term.length = bytes[pos];
-      term.offset = pos+1;
-    } else {
-      // length is 2 bytes
-      term.length = (bytes[pos]&0x7f) + ((bytes[pos+1]&0xff)<<7);
-      term.offset = pos+2;
-    }
-    assert term.length >= 0;
-    return term;
-  }
 }
 
Index: src/java/org/apache/lucene/index/DocumentsWriterThreadPool.java
===================================================================
--- src/java/org/apache/lucene/index/DocumentsWriterThreadPool.java	(revision 1021235)
+++ src/java/org/apache/lucene/index/DocumentsWriterThreadPool.java	(working copy)
@@ -73,6 +73,19 @@
     this.maxNumThreadStates = (maxNumThreadStates < 1) ? IndexWriterConfig.DEFAULT_MAX_THREAD_STATES : maxNumThreadStates;
   }
   
+  DocumentsWriterPerThread[] getDocumentsWriterPerThreads() {
+    lock.lock();
+    try {
+      DocumentsWriterPerThread[] dwpts = new DocumentsWriterPerThread[allThreadStates.length];
+      for (int x=0; x < allThreadStates.length; x++) {
+        dwpts[x] = allThreadStates[x].perThread;
+      }
+      return dwpts;
+    } finally {
+      lock.unlock();
+    }
+  }
+  
   public final int getMaxThreadStates() {
     return this.maxNumThreadStates;
   }
Index: src/java/org/apache/lucene/index/FieldInfos.java
===================================================================
--- src/java/org/apache/lucene/index/FieldInfos.java	(revision 1021235)
+++ src/java/org/apache/lucene/index/FieldInfos.java	(working copy)
@@ -97,6 +97,12 @@
               field.isStoreOffsetWithTermVector(), field.getOmitNorms(), false, field.getOmitTermFreqAndPositions());
     }
   }
+  
+  void addFieldInfo(FieldInfo fi) {
+    String name = StringHelper.intern(fi.name);
+    byNumber.add(fi);
+    byName.put(name, fi);
+  }
 
   /** Returns true if any fields do not omitTermFreqAndPositions */
   public boolean hasProx() {
Index: src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
===================================================================
--- src/java/org/apache/lucene/index/DocumentsWriterPerThread.java	(revision 1021235)
+++ src/java/org/apache/lucene/index/DocumentsWriterPerThread.java	(working copy)
@@ -19,14 +19,19 @@
 
 import java.io.IOException;
 import java.io.PrintStream;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.locks.ReentrantLock;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.index.ConcurrentTermsDictPerField.ReaderContextPerField;
+import org.apache.lucene.index.RAMReaderManager.RAMReader;
 import org.apache.lucene.index.codecs.Codec;
 import org.apache.lucene.search.Similarity;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.FilterDirectory;
-import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.util.ArrayUtil;
 
 public class DocumentsWriterPerThread {
@@ -142,11 +147,14 @@
   SegmentWriteState flushState;
 
   long[] sequenceIDs = new long[8];
-  
+  long[] deletedSequenceIDs = new long[8];
   long numBytesUsed;
+  RAMReaderManager ramBufferReaders;
+  ReentrantLock writeLock = new ReentrantLock();
   
   public DocumentsWriterPerThread(Directory directory, DocumentsWriter parent, IndexingChain indexingChain) {
     parent.indexWriter.testPoint("DocumentsWriterPerThread.init start");
+    Arrays.fill(deletedSequenceIDs, Integer.MAX_VALUE);
     this.directory = directory;
     this.parent = parent;
     this.writer = parent.indexWriter;
@@ -159,8 +167,74 @@
     if (consumer instanceof DocFieldProcessor) {
       docFieldProcessor = (DocFieldProcessor) consumer;
     }
+    ramBufferReaders = new RAMReaderManager(this);
   }
   
+  StoredFieldsWriter getStoredFieldsWriter() {
+    return docFieldProcessor.fieldsWriter;
+  }
+  
+  TermsHash getTermsHash() {
+    DocInverter docInverter = (DocInverter)docFieldProcessor.consumer;
+    TermsHash termsHash = (TermsHash)docInverter.consumer;
+    return termsHash;
+  }
+  
+  TermVectorsTermsWriter getTermVectorsTermsWriter() {
+    TermsHash termsHash = getTermsHash();
+    return (TermVectorsTermsWriter)termsHash.nextTermsHash.consumer;
+  }
+  
+  void flushTermVectors() throws IOException {
+    getTermVectorsTermsWriter().flush();
+  }
+  
+  void flushStoredFields() throws IOException {
+    getStoredFieldsWriter().fieldsWriter.flush();
+  }
+  
+  FieldInfos getFieldInfos() {
+    Collection<DocFieldConsumerPerField> docFields = docFieldProcessor.fields();
+    FieldInfos fieldInfos = new FieldInfos();
+    for (DocFieldConsumerPerField dfcpf : docFields) {
+      fieldInfos.addFieldInfo(dfcpf.getFieldInfo());
+    }
+    return fieldInfos;
+  }
+  
+  public long getMaxSequenceID() {
+    return sequenceIDs[numDocsInRAM - 1];
+  }
+  
+  public Map<String,TermsHashPerField> getTermsHashPerFields() {
+    return docFieldProcessor.getTermsHashPerFields();
+  }
+  
+  void close() throws IOException {
+    ramBufferReaders.decRef();
+  }
+  
+  RAMReader getReader() throws IOException {
+    writeLock.lock();
+    try {
+      return ramBufferReaders.getReader();
+    } finally {
+      writeLock.unlock();
+    }
+  }
+  
+  Map<String,ReaderContextPerField> getReaderContextPerFields() {
+    long maxSeqID = getMaxSequenceID();
+    Map<String,ReaderContextPerField> map = new HashMap<String,ReaderContextPerField>();
+    for (Map.Entry<String,TermsHashPerField> entry : getTermsHashPerFields().entrySet()) {
+      String field = entry.getKey();
+      TermsHashPerField thpf = entry.getValue();
+      ReaderContextPerField context = thpf.getReaderContext(numDocsInRAM, maxSeqID);
+      map.put(field, context);
+    }
+    return map;
+  }
+  
   void setAborting() {
     aborting = true;
   }
@@ -205,6 +279,7 @@
   public void commitDocument(long sequenceID) {
     if (numDocsInRAM == sequenceIDs.length) {
       sequenceIDs = ArrayUtil.grow(sequenceIDs);
+      deletedSequenceIDs = ArrayUtil.grow(deletedSequenceIDs, deletedSequenceIDs.length+1, Integer.MAX_VALUE);
     }
     
     sequenceIDs[numDocsInRAM] = sequenceID;
@@ -273,7 +348,7 @@
 
       flushedDocCount += flushState.numDocs;
 
-      long maxSequenceID = sequenceIDs[numDocsInRAM-1];
+      long maxSequenceID = getMaxSequenceID();
       doAfterFlush();
       
       // Create new SegmentInfo, but do not add to our
Index: src/java/org/apache/lucene/index/ByteSliceReader.java
===================================================================
--- src/java/org/apache/lucene/index/ByteSliceReader.java	(revision 1021235)
+++ src/java/org/apache/lucene/index/ByteSliceReader.java	(working copy)
@@ -28,7 +28,7 @@
  * point we read the forwarding address of the next slice
  * and then jump to it.*/
 final class ByteSliceReader extends DataInput {
-  ByteBlockPool pool;
+  ByteBlockRead byteBlocks;
   int bufferUpto;
   byte[] buffer;
   public int upto;
@@ -38,19 +38,19 @@
 
   public int endIndex;
 
-  public void init(ByteBlockPool pool, int startIndex, int endIndex) {
+  public void init(ByteBlockRead byteBlocks, int startIndex, int endIndex) {
 
     assert endIndex-startIndex >= 0;
     assert startIndex >= 0;
     assert endIndex >= 0;
 
-    this.pool = pool;
+    this.byteBlocks = byteBlocks;
     this.endIndex = endIndex;
 
     level = 0;
     bufferUpto = startIndex / DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE;
     bufferOffset = bufferUpto * DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE;
-    buffer = pool.buffers[bufferUpto];
+    buffer = byteBlocks.buffers[bufferUpto];
     upto = startIndex & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
 
     final int firstSize = ByteBlockPool.levelSizeArray[0];
@@ -105,7 +105,7 @@
     bufferUpto = nextIndex / DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE;
     bufferOffset = bufferUpto * DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE;
 
-    buffer = pool.buffers[bufferUpto];
+    buffer = byteBlocks.buffers[bufferUpto];
     upto = nextIndex & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
 
     if (nextIndex + newSize >= endIndex) {
Index: src/java/org/apache/lucene/index/TermsHashPerField.java
===================================================================
--- src/java/org/apache/lucene/index/TermsHashPerField.java	(revision 1021235)
+++ src/java/org/apache/lucene/index/TermsHashPerField.java	(working copy)
@@ -23,6 +23,8 @@
 
 import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
 import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.index.ConcurrentTermsDictPerField.ReaderContextPerField;
+import org.apache.lucene.index.FreqProxTermsWriterPerField.FreqProxPostingsArray;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.RamUsageEstimator;
 
@@ -57,6 +59,7 @@
   ParallelPostingsArray postingsArray;
   private final BytesRef utf8;
   private Comparator<BytesRef> termComp;
+  int lastGetContextTermID = 0;
 
   public TermsHashPerField(DocInverterPerField docInverterPerField, final TermsHash termsHash, final TermsHash nextTermsHash, final FieldInfo fieldInfo) {
     intPool = termsHash.intPool;
@@ -152,6 +155,28 @@
                 ints[upto+stream]);
   }
 
+  public ReaderContextPerField getReaderContext(int maxDocID, long seqID) {
+    assert numPostings >= lastGetContextTermID;
+    // set the newest values
+    FreqProxPostingsArray readerPostings = (FreqProxPostingsArray)postingsArray;
+    // add values from the start of the last term id
+    for (int x=lastGetContextTermID; x < numPostings; x++) {
+      //readerPostings.freqStarts[x] = getStart(x, 0);
+      //readerPostings.freqUptos[x] = getEnd(x, 0);
+      //readerPostings.proxStarts[x] = getStart(x, 1);
+      //readerPostings.proxUptos[x] = getEnd(x, 1);
+    }
+    ByteBlockRead byteBlocks = ByteBlockRead.copy(bytePool);
+    IntRead intRead = intPool.copyReadOnly();
+    // make a complete copy of the term freqs
+    int[] termFreqs = new int[readerPostings.termFreqs.length];
+    System.arraycopy(readerPostings.termFreqs, 0, termFreqs, 0, termFreqs.length);
+    ReaderContextPerField readerContext = new ReaderContextPerField(numPostings, maxDocID, seqID,
+      readerPostings, termFreqs, byteBlocks, intRead, fieldInfo);
+    lastGetContextTermID = numPostings;
+    return readerContext;
+  }
+
   private void compactPostings() {
     int upto = 0;
     for(int i=0;i<postingsHashSize;i++) {
@@ -368,7 +393,7 @@
       postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;
 
       for(int i=0;i<streamCount;i++) {
-        final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
+        final int upto = bytePool.newSliceByLevel(0);
         intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
       }
       postingsArray.byteStarts[termID] = intUptos[intUptoStart];
@@ -496,7 +521,7 @@
       postingsArray.intStarts[termID] = intUptoStart + intPool.intOffset;
 
       for(int i=0;i<streamCount;i++) {
-        final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
+        final int upto = bytePool.newSliceByLevel(0);
         intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
       }
       postingsArray.byteStarts[termID] = intUptos[intUptoStart];
Index: src/java/org/apache/lucene/index/DocFieldProcessor.java
===================================================================
--- src/java/org/apache/lucene/index/DocFieldProcessor.java	(revision 1021235)
+++ src/java/org/apache/lucene/index/DocFieldProcessor.java	(working copy)
@@ -63,7 +63,17 @@
     consumer.setFieldInfos(fieldInfos);
     fieldsWriter = new StoredFieldsWriter(docWriter, fieldInfos);
   }
-
+  
+  public Map<String,TermsHashPerField> getTermsHashPerFields() {
+    Map<String,TermsHashPerField> map = new HashMap<String,TermsHashPerField>();
+    Collection<DocFieldConsumerPerField> fields = fields();
+    for (DocFieldConsumerPerField f : fields) {
+      DocInverterPerField dipf = (DocInverterPerField)f;
+      map.put(f.getFieldInfo().name, (TermsHashPerField)dipf.consumer);
+    }
+    return map;
+  }
+  
   @Override
   public void flush(SegmentWriteState state) throws IOException {
 
Index: src/java/org/apache/lucene/index/ByteBlockRead.java
===================================================================
--- src/java/org/apache/lucene/index/ByteBlockRead.java	(revision 0)
+++ src/java/org/apache/lucene/index/ByteBlockRead.java	(revision 0)
@@ -0,0 +1,34 @@
+package org.apache.lucene.index;
+
+import org.apache.lucene.util.BytesRef;
+
+public class ByteBlockRead {
+  public byte[][] buffers;
+  
+  public ByteBlockRead(byte[][] buffers) {
+    this.buffers = buffers;
+  }
+  
+  public static ByteBlockRead copy(ByteBlockPool bbp) {
+    int len = bbp.buffers.length;
+    byte[][] buffers = new byte[len][];
+    System.arraycopy(bbp.buffers, 0, buffers, 0, len);
+    return new ByteBlockRead(buffers);
+  }
+  
+  final BytesRef setBytesRef(BytesRef term, int textStart) {
+    final byte[] bytes = term.bytes = buffers[textStart >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
+    int pos = textStart & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
+    if ((bytes[pos] & 0x80) == 0) {
+      // length is 1 byte
+      term.length = bytes[pos];
+      term.offset = pos+1;
+    } else {
+      // length is 2 bytes
+      term.length = (bytes[pos]&0x7f) + ((bytes[pos+1]&0xff)<<7);
+      term.offset = pos+2;
+    }
+    assert term.length >= 0;
+    return term;
+  }
+}
Index: src/java/org/apache/lucene/index/IntRead.java
===================================================================
--- src/java/org/apache/lucene/index/IntRead.java	(revision 0)
+++ src/java/org/apache/lucene/index/IntRead.java	(revision 0)
@@ -0,0 +1,32 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class IntRead {
+  public int[][] buffers;
+  
+  public IntRead(int[][] buffers) {
+    this.buffers = buffers;
+  }
+  
+  public IntRead copyReadOnly() {
+    int[][] newBuffers = new int[buffers.length][];
+    System.arraycopy(buffers, 0, newBuffers, 0, buffers.length);
+    return new IntRead(newBuffers);
+  }
+}
Index: src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java
===================================================================
--- src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java	(revision 1021235)
+++ src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java	(working copy)
@@ -121,6 +121,9 @@
     assert docState.testPoint("FreqProxTermsWriterPerField.newTerm start");
     
     FreqProxPostingsArray postings = (FreqProxPostingsArray) termsHashPerField.postingsArray;
+    // add to the term freq array
+    postings.termFreqs[termID]++;
+    
     postings.lastDocIDs[termID] = docState.docID;
     if (omitTermFreqAndPositions) {
       postings.lastDocCodes[termID] = docState.docID;
@@ -139,7 +142,10 @@
     FreqProxPostingsArray postings = (FreqProxPostingsArray) termsHashPerField.postingsArray;
     
     assert omitTermFreqAndPositions || postings.docFreqs[termID] > 0;
-
+    
+    // add to the term freq array
+    postings.termFreqs[termID]++;
+    
     if (omitTermFreqAndPositions) {
       if (docState.docID != postings.lastDocIDs[termID]) {
         assert docState.docID > postings.lastDocIDs[termID];
@@ -184,13 +190,23 @@
       lastDocIDs = new int[size];
       lastDocCodes = new int[size];
       lastPositions = new int[size];
+      //freqStarts = new int[size];
+      //freqUptos = new int[size];
+      //proxStarts = new int[size];
+      //proxUptos = new int[size];
+      termFreqs = new int[size];
     }
 
     int docFreqs[];                                    // # times this term occurs in the current doc
     int lastDocIDs[];                                  // Last docID where this term occurred
     int lastDocCodes[];                                // Code for prior doc
     int lastPositions[];                               // Last position where this term occurred
-
+    //int[] freqStarts;
+    //int[] freqUptos;                                   // end of the freq index
+    //int[] proxStarts;                                 
+    //int[] proxUptos;                                   // end of the prox index
+    int[] termFreqs;
+    
     @Override
     ParallelPostingsArray newInstance(int size) {
       return new FreqProxPostingsArray(size);
@@ -206,6 +222,11 @@
       System.arraycopy(lastDocIDs, 0, to.lastDocIDs, 0, numToCopy);
       System.arraycopy(lastDocCodes, 0, to.lastDocCodes, 0, numToCopy);
       System.arraycopy(lastPositions, 0, to.lastPositions, 0, numToCopy);
+      //System.arraycopy(freqStarts, 0, to.freqStarts, 0, numToCopy);
+      //System.arraycopy(freqUptos, 0, to.freqUptos, 0, numToCopy);
+      //System.arraycopy(proxStarts, 0, to.proxStarts, 0, numToCopy);
+      //System.arraycopy(proxUptos, 0, to.proxUptos, 0, numToCopy);
+      System.arraycopy(termFreqs, 0, to.termFreqs, 0, numToCopy);
     }
 
     @Override
Index: src/java/org/apache/lucene/index/ConcurrentTermsDictPerField.java
===================================================================
--- src/java/org/apache/lucene/index/ConcurrentTermsDictPerField.java	(revision 0)
+++ src/java/org/apache/lucene/index/ConcurrentTermsDictPerField.java	(revision 0)
@@ -0,0 +1,456 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.concurrent.ConcurrentNavigableMap;
+import java.util.concurrent.ConcurrentSkipListMap;
+
+import org.apache.lucene.index.DocsEnum.BulkReadResult;
+import org.apache.lucene.index.FreqProxTermsWriterPerField.FreqProxPostingsArray;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+
+public class ConcurrentTermsDictPerField {
+  private ConcurrentSkipListMap<BytesRef,TermInfo> termMap;
+  String field;
+  private Comparator<BytesRef> comparator;
+  ReaderContextPerField prevReaderContext;
+  
+  public ConcurrentTermsDictPerField(String field, Comparator<BytesRef> comparator) {
+    this.field = field;
+    this.comparator = comparator;
+    termMap = new ConcurrentSkipListMap<BytesRef,TermInfo>(comparator);
+  }
+  
+  public Terms terms(ReaderContextPerField readerContext) {
+    return new ConcurrentTerms(readerContext);
+  }
+  
+  public static class TermInfo {
+    public int termID;
+    public long seqID; // seq id at the point-in-time this term was added
+    
+    public TermInfo(int termID, long seqID) {
+      this.termID = termID;
+      this.seqID = seqID;
+    }
+  }
+  
+  private static boolean isValid(TermInfo ti, long seqID) { 
+    return ti.seqID <= seqID;
+  }
+  
+  private class ConcurrentTermsEnum extends TermsEnum {
+    ConcurrentNavigableMap<BytesRef,TermInfo> subMap;
+    Iterator<Map.Entry<BytesRef,TermInfo>> iterator;
+    int termID = -1;
+    BytesRef term;
+    ReaderContextPerField readerContext;
+    
+    public ConcurrentTermsEnum(ReaderContextPerField readerContext) {
+      this.readerContext = readerContext;
+      this.subMap = termMap;
+      iterator = subMap.entrySet().iterator();
+    }
+    
+    public Comparator<BytesRef> getComparator() {
+      return comparator;
+    }
+    
+    public long ord() {
+      return termID;
+    }
+    
+    public int docFreq() {
+      // NOTE(review): termFreqs is incremented once per occurrence
+      // (newTerm + addTerm in FreqProxTermsWriterPerField), so this is
+      // the total term frequency, not the number of documents containing
+      // the term -- confirm the intended semantics of docFreq() here.
+      return readerContext.postingsArray.termFreqs[termID];
+    }
+    
+    /**
+     * If the term is valid for this reader context 
+     */
+    private boolean isValid(TermInfo termInfo) {
+      return ConcurrentTermsDictPerField.isValid(termInfo, readerContext.seqID);
+    }
+    
+    public DocsEnum docs(Bits skipDocs, DocsEnum reuse) throws IOException {
+      return getDocsEnum(termID, readerContext, skipDocs);
+    }
+    
+    public DocsAndPositionsEnum docsAndPositions(Bits skipDocs,
+        DocsAndPositionsEnum reuse) throws IOException {
+      return getDocsEnum(termID, readerContext, skipDocs);
+    }
+    
+    /**
+     * Seeks to the first term >= start (null start seeks to the start
+     * of the dictionary).
+     * @param useCache is being ignored
+     */
+    public SeekStatus seek(BytesRef start, boolean useCache) throws IOException {
+      if (start != null) {
+        subMap = termMap.tailMap(start, true);
+      } else {
+        subMap = termMap;
+      }
+      iterator = subMap.entrySet().iterator();
+      // Check emptiness before firstEntry()/firstKey(): on an empty
+      // sub-map firstEntry() returns null and firstKey() throws, so the
+      // old ordering could NPE before ever reaching the size() check.
+      if (subMap.isEmpty()) {
+        return SeekStatus.END;
+      }
+      TermInfo termInfo = subMap.firstEntry().getValue();
+      termID = termInfo.termID;
+      // guard start != null: comparator.compare(key, null) would NPE
+      if (start != null && comparator.compare(subMap.firstKey(), start) == 0
+          && isValid(termInfo)) {
+        return SeekStatus.FOUND;
+      }
+      return SeekStatus.NOT_FOUND;
+    }
+    
+    public SeekStatus seek(long ord) throws IOException {
+      termID = (int)ord;
+      return SeekStatus.FOUND;
+    }
+    
+    public BytesRef next() throws IOException {
+      // Iterate (rather than recurse) past terms whose seqID is newer
+      // than this reader's point-in-time; the old self-recursion could
+      // overflow the stack when many terms were added after the reader
+      // was opened.
+      while (iterator.hasNext()) {
+        Map.Entry<BytesRef,TermInfo> entry = iterator.next();
+        TermInfo ti = entry.getValue();
+        if (isValid(ti)) {
+          termID = ti.termID;
+          term = entry.getKey();
+          return term;
+        }
+      }
+      return null;
+    }
+    
+    public BytesRef term() throws IOException {
+      return term;
+    }
+  }
+  
+  public static class ReaderContextPerField {
+    final int uniqueTermCount;
+    final int maxDocID;
+    final long seqID;
+    final FreqProxPostingsArray postingsArray;
+    final int[] termFreqs;
+    final ByteBlockRead byteBlocks;
+    final IntRead intRead;
+    final FieldInfo fieldInfo;
+    
+    public ReaderContextPerField(int uniqueTermCount, int maxDocID, long seqID,
+        FreqProxPostingsArray postingsArray, int[] termFreqs, ByteBlockRead byteBlocks, 
+        IntRead intRead, FieldInfo fieldInfo) {
+      this.uniqueTermCount = uniqueTermCount;
+      this.maxDocID = maxDocID;
+      this.seqID = seqID;
+      this.postingsArray = postingsArray;
+      this.byteBlocks = byteBlocks;
+      this.intRead = intRead;
+      this.termFreqs = termFreqs;
+      this.fieldInfo = fieldInfo;
+    }
+    
+    int getStart(int termID, int stream) {
+      return postingsArray.byteStarts[termID] + stream * ByteBlockPool.FIRST_LEVEL_SIZE;
+    }
+    
+    int getEnd(int termID, int stream) {
+      int intStart = postingsArray.intStarts[termID];
+      final int[] ints = intRead.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
+      final int upto = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
+      return ints[upto + stream];
+    }
+    
+    public void initReaderFreq(ByteSliceReader reader, int termID) {
+      int start = getStart(termID, 0);
+      int end = getEnd(termID, 0);
+      reader.init(byteBlocks, start, end);
+    }
+    
+    public void initReaderProx(ByteSliceReader reader, int termID) {
+      int start = getStart(termID, 1);
+      int end = getEnd(termID, 1);
+      reader.init(byteBlocks, start, end);
+    }
+  }
+  
+  private class ConcurrentTerms extends Terms {
+    ReaderContextPerField readerContext;
+    
+    public ConcurrentTerms(ReaderContextPerField readerContext) {
+      this.readerContext = readerContext;
+    }
+    
+    public Comparator<BytesRef> getComparator() throws IOException {
+      return comparator;
+    }
+    
+    int getTermID(BytesRef term) {
+      TermInfo ti = termMap.get(term);
+      if (ti == null) {
+        return -1;
+      }
+      if (isValid(ti, readerContext.seqID)) {
+        return ti.termID;
+      } else {
+        return -1;
+      }
+    }
+    
+    @Override
+    public TermsEnum iterator() throws IOException {
+      return new ConcurrentTermsEnum(readerContext);
+    }
+    
+    @Override
+    public long getUniqueTermCount() throws IOException {
+      return readerContext.uniqueTermCount;
+    }
+    
+    @Override
+    public int docFreq(BytesRef text) throws IOException {
+      int termID = getTermID(text);
+      // getTermID returns -1 for a term that is absent (or not yet
+      // visible at this reader's seqID); the old code then indexed the
+      // array with -1 and threw ArrayIndexOutOfBoundsException.
+      return termID == -1 ? 0 : readerContext.termFreqs[termID];
+    }
+
+    @Override
+    public DocsEnum docs(Bits skipDocs, BytesRef term, DocsEnum reuse)
+        throws IOException {
+      int termID = getTermID(term);
+      if (termID != -1) {
+        return getDocsEnum(termID, readerContext, skipDocs);
+      } else {
+        return null;
+      }
+    }
+
+    @Override
+    public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, BytesRef term,
+        DocsAndPositionsEnum reuse) throws IOException {
+      int termID = getTermID(term);
+      if (termID != -1) {
+        return getDocsEnum(termID, readerContext, skipDocs);
+      } else {
+        return null;
+      }
+    }
+  }
+  
+  DocsAndPositionsEnum getDocsEnum(int termID, ReaderContextPerField readerContext, Bits skipDocs) {
+    return new Postings(termID, readerContext, readerContext.fieldInfo, skipDocs);
+  }
+  
+  public void update(ReaderContextPerField newReader) {
+    int numPostings = newReader.uniqueTermCount;
+    int start = 0;
+    if (prevReaderContext != null) {
+      start = prevReaderContext.uniqueTermCount;
+    }
+    for (int x=start; x < numPostings; x++) {
+      BytesRef term = new BytesRef();
+      int textStart = newReader.postingsArray.textStarts[x];
+      newReader.byteBlocks.setBytesRef(term, textStart);
+      termMap.put(term, new TermInfo(x, newReader.seqID));
+    }
+    prevReaderContext = newReader;
+  }
+  
+  public class Postings extends DocsAndPositionsEnum {
+    int numDocs = 0;
+    int docID = 0;
+    int termFreq = 0;
+    boolean endReached = false;
+    ByteSliceReader freq;
+    ByteSliceReader prox;
+    int termID;
+    int maxDocID;
+    boolean omitTermFreqAndPositions;
+    FieldInfo fieldInfo;
+    BytesRef payload;
+    int payloadLength;
+    int position = 0;
+    int positionIndex = 0;
+    Bits bits;
+    ReaderContextPerField readerContext;
+    FreqProxPostingsArray postingsArray;
+    
+    public Postings(int termID, ReaderContextPerField readerContext, FieldInfo fieldInfo, Bits bits) {
+      this.termID = termID;
+      this.readerContext = readerContext;
+      this.maxDocID = readerContext.maxDocID;
+      this.postingsArray = readerContext.postingsArray;
+      this.fieldInfo = fieldInfo;
+      this.bits = bits;
+      BytesRef text = new BytesRef();
+      final int textStart = readerContext.postingsArray.textStarts[termID];
+      freq = new ByteSliceReader();
+      readerContext.byteBlocks.setBytesRef(text, textStart);
+      readerContext.initReaderFreq(freq, termID);
+      omitTermFreqAndPositions = fieldInfo.omitTermFreqAndPositions;
+      if (!omitTermFreqAndPositions) {
+        prox = new ByteSliceReader();
+        readerContext.initReaderProx(prox, termID);
+      }
+    }
+    
+    public int read() throws IOException {
+      initBulkResult();
+      int count = 0;
+      final int[] docs = bulkResult.docs.ints;
+      final int[] freqs = bulkResult.freqs.ints;
+      while(count < docs.length) {
+        final int doc = nextDoc();
+        if (doc != NO_MORE_DOCS) {
+          docs[count] = doc;
+          freqs[count] = freq();
+          count++;
+        } else {
+          break;
+        }
+      }
+      return count;
+    }
+    
+    @Override
+    public BulkReadResult getBulkResult() {
+      initBulkResult();
+      return bulkResult;
+    }
+    
+    @Override
+    public BytesRef getPayload() throws IOException {
+      return payload;
+    }
+    
+    @Override
+    public boolean hasPayload() {
+      return payload != null;
+    }
+    
+    @Override
+    public int nextPosition() throws IOException {
+      if (omitTermFreqAndPositions) {
+        throw new IOException("no position information for this field: "
+            + fieldInfo.name);
+      }
+      if (positionIndex >= termFreq) {
+        throw new IOException("over the term freq of " + termFreq);
+      }
+      // omitTermFreqAndPositions == false so we do write positions &
+      // payload
+      final int code = prox.readVInt();
+      position += code >> 1;
+      
+      if ((code & 1) != 0) {
+        // This position has a payload (flagged by the low bit of the
+        // prox code). Assign the payloadLength *field*: the old local
+        // declaration shadowed it and left the field stale at -1.
+        payloadLength = prox.readVInt();
+        if (payload == null) {
+          payload = new BytesRef();
+          payload.bytes = new byte[payloadLength];
+        } else if (payload.bytes.length < payloadLength) {
+          payload.grow(payloadLength);
+        }
+        prox.readBytes(payload.bytes, 0, payloadLength);
+        payload.length = payloadLength;
+      } else {
+        // No payload at this position: clear 'payload' so hasPayload()
+        // reports false (the old code kept the previous position's
+        // buffer, making hasPayload() lie, and its unused 'thisPayload'
+        // local -- the nocommit -- is dropped).
+        payloadLength = 0;
+        payload = null;
+      }
+      positionIndex++;
+      return position;
+    }
+    
+    @Override
+    public int freq() {
+      return termFreq;
+    }
+    
+    @Override
+    public int docID() {
+      return docID;
+    }
+    
+    @Override
+    public int nextDoc() throws IOException {
+      position = 0;
+      payload = null;
+      payloadLength = -1;
+      positionIndex = 0;
+      while (true) {
+        if (freq.eof()) {
+          //if (postingsArray.lastDocCodes[termID] != -1) {
+          if (!endReached) {
+            // Return last doc
+            docID = postingsArray.lastDocIDs[termID];
+            if (!omitTermFreqAndPositions) {
+              termFreq = postingsArray.docFreqs[termID];
+            }
+            //postingsArray.lastDocCodes[termID] = -1;
+            endReached = true;
+          } else {
+            // EOF
+            return DocIdSetIterator.NO_MORE_DOCS;
+          }
+        } else {
+          final int code = freq.readVInt();
+          if (omitTermFreqAndPositions) {
+            docID += code;
+          } else {
+            docID += code >>> 1;
+            if ((code & 1) != 0) {
+              termFreq = 1;
+            } else {
+              termFreq = freq.readVInt();
+            }
+          }
+          assert docID != postingsArray.lastDocIDs[termID];
+        }
+        // return no more docs early if the max doc id has been reached
+        if (docID >= maxDocID) {
+          return DocIdSetIterator.NO_MORE_DOCS;
+        }
+        if (bits == null || !bits.get(docID)) {
+          break;
+        }
+      }
+      return docID;
+    }
+    
+    @Override
+    public int advance(int target) throws IOException {
+      // DocIdSetIterator contract: position on the first document whose
+      // id is >= target. The previous test 'target >= docID' was
+      // inverted and returned the first doc <= target instead.
+      while (nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+        if (docID >= target) {
+          return docID;
+        }
+      }
+      return docID = NO_MORE_DOCS;
+    }
+  }
+}
Index: src/java/org/apache/lucene/index/TermVectorsTermsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/TermVectorsTermsWriter.java	(revision 1021235)
+++ src/java/org/apache/lucene/index/TermVectorsTermsWriter.java	(working copy)
@@ -45,6 +45,12 @@
     this.docWriter = docWriter;
     docState = docWriter.docState;
   }
+  
+  void flush() throws IOException {
+    tvx.flush();
+    tvd.flush();
+    tvf.flush();
+  }
 
   @Override
   void flush(Map<FieldInfo, TermsHashConsumerPerField> fieldsToFlush, final SegmentWriteState state) throws IOException {
Index: src/java/org/apache/lucene/index/RAMReaderManager.java
===================================================================
--- src/java/org/apache/lucene/index/RAMReaderManager.java	(revision 0)
+++ src/java/org/apache/lucene/index/RAMReaderManager.java	(revision 0)
@@ -0,0 +1,455 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.FieldSelector;
+import org.apache.lucene.index.ConcurrentTermsDictPerField.ReaderContextPerField;
+import org.apache.lucene.index.TermsEnum.SeekStatus;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.CloseableThreadLocal;
+
+class RAMReaderManager {
+  Map<String,ConcurrentTermsDictPerField> map = new HashMap<String,ConcurrentTermsDictPerField>();
+  DocumentsWriterPerThread dwpt;
+  Comparator<BytesRef> comparator;
+  CloseableThreadLocal<FieldsReader> fieldsReaderLocal = new FieldsReaderLocal();
+  CloseableThreadLocal<TermVectorsReader> termVectorsLocal = new TermVectorsLocal();
+  long termVectorsFlushSeqID = -1;
+  long fieldsReaderFlushSeqID = -1;
+  TermVectorsReader termVectorsReader;
+  FieldsReader fieldsReader;
+  Map<Term,AtomicInteger> deleteTerms = new HashMap<Term,AtomicInteger>();
+  Map<Query,AtomicInteger> deleteQueries = new HashMap<Query,AtomicInteger>();
+  int numDeleted = 0;
+  AtomicInteger ref = new AtomicInteger(1);
+  
+  public RAMReaderManager(DocumentsWriterPerThread dwpt) {
+    this.dwpt = dwpt;
+    comparator = BytesRef.getUTF8SortedAsUTF16Comparator();
+  }
+  
+  void ensureTermVectorsFlushed(long seqID) throws IOException {
+    if (seqID > termVectorsFlushSeqID) {
+      dwpt.flushTermVectors();
+    }
+  }
+  
+  void ensureFieldsReaderFlushed(long seqID) throws IOException {
+    if (seqID > fieldsReaderFlushSeqID) {
+      dwpt.flushStoredFields();
+    }
+  }
+  
+  void addDeleteQuery(Query query, int maxDoc) {
+    AtomicInteger doc = deleteQueries.get(query);
+    if (doc == null) {
+      doc = new AtomicInteger();
+      deleteQueries.put(query, doc);
+    }
+    doc.set(maxDoc);
+  }
+  
+  FieldsReader getFieldsReaderLocal() {
+    return fieldsReaderLocal.get();
+  }
+  
+  void addDeleteTerm(Term term, int maxDoc) {
+    AtomicInteger doc = deleteTerms.get(term);
+    if (doc == null) {
+      doc = new AtomicInteger();
+      deleteTerms.put(term, doc);
+    }
+    doc.set(maxDoc);
+  }
+  
+  void applyDeletes(RAMReader reader, long[] deletedSequenceIDs) throws IOException {
+    applyDeleteTerms(reader, deletedSequenceIDs);
+    deleteTerms.clear();
+    applyDeleteQueries(reader, deletedSequenceIDs);
+    deleteQueries.clear();
+  }
+  
+  void applyDeleteQueries(RAMReader reader, long[] deletedSequenceIDs) throws IOException {
+    long seqID = reader.seqID;
+    IndexSearcher searcher = new IndexSearcher(reader);
+    try {
+      for (Map.Entry<Query, AtomicInteger> entry : deleteQueries.entrySet()) {
+        Query query = entry.getKey();
+        int limit = entry.getValue().get();
+        Weight weight = query.weight(searcher);
+        Scorer scorer = weight.scorer(reader, true, false);
+        if (scorer != null) {
+          while(true)  {
+            int doc = scorer.nextDoc();
+            if (doc == DocsEnum.NO_MORE_DOCS || doc >= limit)
+              break;
+            deletedSequenceIDs[doc] = seqID;
+            numDeleted++;
+          }
+        }
+      }
+    } finally {
+      searcher.close();
+    }
+  }
+  
+  void applyDeleteTerms(RAMReader reader, long[] deletedSequenceIDs) throws IOException {
+    BytesRef ref = new BytesRef();
+    long seqID = reader.seqID;
+    for (Map.Entry<Term,AtomicInteger> entry : deleteTerms.entrySet()) {
+      Term term = entry.getKey();
+      int termMaxDoc = entry.getValue().intValue();
+      Terms terms = reader.terms(term.field());
+      TermsEnum termsEnum = terms.iterator();
+      ref.copy(term.text());
+      SeekStatus ss = termsEnum.seek(ref);
+      if (ss.equals(SeekStatus.FOUND)) {
+        DocsEnum docsEnum = termsEnum.docs(reader.getDeletedDocs(), null);
+        if (docsEnum != null) {
+          while (true) {
+            final int doc = docsEnum.nextDoc();
+            if (doc == DocsEnum.NO_MORE_DOCS || doc >= termMaxDoc)
+              break;
+            deletedSequenceIDs[doc] = seqID;
+            numDeleted++;
+          }
+        }
+      }
+    }
+  }
+  
+  synchronized void decRef() throws IOException {
+    if (ref.decrementAndGet() == 0) {
+      if (termVectorsReader != null) {
+        termVectorsReader.close();
+      }
+      if (fieldsReader != null) {
+        fieldsReader.close();
+      }
+    }
+  }
+  
+  private void createTermVectorsReader() throws IOException {
+    assert termVectorsReader == null;
+    FieldInfos fieldInfos = dwpt.getFieldInfos();
+    termVectorsReader = new TermVectorsReader(dwpt.directory, dwpt.segment, fieldInfos);
+  }
+  
+  private void createFieldsReader() throws IOException {
+    assert fieldsReader == null;
+    FieldInfos fieldInfos = dwpt.getFieldInfos();
+    fieldsReader = new FieldsReader(dwpt.directory, dwpt.segment, fieldInfos);
+  }
+  
+  public RAMReader getReader() throws IOException {
+    int maxDoc = dwpt.getNumDocsInRAM();
+    Map<String,ReaderContextPerField> contextMap = dwpt.getReaderContextPerFields();
+    long maxSeqID = dwpt.getMaxSequenceID();
+    long[] deletedSequenceIDs = dwpt.deletedSequenceIDs;
+    FieldInfos fieldInfos = dwpt.getFieldInfos();
+    List<FieldInfo> fieldInfoList = new ArrayList<FieldInfo>();
+    for (int x=0; x < fieldInfos.size(); x++) {
+      fieldInfoList.add(fieldInfos.fieldInfo(x));
+    }
+    RAMReader reader = new RAMReader(maxSeqID, fieldInfoList, numDeleted, deletedSequenceIDs, maxDoc, contextMap);
+    applyDeletes(reader, deletedSequenceIDs);
+    // applyDeletes may have marked more docs deleted since the reader
+    // snapshotted numDeleted above; refresh it so numDocs() and
+    // hasDeletions() reflect this reader's own deletes.
+    reader.numDeleted = numDeleted;
+    ref.incrementAndGet();
+    return reader;
+  }
+  
+  public class RAMReader extends IndexReader {
+    private Map<String,ReaderContextPerField> contextMap;
+    long[] deleteSeqIDs;
+    private int maxDoc;
+    long seqID;
+    int numDeleted;
+    Bits deletedDocs;
+    List<FieldInfo> fieldInfos;
+    
+    public RAMReader(long seqID, List<FieldInfo> fieldInfos, int numDeleted, long[] deleteSeqIDs, int maxDoc, Map<String,ReaderContextPerField> contextMap) {
+      this.seqID = seqID;
+      this.fieldInfos = fieldInfos;
+      this.numDeleted = numDeleted;
+      this.deleteSeqIDs = deleteSeqIDs;
+      this.maxDoc = maxDoc;
+      this.contextMap = contextMap;
+      // update each terms dict to the current point-in-time
+      for (Map.Entry<String,ReaderContextPerField> entry : contextMap.entrySet()) {
+        String field = entry.getKey();
+        ReaderContextPerField context = entry.getValue();
+        ConcurrentTermsDictPerField termsDict = getTermsDict(field);
+        termsDict.update(context);
+      }
+      deletedDocs = new RAMDeletedDocs(seqID, maxDoc, deleteSeqIDs);
+    }
+    
+    public byte[] norms(String field) throws IOException {
+      return null;
+    }
+
+    public void norms(String field, byte[] bytes, int offset) throws IOException {
+    }
+    
+    public TermFreqVector[] getTermFreqVectors(int docNum) throws IOException {
+      ensureTermVectorsFlushed(seqID);
+      return termVectorsLocal.get().get(docNum);
+    }
+
+    public TermFreqVector getTermFreqVector(int docNum, String field) throws IOException {
+      ensureTermVectorsFlushed(seqID);
+      return termVectorsLocal.get().get(docNum, field);
+    }
+
+    public void getTermFreqVector(int docNum, String field, TermVectorMapper mapper) throws IOException {
+      ensureTermVectorsFlushed(seqID);
+      termVectorsLocal.get().get(docNum, field, mapper);
+    }
+
+    public void getTermFreqVector(int docNum, TermVectorMapper mapper) throws IOException {
+      ensureTermVectorsFlushed(seqID);
+      termVectorsLocal.get().get(docNum, mapper);
+    }
+    
+    public boolean isCurrent() throws CorruptIndexException, IOException {
+      return true;
+    }
+    
+    public int maxDoc() {
+      return maxDoc;
+    }
+    
+    public Document document(int doc, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
+      ensureFieldsReaderFlushed(seqID);
+      FieldsReader fieldsReader = getFieldsReaderLocal();
+      return fieldsReader.doc(doc, fieldSelector);
+    }
+    
+    public Bits getDeletedDocs() throws IOException {
+      return deletedDocs;
+    }
+    
+    public Terms terms(String field) {
+      ReaderContextPerField context = contextMap.get(field);
+      if (context == null) return null;
+      ConcurrentTermsDictPerField termsDict = getTermsDict(field);
+      return termsDict.terms(context);
+    }
+    
+    public class RAMFields extends Fields {
+      public FieldsEnum iterator() throws IOException {
+        return new RAMFieldsEnum(fieldInfos);
+      }
+
+      public Terms terms(String field) throws IOException {
+        return RAMReader.this.terms(field);
+      }
+    }
+    
+    public class RAMFieldsEnum extends FieldsEnum {
+      Iterator<FieldInfo> iterator;
+      String field = null;
+      
+      public RAMFieldsEnum(List<FieldInfo> fieldInfos) {
+        iterator = fieldInfos.iterator();
+      }
+      
+      public String next() throws IOException {
+        if (iterator.hasNext()) {
+          return field = iterator.next().name;
+        }
+        return null;
+      }
+      
+      public TermsEnum terms() throws IOException {
+        return RAMReader.this.terms(field).iterator();
+      }
+    }
+    
+    protected void doCommit(Map<String, String> commitUserData) throws IOException {
+    }
+    
+    public int numDocs() {
+      return maxDoc - numDeleted;
+    }
+    
+    public boolean isDeleted(int n) {
+      return deletedDocs.get(n);
+    }
+    
+    public Collection<String> getFieldNames(IndexReader.FieldOption fieldOption) {
+      Set<String> fieldSet = new HashSet<String>();
+      for (int i = 0; i < fieldInfos.size(); i++) {
+        FieldInfo fi = fieldInfos.get(i);
+        if (fieldOption == IndexReader.FieldOption.ALL) {
+          fieldSet.add(fi.name);
+        }
+        else if (!fi.isIndexed && fieldOption == IndexReader.FieldOption.UNINDEXED) {
+          fieldSet.add(fi.name);
+        }
+        else if (fi.omitTermFreqAndPositions && fieldOption == IndexReader.FieldOption.OMIT_TERM_FREQ_AND_POSITIONS) {
+          fieldSet.add(fi.name);
+        }
+        else if (fi.storePayloads && fieldOption == IndexReader.FieldOption.STORES_PAYLOADS) {
+          fieldSet.add(fi.name);
+        }
+        else if (fi.isIndexed && fieldOption == IndexReader.FieldOption.INDEXED) {
+          fieldSet.add(fi.name);
+        }
+        else if (fi.isIndexed && fi.storeTermVector == false && fieldOption == IndexReader.FieldOption.INDEXED_NO_TERMVECTOR) {
+          fieldSet.add(fi.name);
+        }
+        else if (fi.storeTermVector == true &&
+                 fi.storePositionWithTermVector == false &&
+                 fi.storeOffsetWithTermVector == false &&
+                 fieldOption == IndexReader.FieldOption.TERMVECTOR) {
+          fieldSet.add(fi.name);
+        }
+        else if (fi.isIndexed && fi.storeTermVector && fieldOption == IndexReader.FieldOption.INDEXED_WITH_TERMVECTOR) {
+          fieldSet.add(fi.name);
+        }
+        else if (fi.storePositionWithTermVector && fi.storeOffsetWithTermVector == false && fieldOption == IndexReader.FieldOption.TERMVECTOR_WITH_POSITION) {
+          fieldSet.add(fi.name);
+        }
+        else if (fi.storeOffsetWithTermVector && fi.storePositionWithTermVector == false && fieldOption == IndexReader.FieldOption.TERMVECTOR_WITH_OFFSET) {
+          fieldSet.add(fi.name);
+        }
+        else if ((fi.storeOffsetWithTermVector && fi.storePositionWithTermVector) &&
+                  fieldOption == IndexReader.FieldOption.TERMVECTOR_WITH_POSITION_OFFSET) {
+          fieldSet.add(fi.name);
+        }
+      }
+      return fieldSet;
+    }
+    
+    protected void doDelete(int docNum) throws CorruptIndexException, IOException {
+      throw new UnsupportedOperationException("");
+    }
+    
+    protected void doSetNorm(int doc, String field, byte value) throws CorruptIndexException, IOException {
+      throw new UnsupportedOperationException("");
+    }
+    
+    protected void doUndeleteAll() throws CorruptIndexException, IOException {
+      throw new UnsupportedOperationException("");
+    }
+    
+    public boolean hasDeletions() {
+      return numDeleted > 0;
+    }
+    
+    public Fields fields() throws IOException {
+      return new RAMFields();
+    }
+    
+    public long getUniqueTermCount() throws IOException {
+      long termCount = 0;
+      for (ReaderContextPerField context : contextMap.values()) {
+        termCount += context.uniqueTermCount;
+      }
+      return termCount;
+    }
+    
+    protected void doClose() throws IOException {
+      termVectorsLocal.close();
+      fieldsReaderLocal.close();
+      RAMReaderManager.this.decRef();
+    }
+  }
+  
+  static class RAMDeletedDocs implements Bits {
+    long seqID;
+    int len;
+    long[] seqIDs;
+    
+    public RAMDeletedDocs(long seqID, int len, long[] seqIDs) {
+      this.seqID = seqID;
+      this.len = len;
+      this.seqIDs = seqIDs;
+    }
+    
+    public boolean get(int index) {
+      assert index < len;
+      assert index < seqIDs.length;
+      return seqIDs[index] <= seqID;
+    }
+    
+    public int length() {
+      return len;
+    }
+  }
+  
+  private class FieldsReaderLocal extends CloseableThreadLocal<FieldsReader> {
+    @Override
+    protected FieldsReader initialValue() {
+      if (fieldsReader == null) {
+        try {
+          createFieldsReader();
+        } catch (IOException ioe) {
+          throw new RuntimeException(ioe);
+        }
+      }
+      return (FieldsReader)fieldsReader.clone();
+    }
+  }
+  
+  private class TermVectorsLocal extends CloseableThreadLocal<TermVectorsReader> {
+    @Override
+    protected TermVectorsReader initialValue() {
+      if (termVectorsReader == null) {
+        try {
+          createTermVectorsReader();
+        } catch (IOException ioe) {
+          throw new RuntimeException(ioe);
+        }
+      }
+      try {
+        return (TermVectorsReader)termVectorsReader.clone();
+      } catch (CloneNotSupportedException ce) {
+        throw new RuntimeException(ce);
+      }
+    }
+  }
+  
+  // synchronized: 'map' is a plain HashMap read and written from
+  // multiple reader threads (RAMReader ctor and RAMReader.terms()); all
+  // access to 'map' funnels through this method, so synchronizing it is
+  // sufficient to make the lazy init safe.
+  synchronized ConcurrentTermsDictPerField getTermsDict(String field) {
+    ConcurrentTermsDictPerField ctdpf = map.get(field);
+    if (ctdpf == null) {
+      ctdpf = new ConcurrentTermsDictPerField(field, comparator);
+      map.put(field, ctdpf);
+    }
+    return ctdpf;
+  }
+}
Index: src/java/org/apache/lucene/index/DocumentsWriter.java
===================================================================
--- src/java/org/apache/lucene/index/DocumentsWriter.java	(revision 1021235)
+++ src/java/org/apache/lucene/index/DocumentsWriter.java	(working copy)
@@ -18,6 +18,7 @@
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.index.RAMReaderManager.RAMReader;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Scorer;
@@ -118,7 +119,19 @@
     return numDocsInRAM.get() != 0 ||
       deletesInRAM.hasDeletes();
   }
-
+  
+  /**
+   * Records a delete-by-Term in every per-thread RAM buffer reader, stamped
+   * with that thread's current in-RAM doc count -- presumably so the delete
+   * applies only to documents already buffered by that thread (confirm
+   * against ramBufferReaders.addDeleteTerm).
+   */
+  void addDeleteTerm(Term term) {
+    for (DocumentsWriterPerThread dwpt : getDocumentsWriterPerThreads()) {
+      dwpt.ramBufferReaders.addDeleteTerm(term, dwpt.getNumDocsInRAM());
+    }
+  }
+  
+  /**
+   * Records a delete-by-Query in every per-thread RAM buffer reader,
+   * stamped with that thread's current in-RAM doc count (same contract as
+   * addDeleteTerm -- confirm against ramBufferReaders.addDeleteQuery).
+   */
+  void addDeleteQuery(Query query) {
+    for (DocumentsWriterPerThread dwpt : getDocumentsWriterPerThreads()) {
+      dwpt.ramBufferReaders.addDeleteQuery(query, dwpt.getNumDocsInRAM());
+    }
+  }
+  
   DocumentsWriterPerThread newDocumentsWriterPerThread() {
     DocumentsWriterPerThread perThread = new DocumentsWriterPerThread(openFilesTrackingDirectory, this, config
         .getIndexingChain());
@@ -153,32 +166,37 @@
         new DocumentsWriterThreadPool.PerThreadTask<UpdateResult>() {
           @Override
           public UpdateResult process(final DocumentsWriterPerThread perThread) throws IOException {
-            long perThreadRAMUsedBeforeAdd = perThread.numBytesUsed;
-            perThread.addDocument(doc, analyzer);
+            perThread.writeLock.lock();
+            try {
+              long perThreadRAMUsedBeforeAdd = perThread.numBytesUsed;
+              perThread.addDocument(doc, analyzer);
 
-            final long sequenceID;
-            sequenceIDLock.lock();
-            try {
-              ensureOpen();
-              sequenceID = nextSequenceID();
-              if (delTerm != null) {
-                deletesInRAM.addDeleteTerm(delTerm, sequenceID, numDocumentsWriterPerThreads);
+              final long sequenceID;
+              sequenceIDLock.lock();
+              try {
+                ensureOpen();
+                sequenceID = nextSequenceID();
+                if (delTerm != null) {
+                  deletesInRAM.addDeleteTerm(delTerm, sequenceID, numDocumentsWriterPerThreads);
+                }
+                perThread.commitDocument(sequenceID);
+                if (!minSequenceIDsPerThread.containsKey(perThread)) {
+                  minSequenceIDsPerThread.put(perThread, sequenceID);
+                }
+                numDocsInRAM.incrementAndGet();
+              } finally {
+                sequenceIDLock.unlock();
               }
-              perThread.commitDocument(sequenceID);
-              if (!minSequenceIDsPerThread.containsKey(perThread)) {
-                minSequenceIDsPerThread.put(perThread, sequenceID);
+
+              UpdateResult result = new UpdateResult(sequenceID);
+              if (finishAddDocument(perThread, perThreadRAMUsedBeforeAdd)) {
+                result.flushed = true;
+                super.clearThreadBindings();
               }
-              numDocsInRAM.incrementAndGet();
+              return result;
             } finally {
-              sequenceIDLock.unlock();
+              perThread.writeLock.unlock();
             }
-
-            UpdateResult result = new UpdateResult(sequenceID);
-            if (finishAddDocument(perThread, perThreadRAMUsedBeforeAdd)) {
-              result.flushed = true;
-              super.clearThreadBindings();
-            }
-            return result;
           }
         });
         
@@ -405,9 +423,12 @@
     threadPool.resumeAllThreads();
   }
 
-  void close() {
+  void close() throws IOException {
     sequenceIDLock.lock();
     try {
+      for (DocumentsWriterPerThread dwpt : getDocumentsWriterPerThreads()) {
+        dwpt.close();
+      }
       closed = true;
     } finally {
       sequenceIDLock.unlock();
@@ -507,7 +528,20 @@
   long getRAMUsed() {
     return ramUsed.get();
   }
-
+  
+  /**
+   * Returns one RAMReader per active DocumentsWriterPerThread, exposing the
+   * documents currently buffered in RAM.
+   *
+   * @throws IOException if a per-thread reader cannot be obtained
+   */
+  public RAMReader[] getRAMReaders() throws IOException {
+    DocumentsWriterPerThread[] dwpts = getDocumentsWriterPerThreads();
+    RAMReader[] readers = new RAMReader[dwpts.length];
+    for (int x=0; x < dwpts.length; x++) {
+      readers[x] = dwpts[x].getReader();
+    }
+    return readers;
+  }
+  
+  public DocumentsWriterPerThread[] getDocumentsWriterPerThreads() {
+    return threadPool.getDocumentsWriterPerThreads();
+  }
+  
   // nocommit
   // long getRAMUsed() {
   // return numBytesUsed + deletesInRAM.bytesUsed + deletesFlushed.bytesUsed;
Index: src/java/org/apache/lucene/index/IntBlockPool.java
===================================================================
--- src/java/org/apache/lucene/index/IntBlockPool.java	(revision 1021235)
+++ src/java/org/apache/lucene/index/IntBlockPool.java	(working copy)
@@ -17,10 +17,8 @@
  * limitations under the License.
  */
 
-final class IntBlockPool {
+final class IntBlockPool extends IntRead {
 
-  public int[][] buffers = new int[10][];
-
   int bufferUpto = -1;                        // Which buffer we are upto
   public int intUpto = DocumentsWriterRAMAllocator.INT_BLOCK_SIZE;             // Where we are in head buffer
 
@@ -30,9 +28,10 @@
   final private DocumentsWriterPerThread docWriter;
 
   public IntBlockPool(DocumentsWriterPerThread docWriter) {
+    // Buffer storage now lives in the IntRead base class; start with 10 slots.
+    super(new int[10][]);
     this.docWriter = docWriter;
   }
-
+  
   public void reset() {
     if (bufferUpto != -1) {
       if (bufferUpto > 0)
Index: src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- src/java/org/apache/lucene/index/IndexWriter.java	(revision 1021235)
+++ src/java/org/apache/lucene/index/IndexWriter.java	(working copy)
@@ -34,6 +34,7 @@
 import org.apache.lucene.index.IndexWriter.MaxFieldLength;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.index.PayloadProcessorProvider.DirPayloadProcessor;
+import org.apache.lucene.index.RAMReaderManager.RAMReader;
 import org.apache.lucene.index.codecs.CodecProvider;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Similarity;
@@ -427,6 +428,14 @@
     }
   }
   
+  /** Package-private accessor for the underlying DocumentsWriter. */
+  DocumentsWriter getDocWriter() {
+    return docWriter;
+  }
+  
+  /**
+   * Returns readers over the documents currently buffered in RAM by each
+   * indexing thread; delegates to the DocumentsWriter.
+   *
+   * @throws IOException if a per-thread reader cannot be obtained
+   */
+  public RAMReader[] getRAMReaders() throws IOException {
+    return docWriter.getRAMReaders();
+  }
+  
   // used only by asserts
   public synchronized boolean infoIsLive(SegmentInfo info) {
     int idx = segmentInfos.indexOf(info);
Index: src/java/org/apache/lucene/util/ArrayUtil.java
===================================================================
--- src/java/org/apache/lucene/util/ArrayUtil.java	(revision 1021235)
+++ src/java/org/apache/lucene/util/ArrayUtil.java	(working copy)
@@ -289,6 +289,22 @@
     } else
       return array;
   }
+  
+  /**
+   * Returns an array of length >= {@code minSize} containing the contents
+   * of {@code array}; newly allocated tail slots are filled with
+   * {@code defaultValue}. Returns {@code array} itself (unmodified) when
+   * it is already large enough -- callers must use the returned reference.
+   */
+  public static long[] grow(long[] array, int minSize, long defaultValue) {
+    if (array.length < minSize) {
+      long[] newArray = new long[oversize(minSize, RamUsageEstimator.NUM_BYTES_LONG)];
+      System.arraycopy(array, 0, newArray, 0, array.length);
+      // Fill the grown tail in one library call instead of a manual loop
+      // (fully qualified to avoid adding an import to ArrayUtil).
+      java.util.Arrays.fill(newArray, array.length, newArray.length, defaultValue);
+      return newArray;
+    } else {
+      return array;
+    }
+  }
+  
+  /**
+   * Grows {@code array} by at least one slot, filling any new slots with
+   * {@code defaultValue}.
+   */
+  public static long[] grow(long[] array, long defaultValue) {
+    return grow(array, 1 + array.length, defaultValue);
+  }
 
   public static long[] grow(long[] array) {
     return grow(array, 1 + array.length);
