Index: lucene/src/test/org/apache/lucene/index/TestMiniRAMReader.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestMiniRAMReader.java	(revision 0)
+++ lucene/src/test/org/apache/lucene/index/TestMiniRAMReader.java	(revision 0)
@@ -0,0 +1,104 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.TermsEnum.SeekStatus;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MockRAMDirectory;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestMiniRAMReader extends LuceneTestCase {
+  public void test() throws Exception {
+    String field = "text";
+    
+    Directory directory = new MockRAMDirectory();
+    IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT,
+        new WhitespaceAnalyzer());// new MockAnalyzer());
+    iwc.setRAMBufferSizeMB(32.0);
+    IndexWriter writer = new IndexWriter(directory, iwc);
+    for (int i = 0; i < 5; i++) {
+      Document doc = new Document();
+      doc.add(new Field(field, "motorola hp", Field.Store.YES,
+          Field.Index.ANALYZED));
+      writer.addDocument(doc);
+      doc = new Document();
+      doc.add(new Field(field, "apple beos", Field.Store.YES,
+          Field.Index.ANALYZED));
+      writer.addDocument(doc);
+    }
+    MiniRAMReader[] ramReaders = writer.getRAMReaders();
+    ArrayTermsDictionary atd = ramReaders[0].getArrayTermsDictionary(field);
+    Terms ramTerms = atd.terms(ramReaders[0].maxDocID);
+    TermsEnum ramTermsEnum = ramTerms.iterator();
+    SeekStatus ss = ramTermsEnum.seek(new BytesRef("hp"), false);
+    
+    assertEquals(SeekStatus.FOUND, ss);
+    
+    String[] ramTermsArr = toArray(ramTermsEnum);
+    assertTrue(Arrays.equals(ramTermsArr, new String[] {"hp", "motorola"}));
+    // while ( (term = ramTermsEnum.next()) != null) {
+    // System.out.println("atd:"+term.utf8ToString());
+    // }
+    // System.out.println("length:" + ramReaders.length);
+    
+    ConcurrentTermsDictionaryPerField ctd = ramReaders[0]
+        .getConcurrentTermsDictionary(field);
+    String[] conTermsArr = toArray(ctd.terms(ramReaders[0].maxDocID).iterator());
+    assertTrue(Arrays.equals(conTermsArr, new String[] {"apple", "beos", "hp", "motorola"}));
+    
+    Terms cterms = ctd.terms(ramReaders[0].maxDocID);
+    TermsEnum ctermsEnum = cterms.iterator();
+    ss = ctermsEnum.seek(new BytesRef("beos"), false);
+    assertEquals(SeekStatus.FOUND, ss);
+    int docFreq = ctermsEnum.docFreq();
+    assertEquals(5, docFreq);
+    DocsEnum docsEnum = ctermsEnum.docs(null, null);
+    int[] docs = toArray(docsEnum, docFreq);
+    assertTrue(Arrays.equals(docs, new int[] {1, 3, 5, 7, 9}));
+    writer.close();
+  }
+  
+  public static int[] toArray(DocsEnum docsEnum, int docFreq) throws IOException {
+    int[] arr = new int[docFreq];
+    int x = 0;
+    while (x < arr.length && docsEnum.nextDoc() != DocsEnum.NO_MORE_DOCS) {
+      int docID = docsEnum.docID();
+      arr[x++] = docID;
+      // System.out.println(docID);
+    }
+    return arr;
+  }
+  
+  public static String[] toArray(TermsEnum termsEnum) throws IOException {
+    BytesRef term = null;
+    List<String> list = new ArrayList<String>();
+    while ((term = termsEnum.next()) != null) {
+      list.add(term.utf8ToString());
+    }
+    return list.toArray(new String[0]);
+  }
+}
Index: lucene/src/java/org/apache/lucene/index/ByteBlockPool.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/ByteBlockPool.java	(revision 995905)
+++ lucene/src/java/org/apache/lucene/index/ByteBlockPool.java	(working copy)
@@ -33,13 +33,15 @@
  * its length and instead allocate a new slice once they
  * hit a non-zero byte. */
 
+import static org.apache.lucene.util.RamUsageEstimator.NUM_BYTES_OBJECT_REF;
+
 import java.util.Arrays;
-import org.apache.lucene.util.BytesRef;
 import java.util.List;
-import static org.apache.lucene.util.RamUsageEstimator.NUM_BYTES_OBJECT_REF;
+
 import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
 
-final class ByteBlockPool {
+final class ByteBlockPool extends BytesReader {
 
   abstract static class Allocator {
     abstract void recycleByteBlocks(byte[][] blocks, int start, int end);
@@ -47,9 +49,10 @@
     abstract byte[] getByteBlock();
   }
 
-  public byte[][] buffers = new byte[10][];
-
-  int bufferUpto = -1;                        // Which buffer we are upto
+  private byte[][] buffers = new byte[10][];
+  private boolean[] copyOnWrite = new boolean[10];
+  
+  public int bufferUpto = -1;                        // Which buffer we are upto
   public int byteUpto = DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE;             // Where we are in head buffer
 
   public byte[] buffer;                              // Current head buffer
@@ -60,8 +63,51 @@
   public ByteBlockPool(Allocator allocator) {
     this.allocator = allocator;
   }
-
-  public void reset() {
+  
+  public synchronized byte[] getBuffer(int i) {
+    return buffers[i];
+  }
+  
+  public synchronized byte[][] getBuffers() {
+    return buffers;
+  }
+  
+  public synchronized BytesReadOnly getBytesReadOnly(BytesReadOnly read) {
+    if (read == null) {
+      read = new BytesReadOnly();
+    }
+    if (read.buffers == null || buffers.length != read.buffers.length) {
+      read.buffers = new byte[buffers.length][];
+    }
+    System.arraycopy(buffers, 0, read.buffers, 0, buffers.length);
+    assert buffers.length == read.buffers.length;
+    copyOnWrite = new boolean[buffers.length];
+    for (int x=0; x < buffers.length; x++) {
+      // if the buffer is the same pointer/object, when it's written to
+      // it needs to be copied first
+      if (buffers[x] != null 
+          && read.buffers[x] == buffers[x]) {
+        copyOnWrite[x] = true;
+      } else {
+        copyOnWrite[x] = false;
+      }
+    }
+    return read;
+  }
+  
+  // make a copy of the byte[] at the given index
+  // only if they are currently the same pointer.
+  // this method *must* be called prior to making 
+  // changes to byte[]s in the buffers array
+  public synchronized void makeCopy(int i) {
+    if (copyOnWrite[i]) {
+      buffers[i] = Arrays.copyOf(buffers[i], buffers[i].length);
+      copyOnWrite[i] = false;
+    }
+  }
+  
+  // nocommit: we cannot recycle buffers that IndexReaders may still be using
+  public synchronized void reset() {
     if (bufferUpto != -1) {
       // We allocated at least one buffer
 
@@ -84,12 +130,17 @@
     }
   }
 
-  public void nextBuffer() {
+  public synchronized void nextBuffer() {
     if (1+bufferUpto == buffers.length) {
       byte[][] newBuffers = new byte[ArrayUtil.oversize(buffers.length+1,
                                                         NUM_BYTES_OBJECT_REF)][];
       System.arraycopy(buffers, 0, newBuffers, 0, buffers.length);
       buffers = newBuffers;
+      
+      // enlarge the copyOnWrite array
+      boolean[] newCopyOnWrite = new boolean[newBuffers.length];
+      System.arraycopy(copyOnWrite, 0, newCopyOnWrite, 0, copyOnWrite.length);
+      copyOnWrite = newCopyOnWrite;
     }
     buffer = buffers[1+bufferUpto] = allocator.getByteBlock();
     bufferUpto++;
@@ -98,7 +149,7 @@
     byteOffset += DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE;
   }
 
-  public int newSlice(final int size) {
+  public synchronized int newSlice(final int size) {
     if (byteUpto > DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE-size)
       nextBuffer();
     final int upto = byteUpto;
@@ -116,8 +167,7 @@
   final static int[] levelSizeArray = {5, 14, 20, 30, 40, 40, 80, 80, 120, 200};
   final static int FIRST_LEVEL_SIZE = levelSizeArray[0];
 
-  public int allocSlice(final byte[] slice, final int upto) {
-
+  public synchronized int allocSlice(final byte[] slice, final int upto) {
     final int level = slice[upto] & 15;
     final int newLevel = nextLevelArray[level];
     final int newSize = levelSizeArray[newLevel];
@@ -150,7 +200,7 @@
 
   // Fill in a BytesRef from term's length & bytes encoded in
   // byte block
-  final BytesRef setBytesRef(BytesRef term, int textStart) {
+  synchronized BytesRef setBytesRef(BytesRef term, int textStart) {
     final byte[] bytes = term.bytes = buffers[textStart >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
     int pos = textStart & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
     if ((bytes[pos] & 0x80) == 0) {
Index: lucene/src/java/org/apache/lucene/index/DocumentsWriterThreadPool.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/DocumentsWriterThreadPool.java	(revision 995905)
+++ lucene/src/java/org/apache/lucene/index/DocumentsWriterThreadPool.java	(working copy)
@@ -52,7 +52,7 @@
   }
   
   protected abstract static class ThreadState {
-    private DocumentsWriterPerThread perThread;
+    DocumentsWriterPerThread perThread;
     private boolean isIdle = true;
     
     void start() {/* extension hook */}
@@ -73,6 +73,19 @@
     this.maxNumThreadStates = (maxNumThreadStates < 1) ? IndexWriterConfig.DEFAULT_MAX_THREAD_STATES : maxNumThreadStates;
   }
   
+  DocumentsWriterPerThread[] getDocumentsWriterPerThreads() {
+    lock.lock();
+    try {
+      DocumentsWriterPerThread[] dwpts = new DocumentsWriterPerThread[allThreadStates.length];
+      for (int x=0; x < allThreadStates.length; x++) {
+        dwpts[x] = allThreadStates[x].perThread;
+      }
+      return dwpts;
+    } finally {
+      lock.unlock();
+    }
+  }
+  
   public final int getMaxThreadStates() {
     return this.maxNumThreadStates;
   }
Index: lucene/src/java/org/apache/lucene/index/BytesReadOnly.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/BytesReadOnly.java	(revision 0)
+++ lucene/src/java/org/apache/lucene/index/BytesReadOnly.java	(revision 0)
@@ -0,0 +1,52 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.util.BytesRef;
+
+public class BytesReadOnly extends BytesReader {
+  public byte[][] buffers;
+  
+  BytesReadOnly() {}
+  
+  public BytesReadOnly(byte[][] buffers) {
+    this.buffers = buffers;
+  }
+  
+  public byte[][] getBuffers() {
+    return buffers;
+  }
+  
+  // Fill in a BytesRef from term's length & bytes encoded in
+  // byte block
+  final synchronized BytesRef setBytesRef(BytesRef term, int textStart) {
+    final byte[] bytes = term.bytes = buffers[textStart >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
+    int pos = textStart & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
+    if ((bytes[pos] & 0x80) == 0) {
+      // length is 1 byte
+      term.length = bytes[pos];
+      term.offset = pos + 1;
+    } else {
+      // length is 2 bytes
+      term.length = (bytes[pos] & 0x7f) + ((bytes[pos + 1] & 0xff) << 7);
+      term.offset = pos + 2;
+    }
+    assert term.length >= 0;
+    return term;
+  }
+}
Index: lucene/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java	(revision 995905)
+++ lucene/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java	(working copy)
@@ -19,14 +19,14 @@
 
 import java.io.IOException;
 import java.io.PrintStream;
+import java.util.Map;
+import java.util.concurrent.locks.ReentrantLock;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.codecs.Codec;
 import org.apache.lucene.search.Similarity;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.FilterDirectory;
-import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.util.ArrayUtil;
 
 public class DocumentsWriterPerThread {
@@ -76,6 +76,12 @@
     }
   };
   
+  public TermsHash getTermsHash() {
+    DocInverter docInverter = (DocInverter)docFieldProcessor.consumer;
+    TermsHash termsHash = (TermsHash)docInverter.consumer;
+    return termsHash;
+  }
+  
   static class DocState {
     final DocumentsWriterPerThread docWriter;
     Analyzer analyzer;
@@ -140,6 +146,7 @@
   private int numDocsInRAM;
   private int flushedDocCount;
   SegmentWriteState flushState;
+  ReentrantLock writeLock = new ReentrantLock();
 
   long[] sequenceIDs = new long[8];
   
@@ -159,8 +166,21 @@
     if (consumer instanceof DocFieldProcessor) {
       docFieldProcessor = (DocFieldProcessor) consumer;
     }
+    
   }
   
+  void writeLock() {
+    writeLock.lock();
+  }
+  
+  void writeUnlock() {
+    writeLock.unlock();
+  }
+  
+  public Map<String,TermsHashPerField> getTermsHashPerFields() {
+    return docFieldProcessor.getTermsHashPerFields();
+  }
+  
   void setAborting() {
     aborting = true;
   }
@@ -202,7 +222,7 @@
 
   }
 
-  public void commitDocument(long sequenceID) {
+  public synchronized void commitDocument(long sequenceID) {
     if (numDocsInRAM == sequenceIDs.length) {
       sequenceIDs = ArrayUtil.grow(sequenceIDs);
     }
@@ -240,7 +260,7 @@
   }
     
   /** Flush all pending docs to a new segment */
-  SegmentInfo flush() throws IOException {
+  synchronized SegmentInfo flush() throws IOException {
     assert numDocsInRAM > 0;
 
     flushState = new SegmentWriteState(infoStream, directory, segment, docFieldProcessor.fieldInfos,
Index: lucene/src/java/org/apache/lucene/index/ByteSliceReader.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/ByteSliceReader.java	(revision 995905)
+++ lucene/src/java/org/apache/lucene/index/ByteSliceReader.java	(working copy)
@@ -28,7 +28,7 @@
  * point we read the forwarding address of the next slice
  * and then jump to it.*/
 final class ByteSliceReader extends DataInput {
-  ByteBlockPool pool;
+  BytesReader pool;
   int bufferUpto;
   byte[] buffer;
   public int upto;
@@ -37,9 +37,10 @@
   public int bufferOffset;
 
   public int endIndex;
+  
+  byte[][] buffers;
 
-  public void init(ByteBlockPool pool, int startIndex, int endIndex) {
-
+  public void init(BytesReader pool, int startIndex, int endIndex) {
     assert endIndex-startIndex >= 0;
     assert startIndex >= 0;
     assert endIndex >= 0;
@@ -50,7 +51,8 @@
     level = 0;
     bufferUpto = startIndex / DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE;
     bufferOffset = bufferUpto * DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE;
-    buffer = pool.buffers[bufferUpto];
+    buffers = pool.getBuffers();
+    buffer = buffers[bufferUpto];
     upto = startIndex & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
 
     final int firstSize = ByteBlockPool.levelSizeArray[0];
@@ -98,14 +100,13 @@
 
     // Skip to our next slice
     final int nextIndex = ((buffer[limit]&0xff)<<24) + ((buffer[1+limit]&0xff)<<16) + ((buffer[2+limit]&0xff)<<8) + (buffer[3+limit]&0xff);
-
+    
     level = ByteBlockPool.nextLevelArray[level];
     final int newSize = ByteBlockPool.levelSizeArray[level];
 
     bufferUpto = nextIndex / DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE;
     bufferOffset = bufferUpto * DocumentsWriterRAMAllocator.BYTE_BLOCK_SIZE;
-
-    buffer = pool.buffers[bufferUpto];
+    buffer = buffers[bufferUpto];
     upto = nextIndex & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
 
     if (nextIndex + newSize >= endIndex) {
Index: lucene/src/java/org/apache/lucene/index/BytesReader.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/BytesReader.java	(revision 0)
+++ lucene/src/java/org/apache/lucene/index/BytesReader.java	(revision 0)
@@ -0,0 +1,9 @@
+package org.apache.lucene.index;
+
+import org.apache.lucene.util.BytesRef;
+
+public abstract class BytesReader {
+  public abstract byte[][] getBuffers();
+  
+  abstract BytesRef setBytesRef(BytesRef term, int textStart);
+}
Index: lucene/src/java/org/apache/lucene/index/DocFieldProcessor.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/DocFieldProcessor.java	(revision 995905)
+++ lucene/src/java/org/apache/lucene/index/DocFieldProcessor.java	(working copy)
@@ -63,7 +63,17 @@
     consumer.setFieldInfos(fieldInfos);
     fieldsWriter = new StoredFieldsWriter(docWriter, fieldInfos);
   }
-
+  
+  public Map<String,TermsHashPerField> getTermsHashPerFields() {
+    Map<String,TermsHashPerField> map = new HashMap<String,TermsHashPerField>();
+    Collection<DocFieldConsumerPerField> fields = fields();
+    for (DocFieldConsumerPerField f : fields) {
+      DocInverterPerField dipf = (DocInverterPerField)f;
+      map.put(f.getFieldInfo().name, (TermsHashPerField)dipf.consumer);
+    }
+    return map;
+  }
+  
   @Override
   public void flush(SegmentWriteState state) throws IOException {
 
Index: lucene/src/java/org/apache/lucene/index/TermsHashPerField.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/TermsHashPerField.java	(revision 995905)
+++ lucene/src/java/org/apache/lucene/index/TermsHashPerField.java	(working copy)
@@ -20,9 +20,11 @@
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Comparator;
+import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
 import org.apache.lucene.document.Fieldable;
+import org.apache.lucene.index.FreqProxTermsWriterPerField.FreqProxPostingsArray;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.RamUsageEstimator;
 
@@ -55,8 +57,8 @@
   private int[] postingsHash;
  
   ParallelPostingsArray postingsArray;
-  private final BytesRef utf8;
-  private Comparator<BytesRef> termComp;
+  final BytesRef utf8;
+  Comparator<BytesRef> termComp;
 
   public TermsHashPerField(DocInverterPerField docInverterPerField, final TermsHash termsHash, final TermsHash nextTermsHash, final FieldInfo fieldInfo) {
     intPool = termsHash.intPool;
@@ -82,7 +84,29 @@
     else
       nextPerField = null;
   }
-
+  
+  // during the flush, no document additions are occurring
+  // there is no need to synchronize
+  public void flush(RAMBufferPerField buf) {
+    if (postingsArray != null) {
+      // if the postings array has changed in size
+      // we need the latest pointer to the latest postingsArray
+      if (buf.postingsArray == null 
+          || buf.postingsArray.size != postingsArray.size) {
+        assert postingsArray instanceof FreqProxPostingsArray;
+        buf.postingsArray = (FreqProxPostingsArray)postingsArray;
+      } else if (buf.postingsArray.size == postingsArray.size) {
+        // if the postingsArray is the same size as last time
+        // then it should be the same object, eg, pointer
+        assert buf.postingsArray == postingsArray;
+      }
+    }
+    buf.bytePool = bytePool.getBytesReadOnly(buf.bytePool);
+    buf.termBytePool = termBytePool.getBytesReadOnly(buf.termBytePool);
+    buf.intPool = intPool.getIntsReadOnly(buf.intPool);
+    buf.numTerms = numPostings;
+  }
+  
   private void initPostingsArray() {
     postingsArray = consumer.createPostingsArray(2);
     bytesUsed(postingsArray.size * postingsArray.bytesPerPosting());
@@ -145,7 +169,8 @@
   public void initReader(ByteSliceReader reader, int termID, int stream) {
     assert stream < streamCount;
     int intStart = postingsArray.intStarts[termID];
-    final int[] ints = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
+    //final int[] ints = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
+    int[] ints = intPool.getBuffer(intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT);
     final int upto = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
     reader.init(bytePool,
                 postingsArray.byteStarts[termID]+stream*ByteBlockPool.FIRST_LEVEL_SIZE,
@@ -172,15 +197,30 @@
   public int[] sortPostings(Comparator<BytesRef> termComp) {
     this.termComp = termComp;
     compactPostings();
-    quickSort(postingsHash, 0, numPostings-1);
+    quickSort(postingsHash, 0, numPostings-1, new TermComparator());
     return postingsHash;
   }
-
-  void quickSort(int[] termIDs, int lo, int hi) {
+  
+  public class TermComparator implements Comparator<Integer> {
+    BytesRef tr1 = new BytesRef();
+    BytesRef tr2 = new BytesRef();
+    
+    public int compare(Integer termID1, Integer termID2) {
+      if (termID1.intValue() == termID2.intValue()) {
+        return 0;
+      }
+      termBytePool.setBytesRef(tr1, postingsArray.textStarts[termID1]);
+      termBytePool.setBytesRef(tr2, postingsArray.textStarts[termID2]);
+      
+      return termComp.compare(tr1, tr2);
+    }
+  }
+  
+  static void quickSort(int[] termIDs, int lo, int hi, Comparator<Integer> comparator) {
     if (lo >= hi)
       return;
     else if (hi == 1+lo) {
-      if (comparePostings(termIDs[lo], termIDs[hi]) > 0) {
+      if (comparator.compare(termIDs[lo], termIDs[hi]) > 0) {
         final int tmp = termIDs[lo];
         termIDs[lo] = termIDs[hi];
         termIDs[hi] = tmp;
@@ -190,18 +230,18 @@
 
     int mid = (lo + hi) >>> 1;
 
-    if (comparePostings(termIDs[lo], termIDs[mid]) > 0) {
+    if (comparator.compare(termIDs[lo], termIDs[mid]) > 0) {
       int tmp = termIDs[lo];
       termIDs[lo] = termIDs[mid];
       termIDs[mid] = tmp;
     }
 
-    if (comparePostings(termIDs[mid], termIDs[hi]) > 0) {
+    if (comparator.compare(termIDs[mid], termIDs[hi]) > 0) {
       int tmp = termIDs[mid];
       termIDs[mid] = termIDs[hi];
       termIDs[hi] = tmp;
 
-      if (comparePostings(termIDs[lo], termIDs[mid]) > 0) {
+      if (comparator.compare(termIDs[lo], termIDs[mid]) > 0) {
         int tmp2 = termIDs[lo];
         termIDs[lo] = termIDs[mid];
         termIDs[mid] = tmp2;
@@ -217,10 +257,10 @@
     int partition = termIDs[mid];
 
     for (; ;) {
-      while (comparePostings(termIDs[right], partition) > 0)
+      while (comparator.compare(termIDs[right], partition) > 0)
         --right;
 
-      while (left < right && comparePostings(termIDs[left], partition) <= 0)
+      while (left < right && comparator.compare(termIDs[left], partition) <= 0)
         ++left;
 
       if (left < right) {
@@ -233,10 +273,10 @@
       }
     }
 
-    quickSort(termIDs, lo, left);
-    quickSort(termIDs, left + 1, hi);
+    quickSort(termIDs, lo, left, comparator);
+    quickSort(termIDs, left + 1, hi, comparator);
   }
-
+  
   /** Compares term text for two Posting instance and
    *  returns -1 if p1 < p2; 1 if p1 > p2; else 0. */
   int comparePostings(int term1, int term2) {
@@ -254,9 +294,10 @@
 
   /** Test whether the text for current RawPostingList p equals
    *  current tokenText in utf8. */
-  private boolean postingEquals(final int termID) {
+  private boolean postingEquals(final int termID, byte[][] buffers) {
     final int textStart = postingsArray.textStarts[termID];
-    final byte[] text = termBytePool.buffers[textStart >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
+    //final byte[] text = termBytePool.buffers[textStart >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
+    final byte[] text = buffers[textStart >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
     assert text != null;
 
     int pos = textStart & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
@@ -377,7 +418,8 @@
 
     } else {
       int intStart = postingsArray.intStarts[termID];
-      intUptos = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
+      //intUptos = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
+      intUptos = intPool.getBuffer(intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT);
       intUptoStart = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
       consumer.addTerm(termID);
     }
@@ -399,8 +441,10 @@
 
     // Locate RawPostingList in hash
     int termID = postingsHash[hashPos];
-
-    if (termID != -1 && !postingEquals(termID)) {
+    
+    byte[][] buffers = termBytePool.getBuffers();
+    
+    if (termID != -1 && !postingEquals(termID, buffers)) {
       // Conflict: keep searching different locations in
       // the hash table.
       final int inc = ((code>>8)+code)|1;
@@ -408,7 +452,7 @@
         code += inc;
         hashPos = code & postingsHashMask;
         termID = postingsHash[hashPos];
-      } while (termID != -1 && !postingEquals(termID));
+      } while (termID != -1 && !postingEquals(termID, buffers));
     }
 
     if (termID == -1) {
@@ -505,7 +549,8 @@
 
     } else {
       final int intStart = postingsArray.intStarts[termID];
-      intUptos = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
+      intUptos = intPool.getBuffer(intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT);
+      //intUptos = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
       intUptoStart = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
       consumer.addTerm(termID);
     }
@@ -519,7 +564,8 @@
 
   void writeByte(int stream, byte b) {
     int upto = intUptos[intUptoStart+stream];
-    byte[] bytes = bytePool.buffers[upto >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
+    byte[] bytes = bytePool.getBuffer(upto >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT);
+    //byte[] bytes = bytePool.buffers[upto >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
     assert bytes != null;
     int offset = upto & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
     if (bytes[offset] != 0) {
@@ -560,7 +606,9 @@
   void rehashPostings(final int newSize) {
 
     final int newMask = newSize-1;
-
+    
+    byte[][] buffers = bytePool.getBuffers();
+    
     int[] newHash = new int[newSize];
     Arrays.fill(newHash, -1);
     for(int i=0;i<postingsHashSize;i++) {
@@ -570,7 +618,8 @@
         if (termsHash.primary) {
           final int textStart = postingsArray.textStarts[termID];
           final int start = textStart & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
-          final byte[] text = bytePool.buffers[textStart >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
+          //final byte[] text = bytePool.buffers[textStart >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
+          final byte[] text = buffers[textStart >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
           code = 0;
 
           final int len;
Index: lucene/src/java/org/apache/lucene/index/MiniRAMReader.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/MiniRAMReader.java	(revision 0)
+++ lucene/src/java/org/apache/lucene/index/MiniRAMReader.java	(revision 0)
@@ -0,0 +1,96 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentNavigableMap;
+
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.ReaderUtil;
+
+public class MiniRAMReader {
+  final Map<String,RAMBufferPerField> ramBuffers;
+  final int maxDocID;
+  
+  public MiniRAMReader(int maxDocID, 
+      Map<String,RAMBufferPerField> ramBuffers) {
+    this.maxDocID = maxDocID;
+    this.ramBuffers = ramBuffers;
+  }
+  
+  public ConcurrentTermsDictionaryPerField getConcurrentTermsDictionary(String field) {
+    RAMBufferPerField ramBuffer = ramBuffers.get(field);
+    return ramBuffer.getConcurrentTermsDictionary();
+  }
+  
+  public ArrayTermsDictionary getArrayTermsDictionary(String field) {
+    RAMBufferPerField ramBuffer = ramBuffers.get(field);
+    return ramBuffer.getArrayTermsDictionary();
+  }
+  
+  public Terms terms(String field) throws IOException {
+    final List<Terms> terms = new ArrayList<Terms>();
+    final List<ReaderUtil.Slice> slices = new ArrayList<ReaderUtil.Slice>();
+    ConcurrentTermsDictionaryPerField ctdpf = getConcurrentTermsDictionary(field);
+    if (ctdpf != null) {
+      terms.add(ctdpf.terms(maxDocID));
+      slices.add(new ReaderUtil.Slice(0, maxDocID, 0));
+    }
+    ArrayTermsDictionary atd = getArrayTermsDictionary(field);
+    if (atd != null) {
+      terms.add(atd.terms(maxDocID));
+      slices.add(new ReaderUtil.Slice(0, maxDocID, 0));
+    }
+    Terms result = null;
+    if (terms.size() == 1) {
+      result = terms.get(0);
+    } else {
+      result = new MultiTerms(terms.toArray(Terms.EMPTY_ARRAY),
+          slices.toArray(ReaderUtil.Slice.EMPTY_ARRAY));
+    }
+    return result;
+  }
+  
+  public DocsEnum termDocsEnum(Bits skipDocs, String field, BytesRef term) throws IOException {
+    return null;
+  }
+  
+  public DocsAndPositionsEnum termPositionsEnum(Bits skipDocs, String field, BytesRef term) throws IOException {
+    return null;
+  }
+  /**
+  public Iterator<Map.Entry<BytesRef,Integer>> getTerms(String field, BytesRef startingTerm) throws IOException {
+    ConcurrentTermsDictionaryPerField ctdp = conMap.get(field);
+    final ConcurrentNavigableMap<BytesRef,Integer> headMap;
+    if (startingTerm != null) {
+      headMap = ctdp.termMap.headMap(startingTerm, true);
+    } else {
+      headMap = ctdp.termMap;
+    }
+    return headMap.entrySet().iterator();
+  }
+  **/
+}
+
Index: lucene/src/java/org/apache/lucene/index/ByteSliceWriter.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/ByteSliceWriter.java	(revision 995905)
+++ lucene/src/java/org/apache/lucene/index/ByteSliceWriter.java	(working copy)
@@ -31,18 +31,21 @@
   private byte[] slice;
   private int upto;
   private final ByteBlockPool pool;
+  byte[][] buffers;
 
   int offset0;
 
   public ByteSliceWriter(ByteBlockPool pool) {
     this.pool = pool;
+    buffers = pool.getBuffers();
   }
 
   /**
    * Set up the writer to write at address.
    */
   public void init(int address) {
-    slice = pool.buffers[address >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
+    //slice = pool.buffers[address >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
+    slice = buffers[address >> DocumentsWriterRAMAllocator.BYTE_BLOCK_SHIFT];
     assert slice != null;
     upto = address & DocumentsWriterRAMAllocator.BYTE_BLOCK_MASK;
     offset0 = address;
Index: lucene/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java	(revision 995905)
+++ lucene/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java	(working copy)
@@ -31,7 +31,7 @@
 // codecs; make separate container (tii/tis/skip/*) that can
 // be configured as any number of files 1..N
 final class FreqProxTermsWriterPerField extends TermsHashConsumerPerField implements Comparable<FreqProxTermsWriterPerField> {
-
+  static boolean FLUSH_READONLY_BUFFER = true;
   final FreqProxTermsWriter parent;
   final TermsHashPerField termsHashPerField;
   final FieldInfo fieldInfo;
@@ -129,6 +129,8 @@
       postings.docFreqs[termID] = 1;
       writeProx(termID, fieldState.position);
     }
+    // add to the term freq array
+    postings.termFreqs[termID]++;
   }
 
   @Override
@@ -138,8 +140,11 @@
     
     FreqProxPostingsArray postings = (FreqProxPostingsArray) termsHashPerField.postingsArray;
     
+    // add to the term freq array
+    postings.termFreqs[termID]++;
+    
     assert omitTermFreqAndPositions || postings.docFreqs[termID] > 0;
-
+    
     if (omitTermFreqAndPositions) {
       if (docState.docID != postings.lastDocIDs[termID]) {
         assert docState.docID > postings.lastDocIDs[termID];
@@ -184,13 +189,15 @@
       lastDocIDs = new int[size];
       lastDocCodes = new int[size];
       lastPositions = new int[size];
+      termFreqs = new int[size];
     }
 
     int docFreqs[];                                    // # times this term occurs in the current doc
     int lastDocIDs[];                                  // Last docID where this term occurred
     int lastDocCodes[];                                // Code for prior doc
     int lastPositions[];                               // Last position where this term occurred
-
+    int[] termFreqs;                                   // Term frequencies
+    
     @Override
     ParallelPostingsArray newInstance(int size) {
       return new FreqProxPostingsArray(size);
@@ -206,6 +213,7 @@
       System.arraycopy(lastDocIDs, 0, to.lastDocIDs, 0, numToCopy);
       System.arraycopy(lastDocCodes, 0, to.lastDocCodes, 0, numToCopy);
       System.arraycopy(lastPositions, 0, to.lastPositions, 0, numToCopy);
+      System.arraycopy(termFreqs, 0, to.termFreqs, 0, numToCopy);
     }
 
     @Override
@@ -223,7 +231,18 @@
    * into a single RAM segment. */
   void flush(FieldsConsumer consumer,  final SegmentWriteState state)
     throws CorruptIndexException, IOException {
-
+    
+    final FreqProxPostingsArray postings;
+    RAMBufferPerField ramBuffer = null;
+    
+    if (FLUSH_READONLY_BUFFER) {
+      ramBuffer = new RAMBufferPerField(termsHashPerField);
+      termsHashPerField.flush(ramBuffer);
+      postings = (FreqProxPostingsArray) ramBuffer.postingsArray;
+    } else {
+      postings = (FreqProxPostingsArray) termsHashPerField.postingsArray;
+    }
+    
     final TermsConsumer termsConsumer = consumer.addField(fieldInfo);
     final Comparator<BytesRef> termComp = termsConsumer.getComparator();
 
@@ -231,23 +250,30 @@
     
     final int[] termIDs = termsHashPerField.sortPostings(termComp);
     final int numTerms = termsHashPerField.numPostings;
-    final BytesRef text = new BytesRef();
-    final FreqProxPostingsArray postings = (FreqProxPostingsArray) termsHashPerField.postingsArray;
+    final BytesRef text = new BytesRef();    
+    
     final ByteSliceReader freq = new ByteSliceReader();
     final ByteSliceReader prox = new ByteSliceReader();
 
-    
     for (int i = 0; i < numTerms; i++) {
       final int termID = termIDs[i];
       // Get BytesRef
       final int textStart = postings.textStarts[termID];
-      termsHashPerField.bytePool.setBytesRef(text, textStart);
       
-      termsHashPerField.initReader(freq, termID, 0);
-      if (!fieldInfo.omitTermFreqAndPositions) {
-        termsHashPerField.initReader(prox, termID, 1);
+      if (FLUSH_READONLY_BUFFER) {
+        ramBuffer.bytePool.setBytesRef(text, textStart);
+        ramBuffer.initReader(freq, termID, 0);
+        if (!fieldInfo.omitTermFreqAndPositions) {
+          ramBuffer.initReader(prox, termID, 1);
+        }
+      } else {
+        termsHashPerField.bytePool.setBytesRef(text, textStart);
+      
+        termsHashPerField.initReader(freq, termID, 0);
+        if (!fieldInfo.omitTermFreqAndPositions) {
+          termsHashPerField.initReader(prox, termID, 1);
+        }
       }
-  
       // TODO: really TermsHashPerField should take over most
       // of this loop, including merge sort of terms from
       // multiple threads and interacting with the
@@ -342,6 +368,5 @@
   
     termsConsumer.finish();
   }
-
 }
 
Index: lucene/src/java/org/apache/lucene/index/RAMTermsPolicy.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/RAMTermsPolicy.java	(revision 0)
+++ lucene/src/java/org/apache/lucene/index/RAMTermsPolicy.java	(revision 0)
@@ -0,0 +1,6 @@
+package org.apache.lucene.index;
+
+public class RAMTermsPolicy {
+  public int concurrentTermsThresholdPerField = 5000;
+  
+}
Index: lucene/src/java/org/apache/lucene/index/LogMergePolicy.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/LogMergePolicy.java	(revision 995905)
+++ lucene/src/java/org/apache/lucene/index/LogMergePolicy.java	(working copy)
@@ -30,7 +30,7 @@
  *  {@link #setMergeFactor(int)} respectively.</p>
  *
  * <p>This class is abstract and requires a subclass to
- * define the {@link #size} method which specifies how a
+ * define the {@link #size} method which specifies how a
  * segment's size is determined.  {@link LogDocMergePolicy}
  * is one subclass that measures size by document count in
  * the segment.  {@link LogByteSizeMergePolicy} is another
Index: lucene/src/java/org/apache/lucene/index/RAMBufferPerField.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/RAMBufferPerField.java	(revision 0)
+++ lucene/src/java/org/apache/lucene/index/RAMBufferPerField.java	(revision 0)
@@ -0,0 +1,265 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.lucene.index.FreqProxTermsWriterPerField.FreqProxPostingsArray;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+
+public class RAMBufferPerField {
+  FreqProxPostingsArray postingsArray;
+  IntsReadOnly intPool;
+  BytesReadOnly bytePool;
+  BytesReadOnly termBytePool;
+  BytesRef utf8;
+  final BytesRef tr1 = new BytesRef();
+  final BytesRef tr2 = new BytesRef();
+  Comparator<BytesRef> termComp;
+  int numTerms;
+  FieldInfo fieldInfo;
+  ConcurrentTermsDictionaryPerField concurrentTerms;
+  ArrayTermsDictionary arrayTerms;
+  int[] termFreqs;
+  
+  public RAMBufferPerField(TermsHashPerField termsHashField) {
+    utf8 = termsHashField.utf8;
+    termComp = BytesRef.getUTF8SortedAsUnicodeComparator();
+    fieldInfo = termsHashField.fieldInfo;
+  }
+  
+  public RAMBufferPerField(BytesRef utf8, Comparator<BytesRef> termComp) {
+    this.utf8 = utf8;
+    this.termComp = termComp;
+    assert termComp != null;
+  }
+  
+  public ArrayTermsDictionary getArrayTermsDictionary() {
+    if (arrayTerms == null) {
+      arrayTerms = new ArrayTermsDictionary(this);
+    }
+    return arrayTerms;
+  }
+  
+  public ConcurrentTermsDictionaryPerField getConcurrentTermsDictionary() {
+    if (concurrentTerms == null) {
+      concurrentTerms = new ConcurrentTermsDictionaryPerField(this);
+    } else {
+      concurrentTerms.update(this);
+    }
+    return concurrentTerms;
+  }
+  
+  public int[] getSortedTermIDs() {
+    int[] termids = new int[numTerms];
+    for (int x = 0; x < numTerms; x++) {
+      termids[x] = x;
+    }
+    TermsHashPerField.quickSort(termids, 0, numTerms - 1, new TermComparator());
+    return termids;
+  }
+  
+  public DocsAndPositionsEnum getDocsEnum(int termID, int maxDocID, Bits bits)
+      throws IOException {
+    return new Postings(termID, maxDocID, fieldInfo, bits);
+  }
+  
+  public class Postings extends DocsAndPositionsEnum {
+    int numDocs = 0;
+    int docID = 0;
+    int termFreq = 0;
+    ByteSliceReader freq;
+    ByteSliceReader prox;
+    int termID;
+    int maxDocID;
+    boolean omitTermFreqAndPositions;
+    FieldInfo fieldInfo;
+    BytesRef payload;
+    int payloadLength;
+    int position = 0;
+    int positionIndex = 0;
+    Bits bits;
+    
+    public Postings(int termID, int maxDocID, FieldInfo fieldInfo, Bits bits) {
+      this.termID = termID;
+      this.maxDocID = maxDocID;
+      this.fieldInfo = fieldInfo;
+      this.bits = bits;
+      BytesRef text = new BytesRef();
+      final int textStart = postingsArray.textStarts[termID];
+      freq = new ByteSliceReader();
+      bytePool.setBytesRef(text, textStart);
+      initReader(freq, termID, 0);
+      this.omitTermFreqAndPositions = fieldInfo.omitTermFreqAndPositions;
+      if (!omitTermFreqAndPositions) {
+        prox = new ByteSliceReader();
+        initReader(prox, termID, 1);
+      }
+    }
+    
+    @Override
+    public BytesRef getPayload() throws IOException {
+      return payload;
+    }
+    
+    @Override
+    public boolean hasPayload() {
+      return payload != null;
+    }
+    
+    @Override
+    public int nextPosition() throws IOException {
+      if (omitTermFreqAndPositions) {
+        throw new IOException("no position information for this field: "
+            + fieldInfo.name);
+      }
+      if (positionIndex >= termFreq) {
+        throw new IOException("over the term freq of " + termFreq);
+      }
+      // omitTermFreqAndPositions == false so we do write positions &
+      // payload
+      final int code = prox.readVInt();
+      position += code >> 1;
+      
+      final int payloadLength;
+      // nocommit: not sure what this is for
+      final BytesRef thisPayload;
+      
+      if ((code & 1) != 0) {
+        // This position has a payload
+        payloadLength = prox.readVInt();
+        if (payload == null) {
+          payload = new BytesRef();
+          payload.bytes = new byte[payloadLength];
+        } else if (payload.bytes.length < payloadLength) {
+          payload.grow(payloadLength);
+        }
+        prox.readBytes(payload.bytes, 0, payloadLength);
+        payload.length = payloadLength;
+        thisPayload = payload;
+      } else {
+        payloadLength = 0;
+        thisPayload = null;
+      }
+      positionIndex++;
+      return position;
+    }
+    
+    @Override
+    public int freq() {
+      return termFreq;
+    }
+    
+    @Override
+    public int docID() {
+      return docID;
+    }
+    
+    @Override
+    public int nextDoc() throws IOException {
+      position = 0;
+      payload = null;
+      payloadLength = -1;
+      positionIndex = 0;
+      while (true) {
+        if (freq.eof()) {
+          if (postingsArray.lastDocCodes[termID] != -1) {
+            // Return last doc
+            docID = postingsArray.lastDocIDs[termID];
+            if (!omitTermFreqAndPositions) {
+              termFreq = postingsArray.docFreqs[termID];
+            }
+            postingsArray.lastDocCodes[termID] = -1;
+          } else {
+            // EOF
+            return DocIdSetIterator.NO_MORE_DOCS;
+          }
+        } else {
+          final int code = freq.readVInt();
+          if (omitTermFreqAndPositions) {
+            docID += code;
+          } else {
+            docID += code >>> 1;
+            if ((code & 1) != 0) {
+              termFreq = 1;
+            } else {
+              termFreq = freq.readVInt();
+            }
+          }
+          assert docID != postingsArray.lastDocIDs[termID];
+        }
+        // return no more docs early if the max doc id has been reached
+        if (docID > maxDocID) {
+          return DocIdSetIterator.NO_MORE_DOCS;
+        }
+        if (bits == null || !bits.get(docID)) {
+          break;
+        }
+      }
+      return docID;
+    }
+    
+    @Override
+    public int advance(int target) throws IOException {
+      while (nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+        if (docID >= target) {
+          return docID;
+        }
+      }
+      return docID = NO_MORE_DOCS;
+    }
+  }
+  
+  public void initReader(ByteSliceReader reader, int termID, int stream) {
+    // assert stream < streamCount;
+    int intStart = postingsArray.intStarts[termID];
+    final int[] ints = intPool.buffers[intStart >> DocumentsWriterRAMAllocator.INT_BLOCK_SHIFT];
+    final int upto = intStart & DocumentsWriterRAMAllocator.INT_BLOCK_MASK;
+    reader.init(bytePool, postingsArray.byteStarts[termID] + stream
+        * ByteBlockPool.FIRST_LEVEL_SIZE, ints[upto + stream]);
+  }
+  
+  public class TermComparator implements Comparator<Integer> {
+    BytesRef tr1 = new BytesRef();
+    BytesRef tr2 = new BytesRef();
+    
+    public int compare(Integer termID1, Integer termID2) {
+      if (termID1.intValue() == termID2.intValue()) {
+        return 0;
+      }
+      termBytePool.setBytesRef(tr1, postingsArray.textStarts[termID1]);
+      termBytePool.setBytesRef(tr2, postingsArray.textStarts[termID2]);
+      
+      return termComp.compare(tr1, tr2);
+    }
+  }
+  
+  TermComparator getTermComparator() {
+    return new TermComparator();
+  }
+  
+  public BytesRef setBytesRef(int termID, BytesRef term) {
+    termBytePool.setBytesRef(term, postingsArray.textStarts[termID]);
+    return term;
+  }
+}
Index: lucene/src/java/org/apache/lucene/index/DocumentsWriter.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/DocumentsWriter.java	(revision 995905)
+++ lucene/src/java/org/apache/lucene/index/DocumentsWriter.java	(working copy)
@@ -30,98 +30,101 @@
 
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
  * 
  * http://www.apache.org/licenses/LICENSE-2.0
  * 
  * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
  */
 
 final class DocumentsWriter {
   private long sequenceID;
   private int numDocumentsWriterPerThreads;
-
+  
   private final BufferedDeletesInRAM deletesInRAM = new BufferedDeletesInRAM();
   private final DocumentsWriterThreadPool threadPool;
   private final Lock sequenceIDLock = new ReentrantLock();
-
+  
   private final Directory openFilesTrackingDirectory;
   final IndexWriter indexWriter;
   final IndexWriterConfig config;
-
+  
   private int maxBufferedDocs;
   private double maxBufferSizeMB;
   private int maxBufferedDeleteTerms;
-
+  
   private boolean closed;
   private AtomicInteger numDocsInRAM = new AtomicInteger(0);
   private AtomicLong ramUsed = new AtomicLong(0);
-
+  
   private long flushedSequenceID = -1;
   private final PrintStream infoStream;
-
-  private Map<DocumentsWriterPerThread, Long> minSequenceIDsPerThread = new HashMap<DocumentsWriterPerThread, Long>();
-
-  public DocumentsWriter(Directory directory, IndexWriter indexWriter, IndexWriterConfig config) {
+  
+  private Map<DocumentsWriterPerThread,Long> minSequenceIDsPerThread = new HashMap<DocumentsWriterPerThread,Long>();
+  
+  Map<String,RAMBufferPerField> ramBufferMap = new HashMap<String,RAMBufferPerField>();
+  
+  public DocumentsWriter(Directory directory, IndexWriter indexWriter,
+      IndexWriterConfig config) {
     this.openFilesTrackingDirectory = new FilterDirectory(directory) {
-      @Override public IndexOutput createOutput(final String name) throws IOException {
+      @Override
+      public IndexOutput createOutput(final String name) throws IOException {
         addOpenFile(name);
         return super.createOutput(name);
       }
     };
-
-    //this.openFilesTrackingDirectory = directory;
+    
+    // this.openFilesTrackingDirectory = directory;
     this.indexWriter = indexWriter;
     this.config = config;
     this.maxBufferedDocs = config.getMaxBufferedDocs();
     this.threadPool = config.getIndexerThreadPool();
     this.infoStream = indexWriter.getInfoStream();
   }
-
+  
   public int getMaxBufferedDocs() {
     return maxBufferedDocs;
   }
-
+  
   public void setMaxBufferedDocs(int max) {
     this.maxBufferedDocs = max;
   }
-
+  
   public double getRAMBufferSizeMB() {
     return maxBufferSizeMB;
   }
-
+  
   public void setRAMBufferSizeMB(double mb) {
     this.maxBufferSizeMB = mb;
   }
-
+  
   public int getMaxBufferedDeleteTerms() {
     return maxBufferedDeleteTerms;
   }
-
+  
   public void setMaxBufferedDeleteTerms(int max) {
     this.maxBufferedDeleteTerms = max;
   }
-
+  
   private final long nextSequenceID() {
     return sequenceID++;
   }
   
   boolean anyChanges() {
-    return numDocsInRAM.get() != 0 ||
-      deletesInRAM.hasDeletes();
+    return numDocsInRAM.get() != 0 || deletesInRAM.hasDeletes();
   }
-
+  
   DocumentsWriterPerThread newDocumentsWriterPerThread() {
-    DocumentsWriterPerThread perThread = new DocumentsWriterPerThread(openFilesTrackingDirectory, this, config
-        .getIndexingChain());
+    DocumentsWriterPerThread perThread = new DocumentsWriterPerThread(
+        openFilesTrackingDirectory, this, config.getIndexingChain());
     sequenceIDLock.lock();
     try {
       numDocumentsWriterPerThreads++;
@@ -130,12 +133,12 @@
       sequenceIDLock.unlock();
     }
   }
-
+  
   long addDocument(final Document doc, final Analyzer analyzer)
       throws CorruptIndexException, IOException {
     return updateDocument(null, doc, analyzer);
   }
-
+  
   private final static class UpdateResult {
     long sequenceID;
     boolean flushed;
@@ -146,42 +149,49 @@
     }
   }
   
-  long updateDocument(final Term delTerm, final Document doc, final Analyzer analyzer)
-      throws CorruptIndexException, IOException {
-
+  long updateDocument(final Term delTerm, final Document doc,
+      final Analyzer analyzer) throws CorruptIndexException, IOException {
+    
     UpdateResult result = threadPool.executePerThread(this, doc,
         new DocumentsWriterThreadPool.PerThreadTask<UpdateResult>() {
           @Override
-          public UpdateResult process(final DocumentsWriterPerThread perThread) throws IOException {
-            long perThreadRAMUsedBeforeAdd = perThread.numBytesUsed;
-            perThread.addDocument(doc, analyzer);
-
-            final long sequenceID;
-            sequenceIDLock.lock();
+          public UpdateResult process(final DocumentsWriterPerThread perThread)
+              throws IOException {
+            perThread.writeLock();
             try {
-              ensureOpen();
-              sequenceID = nextSequenceID();
-              if (delTerm != null) {
-                deletesInRAM.addDeleteTerm(delTerm, sequenceID, numDocumentsWriterPerThreads);
+              long perThreadRAMUsedBeforeAdd = perThread.numBytesUsed;
+              perThread.addDocument(doc, analyzer);
+            
+              final long sequenceID;
+              sequenceIDLock.lock();
+              try {
+                ensureOpen();
+                sequenceID = nextSequenceID();
+                if (delTerm != null) {
+                  deletesInRAM.addDeleteTerm(delTerm, sequenceID,
+                    numDocumentsWriterPerThreads);
+                }
+                perThread.commitDocument(sequenceID);
+                if (!minSequenceIDsPerThread.containsKey(perThread)) {
+                  minSequenceIDsPerThread.put(perThread, sequenceID);
+                }
+                numDocsInRAM.incrementAndGet();
+              } finally {
+                sequenceIDLock.unlock();
               }
-              perThread.commitDocument(sequenceID);
-              if (!minSequenceIDsPerThread.containsKey(perThread)) {
-                minSequenceIDsPerThread.put(perThread, sequenceID);
+            
+              UpdateResult result = new UpdateResult(sequenceID);
+              if (finishAddDocument(perThread, perThreadRAMUsedBeforeAdd)) {
+                result.flushed = true;
+                super.clearThreadBindings();
               }
-              numDocsInRAM.incrementAndGet();
+              return result;
             } finally {
-              sequenceIDLock.unlock();
+              perThread.writeUnlock();
             }
-
-            UpdateResult result = new UpdateResult(sequenceID);
-            if (finishAddDocument(perThread, perThreadRAMUsedBeforeAdd)) {
-              result.flushed = true;
-              super.clearThreadBindings();
-            }
-            return result;
           }
         });
-        
+    
     if (result == null) {
       return -1;
     }
@@ -191,7 +201,7 @@
     }
     return result.sequenceID;
   }
-
+  
   private final boolean finishAddDocument(DocumentsWriterPerThread perThread,
       long perThreadRAMUsedBeforeAdd) throws IOException {
     int numDocsPerThread = perThread.getNumDocsInRAM();
@@ -201,7 +211,7 @@
       while (!numDocsInRAM.compareAndSet(oldValue, oldValue - numDocsPerThread)) {
         oldValue = numDocsInRAM.get();
       }
-
+      
       sequenceIDLock.lock();
       try {
         minSequenceIDsPerThread.remove(perThread);
@@ -210,66 +220,70 @@
         sequenceIDLock.unlock();
       }
     }
-
+    
     long deltaRAM = perThread.numBytesUsed - perThreadRAMUsedBeforeAdd;
     long oldValue = ramUsed.get();
     while (!ramUsed.compareAndSet(oldValue, oldValue + deltaRAM)) {
       oldValue = ramUsed.get();
     }
-
+    
     return flushed;
   }
-
+  
   long bufferDeleteTerms(final Term[] terms) throws IOException {
     sequenceIDLock.lock();
     try {
       ensureOpen();
       final long sequenceID = nextSequenceID();
-      deletesInRAM.addDeleteTerms(terms, sequenceID, numDocumentsWriterPerThreads);
+      deletesInRAM.addDeleteTerms(terms, sequenceID,
+          numDocumentsWriterPerThreads);
       return sequenceID;
     } finally {
       sequenceIDLock.unlock();
     }
   }
-
+  
   long bufferDeleteTerm(final Term term) throws IOException {
     sequenceIDLock.lock();
     try {
       ensureOpen();
       final long sequenceID = nextSequenceID();
-      deletesInRAM.addDeleteTerm(term, sequenceID, numDocumentsWriterPerThreads);
+      deletesInRAM
+          .addDeleteTerm(term, sequenceID, numDocumentsWriterPerThreads);
       return sequenceID;
     } finally {
       sequenceIDLock.unlock();
     }
   }
-
+  
   long bufferDeleteQueries(final Query[] queries) throws IOException {
     sequenceIDLock.lock();
     try {
       ensureOpen();
       final long sequenceID = nextSequenceID();
       for (Query q : queries) {
-        deletesInRAM.addDeleteQuery(q, sequenceID, numDocumentsWriterPerThreads);
+        deletesInRAM
+            .addDeleteQuery(q, sequenceID, numDocumentsWriterPerThreads);
       }
       return sequenceID;
     } finally {
       sequenceIDLock.unlock();
     }
   }
-
+  
   long bufferDeleteQuery(final Query query) throws IOException {
     sequenceIDLock.lock();
     try {
       ensureOpen();
       final long sequenceID = nextSequenceID();
-      deletesInRAM.addDeleteQuery(query, sequenceID, numDocumentsWriterPerThreads);
+      deletesInRAM.addDeleteQuery(query, sequenceID,
+          numDocumentsWriterPerThreads);
       return sequenceID;
     } finally {
       sequenceIDLock.unlock();
     }
   }
-
+  
   private final void updateFlushedSequenceID() {
     long newFlushedID = Long.MAX_VALUE;
     for (long minSeqIDPerThread : minSequenceIDsPerThread.values()) {
@@ -277,86 +291,92 @@
         newFlushedID = minSeqIDPerThread;
       }
     }
-
+    
     this.flushedSequenceID = newFlushedID;
   }
-
-  final boolean flushAllThreads(final boolean flushDeletes)
-      throws IOException {
+  
+  final boolean flushAllThreads(final boolean flushDeletes) throws IOException {
     
-    return threadPool.executeAllThreads(this, new DocumentsWriterThreadPool.AllThreadsTask<Boolean>() {
-      @Override
-      public Boolean process(Iterator<DocumentsWriterPerThread> threadsIterator) throws IOException {
-        boolean anythingFlushed = false;
-        
-        while (threadsIterator.hasNext()) {
-          DocumentsWriterPerThread perThread = threadsIterator.next();
-          final int numDocs = perThread.getNumDocsInRAM();
-          
-          // Always flush docs if there are any
-          boolean flushDocs = numDocs > 0;
-          
-          String segment = perThread.getSegment();
-
-          // If we are flushing docs, segment must not be null:
-          assert segment != null || !flushDocs;
-    
-          if (flushDocs) {
-            SegmentInfo newSegment = perThread.flush();
-            newSegment.dir = indexWriter.getDirectory();
+    return threadPool.executeAllThreads(this,
+        new DocumentsWriterThreadPool.AllThreadsTask<Boolean>() {
+          @Override
+          public Boolean process(
+              Iterator<DocumentsWriterPerThread> threadsIterator)
+              throws IOException {
+            boolean anythingFlushed = false;
             
-            if (newSegment != null) {
-              anythingFlushed = true;
+            while (threadsIterator.hasNext()) {
+              DocumentsWriterPerThread perThread = threadsIterator.next();
+              final int numDocs = perThread.getNumDocsInRAM();
               
-              IndexWriter.setDiagnostics(newSegment, "flush");
-              finishFlushedSegment(newSegment, perThread);
+              // Always flush docs if there are any
+              boolean flushDocs = numDocs > 0;
+              
+              String segment = perThread.getSegment();
+              
+              // If we are flushing docs, segment must not be null:
+              assert segment != null || !flushDocs;
+              
+              if (flushDocs) {
+                SegmentInfo newSegment = perThread.flush();
+                newSegment.dir = indexWriter.getDirectory();
+                
+                if (newSegment != null) {
+                  anythingFlushed = true;
+                  
+                  IndexWriter.setDiagnostics(newSegment, "flush");
+                  finishFlushedSegment(newSegment, perThread);
+                }
+              }
             }
+            
+            if (anythingFlushed) {
+              clearThreadBindings();
+              
+              sequenceIDLock.lock();
+              try {
+                flushedSequenceID = sequenceID;
+              } finally {
+                sequenceIDLock.unlock();
+              }
+              numDocsInRAM.set(0);
+            }
+            
+            if (flushDeletes) {
+              deletesInRAM.clear();
+            }
+            
+            return anythingFlushed;
           }
-        }
-
-        if (anythingFlushed) {
-          clearThreadBindings();
-
-          sequenceIDLock.lock();
-          try {
-            flushedSequenceID = sequenceID;
-          } finally {
-            sequenceIDLock.unlock();
-          }
-          numDocsInRAM.set(0);
-        }
-        
-        if (flushDeletes) {
-          deletesInRAM.clear();
-        }
-
-
-        return anythingFlushed;
-      }
-    });
+        });
   }
-
+  
   /** Build compound file for the segment we just flushed */
-  void createCompoundFile(String compoundFileName, DocumentsWriterPerThread perThread) throws IOException {
-    CompoundFileWriter cfsWriter = new CompoundFileWriter(openFilesTrackingDirectory, compoundFileName);
-    for(String fileName : perThread.flushState.flushedFiles) {
+  void createCompoundFile(String compoundFileName,
+      DocumentsWriterPerThread perThread) throws IOException {
+    CompoundFileWriter cfsWriter = new CompoundFileWriter(
+        openFilesTrackingDirectory, compoundFileName);
+    for (String fileName : perThread.flushState.flushedFiles) {
       cfsWriter.addFile(fileName);
     }
-      
+    
     // Perform the merge
     cfsWriter.close();
   }
-
-  void finishFlushedSegment(SegmentInfo newSegment, DocumentsWriterPerThread perThread) throws IOException {
+  
+  void finishFlushedSegment(SegmentInfo newSegment,
+      DocumentsWriterPerThread perThread) throws IOException {
     SegmentReader reader = indexWriter.readerPool.get(newSegment, false);
     try {
-      applyDeletes(reader, newSegment.getMinSequenceID(), newSegment.getMaxSequenceID(), perThread.sequenceIDs);
+      applyDeletes(reader, newSegment.getMinSequenceID(),
+          newSegment.getMaxSequenceID(), perThread.sequenceIDs);
     } finally {
       indexWriter.readerPool.release(reader);
     }
     
     if (indexWriter.useCompoundFile(newSegment)) {
-      String compoundFileName = IndexFileNames.segmentFileName(newSegment.name, "", IndexFileNames.COMPOUND_FILE_EXTENSION);
+      String compoundFileName = IndexFileNames.segmentFileName(newSegment.name,
+          "", IndexFileNames.COMPOUND_FILE_EXTENSION);
       message("creating compound file " + compoundFileName);
       // Now build compound file
       boolean success = false;
@@ -366,30 +386,32 @@
       } finally {
         if (!success) {
           if (infoStream != null) {
-            message("hit exception " +
-            		"reating compound file for newly flushed segment " + newSegment.name);
+            message("hit exception "
+                + "reating compound file for newly flushed segment "
+                + newSegment.name);
           }
-          indexWriter.getIndexFileDeleter().deleteFile(IndexFileNames.segmentFileName(newSegment.name, "", 
-              IndexFileNames.COMPOUND_FILE_EXTENSION));
+          indexWriter.getIndexFileDeleter().deleteFile(
+              IndexFileNames.segmentFileName(newSegment.name, "",
+                  IndexFileNames.COMPOUND_FILE_EXTENSION));
           for (String file : perThread.flushState.flushedFiles) {
             indexWriter.getIndexFileDeleter().deleteFile(file);
           }
-
+          
         }
       }
       
       for (String file : perThread.flushState.flushedFiles) {
         indexWriter.getIndexFileDeleter().deleteFile(file);
       }
-
+      
       newSegment.setUseCompoundFile(true);
       
-      synchronized(openFiles) {
+      synchronized (openFiles) {
         openFiles.remove(compoundFileName);
       }
     }
     
-    synchronized(openFiles) {
+    synchronized (openFiles) {
       openFiles.removeAll(perThread.flushState.flushedFiles);
     }
     
@@ -400,11 +422,11 @@
   void pauseAllThreads() {
     threadPool.pauseAllThreads();
   }
-
+  
   void resumeAllThreads() {
     threadPool.resumeAllThreads();
   }
-
+  
   void close() {
     sequenceIDLock.lock();
     try {
@@ -413,37 +435,38 @@
       sequenceIDLock.unlock();
     }
   }
-
+  
   private void ensureOpen() throws AlreadyClosedException {
     if (closed) {
       throw new AlreadyClosedException("this IndexWriter is closed");
     }
   }
-
-  private final boolean maybeFlushPerThread(DocumentsWriterPerThread perThread) throws IOException {
+  
+  private final boolean maybeFlushPerThread(DocumentsWriterPerThread perThread)
+      throws IOException {
     if (perThread.getNumDocsInRAM() == maxBufferedDocs) {
       flushSegment(perThread);
       assert perThread.getNumDocsInRAM() == 0;
       
       return true;
     }
-
+    
     return false;
   }
-
+  
   private boolean flushSegment(DocumentsWriterPerThread perThread)
       throws IOException {
     if (perThread.getNumDocsInRAM() == 0) {
       return false;
     }
-
+    
     SegmentInfo newSegment = perThread.flush();
     newSegment.dir = indexWriter.getDirectory();
     
     finishFlushedSegment(newSegment, perThread);
     return true;
   }
-
+  
   void abort() throws IOException {
     threadPool.abort(new DocumentsWriterThreadPool.AbortTask() {
       
@@ -454,38 +477,40 @@
         } catch (Throwable t) {
           abortedFiles = null;
         }
-    
+        
         deletesInRAM.clear();
         // nocommit
-    //        deletesFlushed.clear();
-    
+        // deletesFlushed.clear();
+        
         openFiles.clear();
         deletesInRAM.clear();
       }
     });
   }
-
+  
   final Set<String> openFiles = new HashSet<String>();
-  private Collection<String> abortedFiles; // List of files that were written before last abort()
+  private Collection<String> abortedFiles; // List of files that were written
+                                           // before last abort()
+  
   /*
-   * Returns Collection of files in use by this instance,
-   * including any flushed segments.
+   * Returns Collection of files in use by this instance, including any flushed
+   * segments.
    */
   @SuppressWarnings("unchecked")
   private Collection<String> openFiles() {
-    synchronized(openFiles) {
+    synchronized (openFiles) {
       return (Set<String>) ((HashSet<String>) openFiles).clone();
     }
   }
-
+  
   void addOpenFile(String file) {
-    synchronized(openFiles) {
+    synchronized (openFiles) {
       openFiles.add(file);
     }
   }
-
+  
   void removeOpenFile(String file) {
-    synchronized(openFiles) {
+    synchronized (openFiles) {
       openFiles.remove(file);
     }
   }
@@ -493,76 +518,108 @@
   Collection<String> abortedFiles() {
     return abortedFiles;
   }
-
+  
   boolean hasDeletes() {
     return deletesInRAM.hasDeletes();
   }
-
+  
   // nocommit
   int getNumDocsInRAM() {
     return numDocsInRAM.get();
   }
-
+  
   // nocommit
   long getRAMUsed() {
     return ramUsed.get();
   }
-
+  
   // nocommit
   // long getRAMUsed() {
   // return numBytesUsed + deletesInRAM.bytesUsed + deletesFlushed.bytesUsed;
   // }
-
+  
   boolean applyDeletes(SegmentInfos infos) throws IOException {
-    if (!hasDeletes())
-      return false;
-
+    if (!hasDeletes()) return false;
+    
     final long t0 = System.currentTimeMillis();
-
+    
     if (infoStream != null) {
-      message("apply " + deletesInRAM.getNumDeletes() + " buffered deletes on " +
-              +infos.size() + " segments.");
+      message("apply " + deletesInRAM.getNumDeletes() + " buffered deletes on "
+          + +infos.size() + " segments.");
     }
-
+    
     final int infosEnd = infos.size();
-
+    
     boolean any = false;
     for (int i = 0; i < infosEnd; i++) {
-
+      
       // Make sure we never attempt to apply deletes to
       // segment in external dir
       assert infos.info(i).dir == indexWriter.getDirectory();
-
+      
       SegmentInfo si = infos.info(i);
       // we have to synchronize here, because we need a write lock on
       // the segment in order to apply deletes
       synchronized (indexWriter) {
         SegmentReader reader = indexWriter.readerPool.get(si, false);
         try {
-          any |= applyDeletes(reader, si.getMinSequenceID(), si.getMaxSequenceID(), null);
+          any |= applyDeletes(reader, si.getMinSequenceID(),
+              si.getMaxSequenceID(), null);
         } finally {
           indexWriter.readerPool.release(reader);
         }
       }
     }
-
+    
     if (infoStream != null) {
-      message("apply deletes took " + (System.currentTimeMillis() - t0) + " msec");
+      message("apply deletes took " + (System.currentTimeMillis() - t0)
+          + " msec");
     }
-
+    
     return any;
   }
-
+  
+  /**
+   * Flushes each thread's in-RAM terms into the shared {@code ramBufferMap}
+   * and returns one read-only {@link MiniRAMReader} per thread.
+   * NOTE(review): the HashMap copy handed to each reader is shallow and the
+   * per-field buffers are shared across threads, so a later iteration's flush
+   * may mutate buffers an earlier reader already references -- verify.
+   */
+  synchronized MiniRAMReader[] getRAMReaders() {
+    DocumentsWriterPerThread[] dwpts = threadPool.getDocumentsWriterPerThreads();
+    MiniRAMReader[] readers = new MiniRAMReader[dwpts.length];
+    for (int x = 0; x < dwpts.length; x++) {
+      dwpts[x].writeLock(); // block writes while flushing this thread's terms
+      try {
+        Map<String,TermsHashPerField> thpfMap = dwpts[x].getTermsHashPerFields();
+        for (Map.Entry<String,TermsHashPerField> entry : thpfMap.entrySet()) {
+          RAMBufferPerField ramBuffer = ramBufferMap.get(entry.getKey());
+          if (ramBuffer == null) {
+            ramBuffer = new RAMBufferPerField(entry.getValue());
+            ramBufferMap.put(entry.getKey(), ramBuffer);
+          }
+          entry.getValue().flush(ramBuffer);
+        }
+        readers[x] = new MiniRAMReader(dwpts[x].getNumDocsInRAM(), new HashMap<String,RAMBufferPerField>(ramBufferMap));
+      } finally {
+        dwpts[x].writeUnlock();
+      }
+    }
+    return readers;
+  }
+  
   // Apply buffered delete terms, queries and docIDs to the
   // provided reader
-  final boolean applyDeletes(IndexReader reader, long minSequenceID, long maxSequenceID, long[] sequenceIDs)
-      throws CorruptIndexException, IOException {
-
+  final boolean applyDeletes(IndexReader reader, long minSequenceID,
+      long maxSequenceID, long[] sequenceIDs) throws CorruptIndexException,
+      IOException {
+    
     assert sequenceIDs == null || sequenceIDs.length >= reader.maxDoc() : "reader.maxDoc="
         + reader.maxDoc() + ",sequenceIDs.length=" + sequenceIDs.length;
-
+    
     boolean any = false;
-
+    
     // first: delete the documents that had non-aborting exceptions
     if (sequenceIDs != null) {
       for (int i = 0; i < reader.maxDoc(); i++) {
@@ -575,11 +632,12 @@
     
     if (deletesInRAM.hasDeletes()) {
       IndexSearcher searcher = new IndexSearcher(reader);
-
-      SortedMap<Long, BufferedDeletesInRAM.Delete> deletes = deletesInRAM.deletes.getReadCopy();
       
-      SortedMap<Term, Long> deleteTerms = new TreeMap<Term, Long>();
-      for (Entry<Long, BufferedDeletesInRAM.Delete> entry : deletes.entrySet()) {
+      SortedMap<Long,BufferedDeletesInRAM.Delete> deletes = deletesInRAM.deletes
+          .getReadCopy();
+      
+      SortedMap<Term,Long> deleteTerms = new TreeMap<Term,Long>();
+      for (Entry<Long,BufferedDeletesInRAM.Delete> entry : deletes.entrySet()) {
         if (minSequenceID < entry.getKey()) {
           BufferedDeletesInRAM.Delete delete = entry.getValue();
           if (delete instanceof BufferedDeletesInRAM.DeleteTerm) {
@@ -602,7 +660,7 @@
                 if (doc == DocsEnum.NO_MORE_DOCS) {
                   break;
                 }
-                if ( (sequenceIDs != null && sequenceIDs[doc] < entry.getKey())
+                if ((sequenceIDs != null && sequenceIDs[doc] < entry.getKey())
                     || (sequenceIDs == null && maxSequenceID < entry.getKey())) {
                   reader.deleteDocument(doc);
                   any = true;
@@ -612,7 +670,7 @@
           }
         }
       }
-
+      
       // Delete by term
       if (deleteTerms.size() > 0) {
         Fields fields = reader.fields();
@@ -620,20 +678,21 @@
           // This reader has no postings
           return false;
         }
-
+        
         TermsEnum termsEnum = null;
-
+        
         String currentField = null;
         BytesRef termRef = new BytesRef();
         DocsEnum docs = null;
-
-        for (Entry<Term, Long> entry : deleteTerms.entrySet()) {
+        
+        for (Entry<Term,Long> entry : deleteTerms.entrySet()) {
           Term term = entry.getKey();
           // Since we visit terms sorted, we gain performance
           // by re-using the same TermsEnum and seeking only
           // forwards
           if (term.field() != currentField) {
-            assert currentField == null || currentField.compareTo(term.field()) < 0;
+            assert currentField == null
+                || currentField.compareTo(term.field()) < 0;
             currentField = term.field();
             Terms terms = fields.terms(currentField);
             if (terms != null) {
@@ -642,27 +701,28 @@
               termsEnum = null;
             }
           }
-
+          
           if (termsEnum == null) {
             continue;
           }
           // assert checkDeleteTerm(term);
-
+          
           termRef.copy(term.text());
-
+          
           if (termsEnum.seek(termRef, false) == TermsEnum.SeekStatus.FOUND) {
             DocsEnum docsEnum = termsEnum.docs(reader.getDeletedDocs(), docs);
-
+            
             if (docsEnum != null) {
               docs = docsEnum;
               // int limit = entry.getValue().getNum();
               while (true) {
                 final int doc = docs.nextDoc();
-                // if (docID == DocsEnum.NO_MORE_DOCS || docIDStart+docID >= limit) {
+                // if (docID == DocsEnum.NO_MORE_DOCS || docIDStart+docID >=
+                // limit) {
                 if (doc == DocsEnum.NO_MORE_DOCS) {
                   break;
                 }
-                if ( (sequenceIDs != null && sequenceIDs[doc] < entry.getValue())
+                if ((sequenceIDs != null && sequenceIDs[doc] < entry.getValue())
                     || (sequenceIDs == null && maxSequenceID < entry.getValue())) {
                   reader.deleteDocument(doc);
                   any = true;
@@ -673,10 +733,10 @@
         }
       }
     }
-
+    
     return any;
   }
-
+  
   void message(String message) {
     indexWriter.message("DW: " + message);
   }
Index: lucene/src/java/org/apache/lucene/index/IntBlockPool.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/IntBlockPool.java	(revision 995905)
+++ lucene/src/java/org/apache/lucene/index/IntBlockPool.java	(working copy)
@@ -19,7 +19,8 @@
 
 final class IntBlockPool {
 
-  public int[][] buffers = new int[10][];
+  private int[][] buffers = new int[10][];
+  private boolean[] copyOnWrite = new boolean[10]; // sized in sync with buffers; was null until first snapshot -> NPE in nextBuffer()
 
   int bufferUpto = -1;                        // Which buffer we are upto
   public int intUpto = DocumentsWriterRAMAllocator.INT_BLOCK_SIZE;             // Where we are in head buffer
@@ -32,6 +33,29 @@
   public IntBlockPool(DocumentsWriterPerThread docWriter) {
     this.docWriter = docWriter;
   }
+  
+  /** Returns buffer {@code i}; synchronized against concurrent snapshots. */
+  public synchronized int[] getBuffer(int i) {
+    return buffers[i];
+  }
+  
+  /**
+   * Fills {@code read} (allocating it if null) with a shallow snapshot of the
+   * current buffers; marks every slot copy-on-write. NOTE(review): nextBuffer()
+   * mutates buffers without this lock -- verify callers serialize with snapshots.
+   */
+  public synchronized IntsReadOnly getIntsReadOnly(IntsReadOnly read) {
+    if (read == null) {
+      read = new IntsReadOnly();
+    }
+    if (read.buffers == null || buffers.length != read.buffers.length) {
+      read.buffers = new int[buffers.length][];
+    }
+    System.arraycopy(buffers, 0, read.buffers, 0, buffers.length);
+    copyOnWrite = new boolean[buffers.length];
+    for (int x = 0; x < buffers.length; x++) copyOnWrite[x] = true;
+    return read;
+  }
 
   public void reset() {
     if (bufferUpto != -1) {
@@ -52,6 +76,11 @@
       int[][] newBuffers = new int[(int) (buffers.length*1.5)][];
       System.arraycopy(buffers, 0, newBuffers, 0, buffers.length);
       buffers = newBuffers;
+      
+      // enlarge the copyOnWrite array
+      boolean[] newCopyOnWrite = new boolean[newBuffers.length];
+      System.arraycopy(copyOnWrite, 0, newCopyOnWrite, 0, copyOnWrite.length);
+      copyOnWrite = newCopyOnWrite;
     }
     buffer = buffers[1+bufferUpto] = docWriter.ramAllocator.getIntBlock();
     bufferUpto++;
Index: lucene/src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/IndexWriter.java	(revision 995905)
+++ lucene/src/java/org/apache/lucene/index/IndexWriter.java	(working copy)
@@ -427,6 +427,12 @@
     }
   }
   
+  /** Returns read-only in-RAM readers, one per indexing thread; delegates
+   *  to {@code DocumentsWriter#getRAMReaders()}. */
+  public MiniRAMReader[] getRAMReaders() {
+    return docWriter.getRAMReaders();
+  }
+  
   // used only by asserts
   public synchronized boolean infoIsLive(SegmentInfo info) {
     int idx = segmentInfos.indexOf(info);
Index: lucene/src/java/org/apache/lucene/index/IntsReadOnly.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/IntsReadOnly.java	(revision 0)
+++ lucene/src/java/org/apache/lucene/index/IntsReadOnly.java	(revision 0)
@@ -0,0 +1,27 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Shallow, read-only snapshot of an {@code IntBlockPool}'s buffers, filled by
+ * {@code IntBlockPool#getIntsReadOnly}.
+ */
+public class IntsReadOnly {
+  // aliases the pool's int blocks at snapshot time; do not mutate
+  public int[][] buffers;
+}
Index: lucene/src/java/org/apache/lucene/index/ArrayTermsDictionary.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/ArrayTermsDictionary.java	(revision 0)
+++ lucene/src/java/org/apache/lucene/index/ArrayTermsDictionary.java	(revision 0)
@@ -0,0 +1,167 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to You under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+import java.io.IOException;
+import java.util.Comparator;
+
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+
+/**
+ * Terms dictionary over a {@code RAMBufferPerField}; terms are resolved by
+ * binary search over the buffer's sorted term ids.
+ */
+public class ArrayTermsDictionary {
+  int[] sortedTermIDs;
+  RAMBufferPerField ramBuffer;
+  int numTerms; // number of valid entries in sortedTermIDs
+  
+  public ArrayTermsDictionary(RAMBufferPerField ramBuffer) {
+    sortedTermIDs = ramBuffer.getSortedTermIDs();
+    this.ramBuffer = ramBuffer;
+    this.numTerms = ramBuffer.numTerms;
+  }
+  
+  public Terms terms(int maxDocID) {
+    return new ArrayTerms(ramBuffer, maxDocID);
+  }
+  
+  public class ArrayTerms extends RAMTerms {
+    public ArrayTerms(RAMBufferPerField ramBuffer, int maxDocID) {
+      super(ramBuffer, maxDocID);
+    }
+    
+    public TermsEnum iterator() {
+      return new RAMTermsEnum(maxDocID);
+    }
+    
+    /** Returns the term's ord, or -1 if the term is absent. */
+    public int getTermID(BytesRef term) {
+      int idx = seekTerm(term, new BytesRef());
+      return idx >= 0 ? idx : -1;
+    }
+  }
+  
+  BytesRef getTerm(int id, BytesRef term) {
+    ramBuffer.setBytesRef(id, term);
+    return term;
+  }
+  
+  public RAMTermsEnum termsEnum(int maxDocID) {
+    return new RAMTermsEnum(maxDocID);
+  }
+  
+  public class RAMTermsEnum extends TermsEnum {
+    BytesRef dummy = new BytesRef();
+    int index = 0; // current ord into sortedTermIDs
+    final int maxDocID;
+    
+    public RAMTermsEnum(int maxDocID) {
+      this.maxDocID = maxDocID;
+    }
+    
+    public DocsEnum docs(Bits skipDocs, DocsEnum reuse) throws IOException {
+      return ramBuffer.getDocsEnum(index, maxDocID, skipDocs);
+    }
+    
+    public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, DocsAndPositionsEnum reuse) throws IOException {
+      return ramBuffer.getDocsEnum(index, maxDocID, skipDocs);
+    }
+    
+    public Comparator<BytesRef> getComparator() throws IOException {
+      return ramBuffer.termComp;
+    }
+    
+    public SeekStatus seek(long ord) throws IOException {
+      // was (ord >= index): compared against the enum's position, not the term count
+      if (ord >= numTerms) {
+        return SeekStatus.END;
+      }
+      index = (int) ord;
+      return SeekStatus.FOUND;
+    }
+    
+    public SeekStatus seek(BytesRef seekTerm, boolean useCache) throws IOException {
+      int idx = seekTerm(seekTerm, dummy);
+      if (idx >= 0) {
+        index = idx;
+        return SeekStatus.FOUND;
+      }
+      // binarySearch returns -(insertionPoint + 1) on a miss; was idx * -1
+      int insertionPoint = -idx - 1;
+      if (insertionPoint >= numTerms) {
+        return SeekStatus.END;
+      }
+      index = insertionPoint;
+      return SeekStatus.NOT_FOUND;
+    }
+    
+    public int docFreq() {
+      return ramBuffer.postingsArray.termFreqs[index];
+    }
+    
+    public long ord() throws IOException {
+      return index;
+    }
+    
+    public BytesRef next() throws IOException {
+      // iterate only the numTerms valid entries; was sortedTermIDs.length
+      if (index < numTerms) {
+        BytesRef term = term();
+        index++;
+        return term;
+      } else {
+        return null;
+      }
+    }
+    
+    public BytesRef term() throws IOException {
+      int termID = sortedTermIDs[index];
+      return getTerm(termID, new BytesRef());
+    }
+  }
+  
+  /** Returns the hit ord >= 0, or -(insertionPoint + 1) when absent. */
+  int seekTerm(BytesRef term, BytesRef dummy) {
+    // was sortedTermIDs.length - 1: entries past numTerms are unused capacity
+    return binarySearch(term, dummy, 0, numTerms - 1);
+  }
+  
+  int binarySearch(BytesRef key, BytesRef dummy, int low, int high) {
+    while (low <= high) {
+      int mid = (low + high) >>> 1;
+      int midTermId = sortedTermIDs[mid];
+      BytesRef midVal = ramBuffer.setBytesRef(midTermId, dummy);
+      int cmp;
+      if (midVal != null) {
+        cmp = this.ramBuffer.termComp.compare(midVal, key);
+      } else {
+        cmp = -1; // treat a missing term as sorting low
+      }
+      if (cmp < 0) {
+        low = mid + 1;
+      } else if (cmp > 0) {
+        high = mid - 1;
+      } else {
+        return mid;
+      }
+    }
+    return -(low + 1);
+  }
+}
Index: lucene/src/java/org/apache/lucene/index/DocInverter.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/DocInverter.java	(revision 995905)
+++ lucene/src/java/org/apache/lucene/index/DocInverter.java	(working copy)
@@ -71,6 +71,16 @@
     consumer.setFieldInfos(fieldInfos);
     endConsumer.setFieldInfos(fieldInfos);
   }
+  /** Maps field name -> its TermsHashPerField for the fields being flushed. */
+  Map<String,TermsHashPerField> getTermsHashPerFields(Map<FieldInfo, DocFieldConsumerPerField> fieldsToFlush) {
+    Map<String,TermsHashPerField> map = new HashMap<String,TermsHashPerField>();
+    for (Map.Entry<FieldInfo, DocFieldConsumerPerField> fieldToFlush : fieldsToFlush.entrySet()) {
+      DocInverterPerField perField = (DocInverterPerField) fieldToFlush.getValue();
+      TermsHashPerField thpf = (TermsHashPerField)perField.consumer;
+      map.put(fieldToFlush.getKey().name, thpf);
+    }
+    return map;
+  }
 
   @Override
   void flush(Map<FieldInfo, DocFieldConsumerPerField> fieldsToFlush, SegmentWriteState state) throws IOException {
Index: lucene/src/java/org/apache/lucene/index/IndexWriterConfig.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/IndexWriterConfig.java	(revision 995905)
+++ lucene/src/java/org/apache/lucene/index/IndexWriterConfig.java	(working copy)
@@ -131,6 +131,7 @@
   private boolean readerPooling;
   private DocumentsWriterThreadPool indexerThreadPool;
   private int readerTermsIndexDivisor;
+  private RAMTermsPolicy ramTermsPolicy = new RAMTermsPolicy();
   
   // required for clone
   private Version matchVersion;
@@ -610,6 +611,12 @@
     return readerTermsIndexDivisor;
   }
   
+  /** Returns the policy controlling in-RAM terms readers.
+   *  NOTE(review): clone() must deep-copy this field if policies are mutable -- verify. */
+  public RAMTermsPolicy getRAMTermsPolicy() {
+    return ramTermsPolicy;
+  }
+  
   @Override
   public String toString() {
     StringBuilder sb = new StringBuilder();
Index: lucene/src/java/org/apache/lucene/index/RAMTerms.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/RAMTerms.java	(revision 0)
+++ lucene/src/java/org/apache/lucene/index/RAMTerms.java	(revision 0)
@@ -0,0 +1,77 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Comparator;
+
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+
+/**
+ * Base class for {@link Terms} over an in-RAM buffer; subclasses supply the
+ * term -> ord lookup via {@link #getTermID}.
+ */
+public abstract class RAMTerms extends Terms {
+  final Comparator<BytesRef> comp;
+  final RAMBufferPerField ramBuffer;
+  final int maxDocID; // exclusive upper bound on doc ids visible to enums
+  
+  public RAMTerms(RAMBufferPerField ramBuffer, int maxDocID) {
+    this.ramBuffer = ramBuffer;
+    this.comp = ramBuffer.termComp;
+    this.maxDocID = maxDocID;
+  }
+  
+  /** Returns the term's ord, or a negative value if the term is absent. */
+  abstract int getTermID(BytesRef term);
+  
+  @Override
+  public Comparator<BytesRef> getComparator() throws IOException {
+    return comp;
+  }
+
+  @Override
+  public int docFreq(BytesRef text) throws IOException {
+    int termID = getTermID(text);
+    if (termID < 0) {
+      return 0; // was an ArrayIndexOutOfBoundsException for absent terms
+    }
+    return ramBuffer.postingsArray.docFreqs[termID];
+  }
+
+  @Override
+  public DocsEnum docs(Bits skipDocs, BytesRef term, DocsEnum reuse)
+      throws IOException {
+    int termID = getTermID(term);
+    if (termID < 0) {
+      return null; // absent term
+    }
+    return ramBuffer.getDocsEnum(termID, maxDocID, skipDocs);
+  }
+
+  @Override
+  public DocsAndPositionsEnum docsAndPositions(Bits skipDocs, BytesRef term,
+      DocsAndPositionsEnum reuse) throws IOException {
+    int termID = getTermID(term);
+    if (termID < 0) {
+      return null; // absent term
+    }
+    return ramBuffer.getDocsEnum(termID, maxDocID, skipDocs);
+  }
+}
Index: lucene/src/java/org/apache/lucene/index/ConcurrentTermsDictionaryPerField.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/ConcurrentTermsDictionaryPerField.java	(revision 0)
+++ lucene/src/java/org/apache/lucene/index/ConcurrentTermsDictionaryPerField.java	(revision 0)
@@ -0,0 +1,161 @@
+package org.apache.lucene.index;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.concurrent.ConcurrentNavigableMap;
+import java.util.concurrent.ConcurrentSkipListMap;
+
+import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
+
+/**
+ * Terms dictionary that keeps term -> ord in a ConcurrentSkipListMap so it can
+ * be incrementally updated (via {@link #update}) while readers iterate.
+ */
+public class ConcurrentTermsDictionaryPerField {
+  private int maxTermID = -1; // highest term ord already added to termMap
+  ConcurrentSkipListMap<BytesRef,Integer> termMap;
+  public BytesRef field;
+  RAMBufferPerField ramBuffer;
+  
+  public ConcurrentTermsDictionaryPerField(RAMBufferPerField ramBuffer) {
+    this.ramBuffer = ramBuffer;
+    field = ramBuffer.utf8;
+    termMap = new ConcurrentSkipListMap<BytesRef,Integer>(ramBuffer.termComp);
+    update(ramBuffer);
+  }
+  
+  public Terms terms(int maxDocID) {
+    return new ConcurrentTerms(ramBuffer, maxDocID);
+  }
+  
+  private class ConcurrentTermsEnum extends TermsEnum {
+    ConcurrentNavigableMap<BytesRef,Integer> subMap;
+    Iterator<Map.Entry<BytesRef,Integer>> iterator;
+    int termId = -1;
+    BytesRef term;
+    int maxDocID;
+    
+    public ConcurrentTermsEnum(int maxDocID) {
+      this.maxDocID = maxDocID;
+      this.subMap = termMap;
+      iterator = subMap.entrySet().iterator();
+    }
+    
+    public Comparator<BytesRef> getComparator() {
+      return ramBuffer.termComp;
+    }
+    
+    public long ord() {
+      return termId;
+    }
+    
+    public int docFreq() {
+      return ramBuffer.postingsArray.termFreqs[termId];
+      // nocommit: not sure whether termFreqs or docFreqs is correct here
+    }
+    
+    public DocsEnum docs(Bits skipDocs, DocsEnum reuse) throws IOException {
+      return ramBuffer.getDocsEnum(termId, maxDocID, skipDocs);
+    }
+    
+    public DocsAndPositionsEnum docsAndPositions(Bits skipDocs,
+        DocsAndPositionsEnum reuse) throws IOException {
+      return ramBuffer.getDocsEnum(termId, maxDocID, skipDocs);
+    }
+    
+    public SeekStatus seek(BytesRef start, boolean useCache) throws IOException {
+      subMap = (start != null) ? termMap.tailMap(start, true) : termMap;
+      iterator = subMap.entrySet().iterator();
+      // was: firstEntry()/firstKey() dereferenced before the empty check -> NPE
+      Map.Entry<BytesRef,Integer> first = subMap.firstEntry();
+      if (first == null) {
+        return SeekStatus.END;
+      }
+      termId = first.getValue();
+      term = first.getKey(); // keep term() consistent with the seek target
+      if (start != null && ramBuffer.termComp.compare(term, start) == 0) {
+        return SeekStatus.FOUND;
+      }
+      return SeekStatus.NOT_FOUND;
+    }
+    
+    public SeekStatus seek(long ord) throws IOException {
+      if (ord > maxTermID) {
+        return SeekStatus.END; // was unconditionally FOUND
+      }
+      termId = (int)ord;
+      return SeekStatus.FOUND;
+    }
+    
+    public BytesRef next() throws IOException {
+      if (iterator.hasNext()) {
+        Map.Entry<BytesRef,Integer> entry = iterator.next();
+        termId = entry.getValue();
+        term = entry.getKey();
+        return term;
+      } else {
+        return null;
+      }
+    }
+    
+    public BytesRef term() throws IOException {
+      return term;
+    }
+  }
+  
+  private class ConcurrentTerms extends RAMTerms {
+    public ConcurrentTerms(RAMBufferPerField ramBuffer, int maxDocID) {
+      super(ramBuffer, maxDocID);
+    }
+    
+    @Override
+    int getTermID(BytesRef term) {
+      // was: auto-unboxed termMap.get(term) -> NPE for absent terms
+      Integer id = termMap.get(term);
+      return id == null ? -1 : id.intValue();
+    }
+    
+    @Override
+    public TermsEnum iterator() throws IOException {
+      return new ConcurrentTermsEnum(maxDocID);
+    }
+    
+    @Override
+    public long getUniqueTermCount() throws IOException {
+      return termMap.size();
+    }
+  }
+  
+  // this should be called on demand, ie, when terms enum is required
+  public void update(RAMBufferPerField ramBuffer) {
+    // add term ids (maxTermID, numTerms); the skip list keeps them sorted
+    int start = maxTermID + 1;
+    for (int x = start; x < ramBuffer.numTerms; x++) {
+      BytesRef term = new BytesRef();
+      ramBuffer.setBytesRef(x, term);
+      termMap.put(term, x);
+    }
+    // set the new maxTermID
+    maxTermID = ramBuffer.numTerms - 1;
+  }
+}
