diff --git a/lucene/core/src/java/org/apache/lucene/codecs/TempPostingsReaderBase.java b/lucene/core/src/java/org/apache/lucene/codecs/TempPostingsReaderBase.java
index 690b744..1801877 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/TempPostingsReaderBase.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/TempPostingsReaderBase.java
@@ -24,6 +24,7 @@ import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.ByteArrayDataInput;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.codecs.temp.TempTermState;
 
@@ -52,11 +53,11 @@ public abstract class TempPostingsReaderBase implements Closeable {
    *  dictionary {@link IndexInput}. */
   public abstract void init(IndexInput termsIn) throws IOException;
 
-  /** Return a newly created empty TermState */
-  public abstract TempTermState newTermState() throws IOException;
+  /** Return a newly created empty TermMetaData */
+  public abstract TermMetaData newTermMetaData() throws IOException;
 
   /** Actually decode metadata for next term */
-  public abstract void nextTerm(FieldInfo fieldInfo, TempTermState state) throws IOException;
+  public abstract void nextTerm(FieldInfo fieldInfo, TempTermState state, ByteArrayDataInput bytesReader) throws IOException;
 
   /** Must fully consume state, since after this call that
    *  TermState may be reused. */
@@ -69,9 +70,4 @@ public abstract class TempPostingsReaderBase implements Closeable {
 
   @Override
   public abstract void close() throws IOException;
-
-  /** Reads data for all terms in the next block; this
-   *  method should merely load the byte[] blob but not
-   *  decode, which is done in {@link #nextTerm}. */
-  public abstract void readTermsBlock(IndexInput termsIn, FieldInfo fieldInfo, TempTermState termState) throws IOException;
 }
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/TempPostingsWriterBase.java b/lucene/core/src/java/org/apache/lucene/codecs/TempPostingsWriterBase.java
index ef41ea3..d691d6f 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/TempPostingsWriterBase.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/TempPostingsWriterBase.java
@@ -22,6 +22,7 @@ import java.io.Closeable;
 
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.codecs.temp.TempTermState;
 
 /**
  * Extension of {@link PostingsConsumer} to support pluggable term dictionaries.
@@ -50,20 +51,17 @@ public abstract class TempPostingsWriterBase extends PostingsConsumer implements
    *  the provided {@code termsOut}. */
   public abstract void start(IndexOutput termsOut) throws IOException;
 
+  /** Return a newly created empty TermMetaData */
+  public abstract TermMetaData newTermMetaData() throws IOException;
+
   /** Start a new term.  Note that a matching call to {@link
-   *  #finishTerm(TermStats)} is done, only if the term has at least one
+   *  #finishTerm(TempTermState)} is done, only if the term has at least one
    *  document. */
   public abstract void startTerm() throws IOException;
 
-  /** Flush count terms starting at start "backwards", as a
-   *  block. start is a negative offset from the end of the
-   *  terms stack, ie bigger start means further back in
-   *  the stack. */
-  public abstract void flushTermsBlock(int start, int count) throws IOException;
-
   /** Finishes the current term.  The provided {@link
-   *  TermStats} contains the term's summary statistics. */
-  public abstract void finishTerm(TermStats stats) throws IOException;
+   *  TempTermState} contains the term's summary statistics. */
+  public abstract void finishTerm(TempTermState state) throws IOException;
 
   /** Called when the writing switches to another field. */
   public abstract void setField(FieldInfo fieldInfo);
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/TermMetaData.java b/lucene/core/src/java/org/apache/lucene/codecs/TermMetaData.java
new file mode 100644
index 0000000..74e8d94
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/codecs/TermMetaData.java
@@ -0,0 +1,55 @@
+package org.apache.lucene.codecs;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+
+import org.apache.lucene.store.DataInput;
+import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.codecs.temp.TempTermState;
+
+public abstract class TermMetaData implements Cloneable {
+
+  /* The no-arg constructor means the instance will always compare as 'less than' any other instance */
+  public TermMetaData() {
+  }
+
+  public TermMetaData clone() {
+    try {
+      return (TermMetaData)super.clone();
+    } catch (CloneNotSupportedException cnse) {
+      throw new RuntimeException(cnse);
+    }
+  }
+  public abstract void copyFrom(TermMetaData other);
+
+  /* return (this - smaller), if possible */
+  public abstract TermMetaData subtract(TermMetaData inc);
+
+  /* return (this + inc), if possible */
+  public abstract TermMetaData add(TermMetaData inc);
+
+  public abstract void write(DataOutput out, FieldInfo info, TempTermState state) throws IOException;
+
+  public abstract void read(DataInput out, FieldInfo info, TempTermState state) throws IOException;
+
+  public String toString() {
+    return "TermMetaData";
+  }
+}
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/temp/TempBlockTermsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/temp/TempBlockTermsReader.java
index c11afb8..3be17bf 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/temp/TempBlockTermsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/temp/TempBlockTermsReader.java
@@ -628,9 +628,14 @@ public class TempBlockTermsReader extends FieldsProducer {
         private int startBytePos;
         private int suffix;
 
+        // buffer current block of term metadata
+        ByteArrayDataInput bytesReader;
+        byte[] bytes;
+
         public Frame(int ord) throws IOException {
           this.ord = ord;
-          termState = postingsReader.newTermState();
+          termState = new TempTermState();
+          termState.meta = postingsReader.newTermMetaData();
           termState.totalTermFreq = -1;
         }
 
@@ -729,8 +734,17 @@ public class TempBlockTermsReader extends FieldsProducer {
 
           termState.termBlockOrd = 0;
           nextEnt = 0;
-          
-          postingsReader.readTermsBlock(in, fieldInfo, termState);
+
+          // metadata 
+          numBytes = in.readVInt();
+          if (bytes == null) {
+            bytes = new byte[ArrayUtil.oversize(numBytes, 1)];
+            bytesReader = new ByteArrayDataInput();
+          } else if (bytes.length < numBytes) {
+            bytes = new byte[ArrayUtil.oversize(numBytes, 1)];
+          }
+          in.readBytes(bytes, 0, numBytes);
+          bytesReader.reset(bytes, 0, numBytes);
 
           if (!isLastInFloor) {
             // Sub-blocks of a single floor block are always
@@ -807,7 +821,7 @@ public class TempBlockTermsReader extends FieldsProducer {
               //if (DEBUG) System.out.println("    totTF=" + state.totalTermFreq);
             }
 
-            postingsReader.nextTerm(fieldInfo, termState);
+            postingsReader.nextTerm(fieldInfo, termState, bytesReader);
             metaDataUpto++;
             termState.termBlockOrd++;
           }
@@ -2300,9 +2314,14 @@ public class TempBlockTermsReader extends FieldsProducer {
 
         final TempTermState state;
 
+        // buffer current block of term metadata
+        ByteArrayDataInput bytesReader;
+        byte[] bytes;
+
         public Frame(int ord) throws IOException {
           this.ord = ord;
-          state = postingsReader.newTermState();
+          state = new TempTermState();
+          state.meta = postingsReader.newTermMetaData();
           state.totalTermFreq = -1;
         }
 
@@ -2399,9 +2418,16 @@ public class TempBlockTermsReader extends FieldsProducer {
           nextEnt = 0;
           lastSubFP = -1;
 
-          // TODO: we could skip this if !hasTerms; but
-          // that's rare so won't help much
-          postingsReader.readTermsBlock(in, fieldInfo, state);
+          // metadata 
+          numBytes = in.readVInt();
+          if (bytes == null) {
+            bytes = new byte[ArrayUtil.oversize(numBytes, 1)];
+            bytesReader = new ByteArrayDataInput();
+          } else if (bytes.length < numBytes) {
+            bytes = new byte[ArrayUtil.oversize(numBytes, 1)];
+          }
+          in.readBytes(bytes, 0, numBytes);
+          bytesReader.reset(bytes, 0, numBytes);
 
           // Sub-blocks of a single floor block are always
           // written one after another -- tail recurse:
@@ -2609,7 +2635,7 @@ public class TempBlockTermsReader extends FieldsProducer {
               //if (DEBUG) System.out.println("    totTF=" + state.totalTermFreq);
             }
 
-            postingsReader.nextTerm(fieldInfo, state);
+            postingsReader.nextTerm(fieldInfo, state, bytesReader);
             metaDataUpto++;
             state.termBlockOrd++;
           }
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/temp/TempBlockTermsWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/temp/TempBlockTermsWriter.java
index bef5ee9..b955b7d 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/temp/TempBlockTermsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/temp/TempBlockTermsWriter.java
@@ -47,6 +47,7 @@ import org.apache.lucene.codecs.FieldsConsumer;
 import org.apache.lucene.codecs.TermsConsumer;
 import org.apache.lucene.codecs.TermStats;
 import org.apache.lucene.codecs.CodecUtil;
+import org.apache.lucene.codecs.TermMetaData;
 
 /*
   TODO:
@@ -937,7 +938,7 @@ public class TempBlockTermsWriter extends FieldsConsumer {
       bytesWriter2.reset();
 
       // Have postings writer write block
-      postingsWriter.flushTermsBlock(futureTermCount+termCount, termCount);
+      flushTermsBlock(futureTermCount+termCount, termCount);
 
       // Remove slice replaced by block:
       slice.clear();
@@ -956,6 +957,42 @@ public class TempBlockTermsWriter extends FieldsConsumer {
 
       return new PendingBlock(prefix, startFP, termCount != 0, isFloor, floorLeadByte, subIndices);
     }
+    private final RAMOutputStream bytesWriter3 = new RAMOutputStream();
+
+    /** Flush count terms starting at start "backwards", as a
+     *  block. start is a negative offset from the end of the
+     *  terms stack, ie bigger start means further back in
+     *  the stack. */
+    void flushTermsBlock(int start, int count) throws IOException {
+      if (count == 0) {
+        out.writeByte((byte) 0);
+        return;
+      }
+
+      assert start <= pendingMetaData.size();
+      assert count <= start;
+
+      final int limit = pendingMetaData.size() - start + count;
+
+      TermMetaData upto, last, delta;
+      last = postingsWriter.newTermMetaData();
+      for(int idx=limit-count; idx<limit; idx++) {
+        upto = pendingMetaData.get(idx);
+        // nocommit: wow ... FP is smaller than 
+        // latest trunk version... somebody optimized the PF?
+        delta = upto.subtract(last);
+        delta.write(bytesWriter3, fieldInfo, null);
+        // nocommit: last = upto ? sadly no... 
+        // With terms A, B, C, lastDocFP[C] == docFP[A], if docFP[B] == -1
+        last = delta.add(last);
+      }
+      out.writeVInt((int) bytesWriter3.getFilePointer());
+      bytesWriter3.writeTo(out);
+      bytesWriter3.reset();
+
+      // Remove the terms we just wrote:
+      pendingMetaData.subList(limit-count, limit).clear();
+    }
 
     TermsWriter(FieldInfo fieldInfo) {
       this.fieldInfo = fieldInfo;
@@ -996,6 +1033,7 @@ public class TempBlockTermsWriter extends FieldsConsumer {
     }
 
     private final IntsRef scratchIntsRef = new IntsRef();
+    private final List<TermMetaData> pendingMetaData = new ArrayList<TermMetaData>();
 
     @Override
     public void finishTerm(BytesRef text, TermStats stats) throws IOException {
@@ -1005,7 +1043,13 @@ public class TempBlockTermsWriter extends FieldsConsumer {
 
       blockBuilder.add(Util.toIntsRef(text, scratchIntsRef), noOutputs.getNoOutput());
       pending.add(new PendingTerm(BytesRef.deepCopyOf(text), stats));
-      postingsWriter.finishTerm(stats);
+      
+      TempTermState termState = new TempTermState();
+      termState.docFreq = stats.docFreq;
+      termState.totalTermFreq = stats.totalTermFreq;
+      //termState.meta = postingsWriter.newTermMetaData();
+      postingsWriter.finishTerm(termState);
+      pendingMetaData.add(termState.meta);
       numTerms++;
     }
 
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/temp/TempMetaData.java b/lucene/core/src/java/org/apache/lucene/codecs/temp/TempMetaData.java
new file mode 100644
index 0000000..e363f7a
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/codecs/temp/TempMetaData.java
@@ -0,0 +1,172 @@
+package org.apache.lucene.codecs.temp;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.io.IOException;
+
+import org.apache.lucene.codecs.TermMetaData;
+import org.apache.lucene.store.ByteArrayDataInput;
+import org.apache.lucene.store.DataInput;
+import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldInfo.IndexOptions;
+
+
+// Must keep final because we do non-standard clone
+final class TempMetaData extends TermMetaData {
+  long docStartFP;
+  long posStartFP;
+  long payStartFP;
+  long skipOffset;
+  long lastPosBlockOffset;
+  // docid when there is a single pulsed posting, otherwise -1
+  // freq is always implicitly totalTermFreq in this case.
+  int singletonDocID;
+
+  /* The no-arg constructor means the instance will always compare as 'less than' any other instance */
+  TempMetaData() {
+    docStartFP = 0;
+    posStartFP = 0;
+    payStartFP = 0;
+    singletonDocID = 0;
+  }
+
+  TempMetaData(long docStartFP, long posStartFP, long payStartFP, long skipOffset, long lastPosBlockOffset, int singletonDocID) {
+    this.docStartFP = docStartFP;
+    this.posStartFP = posStartFP;
+    this.payStartFP = payStartFP;
+    this.skipOffset = skipOffset;
+    this.lastPosBlockOffset = lastPosBlockOffset;
+    this.singletonDocID = singletonDocID;
+  }
+
+  /* delta-encoding; only works on the monotonic part */
+  public TermMetaData subtract(TermMetaData _inc) {
+    TempMetaData inc = (TempMetaData) _inc;
+    TempMetaData ret = (TempMetaData)super.clone();
+    if (ret.singletonDocID != -1) {  // current MetaData has no valid docFP, copy it so delta=0
+      ret.docStartFP = 0;
+    } else {
+      assert inc.docStartFP <= ret.docStartFP;
+      ret.docStartFP -= inc.docStartFP;
+    }
+    if (ret.posStartFP != -1) {
+      assert inc.posStartFP <= ret.posStartFP;
+      ret.posStartFP -= inc.posStartFP;
+    }
+    if (ret.payStartFP != -1 && inc.payStartFP != -1) {
+      assert inc.payStartFP <= ret.payStartFP;
+      ret.payStartFP -= inc.payStartFP;
+    }
+    return ret;
+  }
+
+  @Override
+  public TermMetaData add(TermMetaData _inc) {
+    TempMetaData inc = (TempMetaData) _inc;
+    TempMetaData ret = (TempMetaData)super.clone();
+    if (ret.singletonDocID != -1) {
+      ret.docStartFP = inc.docStartFP;
+    } else {
+      ret.docStartFP += inc.docStartFP;
+    }
+    if (ret.posStartFP != -1) {
+      ret.posStartFP += inc.posStartFP;
+    }
+    // nocommit: not symmetric with those lines in subtract... ,
+    // the deep reason is: during writing, we depend on the '-1' values 
+    // to see whether to write them...
+    //
+    // Here, the use of subtract() & add() are quite different:
+    // subtract() will get the delta value to *write* to disk, 
+    //            so the format must be exactly the same as a pendingTerm in 
+    //            Lucene41PostingsWriter.flushTermBlock()
+    // add() will try to pass the 'lastPayStartFP' along the consumed metadata,
+    //       so the metadata returned by 'add' will *never* be written to disk.
+    // for example, whether we have payStartFP depends on the totalTermFreq of a term,
+    // like for term A,B,C, A & C has payStartFP, but B doesn't, so, 
+    // the payStartFP returned by B.subtract(A) should be -1, otherwise the write will stupidly write another VLong,
+    // the payStartFP returned by B.subtract(A).add(A) should be the same as A
+    //
+    if (inc.payStartFP != -1) {
+      if (ret.payStartFP != -1) {
+        ret.payStartFP += inc.payStartFP;
+      } else {
+        ret.payStartFP = inc.payStartFP;
+      }
+    }
+    return ret;
+  }
+
+  @Override
+  // nocommit: should TermState be glued with field info? quite stupid to calculate indexOptions here
+  // but we calculate this redundantly in Lucene41PostingsReader as well..
+  public void write(DataOutput out, FieldInfo info, TempTermState state) throws IOException {
+    final IndexOptions indexOptions = info.getIndexOptions();
+    boolean fieldHasFreqs = indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS) >= 0;
+    boolean fieldHasPositions = indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
+    boolean fieldHasOffsets = indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
+    boolean fieldHasPayloads = info.hasPayloads();
+
+    if (singletonDocID == -1) {
+      out.writeVLong(docStartFP);
+    } else {
+      out.writeVInt(singletonDocID);
+    }
+
+    if (fieldHasPositions) {
+      out.writeVLong(posStartFP);
+      if (lastPosBlockOffset != -1) {
+        out.writeVLong(lastPosBlockOffset);
+      }
+      if ((fieldHasPayloads || fieldHasOffsets) && payStartFP != -1) {
+        out.writeVLong(payStartFP);
+      }
+    }
+    if (skipOffset != -1) {
+      out.writeVLong(skipOffset);
+    }
+  }
+
+  @Override
+  public void read(DataInput in, FieldInfo info, TempTermState state) throws IOException {
+  }
+
+  @Override
+  public TempMetaData clone() {
+    TempMetaData other = new TempMetaData();
+    other.copyFrom(this);
+    return other;
+  }
+
+  @Override
+  public void copyFrom(TermMetaData _other) {
+    //super.copyFrom(_other);
+    TempMetaData other = (TempMetaData) _other;
+    docStartFP = other.docStartFP;
+    posStartFP = other.posStartFP;
+    payStartFP = other.payStartFP;
+    lastPosBlockOffset = other.lastPosBlockOffset;
+    skipOffset = other.skipOffset;
+    singletonDocID = other.singletonDocID;
+  }
+
+  @Override
+  public String toString() {
+    return "docStartFP=" + docStartFP + " posStartFP=" + posStartFP + " payStartFP=" + payStartFP + " lastPosBlockOffset=" + lastPosBlockOffset + " singletonDocID=" + singletonDocID;
+  }
+}
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/temp/TempPostingsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/temp/TempPostingsReader.java
index 25a0978..ba50869 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/temp/TempPostingsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/temp/TempPostingsReader.java
@@ -43,6 +43,7 @@ import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.codecs.TermMetaData;
 import org.apache.lucene.codecs.lucene41.ForUtil;
 import org.apache.lucene.codecs.lucene41.Lucene41SkipReader;
 
@@ -143,55 +144,9 @@ public final class TempPostingsReader extends TempPostingsReaderBase {
     }
   }
 
-  // Must keep final because we do non-standard clone
-  private final static class IntBlockTermState extends TempTermState {
-    long docStartFP;
-    long posStartFP;
-    long payStartFP;
-    long skipOffset;
-    long lastPosBlockOffset;
-    // docid when there is a single pulsed posting, otherwise -1
-    // freq is always implicitly totalTermFreq in this case.
-    int singletonDocID;
-
-    // Only used by the "primary" TermState -- clones don't
-    // copy this (basically they are "transient"):
-    ByteArrayDataInput bytesReader;  // TODO: should this NOT be in the TermState...?
-    byte[] bytes;
-
-    @Override
-    public IntBlockTermState clone() {
-      IntBlockTermState other = new IntBlockTermState();
-      other.copyFrom(this);
-      return other;
-    }
-
-    @Override
-    public void copyFrom(TermState _other) {
-      super.copyFrom(_other);
-      IntBlockTermState other = (IntBlockTermState) _other;
-      docStartFP = other.docStartFP;
-      posStartFP = other.posStartFP;
-      payStartFP = other.payStartFP;
-      lastPosBlockOffset = other.lastPosBlockOffset;
-      skipOffset = other.skipOffset;
-      singletonDocID = other.singletonDocID;
-
-      // Do not copy bytes, bytesReader (else TermState is
-      // very heavy, ie drags around the entire block's
-      // byte[]).  On seek back, if next() is in fact used
-      // (rare!), they will be re-read from disk.
-    }
-
-    @Override
-    public String toString() {
-      return super.toString() + " docStartFP=" + docStartFP + " posStartFP=" + posStartFP + " payStartFP=" + payStartFP + " lastPosBlockOffset=" + lastPosBlockOffset + " singletonDocID=" + singletonDocID;
-    }
-  }
-
   @Override
-  public IntBlockTermState newTermState() {
-    return new IntBlockTermState();
+  public TermMetaData newTermMetaData() {
+    return new TempMetaData();
   }
 
   @Override
@@ -199,85 +154,66 @@ public final class TempPostingsReader extends TempPostingsReaderBase {
     IOUtils.close(docIn, posIn, payIn);
   }
 
-  /* Reads but does not decode the byte[] blob holding
-     metadata for the current terms block */
-  @Override
-  public void readTermsBlock(IndexInput termsIn, FieldInfo fieldInfo, TempTermState _termState) throws IOException {
-    final IntBlockTermState termState = (IntBlockTermState) _termState;
-
-    final int numBytes = termsIn.readVInt();
-
-    if (termState.bytes == null) {
-      termState.bytes = new byte[ArrayUtil.oversize(numBytes, 1)];
-      termState.bytesReader = new ByteArrayDataInput();
-    } else if (termState.bytes.length < numBytes) {
-      termState.bytes = new byte[ArrayUtil.oversize(numBytes, 1)];
-    }
-
-    termsIn.readBytes(termState.bytes, 0, numBytes);
-    termState.bytesReader.reset(termState.bytes, 0, numBytes);
-  }
-
   @Override
-  public void nextTerm(FieldInfo fieldInfo, TempTermState _termState)
+  public void nextTerm(FieldInfo fieldInfo, TempTermState termState, ByteArrayDataInput bytesReader)
     throws IOException {
-    final IntBlockTermState termState = (IntBlockTermState) _termState;
+    final TempMetaData meta = (TempMetaData) termState.meta;
     final boolean isFirstTerm = termState.termBlockOrd == 0;
     final boolean fieldHasPositions = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
     final boolean fieldHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
     final boolean fieldHasPayloads = fieldInfo.hasPayloads();
 
-    final DataInput in = termState.bytesReader;
+    final DataInput in = bytesReader;
     if (isFirstTerm) {
       if (termState.docFreq == 1) {
-        termState.singletonDocID = in.readVInt();
-        termState.docStartFP = 0;
+        meta.singletonDocID = in.readVInt();
+        meta.docStartFP = 0;
       } else {
-        termState.singletonDocID = -1;
-        termState.docStartFP = in.readVLong();
+        meta.singletonDocID = -1;
+        meta.docStartFP = in.readVLong();
       }
       if (fieldHasPositions) {
-        termState.posStartFP = in.readVLong();
+        meta.posStartFP = in.readVLong();
         if (termState.totalTermFreq > BLOCK_SIZE) {
-          termState.lastPosBlockOffset = in.readVLong();
+          meta.lastPosBlockOffset = in.readVLong();
         } else {
-          termState.lastPosBlockOffset = -1;
+          meta.lastPosBlockOffset = -1;
         }
         if ((fieldHasPayloads || fieldHasOffsets) && termState.totalTermFreq >= BLOCK_SIZE) {
-          termState.payStartFP = in.readVLong();
+          meta.payStartFP = in.readVLong();
         } else {
-          termState.payStartFP = -1;
+          meta.payStartFP = -1;
         }
       }
     } else {
       if (termState.docFreq == 1) {
-        termState.singletonDocID = in.readVInt();
+        meta.singletonDocID = in.readVInt();
       } else {
-        termState.singletonDocID = -1;
-        termState.docStartFP += in.readVLong();
+        meta.singletonDocID = -1;
+        meta.docStartFP += in.readVLong();
       }
       if (fieldHasPositions) {
-        termState.posStartFP += in.readVLong();
+        meta.posStartFP += in.readVLong();
         if (termState.totalTermFreq > BLOCK_SIZE) {
-          termState.lastPosBlockOffset = in.readVLong();
+          meta.lastPosBlockOffset = in.readVLong();
         } else {
-          termState.lastPosBlockOffset = -1;
+          meta.lastPosBlockOffset = -1;
         }
         if ((fieldHasPayloads || fieldHasOffsets) && termState.totalTermFreq >= BLOCK_SIZE) {
           long delta = in.readVLong();
-          if (termState.payStartFP == -1) {
-            termState.payStartFP = delta;
+          if (meta.payStartFP == -1) {
+            meta.payStartFP = delta;
           } else {
-            termState.payStartFP += delta;
+            meta.payStartFP += delta;
           }
         }
       }
     }
 
     if (termState.docFreq > BLOCK_SIZE) {
-      termState.skipOffset = in.readVLong();
+      meta.skipOffset = in.readVLong();
     } else {
-      termState.skipOffset = -1;
+      meta.skipOffset = -1;
     }
   }
     
@@ -292,7 +228,7 @@ public final class TempPostingsReader extends TempPostingsReaderBase {
     } else {
       docsEnum = new BlockDocsEnum(fieldInfo);
     }
-    return docsEnum.reset(liveDocs, (IntBlockTermState) termState, flags);
+    return docsEnum.reset(liveDocs, termState, flags);
   }
 
   // TODO: specialize to liveDocs vs not
@@ -316,7 +252,7 @@ public final class TempPostingsReader extends TempPostingsReaderBase {
       } else {
         docsAndPositionsEnum = new BlockDocsAndPositionsEnum(fieldInfo);
       }
-      return docsAndPositionsEnum.reset(liveDocs, (IntBlockTermState) termState);
+      return docsAndPositionsEnum.reset(liveDocs, termState);
     } else {
       EverythingEnum everythingEnum;
       if (reuse instanceof EverythingEnum) {
@@ -327,7 +263,7 @@ public final class TempPostingsReader extends TempPostingsReaderBase {
       } else {
         everythingEnum = new EverythingEnum(fieldInfo);
       }
-      return everythingEnum.reset(liveDocs, (IntBlockTermState) termState, flags);
+      return everythingEnum.reset(liveDocs, termState, flags);
     }
   }
 
@@ -391,16 +327,17 @@ public final class TempPostingsReader extends TempPostingsReaderBase {
         indexHasPayloads == fieldInfo.hasPayloads();
     }
     
-    public DocsEnum reset(Bits liveDocs, IntBlockTermState termState, int flags) throws IOException {
+    public DocsEnum reset(Bits liveDocs, TempTermState termState, int flags) throws IOException {
+      final TempMetaData meta = (TempMetaData) termState.meta;
       this.liveDocs = liveDocs;
       // if (DEBUG) {
       //   System.out.println("  FPR.reset: termState=" + termState);
       // }
       docFreq = termState.docFreq;
       totalTermFreq = indexHasFreq ? termState.totalTermFreq : docFreq;
-      docTermStartFP = termState.docStartFP;
-      skipOffset = termState.skipOffset;
-      singletonDocID = termState.singletonDocID;
+      docTermStartFP = meta.docStartFP;
+      skipOffset = meta.skipOffset;
+      singletonDocID = meta.singletonDocID;
       if (docFreq > 1) {
         if (docIn == null) {
           // lazy init
@@ -687,18 +624,19 @@ public final class TempPostingsReader extends TempPostingsReaderBase {
         indexHasPayloads == fieldInfo.hasPayloads();
     }
     
-    public DocsAndPositionsEnum reset(Bits liveDocs, IntBlockTermState termState) throws IOException {
+    public DocsAndPositionsEnum reset(Bits liveDocs, TempTermState termState) throws IOException {
+      final TempMetaData meta = (TempMetaData) termState.meta;
       this.liveDocs = liveDocs;
       // if (DEBUG) {
       //   System.out.println("  FPR.reset: termState=" + termState);
       // }
       docFreq = termState.docFreq;
-      docTermStartFP = termState.docStartFP;
-      posTermStartFP = termState.posStartFP;
-      payTermStartFP = termState.payStartFP;
-      skipOffset = termState.skipOffset;
       totalTermFreq = termState.totalTermFreq;
-      singletonDocID = termState.singletonDocID;
+      docTermStartFP = meta.docStartFP;
+      posTermStartFP = meta.posStartFP;
+      payTermStartFP = meta.payStartFP;
+      skipOffset = meta.skipOffset;
+      singletonDocID = meta.singletonDocID;
       if (docFreq > 1) {
         if (docIn == null) {
           // lazy init
@@ -713,7 +651,7 @@ public final class TempPostingsReader extends TempPostingsReaderBase {
       } else if (termState.totalTermFreq == BLOCK_SIZE) {
         lastPosBlockFP = -1;
       } else {
-        lastPosBlockFP = posTermStartFP + termState.lastPosBlockOffset;
+        lastPosBlockFP = posTermStartFP + meta.lastPosBlockOffset;
       }
 
       doc = -1;
@@ -1144,18 +1082,19 @@ public final class TempPostingsReader extends TempPostingsReaderBase {
         indexHasPayloads == fieldInfo.hasPayloads();
     }
     
-    public EverythingEnum reset(Bits liveDocs, IntBlockTermState termState, int flags) throws IOException {
+    public EverythingEnum reset(Bits liveDocs, TempTermState termState, int flags) throws IOException {
+      final TempMetaData meta = (TempMetaData) termState.meta;
       this.liveDocs = liveDocs;
       // if (DEBUG) {
       //   System.out.println("  FPR.reset: termState=" + termState);
       // }
       docFreq = termState.docFreq;
-      docTermStartFP = termState.docStartFP;
-      posTermStartFP = termState.posStartFP;
-      payTermStartFP = termState.payStartFP;
-      skipOffset = termState.skipOffset;
       totalTermFreq = termState.totalTermFreq;
-      singletonDocID = termState.singletonDocID;
+      docTermStartFP = meta.docStartFP;
+      posTermStartFP = meta.posStartFP;
+      payTermStartFP = meta.payStartFP;
+      skipOffset = meta.skipOffset;
+      singletonDocID = meta.singletonDocID;
       if (docFreq > 1) {
         if (docIn == null) {
           // lazy init
@@ -1171,7 +1110,7 @@ public final class TempPostingsReader extends TempPostingsReaderBase {
       } else if (termState.totalTermFreq == BLOCK_SIZE) {
         lastPosBlockFP = -1;
       } else {
-        lastPosBlockFP = posTermStartFP + termState.lastPosBlockOffset;
+        lastPosBlockFP = posTermStartFP + meta.lastPosBlockOffset;
       }
 
       this.needsOffsets = (flags & DocsAndPositionsEnum.FLAG_OFFSETS) != 0;
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/temp/TempPostingsWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/temp/TempPostingsWriter.java
index 0d0d4bb..ba331e9 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/temp/TempPostingsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/temp/TempPostingsWriter.java
@@ -41,6 +41,7 @@ import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.packed.PackedInts;
+import org.apache.lucene.codecs.TermMetaData;
 
 
 /**
@@ -73,8 +74,6 @@ public final class TempPostingsWriter extends TempPostingsWriterBase {
   final IndexOutput posOut;
   final IndexOutput payOut;
 
-  private IndexOutput termsOut;
-
   // How current field indexes postings:
   private boolean fieldHasFreqs;
   private boolean fieldHasPositions;
@@ -192,7 +191,6 @@ public final class TempPostingsWriter extends TempPostingsWriterBase {
 
   @Override
   public void start(IndexOutput termsOut) throws IOException {
-    this.termsOut = termsOut;
     CodecUtil.writeHeader(termsOut, TERMS_CODEC, VERSION_CURRENT);
     termsOut.writeVInt(BLOCK_SIZE);
   }
@@ -350,37 +348,22 @@ public final class TempPostingsWriter extends TempPostingsWriterBase {
     }
   }
 
-  private static class PendingTerm {
-    public final long docStartFP;
-    public final long posStartFP;
-    public final long payStartFP;
-    public final long skipOffset;
-    public final long lastPosBlockOffset;
-    public final int singletonDocID;
-
-    public PendingTerm(long docStartFP, long posStartFP, long payStartFP, long skipOffset, long lastPosBlockOffset, int singletonDocID) {
-      this.docStartFP = docStartFP;
-      this.posStartFP = posStartFP;
-      this.payStartFP = payStartFP;
-      this.skipOffset = skipOffset;
-      this.lastPosBlockOffset = lastPosBlockOffset;
-      this.singletonDocID = singletonDocID;
-    }
+  @Override
+  public TermMetaData newTermMetaData() {
+    return new TempMetaData();
   }
 
-  private final List<PendingTerm> pendingTerms = new ArrayList<PendingTerm>();
-
   /** Called when we are done adding docs to this term */
   @Override
-  public void finishTerm(TermStats stats) throws IOException {
-    assert stats.docFreq > 0;
+  public void finishTerm(TempTermState state) throws IOException {
+    assert state.docFreq > 0;
 
     // TODO: wasteful we are counting this (counting # docs
     // for this term) in two places?
-    assert stats.docFreq == docCount: stats.docFreq + " vs " + docCount;
+    assert state.docFreq == docCount: state.docFreq + " vs " + docCount;
 
     // if (DEBUG) {
-    //   System.out.println("FPW.finishTerm docFreq=" + stats.docFreq);
+    //   System.out.println("FPW.finishTerm docFreq=" + state.docFreq);
     // }
 
     // if (DEBUG) {
@@ -391,7 +374,7 @@ public final class TempPostingsWriter extends TempPostingsWriterBase {
     
     // docFreq == 1, don't write the single docid/freq to a separate file along with a pointer to it.
     final int singletonDocID;
-    if (stats.docFreq == 1) {
+    if (state.docFreq == 1) {
       // pulse the singleton docid into the term dictionary, freq is implicitly totalTermFreq
       singletonDocID = docDeltaBuffer[0];
     } else {
@@ -422,8 +405,8 @@ public final class TempPostingsWriter extends TempPostingsWriterBase {
 
       // totalTermFreq is just total number of positions(or payloads, or offsets)
       // associated with current term.
-      assert stats.totalTermFreq != -1;
-      if (stats.totalTermFreq > BLOCK_SIZE) {
+      assert state.totalTermFreq != -1;
+      if (state.totalTermFreq > BLOCK_SIZE) {
         // record file offset for last pos in last block
         lastPosBlockOffset = posOut.getFilePointer() - posTermStartFP;
       } else {
@@ -488,7 +471,7 @@ public final class TempPostingsWriter extends TempPostingsWriterBase {
         }
       }
       // if (DEBUG) {
-      //   System.out.println("  totalTermFreq=" + stats.totalTermFreq + " lastPosBlockOffset=" + lastPosBlockOffset);
+      //   System.out.println("  totalTermFreq=" + state.totalTermFreq + " lastPosBlockOffset=" + lastPosBlockOffset);
       // }
     } else {
       lastPosBlockOffset = -1;
@@ -509,7 +492,7 @@ public final class TempPostingsWriter extends TempPostingsWriterBase {
     }
 
     long payStartFP;
-    if (stats.totalTermFreq >= BLOCK_SIZE) {
+    if (state.totalTermFreq >= BLOCK_SIZE) {
       payStartFP = payTermStartFP;
     } else {
       payStartFP = -1;
@@ -518,66 +501,14 @@ public final class TempPostingsWriter extends TempPostingsWriterBase {
     // if (DEBUG) {
     //   System.out.println("  payStartFP=" + payStartFP);
     // }
-
-    pendingTerms.add(new PendingTerm(docTermStartFP, posTermStartFP, payStartFP, skipOffset, lastPosBlockOffset, singletonDocID));
+
+    state.meta = new TempMetaData(docTermStartFP, posTermStartFP, payStartFP, skipOffset, lastPosBlockOffset, singletonDocID);
     docBufferUpto = 0;
     posBufferUpto = 0;
     lastDocID = 0;
     docCount = 0;
   }
 
-  private final RAMOutputStream bytesWriter = new RAMOutputStream();
-
-  @Override
-  public void flushTermsBlock(int start, int count) throws IOException {
-
-    if (count == 0) {
-      termsOut.writeByte((byte) 0);
-      return;
-    }
-
-    assert start <= pendingTerms.size();
-    assert count <= start;
-
-    final int limit = pendingTerms.size() - start + count;
-
-    long lastDocStartFP = 0;
-    long lastPosStartFP = 0;
-    long lastPayStartFP = 0;
-    for(int idx=limit-count; idx<limit; idx++) {
-      PendingTerm term = pendingTerms.get(idx);
-
-      if (term.singletonDocID == -1) {
-        bytesWriter.writeVLong(term.docStartFP - lastDocStartFP);
-        lastDocStartFP = term.docStartFP;
-      } else {
-        bytesWriter.writeVInt(term.singletonDocID);
-      }
-
-      if (fieldHasPositions) {
-        bytesWriter.writeVLong(term.posStartFP - lastPosStartFP);
-        lastPosStartFP = term.posStartFP;
-        if (term.lastPosBlockOffset != -1) {
-          bytesWriter.writeVLong(term.lastPosBlockOffset);
-        }
-        if ((fieldHasPayloads || fieldHasOffsets) && term.payStartFP != -1) {
-          bytesWriter.writeVLong(term.payStartFP - lastPayStartFP);
-          lastPayStartFP = term.payStartFP;
-        }
-      }
-
-      if (term.skipOffset != -1) {
-        bytesWriter.writeVLong(term.skipOffset);
-      }
-    }
-
-    termsOut.writeVInt((int) bytesWriter.getFilePointer());
-    bytesWriter.writeTo(termsOut);
-    bytesWriter.reset();
-
-    // Remove the terms we just wrote:
-    pendingTerms.subList(limit-count, limit).clear();
-  }
 
   @Override
   public void close() throws IOException {
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/temp/TempTermState.java b/lucene/core/src/java/org/apache/lucene/codecs/temp/TempTermState.java
index 7cf99cc..8c783a7 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/temp/TempTermState.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/temp/TempTermState.java
@@ -18,6 +18,7 @@ package org.apache.lucene.codecs.temp;
 
 import org.apache.lucene.index.DocsEnum; // javadocs
 import org.apache.lucene.index.TermState;
+import org.apache.lucene.codecs.TermMetaData;
 
 /**
  * Holds all state required for {@link PostingsReaderBase}
@@ -33,11 +34,19 @@ public class TempTermState extends TermState {
   /** the term's ord in the current block */
   public int termBlockOrd;
 
+  public TermMetaData meta;
+
   /** Sole constructor. (For invocation by subclass 
    *  constructors, typically implicit.) */
   protected TempTermState() {
   }
 
+  public TempTermState clone() {
+    TempTermState other = (TempTermState)super.clone();
+    other.meta = this.meta.clone();  // nocommit: why do we need to clone all?
+    return other;
+  }
+
   @Override
   public void copyFrom(TermState _other) {
     assert _other instanceof TempTermState : "can not copy from " + _other.getClass().getName();
@@ -45,6 +54,7 @@ public class TempTermState extends TermState {
     docFreq = other.docFreq;
     totalTermFreq = other.totalTermFreq;
     termBlockOrd = other.termBlockOrd;
+    meta = other.meta.clone();
   }
 
   @Override
