Index: lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
===================================================================
--- lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java	(revision 1062778)
+++ lucene/contrib/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java	(working copy)
@@ -562,8 +562,8 @@
     };
 
     helper.run();
-    assertTrue("Failed to find correct number of highlights " + numHighlights + " found",
-        numHighlights == 7);
+    assertEquals("Failed to find correct number of highlights", 7,
+        numHighlights);
   }
 
   public void testNotSpanSimpleQuery() throws Exception {
@@ -581,8 +581,8 @@
     };
 
     helper.run();
-    assertTrue("Failed to find correct number of highlights " + numHighlights + " found",
-        numHighlights == 4);
+    assertEquals("Failed to find correct number of highlights", 4,
+        numHighlights);
   }
 
   public void testGetBestFragmentsSimpleQuery() throws Exception {
Index: lucene/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java
===================================================================
--- lucene/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java	(revision 1062778)
+++ lucene/contrib/memory/src/test/org/apache/lucene/index/memory/MemoryIndexTest.java	(working copy)
@@ -31,12 +31,23 @@
 import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.queryParser.QueryParser;
 import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Positions;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.search.Weight.ScorerContext;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.ReaderUtil;
 import org.apache.lucene.util._TestUtil;
 
 /**
@@ -138,6 +149,39 @@
     mem.close();
   }
   
+  public void testTermQueryWithPositions() throws IOException {
+    boolean usePayload = false;
+    String fieldName = "field";
+    final int num = random.nextInt(1000) + 1; // at least one occurrence so the term exists
+    MemoryIndex index = new MemoryIndex();
+    StringBuilder builder = new StringBuilder();
+    String content = "1 2 3 4 5 6 7 8 9 10 ";
+    for (int i = 0; i < num; i++) {
+      builder.append(content);
+    }
+    content = builder.toString();
+    index.addField(fieldName, content, new MockAnalyzer(
+        MockTokenizer.WHITESPACE, true, false));
+    IndexSearcher searcher = index.createSearcher();
+    TermQuery one = new TermQuery(new Term(fieldName, "1"));
+    Weight weight = one.createWeight(searcher);
+    Scorer scorer = weight.scorer(
+        ReaderUtil.leaves(searcher.getTopReaderContext())[0], ScorerContext
+            .def().needsPositions(true).needsPayloads(usePayload));
+    assertNotNull(scorer);
+    assertEquals(0, scorer.nextDoc());
+    Positions positions = scorer.positions();
+    for (int i = 0; i < num; i++) {
+      assertTrue(positions.next());
+      assertEquals(num, positions.freq);
+
+      assertEquals(i*10, positions.start);
+      assertEquals(i*10+1, positions.end);
+    }
+    assertFalse(positions.next());
+    assertEquals(Scorer.NO_MORE_DOCS, scorer.nextDoc());
+  }
+  
   /**
    * Return a random analyzer (Simple, Stop, Standard) to analyze the terms.
    */
Index: lucene/src/java/org/apache/lucene/index/BulkPostingsEnum.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/BulkPostingsEnum.java	(revision 1062778)
+++ lucene/src/java/org/apache/lucene/index/BulkPostingsEnum.java	(working copy)
@@ -19,6 +19,8 @@
 
 import java.io.IOException;
 
+import org.apache.lucene.util.BytesRef;
+
 /** Low level bulk iterator through postings (documents,
  *  term freq, positions).  This API shifts much
  *  responsibility to the caller, in order to maximize
@@ -79,6 +81,25 @@
 
     // nocommit messy
     public abstract void setOffset(int offset);
+    
+    public BulkPayload getPayloadBuffer() {
+      return null;
+    }
+    public boolean payloadsAvailable() {
+      return false;
+    }
+  }
+  
+  public static final class BulkPayload {
+    public final BytesRef bytes;
+    public final int[] start;
+    public final int[] length;
+    
+    public BulkPayload(BytesRef bytes, int[] start, int[] length) {
+      this.bytes = bytes;
+      this.start = start;
+      this.length = length;
+    }
   }
 
   public abstract BlockReader getDocDeltasReader() throws IOException;
Index: lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsReader.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsReader.java	(revision 1062778)
+++ lucene/src/java/org/apache/lucene/index/codecs/standard/StandardPostingsReader.java	(working copy)
@@ -23,9 +23,7 @@
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.DocsEnum;
 import org.apache.lucene.index.BulkPostingsEnum;
-import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.SegmentInfo;
 import org.apache.lucene.index.TermState;
@@ -1040,6 +1038,7 @@
       final int[] buffer = new int[BULK_BUFFER_SIZE];
       int limit;
       int pending;
+      BulkPayload payloads;
   
       @Override
       public int[] getBuffer() {
@@ -1055,15 +1054,25 @@
       public int fill() throws IOException {
         limit = pending > BULK_BUFFER_SIZE ? BULK_BUFFER_SIZE : pending;
         if (storePayloads) {
+          int payloadOffset = 0;
           for(int i=0;i<limit;i++) {
             final int code = proxIn.readVInt();
             buffer[i] = code >>> 1;
-            if ((code & 1) != 0) {
+            if ((code & 1) != 0) { // TODO maybe optimize this and preallocate space for payloads if payloadLength doesn't change regularly
               payloadLength = proxIn.readVInt();
             }
             if (payloadLength != 0) {
-              // skip payload
-              proxIn.seek(proxIn.getFilePointer()+payloadLength);
+
+              if (payloads != null) {
+                payloads.start[i] = payloadOffset;
+                payloads.length[i] = payloadLength;
+                payloads.bytes.grow(payloadOffset + payloadLength);
+                proxIn.readBytes(payloads.bytes.bytes, payloadOffset, payloadLength);
+                payloadOffset += payloadLength;
+              } else {
+                // skip payload
+                proxIn.seek(proxIn.getFilePointer()+payloadLength);
+              }
             }
           }
         } else {
@@ -1084,6 +1093,19 @@
       public void setOffset(int offset) {
         throw new UnsupportedOperationException();
       }
+
+      @Override
+      public BulkPayload getPayloadBuffer() {
+        if (storePayloads && payloads == null) { // only create if needed
+          this.payloads = new BulkPayload(new BytesRef(), new int[BULK_BUFFER_SIZE], new int[BULK_BUFFER_SIZE]);
+        }
+        return this.payloads;
+      }
+
+      @Override
+      public boolean payloadsAvailable() {
+        return storePayloads;
+      }
     }
     
     @Override
Index: lucene/src/java/org/apache/lucene/search/Positions.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/Positions.java	(revision 0)
+++ lucene/src/java/org/apache/lucene/search/Positions.java	(revision 0)
@@ -0,0 +1,43 @@
+package org.apache.lucene.search;
+/**
+ * Copyright 2005 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.io.IOException;
+
+import org.apache.lucene.util.BytesRef;
+
+/**
+ * 
+ * TODO add documentation
+ */
+public abstract class Positions {
+  public int freq;
+  public int start;
+  public int end;
+  public final boolean hasPayloads;
+  public final BytesRef payload;
+  
+  public Positions() {
+    this(null);
+  }
+  
+  public Positions(BytesRef ref) {
+    payload = ref;
+    hasPayloads = ref != null;
+  }
+  
+  public abstract boolean next() throws IOException;
+  
+}
Index: lucene/src/java/org/apache/lucene/search/Scorer.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/Scorer.java	(revision 1062778)
+++ lucene/src/java/org/apache/lucene/search/Scorer.java	(working copy)
@@ -101,6 +101,11 @@
   public float freq() throws IOException {
     throw new UnsupportedOperationException(this + " does not implement freq()");
   }
+  
+  public Positions positions() throws IOException {
+    // TODO make abstract?
+    throw new UnsupportedOperationException();
+  }
 
   /**
    * A callback to gather information from a scorer and its sub-scorers. Each
Index: lucene/src/java/org/apache/lucene/search/TermQuery.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/TermQuery.java	(revision 1062778)
+++ lucene/src/java/org/apache/lucene/search/TermQuery.java	(working copy)
@@ -107,7 +107,7 @@
       final Terms terms = fields.terms(field);
       final TermsEnum termsEnum = terms.getThreadTermsEnum();
       termsEnum.seek(term.bytes(), state); // ignore return value -- asserted above!
-      final BulkPostingsEnum docs = termsEnum.bulkPostings(null, true, false);
+      final BulkPostingsEnum docs = termsEnum.bulkPostings(null, true, scorerContext.needsPositions);
       
       final int docFreq = termsEnum.docFreq();
       final BlockReader docDeltas = docs.getDocDeltasReader();
@@ -116,8 +116,16 @@
         return new MatchOnlyTermScorer(this, docs, docDeltas, docFreq,
             reader.getDeletedDocs(), similarity, reader.norms(term.field()));
       } else {
-        return new TermScorer(this, docs, docDeltas, frequencies, docFreq,
-            reader.getDeletedDocs(), similarity, reader.norms(term.field()));
+        if (scorerContext.needsPositions) {
+          BlockReader positionDeltasReader = docs.getPositionDeltasReader();
+          assert positionDeltasReader != null : docs;
+          return new TermScorer(this, docs, docDeltas,
+              frequencies, positionDeltasReader, scorerContext.needsPayloads, docFreq,
+              reader.getDeletedDocs(), similarity, reader.norms(term.field()));
+        } else {
+          return new TermScorer(this, docs, docDeltas, frequencies, docFreq,
+              reader.getDeletedDocs(), similarity, reader.norms(term.field()));
+        }
       }
     }
     
Index: lucene/src/java/org/apache/lucene/search/TermScorer.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/TermScorer.java	(revision 1062778)
+++ lucene/src/java/org/apache/lucene/search/TermScorer.java	(working copy)
@@ -21,7 +21,9 @@
 
 import org.apache.lucene.index.BulkPostingsEnum;
 import org.apache.lucene.index.BulkPostingsEnum.BlockReader;
+import org.apache.lucene.index.BulkPostingsEnum.BulkPayload;
 import org.apache.lucene.util.Bits;
+import org.apache.lucene.util.BytesRef;
 
 // nocommit -- break out aligned & not cases?
 // nocommit -- break out bulk vs doc-at-time scorer?
@@ -51,6 +53,12 @@
   private final int docFreq;
   private int count;
   private final Similarity similarity;
+  private final TermPositions positions;
+
+  
+  TermScorer(Weight weight, BulkPostingsEnum td, BlockReader docDeltaReader, BlockReader freqReader, int docFreq, Bits skipDocs, Similarity similarity, byte[] norms) throws IOException {
+    this(weight, td, docDeltaReader, freqReader, null, false, docFreq, skipDocs, similarity, norms);
+  }
   
   /**
    * Construct a <code>TermScorer</code>.
@@ -65,7 +73,8 @@
    * @param norms
    *          The field norms of the document fields for the <code>Term</code>.
    */
-  TermScorer(Weight weight, BulkPostingsEnum td, BlockReader docDeltaReader, BlockReader freqReader, int docFreq, Bits skipDocs, Similarity similarity, byte[] norms) throws IOException {
+  
+  TermScorer(Weight weight, BulkPostingsEnum td, BlockReader docDeltaReader, BlockReader freqReader, BlockReader posDeltaReader, boolean needsPayloads, int docFreq, Bits skipDocs, Similarity similarity, byte[] norms) throws IOException {
     super(weight);
     this.similarity = similarity;
     this.docsEnum = td;
@@ -74,6 +83,7 @@
     docDeltas = docDeltasReader.getBuffer();
     this.freqsReader = freqReader;
     freqs = freqsReader.getBuffer();
+    positions = posDeltaReader == null ? null : getPositions(posDeltaReader, needsPayloads); 
     reset();
     this.skipDocs = skipDocs;
     this.norms = norms;
@@ -253,6 +263,9 @@
       assert freqPointerMax != 0;
       freqPointer = 0;
     }
+    if (positions != null) { // skip when positions were not requested
+      positions.fill();
+    }
   }
   
   private void fillDeltas() throws IOException {
@@ -272,5 +285,114 @@
     assert freqPointerMax >= freqPointer: "fP=" + freqPointer + " fPMax=" + freqPointerMax;
     --docPointer;
     --freqPointer;
+    if (positions != null) { // compiler should optimize this away 
+      positions.reset();
+    }
+  }
+  
+  @Override
+  public Positions positions() throws IOException {
+    return positions;
   }
+  
+  class TermPositions extends Positions {
+    private int positionsPending;
+    private int posPointerMax;
+    protected int posPointer;
+    private final int[] posBuffer;
+    private final BlockReader posReader;
+
+    public TermPositions(BlockReader blockReader) {
+      this(blockReader, null); // no payloads here
+      
+    }
+    
+    protected TermPositions(BlockReader blockReader, BytesRef payloads) {
+      super(payloads);
+      posReader = blockReader;
+      posBuffer = blockReader.getBuffer();
+    }
+    
+    void fill() throws IOException {
+      freq = freqs[freqPointer];
+      if (positionsPending > 0) {
+        posPointer += positionsPending;
+        while (posPointer >= posPointerMax) { // we need while here if numPos > buffersize
+          posPointer -= posPointerMax; // add the pending positions from last round
+          posPointerMax = posReader.fill();
+          assert posPointerMax != 0;
+        }
+      } else if (posPointer + 1 >= posPointerMax) {
+        posPointerMax = posReader.fill();
+        assert posPointerMax != 0;
+        posPointer = -1;
+      }
+      end = start = 0;
+      positionsPending = freq;
+    }
+    
+    void reset() {
+      start = 0;
+      posPointer = posReader.offset();
+      posPointerMax = posReader.end();
+      assert posPointerMax >= posPointer: "pP=" + posPointer + " pPMax=" + posPointerMax;
+      --posPointer;
+      positionsPending = freq = 0;
+    }
+
+    @Override
+    public boolean next() throws IOException {
+      if (--positionsPending >= 0) {
+        if(++posPointer >= posPointerMax) {
+          posPointerMax = posReader.fill();
+          assert posPointerMax != 0;
+          posPointer = 0;
+        }
+        start += posBuffer[posPointer];
+        end = start+1;
+        return true;
+      }
+      end = start = 0;
+      positionsPending = 0;
+      return false;
+    }
+  }
+  
+  final class TermPositionsAndPayloads extends TermPositions {
+    private final BulkPayload payloads;
+
+    TermPositionsAndPayloads(BlockReader blockReader,
+        BulkPayload payloads) {
+      super(blockReader, payloads.bytes);
+      this.payloads = payloads;
+    }
+    @Override
+    void reset() {
+      super.reset();
+      payload.offset = 0;
+      payload.length = 0;
+    }
+
+    @Override
+    public boolean next() throws IOException {
+      if (super.next()) {
+        payload.offset = this.payloads.start[posPointer];
+        payload.length = this.payloads.length[posPointer];
+        return true;
+      }
+      
+      payload.offset = 0;
+      payload.length = 0;
+      return false;
+    }
+  }
+
+  private final TermPositions getPositions(BlockReader blockReader,
+      boolean needsPayload) {
+    if (needsPayload && blockReader.payloadsAvailable()) {
+      return new TermPositionsAndPayloads(blockReader,  blockReader.getPayloadBuffer());
+    }
+    return new TermPositions(blockReader);
+  }
+ 
 }
Index: lucene/src/java/org/apache/lucene/search/Weight.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/Weight.java	(revision 1062778)
+++ lucene/src/java/org/apache/lucene/search/Weight.java	(working copy)
@@ -142,23 +142,39 @@
      */
     public final boolean topScorer;
     
+    /**
+     * if <code>true</code> Scorers / Queries supporting positional information will load
+     * positions.  
+     */
+    public final boolean needsPositions;
     
-    private static final ScorerContext DEFAULT_CONTEXT = new ScorerContext(true, false);
+    /**
+     * if <code>true</code> Scorers / Queries supporting payload information will load
+     * payloads.  
+     */
+    public final boolean needsPayloads;
+    
+    
+    private static final ScorerContext DEFAULT_CONTEXT = new ScorerContext(true, false, false, false);
 
     /**
      * Returns a default {@link ScorerContext} template initialized with:
      * <ul>
      * <li>{@link #scoreDocsInOrder} = <code>true</code></li>
      * <li>{@link #topScorer} = <code>false</code></li>
+     * <li>{@link #needsPositions} = <code>false</code></li>
+     * <li>{@link #needsPayloads} = <code>false</code></li>
      * </ul>
      */
     public static ScorerContext def() {
       return DEFAULT_CONTEXT;
     }
     
-    private ScorerContext(boolean scoreDocsInOrder, boolean topScorer) {
+    private ScorerContext(boolean scoreDocsInOrder, boolean topScorer, boolean needsPositions, boolean needsPayloads) {
       this.scoreDocsInOrder = scoreDocsInOrder;
       this.topScorer = topScorer;
+      this.needsPositions = needsPositions;
+      this.needsPayloads = needsPayloads;
     }
 
     /**
@@ -172,7 +188,7 @@
       if (this.scoreDocsInOrder == scoreDocsInOrder) {
         return this;
       }
-      return new ScorerContext(scoreDocsInOrder, topScorer);
+      return new ScorerContext(scoreDocsInOrder, topScorer, needsPositions, needsPayloads);
     }
     
     /**
@@ -186,7 +202,36 @@
       if (this.topScorer == topScorer) {
         return this;
       }
-      return new ScorerContext(scoreDocsInOrder, topScorer);
+      return new ScorerContext(scoreDocsInOrder, topScorer, needsPositions, needsPayloads);
+    }
+    
+    
+    /**
+     * Creates and returns a copy of this context with the given value for
+     * {@link #needsPositions} and returns a new instance of
+     * {@link ScorerContext} iff the given value differs from the
+     * {@link #needsPositions}. Otherwise, this method has no effect and
+     * returns this instance.
+     */
+    public ScorerContext needsPositions(boolean needsPositions) {
+      if (this.needsPositions == needsPositions) {
+        return this;
+      }
+      return new ScorerContext(scoreDocsInOrder, topScorer, needsPositions, needsPayloads);
+    }
+    
+    /**
+     * Creates and returns a copy of this context with the given value for
+     * {@link #needsPayloads} and returns a new instance of
+     * {@link ScorerContext} iff the given value differs from the
+     * {@link #needsPayloads}. Otherwise, this method has no effect and
+     * returns this instance.
+     */
+    public ScorerContext needsPayloads(boolean needsPayloads) {
+      if (this.needsPayloads == needsPayloads) {
+        return this;
+      }
+      return new ScorerContext(scoreDocsInOrder, topScorer, needsPositions, needsPayloads);
     }
   }
 }
Index: lucene/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java	(revision 1062798)
+++ lucene/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java	(working copy)
@@ -21,12 +21,12 @@
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Positions;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.search.Similarity;
 import org.apache.lucene.search.Explanation;
 import org.apache.lucene.search.ComplexExplanation;
-import org.apache.lucene.search.spans.TermSpans;
 import org.apache.lucene.search.spans.SpanTermQuery;
 import org.apache.lucene.search.spans.SpanWeight;
 import org.apache.lucene.search.spans.SpanScorer;
@@ -75,7 +75,7 @@
 
     @Override
     public Scorer scorer(AtomicReaderContext context, ScorerContext scorerContext) throws IOException {
-      return new PayloadTermSpanScorer((TermSpans) query.getSpans(context),
+      return new PayloadTermSpanScorer((SpansScorerWrapper) query.getSpans(context),
           this, similarity, context.reader.norms(query.getField()));
     }
 
@@ -83,9 +83,9 @@
       protected BytesRef payload;
       protected float payloadScore;
       protected int payloadsSeen;
-      private final TermSpans termSpans;
+      private final SpansScorerWrapper termSpans;
 
-      public PayloadTermSpanScorer(TermSpans spans, Weight weight,
+      public PayloadTermSpanScorer(SpansScorerWrapper spans, Weight weight,
           Similarity similarity, byte[] norms) throws IOException {
         super(spans, weight, similarity, norms);
         termSpans = spans;
@@ -113,9 +113,9 @@
       }
 
       protected void processPayload(Similarity similarity) throws IOException {
-        final DocsAndPositionsEnum postings = termSpans.getPostings();
-        if (postings.hasPayload()) {
-          payload = postings.getPayload();
+        final Positions postings = termSpans.getPositions();
+        if (termSpans.isPayloadAvailable()) {
+          payload = postings.payload;
           if (payload != null) {
             payloadScore = function.currentScore(doc, term.field(),
                                                  spans.start(), spans.end(), payloadsSeen, payloadScore,
Index: lucene/src/java/org/apache/lucene/search/spans/SpanTermQuery.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/spans/SpanTermQuery.java	(revision 1062798)
+++ lucene/src/java/org/apache/lucene/search/spans/SpanTermQuery.java	(working copy)
@@ -17,22 +17,42 @@
  * limitations under the License.
  */
 
-import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader.ReaderContext;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.DocsAndPositionsEnum;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Positions;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.search.Weight.ScorerContext;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.ToStringUtils;
 
 import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.Set;
 
 /** Matches spans containing a term. */
 public class SpanTermQuery extends SpanQuery {
   protected Term term;
-
+  private TermQuery termQuery;
+  private final boolean needsPayloads;
   /** Construct a SpanTermQuery matching the named term's spans. */
-  public SpanTermQuery(Term term) { this.term = term; }
+  public SpanTermQuery(Term term) { this(term, new TermQuery(term), true); }
+  public SpanTermQuery(Term term, boolean needsPayloads) { this(term, new TermQuery(term), needsPayloads); }
+  
+  
 
+  private SpanTermQuery(Term term, TermQuery query, boolean needsPayloads) {
+    this.term = term;
+    this.termQuery = query;
+    this.needsPayloads = needsPayloads;
+  }
+  
   /** Return the term whose spans are matched. */
   public Term getTerm() { return term; }
 
@@ -79,24 +99,160 @@
       return false;
     return true;
   }
-
+  
+  @Override
+  public Weight createWeight(IndexSearcher searcher) throws IOException {
+    return new SpanWeight(this, searcher);
+  }
+  
+  
+ 
   @Override
   public Spans getSpans(final AtomicReaderContext context) throws IOException {
-    final IndexReader reader = context.reader;
-    final DocsAndPositionsEnum postings = reader.termPositionsEnum(reader.getDeletedDocs(),
+    final DocsAndPositionsEnum postings = context.reader.termPositionsEnum(context.reader.getDeletedDocs(),
                                                                    term.field(),
                                                                    term.bytes());
-
+    ReaderContext topReaderContext = context.reader.getTopReaderContext();
+    Weight weight = termQuery.weight(new IndexSearcher(topReaderContext));
+    
     if (postings != null) {
-      return new TermSpans(postings, term);
+      // use the leaf context passed in: casting the top-level ReaderContext fails on composite readers
+      Scorer scorer = weight.scorer(context, ScorerContext.def().needsPositions(true).needsPayloads(needsPayloads));
+      return new SpansScorerWrapper(scorer, scorer.positions());
     } else {
-      if (reader.termDocsEnum(reader.getDeletedDocs(), term.field(), term.bytes()) != null) {
+      if (context.reader.termPositionsEnum(context.reader.getDeletedDocs(), term.field(), term.bytes()) != null) {
         // term does exist, but has no positions
         throw new IllegalStateException("field \"" + term.field() + "\" was indexed with Field.omitTermFreqAndPositions=true; cannot run SpanTermQuery (term=" + term.text() + ")");
       } else {
         // term does not exist
-        return TermSpans.EMPTY_TERM_SPANS;
+        return EMPTY_SPANS;
+      }
+    }
+  }
+  
+  
+  //public due to PayloadTermQuery
+  public static class SpansScorerWrapper extends Spans {
+    private final Scorer scorer;
+    private final Positions positions;
+    // only load each payload once
+    private boolean loadPayload = true;
+    private int doc = -1;
+    public SpansScorerWrapper(Scorer scorer, Positions positions) {
+      this.scorer = scorer;
+      this.positions = positions;
+    }
+    @Override
+    public boolean next() throws IOException {
+      loadPayload = true;
+      if(doc == -1) {
+        doc = scorer.nextDoc();
+      }
+      
+      if (doc == Scorer.NO_MORE_DOCS) {
+        return false;
+      }
+      
+      if(!positions.next()) {
+        doc = scorer.nextDoc();
+        if (doc == Scorer.NO_MORE_DOCS) {
+          return false;
+        }
+        return positions.next();
+      } 
+      return true;
+    }
+
+    @Override
+    public boolean skipTo(int target) throws IOException {
+      loadPayload = true;
+      doc = scorer.advance(target);
+      if(doc == Scorer.NO_MORE_DOCS) {
+        return false;
+      }
+      return positions.next();
+    }
+
+    @Override
+    public int doc() {
+      return doc;
+    }
+
+    @Override
+    public int start() {
+      return positions.start;
+    }
+
+    @Override
+    public int end() {
+      assert positions.start == positions.end-1;
+      return positions.end;
+    }
+
+    @Override
+    public Collection<byte[]> getPayload() throws IOException {
+      loadPayload = false;
+      BytesRef payload = positions.payload;
+      if (payload.length == 0) {
+        return Collections.emptyList();
       }
+      byte[] retVal = new byte[payload.length];
+      System.arraycopy(payload.bytes, payload.offset, retVal, 0, payload.length);
+      return Collections.singletonList(retVal);
+    }
+
+    @Override
+    public boolean isPayloadAvailable() {
+      return positions.hasPayloads && loadPayload;
+    }
+    
+    public Positions getPositions() {
+      return positions;
+    }
+    
+  }
+  
+  
+  private static final class EmptySpans extends SpansScorerWrapper {
+
+    public EmptySpans() {
+      super(null, null);
+    }
+
+    @Override
+    public boolean next() {
+      return false;
+    }
+
+    @Override
+    public boolean skipTo(int target) {
+      return false;
+    }
+
+    @Override
+    public int doc() {
+      return DocIdSetIterator.NO_MORE_DOCS;
+    }
+
+    @Override
+    public int start() {
+      return -1;
+    }
+
+    @Override
+    public int end() {
+      return -1;
+    }
+
+    @Override
+    public Collection<byte[]> getPayload() {
+      return null;
+    }
+
+    @Override
+    public boolean isPayloadAvailable() {
+      return false;
     }
   }
+
+  public static final Spans EMPTY_SPANS = new EmptySpans();
 }
Index: lucene/src/java/org/apache/lucene/search/spans/TermSpans.java
===================================================================
--- lucene/src/java/org/apache/lucene/search/spans/TermSpans.java	(revision 1062778)
+++ lucene/src/java/org/apache/lucene/search/spans/TermSpans.java	(working copy)
@@ -1,169 +0,0 @@
-package org.apache.lucene.search.spans;
-/**
- * Copyright 2005 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-import org.apache.lucene.index.Term;
-import org.apache.lucene.index.DocsAndPositionsEnum;
-import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.util.BytesRef;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.Collection;
-
-/**
- * Expert:
- * Public for extension only
- */
-public class TermSpans extends Spans {
-  protected final DocsAndPositionsEnum postings;
-  protected final Term term;
-  protected int doc;
-  protected int freq;
-  protected int count;
-  protected int position;
-
-  public TermSpans(DocsAndPositionsEnum postings, Term term) throws IOException {
-    this.postings = postings;
-    this.term = term;
-    doc = -1;
-  }
-
-  // only for EmptyTermSpans (below)
-  TermSpans() {
-    term = null;
-    postings = null;
-  }
-
-  @Override
-  public boolean next() throws IOException {
-    if (count == freq) {
-      if (postings == null) {
-        return false;
-      }
-      doc = postings.nextDoc();
-      if (doc == DocsAndPositionsEnum.NO_MORE_DOCS) {
-        return false;
-      }
-      freq = postings.freq();
-      count = 0;
-    }
-    position = postings.nextPosition();
-    count++;
-    return true;
-  }
-
-  @Override
-  public boolean skipTo(int target) throws IOException {
-    doc = postings.advance(target);
-    if (doc == DocsAndPositionsEnum.NO_MORE_DOCS) {
-      return false;
-    }
-
-    freq = postings.freq();
-    count = 0;
-    position = postings.nextPosition();
-    count++;
-
-    return true;
-  }
-
-  @Override
-  public int doc() {
-    return doc;
-  }
-
-  @Override
-  public int start() {
-    return position;
-  }
-
-  @Override
-  public int end() {
-    return position + 1;
-  }
-
-  // TODO: Remove warning after API has been finalized
-  @Override
-  public Collection<byte[]> getPayload() throws IOException {
-    final BytesRef payload = postings.getPayload();
-    final byte[] bytes;
-    if (payload != null) {
-      bytes = new byte[payload.length];
-      System.arraycopy(payload.bytes, payload.offset, bytes, 0, payload.length);
-    } else {
-      bytes = null;
-    }
-    return Collections.singletonList(bytes);
-  }
-
-  // TODO: Remove warning after API has been finalized
-  @Override
-  public boolean isPayloadAvailable() {
-    return postings.hasPayload();
-  }
-
-  @Override
-  public String toString() {
-    return "spans(" + term.toString() + ")@" +
-            (doc == -1 ? "START" : (doc == Integer.MAX_VALUE) ? "END" : doc + "-" + position);
-  }
-
-  public DocsAndPositionsEnum getPostings() {
-    return postings;
-  }
-
-  private static final class EmptyTermSpans extends TermSpans {
-
-    @Override
-    public boolean next() {
-      return false;
-    }
-
-    @Override
-    public boolean skipTo(int target) {
-      return false;
-    }
-
-    @Override
-    public int doc() {
-      return DocIdSetIterator.NO_MORE_DOCS;
-    }
-    
-    @Override
-    public int start() {
-      return -1;
-    }
-
-    @Override
-    public int end() {
-      return -1;
-    }
-
-    @Override
-    public Collection<byte[]> getPayload() {
-      return null;
-    }
-
-    @Override
-    public boolean isPayloadAvailable() {
-      return false;
-    }
-  }
-
-  public static final TermSpans EMPTY_TERM_SPANS = new EmptyTermSpans();
-}
Index: lucene/src/test/org/apache/lucene/search/TestTermQuery.java
===================================================================
--- lucene/src/test/org/apache/lucene/search/TestTermQuery.java	(revision 0)
+++ lucene/src/test/org/apache/lucene/search/TestTermQuery.java	(revision 0)
@@ -0,0 +1,289 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.analysis.MockTokenizer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader.ReaderContext;
+import org.apache.lucene.index.codecs.CodecProvider;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.Weight.ScorerContext;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.ReaderUtil;
+
+/**
+ * This class contains tests related to {@link TermQuery}
+ */
+public class TestTermQuery extends LuceneTestCase {
+
+  private String fieldName;
+  private boolean usePayload;
+
+  public void setUp() throws Exception {
+    super.setUp();
+    fieldName = "field" + random.nextInt();
+    usePayload = true;//random.nextBoolean();
+  }
+
+  /**
+   * Simple testcase for {@link TermScorer#positions()}
+   */
+  public void testPositionsSimple() throws IOException {
+    Directory directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(
+            MockTokenizer.WHITESPACE, true, usePayload)));
+    for (int i = 0; i < 39; i++) {
+      Document doc = new Document();
+      doc.add(newField(fieldName, "1 2 3 4 5 6 7 8 9 10 " +  
+          "1 2 3 4 5 6 7 8 9 10 "+ "1 2 3 4 5 6 7 8 9 10 " + 
+          "1 2 3 4 5 6 7 8 9 10", Field.Store.YES,
+          Field.Index.ANALYZED));
+      writer.addDocument(doc);
+    }
+    IndexReader reader = writer.getReader();
+    IndexSearcher searcher = new IndexSearcher(reader);
+    writer.close();
+
+    for (int i = 0; i < 39 * RANDOM_MULTIPLIER; i++) {
+      TermQuery one = new TermQuery(new Term(fieldName, "1"));
+      ReaderContext topReaderContext = reader.getTopReaderContext();
+      AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
+      Weight weight = one.createWeight(searcher);
+      for (AtomicReaderContext atomicReaderContext : leaves) {
+        Scorer scorer = weight.scorer(atomicReaderContext, ScorerContext.def()
+            .needsPositions(true).needsPayloads(usePayload));
+        assertNotNull(scorer);
+        final int advance = scorer.advance(1 + random.nextInt(27));
+        Positions positions = scorer.positions();
+        
+        do {
+          String msg = "Advanced to: " + advance + " current doc: "
+              + scorer.docID() + " usePayloads: " + usePayload;
+          assertTrue(msg, positions.next());
+          assertEquals(msg, 4, positions.freq);
+
+          assertEquals(msg, 0, positions.start);
+          assertEquals(msg, 1, positions.end);
+          checkPayload(0, positions);
+
+          assertTrue(msg, positions.next());
+          assertEquals(msg, 4, positions.freq);
+          assertEquals(msg, 10, positions.start);
+          assertEquals(msg, 11, positions.end);
+          checkPayload(10, positions);
+          
+          assertTrue(msg, positions.next());
+          assertEquals(msg, 4, positions.freq);
+
+          assertEquals(msg, 20, positions.start);
+          assertEquals(msg, 21, positions.end);
+          checkPayload(20, positions);
+          
+          assertTrue(msg, positions.next());
+          assertEquals(msg, 4, positions.freq);
+          assertEquals(msg, 30, positions.start);
+          assertEquals(msg, 31, positions.end);
+          checkPayload(30, positions);
+          
+          assertFalse(msg, positions.next());
+
+        } while (scorer.nextDoc() != Scorer.NO_MORE_DOCS);
+      }
+    }
+    searcher.close();
+    reader.close();
+    directory.close();
+  }
+  
+  public final void checkPayload(int pos, Positions positions) {
+    if(!CodecProvider.getDefault().getFieldCodec(fieldName).equals("Standard"))
+      return; // nocommit remove this once all codecs work with bulk payloads 
+    if (usePayload) {
+      assertTrue(positions.hasPayloads);
+      assertEquals("pos: "+ pos, positions.payload.utf8ToString());
+    } else {
+      assertFalse(positions.hasPayloads);
+    }
+    
+    
+  }
+
+  /**
+   * This test indexes random numbers within a range into a field and checks
+   * their occurrences by searching for a number from that range selected at
+   * random. All positions for that number are saved up front and compared to
+   * the term scorer's positions.
+   * 
+   */
+  public void testRandomPositons() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(
+            MockTokenizer.WHITESPACE, true, usePayload)));
+    int numDocs = 131;
+    int max = 1051;
+    int term = random.nextInt(max);
+    Integer[][] positionsInDoc = new Integer[numDocs][];
+    for (int i = 0; i < numDocs; i++) {
+      Document doc = new Document();
+      ArrayList<Integer> positions = new ArrayList<Integer>();
+      StringBuilder builder = new StringBuilder();
+      for (int j = 0; j < 3049; j++) {
+        int nextInt = random.nextInt(max);
+        builder.append(nextInt).append(" ");
+        if (nextInt == term) {
+          positions.add(Integer.valueOf(j));
+        }
+      }
+      doc.add(newField(fieldName, builder.toString(), Field.Store.YES,
+          Field.Index.ANALYZED));
+      positionsInDoc[i] = positions.toArray(new Integer[0]);
+      writer.addDocument(doc);
+    }
+
+    IndexReader reader = writer.getReader();
+    writer.close();
+    IndexSearcher searcher = new IndexSearcher(reader);
+
+    for (int i = 0; i < 39 * RANDOM_MULTIPLIER; i++) {
+      TermQuery one = new TermQuery(new Term(fieldName, "" + term));
+      ReaderContext topReaderContext = reader.getTopReaderContext();
+      AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
+      Weight weight = one.createWeight(searcher);
+      for (AtomicReaderContext atomicReaderContext : leaves) {
+        Scorer scorer = weight.scorer(atomicReaderContext, ScorerContext.def()
+            .needsPositions(true));
+        assertNotNull(scorer);
+        int initDoc = 0;
+        int maxDoc = atomicReaderContext.reader.maxDoc();
+        // initially advance or do next doc
+        if (random.nextBoolean()) {
+          initDoc = scorer.nextDoc();
+        } else {
+          initDoc = scorer.advance(random.nextInt(maxDoc));
+        }
+        // now run through the scorer and check if all positions are there...
+        do {
+          int docID = scorer.docID();
+          if (docID == Scorer.NO_MORE_DOCS) {
+            break;
+          }
+          Positions positions = scorer.positions();
+          Integer[] pos = positionsInDoc[atomicReaderContext.docBase + docID];
+          assertEquals(pos.length, positions.freq);
+          // number of positions read should be random - don't read all of them
+          // always
+          final int howMany = random.nextInt(20) == 0 ? pos.length
+              - random.nextInt(pos.length) : pos.length;
+          for (int j = 0; j < howMany; j++) {
+            assertTrue(positions.next());
+            assertEquals("iteration: " + i + " initDoc: " + initDoc + " doc: "
+                + docID + " base: " + atomicReaderContext.docBase
+                + " positions: " + Arrays.toString(pos) + " usePayloads: " + usePayload, pos[j].intValue(),
+                positions.start);
+            assertEquals(pos[j].intValue() + 1, positions.end);
+          }
+          if (howMany == pos.length) {
+            assertFalse(positions.next());
+          }
+
+          if (random.nextInt(10) == 0) { // once in a while advance
+            scorer.advance(docID + 1 + random.nextInt((maxDoc - docID)));
+          }
+
+        } while (scorer.nextDoc() != Scorer.NO_MORE_DOCS);
+      }
+
+    }
+    reader.close();
+    dir.close();
+  }
+
+  /**
+   * Tests retrieval of positions for terms that have a large number of
+   * occurrences to force a buffer refill during positions iteration.
+   */
+  public void testLargeNumberOfPositions() throws IOException {
+    Directory dir = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(
+            MockTokenizer.WHITESPACE, true, usePayload)));
+    int howMany = 1000;
+    for (int i = 0; i < 39; i++) {
+      Document doc = new Document();
+      StringBuilder builder = new StringBuilder();
+      for (int j = 0; j < howMany; j++) {
+        if (j % 2 == 0) {
+          builder.append("even ");
+        } else {
+          builder.append("odd ");
+        }
+      }
+      doc.add(newField(fieldName, builder.toString(), Field.Store.YES,
+          Field.Index.ANALYZED));
+      writer.addDocument(doc);
+    }
+
+    // now do searches
+    IndexReader reader = writer.getReader();
+    writer.close();
+    IndexSearcher searcher = new IndexSearcher(reader);
+
+    for (int i = 0; i < 39 * RANDOM_MULTIPLIER; i++) {
+      TermQuery one = new TermQuery(new Term(fieldName, "even"));
+      ReaderContext topReaderContext = reader.getTopReaderContext();
+      AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
+      Weight weight = one.createWeight(searcher);
+      for (AtomicReaderContext atomicReaderContext : leaves) {
+        Scorer scorer = weight.scorer(atomicReaderContext, ScorerContext.def()
+            .needsPositions(true));
+        assertNotNull(scorer);
+
+        int initDoc = 0;
+        int maxDoc = atomicReaderContext.reader.maxDoc();
+        // initially advance or do next doc
+        if (random.nextBoolean()) {
+          initDoc = scorer.nextDoc();
+        } else {
+          initDoc = scorer.advance(random.nextInt(maxDoc));
+        }
+        String msg = "Iteration: " + i + " initDoc: " + initDoc + " payloads: " + usePayload;
+        Positions positions = scorer.positions();
+        assertEquals(howMany / 2, positions.freq);
+        for (int j = 0; j < howMany; j += 2) {
+          assertTrue("next returned false at index: "+j + " with freq: " + positions.freq + " -- "+  msg, positions.next());
+          assertEquals("position missmatch index: "+j + " with freq: " + positions.freq + " -- "+  msg, j, positions.start);
+        }
+        assertFalse("next returned true -- "+  msg, positions.next());
+      }
+    }
+    reader.close();
+    dir.close();
+  }
+
+}
