Index: lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestPostingsHighlighterRanking.java
===================================================================
--- lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestPostingsHighlighterRanking.java	(revision 1464783)
+++ lucene/highlighter/src/test/org/apache/lucene/search/postingshighlight/TestPostingsHighlighterRanking.java	(working copy)
@@ -324,4 +324,39 @@
     ir.close();
     dir.close();
   }
+  
+  public void testSimpleProximity() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
+    iwc.setMergePolicy(newLogMergePolicy());
+    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
+    
+    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
+    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
+    Field body = new Field("body", "", offsetsType);
+    Document doc = new Document();
+    doc.add(body);
+    
+    body.setStringValue("Foo is present in this sentence, but quite some distance away from bar. " + 
+                        "On the other hand this sentence contains a string exactly of foo bar. " + 
+                        "This has only bar bar bar bar bar bar bar bar bar bar bar bar.");
+    iw.addDocument(doc);
+    
+    IndexReader ir = iw.getReader();
+    iw.close();
+    
+    IndexSearcher searcher = newSearcher(ir);
+    PostingsHighlighter highlighter = new PostingsHighlighter();
+    BooleanQuery query = new BooleanQuery();
+    query.add(new TermQuery(new Term("body", "foo")), BooleanClause.Occur.SHOULD);
+    query.add(new TermQuery(new Term("body", "bar")), BooleanClause.Occur.SHOULD);
+    TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
+    assertEquals(1, topDocs.totalHits);
+    String snippets[] = highlighter.highlight("body", query, searcher, topDocs, 1);
+    assertEquals(1, snippets.length);
+    assertTrue(snippets[0], snippets[0].startsWith("On the other hand"));
+    
+    ir.close();
+    dir.close();
+  }
 }
Index: lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PassageScorer.java
===================================================================
--- lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PassageScorer.java	(revision 1464783)
+++ lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PassageScorer.java	(working copy)
@@ -84,10 +84,19 @@
    * @param passageLen length of the passage in characters.
    * @return term weight
    */
-  public float tf(int freq, int passageLen) {
+  public float tf(float freq, int passageLen) {
     float norm = k1 * ((1 - b) + b * (passageLen / pivot));
     return freq / (freq + norm);
   }
+  
+  /**
+   * Increments the accumulator for a proximity match.
+   * @param distance number of positions between this term and an adjacent term
+   * @return increment
+   */
+  public float inc(int distance) {
+    return 1f/(distance * distance);
+  }
     
   /**
    * Normalize a passage according to its position in the document.
Index: lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java
===================================================================
--- lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java	(revision 1464783)
+++ lucene/highlighter/src/java/org/apache/lucene/search/postingshighlight/PostingsHighlighter.java	(working copy)
@@ -23,12 +23,11 @@
 import java.util.Arrays;
 import java.util.Comparator;
 import java.util.HashMap;
+import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 import java.util.PriorityQueue;
-import java.util.SortedSet;
-import java.util.TreeSet;
 
 import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
@@ -49,7 +48,6 @@
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.SorterTemplate;
-import org.apache.lucene.util.UnicodeUtil;
 
 /**
  * Simple highlighter that does not analyze fields nor use
@@ -297,7 +295,8 @@
     }
     final IndexReader reader = searcher.getIndexReader();
     query = rewrite(query);
-    SortedSet<Term> queryTerms = new TreeSet<Term>();
+    // preserve insertion order for prox scoring
+    LinkedHashSet<Term> queryTerms = new LinkedHashSet<Term>();
     query.extractTerms(queryTerms);
 
     IndexReaderContext readerContext = reader.getContext();
@@ -346,21 +345,21 @@
     // pull stored data:
     String[][] contents = loadFieldValues(searcher, fields, docids, maxLength);
     
-    Map<String,String[]> highlights = new HashMap<String,String[]>();;
+    Map<String,String[]> highlights = new HashMap<String,String[]>();
     for (int i = 0; i < fields.length; i++) {
       String field = fields[i];
       int numPassages = maxPassages[i];
-      Term floor = new Term(field, "");
-      Term ceiling = new Term(field, UnicodeUtil.BIG_TERM);
-      SortedSet<Term> fieldTerms = queryTerms.subSet(floor, ceiling);
+      ArrayList<BytesRef> fieldTerms = new ArrayList<BytesRef>();
+      // TODO: this could be done smarter in case there are many terms and fields...
+      // but this highlighter won't be great for that case anyway
+      for (Term t : queryTerms) {
+        if (field.equals(t.field())) {
+          fieldTerms.add(t.bytes());
+        }
+      }
       // TODO: should we have some reasonable defaults for term pruning? (e.g. stopwords)
 
-      // Strip off the redundant field:
-      BytesRef terms[] = new BytesRef[fieldTerms.size()];
-      int termUpto = 0;
-      for(Term term : fieldTerms) {
-        terms[termUpto++] = term.bytes();
-      }
+      BytesRef terms[] = fieldTerms.toArray(new BytesRef[fieldTerms.size()]);
       Map<Integer,String> fieldHighlights = highlightField(field, contents[i], getBreakIterator(field), terms, docids, leaves, numPassages);
         
       String[] result = new String[docids.length];
@@ -474,12 +473,12 @@
 
       if (doc == pDoc) {
         weights[i] = scorer.weight(contentLength, de.freq());
-        de.nextPosition();
-        pq.add(new OffsetsEnum(de, i));
+        int pos = de.nextPosition();
+        pq.add(new OffsetsEnum(de, i, pos));
       }
     }
     
-    pq.add(new OffsetsEnum(EMPTY, Integer.MAX_VALUE)); // a sentinel for termination
+    pq.add(new OffsetsEnum(EMPTY, Integer.MAX_VALUE, Integer.MAX_VALUE)); // a sentinel for termination
     
     PriorityQueue<Passage> passageQueue = new PriorityQueue<Passage>(n, new Comparator<Passage>() {
       @Override
@@ -495,6 +494,12 @@
     });
     Passage current = new Passage();
     
+    // initialize prox scoring
+    // TODO: should this be in passage?
+    int lastPos = 0;
+    int lastTermID = -1;
+    float lastAccum = 0;
+    
     OffsetsEnum off;
     while ((off = pq.poll()) != null) {
       final DocsAndPositionsEnum dp = off.dp;
@@ -506,6 +511,10 @@
       if (start >= current.endOffset) {
         if (current.startOffset >= 0) {
           // finalize current
+          // last term's accumulator
+          if (lastAccum > 0) {
+            current.score += Math.min(1f, weights[lastTermID]) * scorer.tf(lastAccum, current.endOffset - current.startOffset);
+          }
           current.score *= scorer.norm(current.startOffset);
           // new sentence: first add 'current' to queue 
           if (passageQueue.size() == n && current.score < passageQueue.peek().score) {
@@ -519,6 +528,9 @@
               current = new Passage();
             }
           }
+          // reset prox scoring
+          lastTermID = -1;
+          lastAccum = 0;
         }
         // if we exceed limit, we are done
         if (start >= contentLength) {
@@ -542,14 +554,16 @@
         current.endOffset = Math.min(bi.next(), contentLength);
       }
       int tf = 0;
+      int pos = 0;
       while (true) {
         tf++;
+        pos = off.pos;
         current.addMatch(start, end, terms[off.id]);
-        if (off.pos == dp.freq()) {
+        if (off.posCount == dp.freq()) {
           break; // removed from pq
         } else {
-          off.pos++;
-          dp.nextPosition();
+          off.posCount++;
+          off.pos = dp.nextPosition();
           start = dp.startOffset();
           end = dp.endOffset();
         }
@@ -558,7 +572,21 @@
           break;
         }
       }
+      // note, this currently cheats: it only uses the highest position value for each term within each passage
+      // alternatively we could mark the previous term's positions in a bitset, or even do the whole scoring as "PAAT" 
+      // and keep accumulators... but this is all probably slow and overkill for highlighting short passages.
+      float accum = 0;
+      if (lastTermID >= 0 && pos > lastPos) {
+        float increment = scorer.inc(pos - lastPos);
+        accum += weights[lastTermID] * increment;
+        lastAccum += weights[off.id] * increment;
+        // finalize the previous term's accumulator into the score, it's done.
+        current.score += Math.min(1f, weights[lastTermID]) * scorer.tf(lastAccum, current.endOffset - current.startOffset);
+      }
       current.score += weights[off.id] * scorer.tf(tf, current.endOffset - current.startOffset);
+      lastPos = pos;
+      lastTermID = off.id;
+      lastAccum = accum;
     }
 
     // Dead code but compiler disagrees:
@@ -594,12 +622,14 @@
   private static class OffsetsEnum implements Comparable<OffsetsEnum> {
     DocsAndPositionsEnum dp;
     int pos;
+    int posCount;
     int id;
     
-    OffsetsEnum(DocsAndPositionsEnum dp, int id) throws IOException {
+    OffsetsEnum(DocsAndPositionsEnum dp, int id, int pos) throws IOException {
       this.dp = dp;
       this.id = id;
-      this.pos = 1;
+      this.pos = pos;
+      this.posCount = 1;
     }
 
     @Override
