diff --git a/lucene/MIGRATE.txt b/lucene/MIGRATE.txt
index d486a21..9d02db8 100644
--- a/lucene/MIGRATE.txt
+++ b/lucene/MIGRATE.txt
@@ -19,14 +19,12 @@ FunctionScoreQuery maps negative values to 0.
 
 ## CustomScoreQuery, BoostedQuery and BoostingQuery removed (LUCENE-8099) ##
 
-Instead use FunctionScoreQuery and a DoubleValuesSource implementation.  BoostedQuery
-and BoostingQuery may be replaced by calls to FunctionScoreQuery.boostByValue() and
-FunctionScoreQuery.boostByQuery().  To replace more complex calculations in
-CustomScoreQuery, use the lucene-expressions module:
+Instead use FunctionScoreQuery and a DoubleValuesSource implementation.  For example,
+to replace the functionality of BoostedQuery, you could do the following, using
+the lucene-expressions module:
 
 SimpleBindings bindings = new SimpleBindings();
 bindings.add("score", DoubleValuesSource.SCORES);
-bindings.add("boost1", DoubleValuesSource.fromIntField("myboostfield"));
-bindings.add("boost2", DoubleValuesSource.fromIntField("myotherboostfield"));
-Expression expr = JavascriptCompiler.compile("score * (boost1 + ln(boost2))");
+bindings.add("boost", DoubleValuesSource.fromIntField("myboostfield"));
+Expression expr = JavascriptCompiler.compile("score * boost");
 FunctionScoreQuery q = new FunctionScoreQuery(inputQuery, expr.getDoubleValuesSource(bindings));
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsReader.java
index 17024d8..83e4817 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsReader.java
@@ -38,6 +38,7 @@ import org.apache.lucene.index.SegmentReadState;
 import org.apache.lucene.index.TermState;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.similarities.Similarity.SimScorer;
 import org.apache.lucene.store.ByteArrayDataInput;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.util.Accountable;
@@ -659,6 +660,12 @@ public class BlockTermsReader extends FieldsProducer {
       }
 
       @Override
+      public PostingsEnum topPostings(SimScorer scorer) throws IOException {
+        decodeMetaData();
+        return postingsReader.topPostings(fieldInfo, state, scorer);
+      }
+
+      @Override
       public void seekExact(BytesRef target, TermState otherState) {
         //System.out.println("BTR.seekExact termState target=" + target.utf8ToString() + " " + target + " this=" + this);
         assert otherState != null && otherState instanceof BlockTermState;
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsWriter.java
index f19cd2c..9ed87b5 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsWriter.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/blockterms/BlockTermsWriter.java
@@ -26,6 +26,7 @@ import java.util.List;
 import org.apache.lucene.codecs.BlockTermState;
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.codecs.FieldsConsumer;
+import org.apache.lucene.codecs.NormsProducer;
 import org.apache.lucene.codecs.PostingsWriterBase;
 import org.apache.lucene.codecs.TermStats;
 import org.apache.lucene.index.IndexOptions;
@@ -127,7 +128,7 @@ public class BlockTermsWriter extends FieldsConsumer implements Closeable {
   }
 
   @Override
-  public void write(Fields fields) throws IOException {
+  public void write(Fields fields, NormsProducer norms) throws IOException {
 
     for(String field : fields) {
 
@@ -146,7 +147,7 @@ public class BlockTermsWriter extends FieldsConsumer implements Closeable {
           break;
         }
 
-        termsWriter.write(term, termsEnum);
+        termsWriter.write(term, termsEnum, norms);
       }
 
       termsWriter.finish();
@@ -232,9 +233,9 @@ public class BlockTermsWriter extends FieldsConsumer implements Closeable {
     
     private final BytesRefBuilder lastPrevTerm = new BytesRefBuilder();
 
-    void write(BytesRef text, TermsEnum termsEnum) throws IOException {
+    void write(BytesRef text, TermsEnum termsEnum, NormsProducer norms) throws IOException {
 
-      BlockTermState state = postingsWriter.writeTerm(text, termsEnum, docsSeen);
+      BlockTermState state = postingsWriter.writeTerm(text, termsEnum, docsSeen, norms);
       if (state == null) {
         // No docs for this term:
         return;
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsBlockTreeTermsWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsBlockTreeTermsWriter.java
index b16bb15..bdc0738 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsBlockTreeTermsWriter.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsBlockTreeTermsWriter.java
@@ -24,6 +24,7 @@ import java.util.List;
 import org.apache.lucene.codecs.BlockTermState;
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.codecs.FieldsConsumer;
+import org.apache.lucene.codecs.NormsProducer;
 import org.apache.lucene.codecs.PostingsWriterBase;
 import org.apache.lucene.codecs.blocktree.BlockTreeTermsWriter; // javadocs
 import org.apache.lucene.codecs.blocktreeords.FSTOrdsOutputs.Output;
@@ -213,7 +214,7 @@ public final class OrdsBlockTreeTermsWriter extends FieldsConsumer {
   }
 
   @Override
-  public void write(Fields fields) throws IOException {
+  public void write(Fields fields, NormsProducer norms) throws IOException {
 
     String lastField = null;
     for(String field : fields) {
@@ -233,7 +234,7 @@ public final class OrdsBlockTreeTermsWriter extends FieldsConsumer {
         if (term == null) {
           break;
         }
-        termsWriter.write(term, termsEnum);
+        termsWriter.write(term, termsEnum, norms);
       }
 
       termsWriter.finish();
@@ -771,7 +772,7 @@ public final class OrdsBlockTreeTermsWriter extends FieldsConsumer {
     }
     
     /** Writes one term's worth of postings. */
-    public void write(BytesRef text, TermsEnum termsEnum) throws IOException {
+    public void write(BytesRef text, TermsEnum termsEnum, NormsProducer norms) throws IOException {
       /*
       if (DEBUG) {
         int[] tmp = new int[lastTerm.length];
@@ -780,7 +781,7 @@ public final class OrdsBlockTreeTermsWriter extends FieldsConsumer {
       }
       */
 
-      BlockTermState state = postingsWriter.writeTerm(text, termsEnum, docsSeen);
+      BlockTermState state = postingsWriter.writeTerm(text, termsEnum, docsSeen, norms);
       if (state != null) {
         assert state.docFreq != 0;
         assert fieldInfo.getIndexOptions() == IndexOptions.DOCS || state.totalTermFreq >= state.docFreq: "postingsWriter=" + postingsWriter;
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsIntersectTermsEnum.java b/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsIntersectTermsEnum.java
index 6c2d2bc..c1e7007 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsIntersectTermsEnum.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/blocktreeords/OrdsIntersectTermsEnum.java
@@ -23,6 +23,7 @@ import org.apache.lucene.codecs.blocktreeords.FSTOrdsOutputs.Output;
 import org.apache.lucene.index.PostingsEnum;
 import org.apache.lucene.index.TermState;
 import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.similarities.Similarity.SimScorer;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.BytesRef;
@@ -205,6 +206,12 @@ final class OrdsIntersectTermsEnum extends TermsEnum {
     return fr.parent.postingsReader.postings(fr.fieldInfo, currentFrame.termState, reuse, flags);
   }
 
+  @Override
+  public PostingsEnum topPostings(SimScorer scorer) throws IOException {
+    currentFrame.decodeMetaData();
+    return fr.parent.postingsReader.topPostings(fr.fieldInfo, currentFrame.termState, scorer);
+  }
+
   private int getState() {
     int state = currentFrame.state;
     for(int idx=0;idx<currentFrame.suffix;idx++) {
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java
index b864bf0..e1a3d44 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/bloom/BloomFilteringPostingsFormat.java
@@ -29,6 +29,7 @@ import java.util.Map.Entry;
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.codecs.FieldsConsumer;
 import org.apache.lucene.codecs.FieldsProducer;
+import org.apache.lucene.codecs.NormsProducer;
 import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.codecs.bloom.FuzzySet.ContainsResult;
 import org.apache.lucene.index.PostingsEnum;
@@ -416,7 +417,7 @@ public final class BloomFilteringPostingsFormat extends PostingsFormat {
     }
 
     @Override
-    public void write(Fields fields) throws IOException {
+    public void write(Fields fields, NormsProducer norms) throws IOException {
 
       // Delegate must write first: it may have opened files
       // on creating the class
@@ -424,7 +425,7 @@ public final class BloomFilteringPostingsFormat extends PostingsFormat {
       // close them; alternatively, if we delayed pulling
       // the fields consumer until here, we could do it
       // afterwards:
-      delegateFieldsConsumer.write(fields);
+      delegateFieldsConsumer.write(fields, norms);
 
       for(String field : fields) {
         Terms terms = fields.terms(field);
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java
index 5ba4c5f..a1e3592 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsReader.java
@@ -41,6 +41,7 @@ import org.apache.lucene.index.SegmentReadState;
 import org.apache.lucene.index.TermState;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.similarities.Similarity.SimScorer;
 import org.apache.lucene.store.ByteArrayDataInput;
 import org.apache.lucene.store.ChecksumIndexInput;
 import org.apache.lucene.store.IndexInput;
@@ -432,6 +433,12 @@ public class FSTOrdTermsReader extends FieldsProducer {
         return postingsReader.postings(fieldInfo, state, reuse, flags);
       }
 
+      @Override
+      public PostingsEnum topPostings(SimScorer scorer) throws IOException {
+        decodeMetaData();
+        return postingsReader.topPostings(fieldInfo, state, scorer);
+      }
+
       // TODO: this can be achieved by making use of Util.getByOutput()
       //           and should have related tests
       @Override
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsWriter.java
index cbe6583..b59d41d 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsWriter.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTOrdTermsWriter.java
@@ -24,6 +24,7 @@ import java.util.List;
 import org.apache.lucene.codecs.BlockTermState;
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.codecs.FieldsConsumer;
+import org.apache.lucene.codecs.NormsProducer;
 import org.apache.lucene.codecs.PostingsWriterBase;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
@@ -186,7 +187,7 @@ public class FSTOrdTermsWriter extends FieldsConsumer {
   }
 
   @Override
-  public void write(Fields fields) throws IOException {
+  public void write(Fields fields, NormsProducer norms) throws IOException {
     for(String field : fields) {
       Terms terms = fields.terms(field);
       if (terms == null) {
@@ -205,7 +206,7 @@ public class FSTOrdTermsWriter extends FieldsConsumer {
         if (term == null) {
           break;
         }
-        BlockTermState termState = postingsWriter.writeTerm(term, termsEnum, docsSeen);
+        BlockTermState termState = postingsWriter.writeTerm(term, termsEnum, docsSeen, norms);
         if (termState != null) {
           termsWriter.finishTerm(term, termState);
           sumTotalTermFreq += termState.totalTermFreq;
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java
index 8dda05c..6a39e51 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsReader.java
@@ -41,6 +41,7 @@ import org.apache.lucene.index.SegmentReadState;
 import org.apache.lucene.index.TermState;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.similarities.Similarity.SimScorer;
 import org.apache.lucene.store.ByteArrayDataInput;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.util.Accountable;
@@ -299,6 +300,12 @@ public class FSTTermsReader extends FieldsProducer {
       }
 
       @Override
+      public PostingsEnum topPostings(SimScorer scorer) throws IOException {
+        decodeMetaData();
+        return postingsReader.topPostings(fieldInfo, state, scorer);
+      }
+
+      @Override
       public void seekExact(long ord) throws IOException {
         throw new UnsupportedOperationException();
       }
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsWriter.java
index 8284d74..8e55d41 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsWriter.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/FSTTermsWriter.java
@@ -24,6 +24,7 @@ import java.util.List;
 import org.apache.lucene.codecs.BlockTermState;
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.codecs.FieldsConsumer;
+import org.apache.lucene.codecs.NormsProducer;
 import org.apache.lucene.codecs.PostingsWriterBase;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.FieldInfo;
@@ -158,7 +159,7 @@ public class FSTTermsWriter extends FieldsConsumer {
   }
 
   @Override
-  public void write(Fields fields) throws IOException {
+  public void write(Fields fields, NormsProducer norms) throws IOException {
     for(String field : fields) {
       Terms terms = fields.terms(field);
       if (terms == null) {
@@ -179,7 +180,7 @@ public class FSTTermsWriter extends FieldsConsumer {
           break;
         }
             
-        BlockTermState termState = postingsWriter.writeTerm(term, termsEnum, docsSeen);
+        BlockTermState termState = postingsWriter.writeTerm(term, termsEnum, docsSeen, norms);
         if (termState != null) {
           termsWriter.finishTerm(term, termState);
           sumTotalTermFreq += termState.totalTermFreq;
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
index 21983c6..4e4d1f6 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/memory/MemoryPostingsFormat.java
@@ -27,6 +27,7 @@ import java.util.TreeMap;
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.codecs.FieldsConsumer;
 import org.apache.lucene.codecs.FieldsProducer;
+import org.apache.lucene.codecs.NormsProducer;
 import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.codecs.TermStats;
 import org.apache.lucene.index.CorruptIndexException;
@@ -287,7 +288,7 @@ public final class MemoryPostingsFormat extends PostingsFormat {
     }
 
     @Override
-    public void write(Fields fields) throws IOException {
+    public void write(Fields fields, NormsProducer norms) throws IOException {
       for(String field : fields) {
 
         Terms terms = fields.terms(field);
diff --git a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsWriter.java b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsWriter.java
index 2023552..d8299b5 100644
--- a/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsWriter.java
+++ b/lucene/codecs/src/java/org/apache/lucene/codecs/simpletext/SimpleTextFieldsWriter.java
@@ -20,6 +20,7 @@ package org.apache.lucene.codecs.simpletext;
 import java.io.IOException;
 
 import org.apache.lucene.codecs.FieldsConsumer;
+import org.apache.lucene.codecs.NormsProducer;
 import org.apache.lucene.index.PostingsEnum;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
@@ -56,7 +57,7 @@ class SimpleTextFieldsWriter extends FieldsConsumer {
   }
 
   @Override
-  public void write(Fields fields) throws IOException {
+  public void write(Fields fields, NormsProducer norms) throws IOException {
     write(writeState.fieldInfos, fields);
   }
 
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/CompetitiveFreqNormAccumulator.java b/lucene/core/src/java/org/apache/lucene/codecs/CompetitiveFreqNormAccumulator.java
new file mode 100644
index 0000000..e6b56f7
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/codecs/CompetitiveFreqNormAccumulator.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.codecs;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+/**
+ * This class accumulates the (freq, norm) pairs that may produce competitive scores.
+ */
+public final class CompetitiveFreqNormAccumulator {
+
+  // We speed up accumulation for common norm values by first computing
+  // the max freq for all norms in -128..127
+  private final int[] maxFreqs;
+  private boolean dirty;
+  private final TreeSet<FreqAndNorm> freqNormPairs;
+
+  /** Sole constructor. */
+  public CompetitiveFreqNormAccumulator() {
+    maxFreqs = new int[256];
+    Comparator<FreqAndNorm> comparator = new Comparator<CompetitiveFreqNormAccumulator.FreqAndNorm>() {
+      @Override
+      public int compare(FreqAndNorm o1, FreqAndNorm o2) {
+        // greater freqs compare greater
+        int cmp = Integer.compare(o1.freq, o2.freq);
+        if (cmp == 0) {
+          // greater norms compare lower
+          cmp = Long.compareUnsigned(o2.norm, o1.norm);
+        }
+        return cmp;
+      }
+    };
+    freqNormPairs = new TreeSet<>(comparator);
+  }
+
+  /** Reset to the same state it was in after creation. */
+  public void clear() {
+    Arrays.fill(maxFreqs, 0);
+    dirty = false;
+    freqNormPairs.clear();
+  }
+
+  /**
+   * A (freq, norm) pair.
+   */
+  public static class FreqAndNorm {
+    public final int freq;
+    public final long norm;
+
+    /** Sole constructor. */
+    public FreqAndNorm(int freq, long norm) {
+      this.freq = freq;
+      this.norm = norm;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+      if (obj == null || obj instanceof FreqAndNorm == false) {
+        return false;
+      }
+      FreqAndNorm that = (FreqAndNorm) obj;
+      return freq == that.freq && norm == that.norm;
+    }
+
+    @Override
+    public int hashCode() {
+      int h = getClass().hashCode();
+      h = 31 * h + freq;
+      h = 31 * h + Long.hashCode(norm);
+      return h;
+    }
+
+    @Override
+    public String toString() {
+      return "{" + freq + "," + norm + "}";
+    }
+  }
+
+  /** Accumulate a (freq,norm) pair, updating this structure if there is no
+   *  equivalent or more competitive entry already. */
+  public void add(int freq, long norm) {
+    if (norm >= Byte.MIN_VALUE && norm <= Byte.MAX_VALUE) {
+      int index = Byte.toUnsignedInt((byte) norm);
+      maxFreqs[index] = Math.max(maxFreqs[index], freq); 
+      dirty = true;
+    } else {
+      add(new FreqAndNorm(freq, norm));
+    }
+  }
+
+  /** Merge {@code acc} into this. */
+  public void addAll(CompetitiveFreqNormAccumulator acc) {
+    for (FreqAndNorm entry : acc.getCompetitiveFreqNormPairs()) {
+      add(entry);
+    }
+  }
+
+  /** Get the set of competitive freq and norm pairs, ordered by increasing freq and norm. */
+  public SortedSet<FreqAndNorm> getCompetitiveFreqNormPairs() {
+    if (dirty) {
+      for (int i = 0; i < maxFreqs.length; ++i) {
+        if (maxFreqs[i] > 0) {
+          add(new FreqAndNorm(maxFreqs[i], (byte) i));
+          maxFreqs[i] = 0;
+        }
+      }
+      dirty = false;
+    }
+    return Collections.unmodifiableSortedSet(freqNormPairs);
+  }
+
+  private void add(FreqAndNorm newEntry) {
+    FreqAndNorm next = freqNormPairs.ceiling(newEntry);
+    if (next == null) {
+      // nothing is more competitive
+      freqNormPairs.add(newEntry);
+    } else if (Long.compareUnsigned(next.norm, newEntry.norm) <= 0) {
+      // we already have this entry or more competitive entries in the tree
+      return;
+    } else {
+      // some entries have a greater freq but a less competitive norm, so we
+      // don't know which one will trigger greater scores, still add to the tree
+      freqNormPairs.add(newEntry);
+    }
+
+    for (Iterator<FreqAndNorm> it = freqNormPairs.headSet(newEntry, false).descendingIterator(); it.hasNext(); ) {
+      FreqAndNorm entry = it.next();
+      if (Long.compareUnsigned(entry.norm, newEntry.norm) >= 0) {
+        // less competitive
+        it.remove();
+      } else {
+        // lesser freq but better norm, further entries are not comparable
+        break;
+      }
+    }
+  }
+
+  @Override
+  public String toString() {
+    return getCompetitiveFreqNormPairs().toString();
+  }
+}
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/FieldsConsumer.java b/lucene/core/src/java/org/apache/lucene/codecs/FieldsConsumer.java
index 28bae1d..f4fc9ac 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/FieldsConsumer.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/FieldsConsumer.java
@@ -76,14 +76,14 @@ public abstract class FieldsConsumer implements Closeable {
    *         live docs when pulling docs/positions enums.
    *  </ul>
    */
-  public abstract void write(Fields fields) throws IOException;
+  public abstract void write(Fields fields, NormsProducer norms) throws IOException;
   
   /** Merges in the fields from the readers in 
    *  <code>mergeState</code>. The default implementation skips
-   *  and maps around deleted documents, and calls {@link #write(Fields)}.
+   *  and maps around deleted documents, and calls {@link #write(Fields,NormsProducer)}.
    *  Implementations can override this method for more sophisticated
    *  merging (bulk-byte copying, etc). */
-  public void merge(MergeState mergeState) throws IOException {
+  public void merge(MergeState mergeState, NormsProducer norms) throws IOException {
     final List<Fields> fields = new ArrayList<>();
     final List<ReaderSlice> slices = new ArrayList<>();
 
@@ -102,7 +102,7 @@ public abstract class FieldsConsumer implements Closeable {
     Fields mergedFields = new MappedMultiFields(mergeState, 
                                                 new MultiFields(fields.toArray(Fields.EMPTY_ARRAY),
                                                                 slices.toArray(ReaderSlice.EMPTY_ARRAY)));
-    write(mergedFields);
+    write(mergedFields, norms);
   }
 
   // NOTE: strange but necessary so javadocs linting is happy:
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/MultiLevelSkipListReader.java b/lucene/core/src/java/org/apache/lucene/codecs/MultiLevelSkipListReader.java
index c937886..517c731 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/MultiLevelSkipListReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/MultiLevelSkipListReader.java
@@ -40,8 +40,8 @@ public abstract class MultiLevelSkipListReader implements Closeable {
   /** the maximum number of skip levels possible for this index */
   protected int maxNumberOfSkipLevels; 
   
-  // number of levels in this skip list
-  private int numberOfSkipLevels;
+  /** number of levels in this skip list */
+  protected int numberOfSkipLevels;
   
   // Expert: defines the number of top skip levels to buffer in memory.
   // Reducing this number results in less memory usage, but possibly
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/MultiLevelSkipListWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/MultiLevelSkipListWriter.java
index 207b324..8e090be 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/MultiLevelSkipListWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/MultiLevelSkipListWriter.java
@@ -53,13 +53,13 @@ import org.apache.lucene.util.MathUtil;
 
 public abstract class MultiLevelSkipListWriter {
   /** number of levels in this skip list */
-  protected int numberOfSkipLevels;
+  protected final int numberOfSkipLevels;
   
   /** the skip interval in the list with level = 0 */
-  private int skipInterval;
+  private final int skipInterval;
 
   /** skipInterval used for level &gt; 0 */
-  private int skipMultiplier;
+  private final int skipMultiplier;
   
   /** for every skip level a different buffer is used  */
   private RAMOutputStream[] skipBuffer;
@@ -69,6 +69,7 @@ public abstract class MultiLevelSkipListWriter {
     this.skipInterval = skipInterval;
     this.skipMultiplier = skipMultiplier;
     
+    int numberOfSkipLevels;
     // calculate the maximum number of skip levels for this document frequency
     if (df <= skipInterval) {
       numberOfSkipLevels = 1;
@@ -80,6 +81,7 @@ public abstract class MultiLevelSkipListWriter {
     if (numberOfSkipLevels > maxSkipLevels) {
       numberOfSkipLevels = maxSkipLevels;
     }
+    this.numberOfSkipLevels = numberOfSkipLevels;
   }
   
   /** Creates a {@code MultiLevelSkipListWriter}, where
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/PostingsReaderBase.java b/lucene/core/src/java/org/apache/lucene/codecs/PostingsReaderBase.java
index 56cbab5..1539605 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/PostingsReaderBase.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/PostingsReaderBase.java
@@ -23,6 +23,7 @@ import java.io.IOException;
 import org.apache.lucene.index.PostingsEnum;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.SegmentReadState;
+import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.store.DataInput;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.util.Accountable;
@@ -65,7 +66,17 @@ public abstract class PostingsReaderBase implements Closeable, Accountable {
   /** Must fully consume state, since after this call that
    *  TermState may be reused. */
   public abstract PostingsEnum postings(FieldInfo fieldInfo, BlockTermState state, PostingsEnum reuse, int flags) throws IOException;
-  
+
+  /**
+   * Return a {@link PostingsEnum} that optionally knows how to skip based on scores.
+   * The returned {@link PostingsEnum} must support {@link PostingsEnum#FREQS frequencies}.
+   * The default implementation falls back to
+   * {@link #postings(FieldInfo, BlockTermState, PostingsEnum, int) postings(fieldInfo, state, null, PostingsEnum.FREQS)}.
+   */
+  public PostingsEnum topPostings(FieldInfo fieldInfo, BlockTermState state, Similarity.SimScorer scorer) throws IOException {
+    return postings(fieldInfo, state, null, PostingsEnum.FREQS);
+  }
+
   /** 
    * Checks consistency of this reader.
    * <p>
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/PostingsWriterBase.java b/lucene/core/src/java/org/apache/lucene/codecs/PostingsWriterBase.java
index b4f2d4e..48c6027 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/PostingsWriterBase.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/PostingsWriterBase.java
@@ -60,7 +60,7 @@ public abstract class PostingsWriterBase implements Closeable {
    *  FixedBitSet} for every docID written.  If no docs
    *  were written, this method should return null, and the
    *  terms dict will skip the term. */
-  public abstract BlockTermState writeTerm(BytesRef term, TermsEnum termsEnum, FixedBitSet docsSeen) throws IOException;
+  public abstract BlockTermState writeTerm(BytesRef term, TermsEnum termsEnum, FixedBitSet docsSeen, NormsProducer norms) throws IOException;
 
   /**
    * Encode metadata as long[] and byte[]. {@code absolute} controls whether 
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/PushPostingsWriterBase.java b/lucene/core/src/java/org/apache/lucene/codecs/PushPostingsWriterBase.java
index 1fb83b9..98bdd91 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/PushPostingsWriterBase.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/PushPostingsWriterBase.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import org.apache.lucene.index.PostingsEnum;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.FixedBitSet;
@@ -74,7 +75,7 @@ public abstract class PushPostingsWriterBase extends PostingsWriterBase {
   /** Start a new term.  Note that a matching call to {@link
    *  #finishTerm(BlockTermState)} is done, only if the term has at least one
    *  document. */
-  public abstract void startTerm() throws IOException;
+  public abstract void startTerm(NumericDocValues norms) throws IOException;
 
   /** Finishes the current term.  The provided {@link
    *  BlockTermState} contains the term's summary statistics, 
@@ -117,8 +118,14 @@ public abstract class PushPostingsWriterBase extends PostingsWriterBase {
   }
 
   @Override
-  public final BlockTermState writeTerm(BytesRef term, TermsEnum termsEnum, FixedBitSet docsSeen) throws IOException {
-    startTerm();
+  public final BlockTermState writeTerm(BytesRef term, TermsEnum termsEnum, FixedBitSet docsSeen, NormsProducer norms) throws IOException {
+    NumericDocValues normValues;
+    if (fieldInfo.hasNorms() == false) {
+      normValues = null;
+    } else {
+      normValues = norms.getNorms(fieldInfo);
+    }
+    startTerm(normValues);
     postingsEnum = termsEnum.postings(postingsEnum, enumFlags);
     assert postingsEnum != null;
 
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsWriter.java
index bdacc22..ec3f6e6 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/BlockTreeTermsWriter.java
@@ -24,6 +24,7 @@ import java.util.List;
 import org.apache.lucene.codecs.BlockTermState;
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.codecs.FieldsConsumer;
+import org.apache.lucene.codecs.NormsProducer;
 import org.apache.lucene.codecs.PostingsWriterBase;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
@@ -315,7 +316,7 @@ public final class BlockTreeTermsWriter extends FieldsConsumer {
   }
 
   @Override
-  public void write(Fields fields) throws IOException {
+  public void write(Fields fields, NormsProducer norms) throws IOException {
     //if (DEBUG) System.out.println("\nBTTW.write seg=" + segment);
 
     String lastField = null;
@@ -340,7 +341,7 @@ public final class BlockTreeTermsWriter extends FieldsConsumer {
         }
 
         //if (DEBUG) System.out.println("write field=" + fieldInfo.name + " term=" + brToString(term));
-        termsWriter.write(term, termsEnum);
+        termsWriter.write(term, termsEnum, norms);
       }
 
       termsWriter.finish();
@@ -852,7 +853,7 @@ public final class BlockTreeTermsWriter extends FieldsConsumer {
     }
     
     /** Writes one term's worth of postings. */
-    public void write(BytesRef text, TermsEnum termsEnum) throws IOException {
+    public void write(BytesRef text, TermsEnum termsEnum, NormsProducer norms) throws IOException {
       /*
       if (DEBUG) {
         int[] tmp = new int[lastTerm.length];
@@ -861,7 +862,7 @@ public final class BlockTreeTermsWriter extends FieldsConsumer {
       }
       */
 
-      BlockTermState state = postingsWriter.writeTerm(text, termsEnum, docsSeen);
+      BlockTermState state = postingsWriter.writeTerm(text, termsEnum, docsSeen, norms);
       if (state != null) {
 
         assert state.docFreq != 0;
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnum.java b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnum.java
index 19e56a4..99f6787 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/IntersectTermsEnum.java
@@ -23,6 +23,7 @@ import org.apache.lucene.index.PostingsEnum;
 import org.apache.lucene.index.TermState;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.similarities.Similarity.SimScorer;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.BytesRef;
@@ -235,6 +236,12 @@ final class IntersectTermsEnum extends TermsEnum {
     return fr.parent.postingsReader.postings(fr.fieldInfo, currentFrame.termState, reuse, flags);
   }
 
+  @Override
+  public PostingsEnum topPostings(SimScorer scorer) throws IOException {
+    currentFrame.decodeMetaData();
+    return fr.parent.postingsReader.topPostings(fr.fieldInfo, currentFrame.termState, scorer);
+  }
+
   private int getState() {
     int state = currentFrame.state;
     for(int idx=0;idx<currentFrame.suffix;idx++) {
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/SegmentTermsEnum.java b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/SegmentTermsEnum.java
index 73c32bb..1c674cd 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/blocktree/SegmentTermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/blocktree/SegmentTermsEnum.java
@@ -24,6 +24,7 @@ import org.apache.lucene.codecs.BlockTermState;
 import org.apache.lucene.index.PostingsEnum;
 import org.apache.lucene.index.TermState;
 import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.similarities.Similarity.SimScorer;
 import org.apache.lucene.store.ByteArrayDataInput;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.util.ArrayUtil;
@@ -1003,6 +1004,19 @@ final class SegmentTermsEnum extends TermsEnum {
   }
 
   @Override
+  public PostingsEnum topPostings(SimScorer scorer) throws IOException {
+    assert !eof;
+    //if (DEBUG) {
+    //System.out.println("BTTR.docs seg=" + segment);
+    //}
+    currentFrame.decodeMetaData();
+    //if (DEBUG) {
+    //System.out.println("  state=" + currentFrame.state);
+    //}
+    return fr.parent.postingsReader.topPostings(fr.fieldInfo, currentFrame.state, scorer);
+  }
+
+  @Override
   public void seekExact(BytesRef target, TermState otherState) {
     // if (DEBUG) {
     //   System.out.println("BTTR.seekExact termState seg=" + segment + " target=" + target.utf8ToString() + " " + target + " state=" + otherState);
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50PostingsFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50PostingsFormat.java
index 3b0d5c2..a38335b 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50PostingsFormat.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50PostingsFormat.java
@@ -384,7 +384,8 @@ public final class Lucene50PostingsFormat extends PostingsFormat {
 
   // Increment version to change it
   final static int VERSION_START = 0;
-  final static int VERSION_CURRENT = VERSION_START;
+  final static int VERSION_IMPACT_SKIP_DATA = 1;
+  final static int VERSION_CURRENT = VERSION_IMPACT_SKIP_DATA;
 
   private final int minTermBlockSize;
   private final int maxTermBlockSize;
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50PostingsReader.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50PostingsReader.java
index 0dde774..a9f0eab 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50PostingsReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50PostingsReader.java
@@ -19,6 +19,7 @@ package org.apache.lucene.codecs.lucene50;
 
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.Objects;
 
 import org.apache.lucene.codecs.BlockTermState;
 import org.apache.lucene.codecs.CodecUtil;
@@ -29,6 +30,7 @@ import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.PostingsEnum;
 import org.apache.lucene.index.SegmentReadState;
+import org.apache.lucene.search.similarities.Similarity.SimScorer;
 import org.apache.lucene.store.DataInput;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.util.ArrayUtil;
@@ -234,6 +236,16 @@ public final class Lucene50PostingsReader extends PostingsReaderBase {
     }
   }
 
+  @Override
+  public PostingsEnum topPostings(FieldInfo fieldInfo, BlockTermState state, SimScorer scorer) throws IOException {
+    Objects.requireNonNull(scorer);
+    if (state.docFreq <= BLOCK_SIZE || version < Lucene50PostingsFormat.VERSION_IMPACT_SKIP_DATA) {
+      // no skip data
+      return postings(fieldInfo, state, null, PostingsEnum.FREQS);
+    }
+    return new BlockTopDocsEnum(fieldInfo, (IntBlockTermState) state, scorer);
+  }
+
   final class BlockDocsEnum extends PostingsEnum {
     private final byte[] encoded;
     
@@ -401,7 +413,8 @@ public final class Lucene50PostingsReader extends PostingsReaderBase {
 
         if (skipper == null) {
           // Lazy init: first time this enum has ever been used for skipping
-          skipper = new Lucene50SkipReader(docIn.clone(),
+          skipper = new Lucene50SkipReader(version,
+                                           docIn.clone(),
                                            MAX_SKIP_LEVELS,
                                            indexHasPos,
                                            indexHasOffsets,
@@ -666,7 +679,8 @@ public final class Lucene50PostingsReader extends PostingsReaderBase {
       if (target > nextSkipDoc) {
         if (skipper == null) {
           // Lazy init: first time this enum has ever been used for skipping
-          skipper = new Lucene50SkipReader(docIn.clone(),
+          skipper = new Lucene50SkipReader(version,
+                                           docIn.clone(),
                                            MAX_SKIP_LEVELS,
                                            true,
                                            indexHasOffsets,
@@ -1082,7 +1096,8 @@ public final class Lucene50PostingsReader extends PostingsReaderBase {
       if (target > nextSkipDoc) {
         if (skipper == null) {
           // Lazy init: first time this enum has ever been used for skipping
-          skipper = new Lucene50SkipReader(docIn.clone(),
+          skipper = new Lucene50SkipReader(version,
+                                        docIn.clone(),
                                         MAX_SKIP_LEVELS,
                                         true,
                                         indexHasOffsets,
@@ -1272,6 +1287,197 @@ public final class Lucene50PostingsReader extends PostingsReaderBase {
     }
   }
 
+  final class BlockTopDocsEnum extends PostingsEnum {
+    private final byte[] encoded;
+    
+    private final int[] docDeltaBuffer = new int[MAX_DATA_SIZE];
+    private final int[] freqBuffer = new int[MAX_DATA_SIZE];
+
+    private int docBufferUpto;
+
+    private Lucene50ScoreSkipReader skipper;
+
+    final IndexInput startDocIn;
+
+    final IndexInput docIn;
+    final boolean indexHasFreq;
+
+    private int docFreq;                              // number of docs in this posting list
+    private int docUpto;                              // how many docs we've read
+    private int doc;                                  // doc we last read
+    private int accum;                                // accumulator for doc deltas
+    private int freq;                                 // freq we last read
+
+    // Where this term's postings start in the .doc file:
+    private long docTermStartFP;
+
+    // docID for next skip point, we won't use skipper if 
+    // target docID is not larger than this
+    private int nextSkipDoc;
+    private float currentBlockMaxScore;
+    private float minCompetitiveScore;
+
+    public BlockTopDocsEnum(FieldInfo fieldInfo, IntBlockTermState termState, SimScorer scorer) throws IOException {
+      this.startDocIn = Lucene50PostingsReader.this.docIn;
+      indexHasFreq = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) >= 0;
+      boolean indexHasPos = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
+      boolean indexHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
+      boolean indexHasPayloads = fieldInfo.hasPayloads();
+      encoded = new byte[MAX_ENCODED_SIZE];    
+
+      docFreq = termState.docFreq;
+      docTermStartFP = termState.docStartFP;
+      docIn = startDocIn.clone();
+      docIn.seek(docTermStartFP);
+
+      doc = -1;
+      if (indexHasFreq == false) {
+        Arrays.fill(freqBuffer, 1);
+      }
+      accum = 0;
+      docUpto = 0;
+      nextSkipDoc = -1; // force it to be set on first use
+      docBufferUpto = BLOCK_SIZE;
+
+      skipper = new Lucene50ScoreSkipReader(version, docIn.clone(), MAX_SKIP_LEVELS,
+          indexHasPos, indexHasOffsets, indexHasPayloads, scorer);
+      assert termState.skipOffset != -1;
+      skipper.init(docTermStartFP+termState.skipOffset, docTermStartFP, 0, 0, docFreq);
+    }
+    
+    @Override
+    public int freq() throws IOException {
+      return freq;
+    }
+
+    @Override
+    public int nextPosition() throws IOException {
+      return -1;
+    }
+
+    @Override
+    public int startOffset() throws IOException {
+      return -1;
+    }
+
+    @Override
+    public int endOffset() throws IOException {
+      return -1;
+    }
+
+    @Override
+    public BytesRef getPayload() throws IOException {
+      return null;
+    }
+
+    @Override
+    public int docID() {
+      return doc;
+    }
+
+    @Override
+    public void setMinCompetitiveScore(float minScore) {
+      this.minCompetitiveScore = minScore;
+    }
+
+    private void refillDocs() throws IOException {
+      final int left = docFreq - docUpto;
+      assert left > 0;
+
+      if (left >= BLOCK_SIZE) {
+        forUtil.readBlock(docIn, encoded, docDeltaBuffer);
+
+        if (indexHasFreq) {
+          forUtil.readBlock(docIn, encoded, freqBuffer);
+        }
+      } else {
+        // Read vInts:
+        readVIntBlock(docIn, docDeltaBuffer, freqBuffer, left, indexHasFreq);
+      }
+      docBufferUpto = 0;
+    }
+
+    @Override
+    public int nextDoc() throws IOException {
+      return advance(doc + 1);
+    }
+
+    private void advanceSkipper(int target) throws IOException {
+      // always plus one to fix the result, since skip position in Lucene50SkipReader 
+      // is a little different from MultiLevelSkipListReader
+      final int newDocUpto = skipper.skipTo(target) + 1; 
+
+      if (newDocUpto > docUpto) {
+        // Skipper moved
+        assert newDocUpto % BLOCK_SIZE == 0 : "got " + newDocUpto;
+        docUpto = newDocUpto;
+
+        // Force to read next block
+        docBufferUpto = BLOCK_SIZE;
+        accum = skipper.getDoc();               // actually, this is just lastSkipEntry
+        docIn.seek(skipper.getDocPointer());    // now point to the block we want to search
+      }
+      // next time we call advance, this is used to 
+      // foresee whether skipper is necessary.
+      nextSkipDoc = skipper.getNextSkipDoc();
+      currentBlockMaxScore = skipper.currentBlockMaxScore();
+    }
+
+    @Override
+    public int advance(int target) throws IOException {
+      // current skip docID < docIDs generated from current buffer <= next skip docID
+      // we don't need to skip if target is buffered already
+      if (target > nextSkipDoc) {
+        advanceSkipper(target);
+      }
+
+      while (currentBlockMaxScore < minCompetitiveScore) {
+        if (nextSkipDoc == NO_MORE_DOCS) {
+          return doc = NO_MORE_DOCS;
+        }
+
+        // always plus one to fix the result, since skip position in Lucene50SkipReader 
+        // is a little different from MultiLevelSkipListReader
+        int newTarget = skipper.skipToScore(minCompetitiveScore) + 1;
+        assert newTarget > target;
+        target = newTarget;
+
+        advanceSkipper(target);
+      }
+
+      if (docUpto == docFreq) {
+        return doc = NO_MORE_DOCS;
+      }
+      if (docBufferUpto == BLOCK_SIZE) {
+        refillDocs();
+      }
+
+      // Now scan... this is an inlined/pared down version
+      // of nextDoc():
+      while (true) {
+        accum += docDeltaBuffer[docBufferUpto];
+        docUpto++;
+
+        if (accum >= target) {
+          break;
+        }
+        docBufferUpto++;
+        if (docUpto == docFreq) {
+          return doc = NO_MORE_DOCS;
+        }
+      }
+
+      freq = freqBuffer[docBufferUpto];
+      docBufferUpto++;
+      return doc = accum;
+    }
+
+    @Override
+    public long cost() {
+      return docFreq;
+    }
+  }
+
   @Override
   public long ramBytesUsed() {
     return BASE_RAM_BYTES_USED;
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50PostingsWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50PostingsWriter.java
index 6d24a4c..4c87981 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50PostingsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50PostingsWriter.java
@@ -31,12 +31,14 @@ import java.io.IOException;
 
 import org.apache.lucene.codecs.BlockTermState;
 import org.apache.lucene.codecs.CodecUtil;
+import org.apache.lucene.codecs.CompetitiveFreqNormAccumulator;
 import org.apache.lucene.codecs.PushPostingsWriterBase;
 import org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat.IntBlockTermState;
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.SegmentWriteState;
 import org.apache.lucene.store.DataOutput;
 import org.apache.lucene.store.IndexOutput;
@@ -96,7 +98,12 @@ public final class Lucene50PostingsWriter extends PushPostingsWriterBase {
 
   private final ForUtil forUtil;
   private final Lucene50SkipWriter skipWriter;
-  
+
+  private boolean fieldHasNorms;
+  private NumericDocValues norms;
+  private CompetitiveFreqNormAccumulator competitiveFreqNormAccumulator;
+  private int maxFreq;
+
   /** Creates a postings writer */
   public Lucene50PostingsWriter(SegmentWriteState state) throws IOException {
     final float acceptableOverheadRatio = PackedInts.COMPACT;
@@ -183,8 +190,16 @@ public final class Lucene50PostingsWriter extends PushPostingsWriterBase {
   @Override
   public int setField(FieldInfo fieldInfo) {
     super.setField(fieldInfo);
-    skipWriter.setField(writePositions, writeOffsets, writePayloads);
+    skipWriter.setField(writePositions, writeOffsets, writePayloads, fieldInfo.hasNorms());
     lastState = emptyState;
+    fieldHasNorms = fieldInfo.hasNorms();
+    if (fieldHasNorms) {
+      competitiveFreqNormAccumulator = new CompetitiveFreqNormAccumulator();
+      maxFreq = -1;
+    } else {
+      competitiveFreqNormAccumulator = null;
+      maxFreq = 0;
+    }
     if (writePositions) {
       if (writePayloads || writeOffsets) {
         return 3;  // doc + pos + pay FP
@@ -197,7 +212,7 @@ public final class Lucene50PostingsWriter extends PushPostingsWriterBase {
   }
 
   @Override
-  public void startTerm() {
+  public void startTerm(NumericDocValues norms) {
     docStartFP = docOut.getFilePointer();
     if (writePositions) {
       posStartFP = posOut.getFilePointer();
@@ -208,6 +223,17 @@ public final class Lucene50PostingsWriter extends PushPostingsWriterBase {
     lastDocID = 0;
     lastBlockDocID = -1;
     skipWriter.resetSkip();
+    this.norms = norms;
+    if (fieldHasNorms) {
+      assert norms != null;
+      assert competitiveFreqNormAccumulator != null;
+      assert maxFreq == -1;
+      competitiveFreqNormAccumulator.clear();
+    } else {
+      assert norms == null;
+      assert competitiveFreqNormAccumulator == null;
+      maxFreq = 0;
+    }
   }
 
   @Override
@@ -216,7 +242,13 @@ public final class Lucene50PostingsWriter extends PushPostingsWriterBase {
     // Should write skip data as well as postings list for
     // current block.
     if (lastBlockDocID != -1 && docBufferUpto == 0) {
-      skipWriter.bufferSkip(lastBlockDocID, docCount, lastBlockPosFP, lastBlockPayFP, lastBlockPosBufferUpto, lastBlockPayloadByteUpto);
+      skipWriter.bufferSkip(lastBlockDocID, competitiveFreqNormAccumulator, maxFreq, docCount,
+          lastBlockPosFP, lastBlockPayFP, lastBlockPosBufferUpto, lastBlockPayloadByteUpto);
+      if (fieldHasNorms) {
+        competitiveFreqNormAccumulator.clear();
+      } else {
+        maxFreq = 0;
+      }
     }
 
     final int docDelta = docID - lastDocID;
@@ -247,6 +279,22 @@ public final class Lucene50PostingsWriter extends PushPostingsWriterBase {
     lastDocID = docID;
     lastPosition = 0;
     lastStartOffset = 0;
+
+    if (fieldHasNorms) {
+      boolean found = norms.advanceExact(docID);
+      long norm;
+      if (found == false) {
+        // This can happen if indexing hits a problem after adding a doc to the
+        // postings but before buffering the norm. Such documents are written
+        // deleted and will go away on the first merge.
+        norm = 1L;
+      } else {
+        norm = norms.longValue();
+      }
+      competitiveFreqNormAccumulator.add(writeFreqs ? termDocFreq : 1, norm);
+    } else {
+      maxFreq = Math.max(maxFreq, writeFreqs ? termDocFreq : 1);
+    }
   }
 
   @Override
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50ScoreSkipReader.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50ScoreSkipReader.java
new file mode 100644
index 0000000..df52265
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50ScoreSkipReader.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.codecs.lucene50;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Objects;
+
+import org.apache.lucene.search.similarities.Similarity.SimScorer;
+import org.apache.lucene.store.IndexInput;
+
+final class Lucene50ScoreSkipReader extends Lucene50SkipReader {
+
+  private final SimScorer scorer;
+  private final float[] maxScore;
+  private final float globalMaxScore;
+
+  public Lucene50ScoreSkipReader(int version, IndexInput skipStream, int maxSkipLevels,
+      boolean hasPos, boolean hasOffsets, boolean hasPayloads, SimScorer scorer) {
+    super(version, skipStream, maxSkipLevels, hasPos, hasOffsets, hasPayloads);
+    if (version < Lucene50PostingsFormat.VERSION_IMPACT_SKIP_DATA) {
+      throw new IllegalStateException("Cannot skip based on scores if impacts are not indexed");
+    }
+    this.scorer = Objects.requireNonNull(scorer);
+    this.maxScore = new float[maxSkipLevels];
+    this.globalMaxScore = scorer.maxScore(Float.POSITIVE_INFINITY);
+  }
+
+  @Override
+  public void init(long skipPointer, long docBasePointer, long posBasePointer, long payBasePointer, int df) throws IOException {
+    super.init(skipPointer, docBasePointer, posBasePointer, payBasePointer, df);
+    Arrays.fill(maxScore, globalMaxScore);
+  }
+
+  float currentBlockMaxScore() {
+    return numberOfSkipLevels > 0 ? maxScore[0] : globalMaxScore;
+  }
+
+  public int skipToScore(float minScore) throws IOException {
+    int level = -1;
+    while (level < numberOfSkipLevels - 1 && maxScore[level + 1] < minScore) {
+      level++;
+    }
+    if (level == -1) {
+      return -1;
+    } else {
+      return skipDoc[level];
+    }
+  }
+
+  @Override
+  protected void readImpacts(int level, IndexInput skipStream, int delta) throws IOException {
+    float maxScore = 0;
+    if (level > 0) {
+      long length = skipStream.readVLong();
+      long upTo = skipStream.getFilePointer() + length;
+      int freq = 0;
+      long norm = 0;
+      while (skipStream.getFilePointer() < upTo) {
+        freq += skipStream.readVInt();
+        assert freq > 0;
+        norm += skipStream.readZLong();
+        maxScore = Math.max(maxScore, scorer.score(freq, norm));
+      }
+      assert skipStream.getFilePointer() == upTo;
+    } else {
+      int freq = skipStream.readVInt();
+      assert freq > 0;
+      long norm = skipStream.readZLong();
+      maxScore = scorer.score(freq, norm);
+      // because level 0 uses sloppy data (only the max freq and min norm
+      // rather than all competitive pairs), we might have better data on
+      // level 1
+      if (numberOfSkipLevels > 1) {
+        // This works because we always write the higher levels first
+        maxScore = Math.min(maxScore, this.maxScore[1]);
+      }
+    }
+    this.maxScore[level] = maxScore;
+    assert level + 1 == numberOfSkipLevels || this.maxScore[level] <= this.maxScore[level+1];
+  }
+
+}
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50SkipReader.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50SkipReader.java
index 8c037c5..2b98751 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50SkipReader.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50SkipReader.java
@@ -52,7 +52,8 @@ import static org.apache.lucene.codecs.lucene50.Lucene50PostingsFormat.BLOCK_SIZ
  * Therefore, we'll trim df before passing it to the interface. see trim(int)
  *
  */
-final class Lucene50SkipReader extends MultiLevelSkipListReader {
+class Lucene50SkipReader extends MultiLevelSkipListReader {
+  private final int version;
   private long docPointer[];
   private long posPointer[];
   private long payPointer[];
@@ -65,8 +66,11 @@ final class Lucene50SkipReader extends MultiLevelSkipListReader {
   private long lastDocPointer;
   private int lastPosBufferUpto;
 
-  public Lucene50SkipReader(IndexInput skipStream, int maxSkipLevels, boolean hasPos, boolean hasOffsets, boolean hasPayloads) {
+  public Lucene50SkipReader(int version,
+      IndexInput skipStream, int maxSkipLevels,
+      boolean hasPos, boolean hasOffsets, boolean hasPayloads) {
     super(skipStream, maxSkipLevels, BLOCK_SIZE, 8);
+    this.version = version;
     docPointer = new long[maxSkipLevels];
     if (hasPos) {
       posPointer = new long[maxSkipLevels];
@@ -192,6 +196,22 @@ final class Lucene50SkipReader extends MultiLevelSkipListReader {
         payPointer[level] += skipStream.readVLong();
       }
     }
+    readImpacts(level, skipStream, delta);
     return delta;
   }
+
+  // The default impl skips impacts since they are only useful if we have a SimScorer
+  // to compute the scores that impacts map to.
+  protected void readImpacts(int level, IndexInput skipStream, int delta) throws IOException {
+    if (version >= Lucene50PostingsFormat.VERSION_IMPACT_SKIP_DATA) {
+      // The base implementation skips impacts, they are not used
+      if (level > 0) { // level 0 has a different encoding
+        skipStream.skipBytes(skipStream.readVLong());
+      } else {
+        skipStream.readVInt(); // freq
+        skipStream.readZLong(); // norm
+      }
+    }
+  }
+
 }
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50SkipWriter.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50SkipWriter.java
index a4556c6..6c8e276 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50SkipWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene50/Lucene50SkipWriter.java
@@ -19,9 +19,13 @@ package org.apache.lucene.codecs.lucene50;
 
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.SortedSet;
 
-import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.codecs.CompetitiveFreqNormAccumulator;
+import org.apache.lucene.codecs.CompetitiveFreqNormAccumulator.FreqAndNorm;
 import org.apache.lucene.codecs.MultiLevelSkipListWriter;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.RAMOutputStream;
 
 /**
  * Write skip lists with multiple levels, and support skip within block ints.
@@ -44,6 +48,7 @@ import org.apache.lucene.codecs.MultiLevelSkipListWriter;
  *
  */
 final class Lucene50SkipWriter extends MultiLevelSkipListWriter {
+  private final int maxSkipLevels;
   private int[] lastSkipDoc;
   private long[] lastSkipDocPointer;
   private long[] lastSkipPosPointer;
@@ -60,12 +65,16 @@ final class Lucene50SkipWriter extends MultiLevelSkipListWriter {
   private long curPayPointer;
   private int curPosBufferUpto;
   private int curPayloadByteUpto;
+  private int[] curMaxFreq;
+  private CompetitiveFreqNormAccumulator[] curCompetitiveFreqNorms;
   private boolean fieldHasPositions;
   private boolean fieldHasOffsets;
   private boolean fieldHasPayloads;
+  private boolean fieldHasNorms;
 
   public Lucene50SkipWriter(int maxSkipLevels, int blockSize, int docCount, IndexOutput docOut, IndexOutput posOut, IndexOutput payOut) {
     super(blockSize, 8, maxSkipLevels, docCount);
+    this.maxSkipLevels = maxSkipLevels;
     this.docOut = docOut;
     this.posOut = posOut;
     this.payOut = payOut;
@@ -81,10 +90,23 @@ final class Lucene50SkipWriter extends MultiLevelSkipListWriter {
     }
   }
 
-  public void setField(boolean fieldHasPositions, boolean fieldHasOffsets, boolean fieldHasPayloads) {
+  public void setField(boolean fieldHasPositions, boolean fieldHasOffsets, boolean fieldHasPayloads,
+      boolean fieldHasNorms) {
     this.fieldHasPositions = fieldHasPositions;
     this.fieldHasOffsets = fieldHasOffsets;
     this.fieldHasPayloads = fieldHasPayloads;
+    this.fieldHasNorms = fieldHasNorms;
+    if (fieldHasNorms) {
+      curMaxFreq = null;
+      curCompetitiveFreqNorms = new CompetitiveFreqNormAccumulator[maxSkipLevels];
+      // we do not store competitive (freq,norm) pairs on the first level
+      for (int i = 0; i < maxSkipLevels; ++i) {
+        curCompetitiveFreqNorms[i] = new CompetitiveFreqNormAccumulator();
+      }
+    } else {
+      curMaxFreq = new int[maxSkipLevels];
+      curCompetitiveFreqNorms = null;
+    }
   }
   
   // tricky: we only skip data for blocks (terms with more than 128 docs), but re-init'ing the skipper 
@@ -122,6 +144,13 @@ final class Lucene50SkipWriter extends MultiLevelSkipListWriter {
           Arrays.fill(lastSkipPayPointer, lastPayFP);
         }
       }
+      if (fieldHasNorms) {
+        for (CompetitiveFreqNormAccumulator acc : curCompetitiveFreqNorms) {
+          acc.clear();
+        }
+      } else {
+        Arrays.fill(curMaxFreq, 0);
+      }
       initialized = true;
     }
   }
@@ -129,7 +158,8 @@ final class Lucene50SkipWriter extends MultiLevelSkipListWriter {
   /**
    * Sets the values for the current skip data. 
    */
-  public void bufferSkip(int doc, int numDocs, long posFP, long payFP, int posBufferUpto, int payloadByteUpto) throws IOException {
+  public void bufferSkip(int doc, CompetitiveFreqNormAccumulator competitiveFreqNorms, int maxFreq,
+      int numDocs, long posFP, long payFP, int posBufferUpto, int payloadByteUpto) throws IOException {
     initSkip();
     this.curDoc = doc;
     this.curDocPointer = docOut.getFilePointer();
@@ -137,11 +167,21 @@ final class Lucene50SkipWriter extends MultiLevelSkipListWriter {
     this.curPayPointer = payFP;
     this.curPosBufferUpto = posBufferUpto;
     this.curPayloadByteUpto = payloadByteUpto;
+    if (fieldHasNorms) {
+      assert maxFreq == -1;
+      this.curCompetitiveFreqNorms[0].addAll(competitiveFreqNorms);
+    } else {
+      assert competitiveFreqNorms == null;
+      this.curMaxFreq[0] = Math.max(maxFreq, curMaxFreq[0]);
+    }
     bufferSkip(numDocs);
   }
-  
+
+  private final RAMOutputStream freqNormOut = new RAMOutputStream();
+
   @Override
   protected void writeSkipData(int level, IndexOutput skipBuffer) throws IOException {
+
     int delta = curDoc - lastSkipDoc[level];
 
     skipBuffer.writeVInt(delta);
@@ -165,5 +205,53 @@ final class Lucene50SkipWriter extends MultiLevelSkipListWriter {
         lastSkipPayPointer[level] = curPayPointer;
       }
     }
+
+    if (fieldHasNorms) {
+      CompetitiveFreqNormAccumulator competitiveFreqNorms = curCompetitiveFreqNorms[level];
+      SortedSet<FreqAndNorm> freqAndNorms = competitiveFreqNorms.getCompetitiveFreqNormPairs();
+      assert freqAndNorms.size() > 0;
+      if (level + 1 < numberOfSkipLevels) {
+        curCompetitiveFreqNorms[level + 1].addAll(competitiveFreqNorms);
+      }
+      if (level > 0) {
+        // write all competitive freq/norm pairs
+        FreqAndNorm previous = new FreqAndNorm(0, 0);
+        for (FreqAndNorm freqAndNorm : freqAndNorms) {
+          freqNormOut.writeVInt(freqAndNorm.freq - previous.freq);
+          freqNormOut.writeZLong(freqAndNorm.norm - previous.norm);
+          previous = freqAndNorm;
+        }
+        skipBuffer.writeVLong(freqNormOut.getFilePointer());
+        freqNormOut.writeTo(skipBuffer);
+        freqNormOut.reset();
+      } else {
+        // level 0 is written every 256 entries, so we need to make it compact:
+        // we just write the most competitive freq and norm. Note that this
+        // means that level 1 might store better upper bounds of the max score
+        // than level 0, so the reader needs to take that into account.
+        int bestFreq = freqAndNorms.last().freq;
+        long bestNorm = freqAndNorms.first().norm;
+        skipBuffer.writeVInt(bestFreq);
+        skipBuffer.writeZLong(bestNorm);
+      }
+      competitiveFreqNorms.clear();
+    } else {
+      if (level + 1 < numberOfSkipLevels) {
+        curMaxFreq[level + 1] = Math.max(curMaxFreq[level + 1], curMaxFreq[level]);
+      }
+      assert curMaxFreq[level] > 0 : curMaxFreq[level];
+      // make it compatible with the case that norms are enabled
+      if (level > 0) {
+        freqNormOut.writeVInt(curMaxFreq[level]);
+        freqNormOut.writeZLong(1);
+        skipBuffer.writeVLong(freqNormOut.getFilePointer());
+        freqNormOut.writeTo(skipBuffer);
+        freqNormOut.reset();
+      } else {
+        skipBuffer.writeVInt(curMaxFreq[level]);
+        skipBuffer.writeZLong(1L);
+      }
+      curMaxFreq[level] = 0;
+    }
   }
 }
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene70/IndexedDISI.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene70/IndexedDISI.java
index 24eaf7a..6138896 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene70/IndexedDISI.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene70/IndexedDISI.java
@@ -100,7 +100,13 @@ final class IndexedDISI extends DocIdSetIterator {
   private final long cost;
 
   IndexedDISI(IndexInput in, long offset, long length, long cost) throws IOException {
-    this.slice = in.slice("docs", offset, length);
+    this(in.slice("docs", offset, length), cost);
+  }
+
+  // This constructor allows passing the slice directly in case it helps reuse;
+  // see e.g. the Lucene70 norms producer's merge instance.
+  IndexedDISI(IndexInput slice, long cost) throws IOException {
+    this.slice = slice;
     this.cost = cost;
   }
 
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/lucene70/Lucene70NormsProducer.java b/lucene/core/src/java/org/apache/lucene/codecs/lucene70/Lucene70NormsProducer.java
index eb7c41a..c7310e8 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/lucene70/Lucene70NormsProducer.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/lucene70/Lucene70NormsProducer.java
@@ -40,11 +40,14 @@ import org.apache.lucene.util.IOUtils;
 /**
  * Reader for {@link Lucene70NormsFormat}
  */
-final class Lucene70NormsProducer extends NormsProducer {
+final class Lucene70NormsProducer extends NormsProducer implements Cloneable {
   // metadata maps (just file pointers and minimal stuff)
   private final Map<Integer,NormsEntry> norms = new HashMap<>();
-  private final IndexInput data;
   private final int maxDoc;
+  private IndexInput data;
+  private boolean merging;
+  private Map<Integer, IndexInput> disiInputs;
+  private Map<Integer, RandomAccessInput> dataInputs;
 
   Lucene70NormsProducer(SegmentReadState state, String dataCodec, String dataExtension, String metaCodec, String metaExtension) throws IOException {
     maxDoc = state.segmentInfo.maxDoc();
@@ -87,6 +90,22 @@ final class Lucene70NormsProducer extends NormsProducer {
     }
   }
 
+  @Override
+  public NormsProducer getMergeInstance() throws IOException {
+    Lucene70NormsProducer clone;
+    try {
+      clone = (Lucene70NormsProducer) super.clone();
+    } catch (CloneNotSupportedException e) {
+      // cannot happen
+      throw new RuntimeException(e);
+    }
+    clone.data = data.clone();
+    clone.dataInputs = new HashMap<>();
+    clone.disiInputs = new HashMap<>();
+    clone.merging = true;
+    return clone;
+  }
+
   static class NormsEntry {
     byte bytesPerNorm;
     long docsWithFieldOffset;
@@ -193,6 +212,34 @@ final class Lucene70NormsProducer extends NormsProducer {
     }
   }
 
+  private RandomAccessInput getDataInput(FieldInfo field, NormsEntry entry) throws IOException {
+    RandomAccessInput slice = null;
+    if (merging) {
+      slice = dataInputs.get(field.number);
+    }
+    if (slice == null) {
+      slice = data.randomAccessSlice(entry.normsOffset, entry.numDocsWithField * (long) entry.bytesPerNorm);
+      if (merging) {
+        dataInputs.put(field.number, slice);
+      }
+    }
+    return slice;
+  }
+
+  private IndexInput getDisiInput(FieldInfo field, NormsEntry entry) throws IOException {
+    IndexInput slice = null;
+    if (merging) {
+      slice = disiInputs.get(field.number);
+    }
+    if (slice == null) {
+      slice = data.slice("docs", entry.docsWithFieldOffset, entry.docsWithFieldLength);
+      if (merging) {
+        disiInputs.put(field.number, slice);
+      }
+    }
+    return slice;
+  }
+
   @Override
   public NumericDocValues getNorms(FieldInfo field) throws IOException {
     final NormsEntry entry = norms.get(field.number);
@@ -209,7 +256,7 @@ final class Lucene70NormsProducer extends NormsProducer {
           }
         };
       }
-      final RandomAccessInput slice = data.randomAccessSlice(entry.normsOffset, entry.numDocsWithField * (long) entry.bytesPerNorm);
+      final RandomAccessInput slice = getDataInput(field, entry);
       switch (entry.bytesPerNorm) {
         case 1:
           return new DenseNormsIterator(maxDoc) {
@@ -245,7 +292,8 @@ final class Lucene70NormsProducer extends NormsProducer {
       }
     } else {
       // sparse
-      final IndexedDISI disi = new IndexedDISI(data, entry.docsWithFieldOffset, entry.docsWithFieldLength, entry.numDocsWithField);
+      final IndexInput disiInput = getDisiInput(field, entry);
+      final IndexedDISI disi = new IndexedDISI(disiInput, entry.numDocsWithField);
       if (entry.bytesPerNorm == 0) {
         return new SparseNormsIterator(disi) {
           @Override
diff --git a/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldPostingsFormat.java b/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldPostingsFormat.java
index 281b08f..36f0358 100644
--- a/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldPostingsFormat.java
+++ b/lucene/core/src/java/org/apache/lucene/codecs/perfield/PerFieldPostingsFormat.java
@@ -34,6 +34,7 @@ import java.util.TreeSet;
 
 import org.apache.lucene.codecs.FieldsConsumer;
 import org.apache.lucene.codecs.FieldsProducer;
+import org.apache.lucene.codecs.NormsProducer;
 import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.Fields;
@@ -117,7 +118,7 @@ public abstract class PerFieldPostingsFormat extends PostingsFormat {
     }
 
     @Override
-    public void write(Fields fields) throws IOException {
+    public void write(Fields fields, NormsProducer norms) throws IOException {
       Map<PostingsFormat, FieldsGroup> formatToGroups = buildFieldsGroupMapping(fields);
 
       // Write postings
@@ -137,7 +138,7 @@ public abstract class PerFieldPostingsFormat extends PostingsFormat {
 
           FieldsConsumer consumer = format.fieldsConsumer(group.state);
           toClose.add(consumer);
-          consumer.write(maskedFields);
+          consumer.write(maskedFields, norms);
         }
         success = true;
       } finally {
@@ -148,7 +149,7 @@ public abstract class PerFieldPostingsFormat extends PostingsFormat {
     }
 
     @Override
-    public void merge(MergeState mergeState) throws IOException {
+    public void merge(MergeState mergeState, NormsProducer norms) throws IOException {
       Map<PostingsFormat, FieldsGroup> formatToGroups = buildFieldsGroupMapping(new MultiFields(mergeState.fieldsProducers, null));
 
       // Merge postings
@@ -161,7 +162,7 @@ public abstract class PerFieldPostingsFormat extends PostingsFormat {
 
           FieldsConsumer consumer = format.fieldsConsumer(group.state);
           toClose.add(consumer);
-          consumer.merge(pfMergeState.apply(group.fields));
+          consumer.merge(pfMergeState.apply(group.fields), norms);
         }
         success = true;
       } finally {
diff --git a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
index c676568..d6dd4a8 100644
--- a/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
+++ b/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java
@@ -48,6 +48,7 @@ import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.LeafFieldComparator;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.similarities.Similarity.SimScorer;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FSDirectory;
@@ -1598,8 +1599,53 @@ public final class CheckIndex implements Closeable {
             }
           }
         }
+
+        // Test score skipping
+        for(int idx=0;idx<3;idx++) {
+          PostingsEnum topPostings = termsEnum.topPostings(new SimScorer(field) {
+            @Override
+            public float score(float freq, long norm) throws IOException {
+              return freq;
+            }
+            @Override
+            public float maxScore(float maxFreq) {
+              return maxFreq;
+            }
+          });
+          int minFreq = 0;
+          topPostings.setMinCompetitiveScore(minFreq);
+          postings = termsEnum.postings(postings, PostingsEnum.FREQS);
+          while (topPostings.docID() != DocIdSetIterator.NO_MORE_DOCS) {
+            int doc = topPostings.docID();
+            if (((1 + doc + idx) & 0x07) == 0) {
+              minFreq++;
+              topPostings.setMinCompetitiveScore(minFreq);
+            }
+            if ((doc + idx) % 2 == 0) {
+              topPostings.nextDoc();
+              postings.nextDoc();
+            } else {
+              int delta = Math.min(1 + ((doc + idx) & 0x0fff), DocIdSetIterator.NO_MORE_DOCS - doc);
+              int target = topPostings.docID() + delta;
+              topPostings.advance(target);
+              postings.advance(target);
+            }
+            if (minFreq <= 1 && postings.docID() != topPostings.docID()) {
+              throw new RuntimeException("Score skipping failed to find " + postings.docID() + " even though it is in the postings");
+            }
+            while (postings.docID() < topPostings.docID()) {
+              if (postings.freq() >= minFreq) {
+                throw new RuntimeException("Score skipping skipped document " + postings.docID() + " even though its freq=" + postings.freq() + " >= minFreq=" + minFreq);
+              }
+              postings.nextDoc();
+            }
+            if (postings.docID() != topPostings.docID()) {
+              throw new RuntimeException("Score skipping found a match on document " + topPostings.docID() + " which is not in the postings");
+            }
+          }
+        }
       }
-      
+
       if (minTerm != null && status.termCount + status.delTermCount == 0) {
         throw new RuntimeException("field=\"" + field + "\": minTerm is non-null yet we saw no terms: " + minTerm);
       }
diff --git a/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java b/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java
index fd24105..efa34f1 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DefaultIndexingChain.java
@@ -31,6 +31,7 @@ import org.apache.lucene.codecs.DocValuesConsumer;
 import org.apache.lucene.codecs.DocValuesFormat;
 import org.apache.lucene.codecs.NormsConsumer;
 import org.apache.lucene.codecs.NormsFormat;
+import org.apache.lucene.codecs.NormsProducer;
 import org.apache.lucene.codecs.PointsFormat;
 import org.apache.lucene.codecs.PointsWriter;
 import org.apache.lucene.document.FieldType;
@@ -126,6 +127,7 @@ final class DefaultIndexingChain extends DocConsumer {
     if (docState.infoStream.isEnabled("IW")) {
       docState.infoStream.message("IW", ((System.nanoTime()-t0)/1000000) + " msec to write norms");
     }
+    SegmentReadState readState = new SegmentReadState(state.directory, state.segmentInfo, state.fieldInfos, IOContext.READ, state.segmentSuffix);
     
     t0 = System.nanoTime();
     writeDocValues(state, sortMap);
@@ -159,7 +161,16 @@ final class DefaultIndexingChain extends DocConsumer {
       }
     }
 
-    termsHash.flush(fieldsToFlush, state, sortMap);
+    try (NormsProducer norms = readState.fieldInfos.hasNorms()
+        ? state.segmentInfo.getCodec().normsFormat().normsProducer(readState)
+        : null) {
+      NormsProducer normsMergeInstance = null;
+      if (norms != null) {
+        // Use the merge instance in order to reuse the same IndexInput for all terms
+        normsMergeInstance = norms.getMergeInstance();
+      }
+      termsHash.flush(fieldsToFlush, state, sortMap, normsMergeInstance);
+    }
     if (docState.infoStream.isEnabled("IW")) {
       docState.infoStream.message("IW", ((System.nanoTime()-t0)/1000000) + " msec to write postings and finish vectors");
     }
diff --git a/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriter.java b/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriter.java
index d953f8d..ac70669 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriter.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriter.java
@@ -24,6 +24,7 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.lucene.codecs.FieldsConsumer;
+import org.apache.lucene.codecs.NormsProducer;
 import org.apache.lucene.util.CollectionUtil;
 import org.apache.lucene.util.IOUtils;
 
@@ -78,8 +79,9 @@ final class FreqProxTermsWriter extends TermsHash {
   }
 
   @Override
-  public void flush(Map<String,TermsHashPerField> fieldsToFlush, final SegmentWriteState state, Sorter.DocMap sortMap) throws IOException {
-    super.flush(fieldsToFlush, state, sortMap);
+  public void flush(Map<String,TermsHashPerField> fieldsToFlush, final SegmentWriteState state,
+      Sorter.DocMap sortMap, NormsProducer norms) throws IOException {
+    super.flush(fieldsToFlush, state, sortMap, norms);
 
     // Gather all fields that saw any postings:
     List<FreqProxTermsWriterPerField> allFields = new ArrayList<>();
@@ -105,7 +107,7 @@ final class FreqProxTermsWriter extends TermsHash {
     FieldsConsumer consumer = state.segmentInfo.getCodec().postingsFormat().fieldsConsumer(state);
     boolean success = false;
     try {
-      consumer.write(fields);
+      consumer.write(fields, norms);
       success = true;
     } finally {
       if (success) {
diff --git a/lucene/core/src/java/org/apache/lucene/index/PostingsEnum.java b/lucene/core/src/java/org/apache/lucene/index/PostingsEnum.java
index fdd32a9..d51ca40 100644
--- a/lucene/core/src/java/org/apache/lucene/index/PostingsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/index/PostingsEnum.java
@@ -20,6 +20,7 @@ package org.apache.lucene.index;
 import java.io.IOException;
 
 import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.Scorer;
 import org.apache.lucene.util.AttributeSource;
 import org.apache.lucene.util.BytesRef;
 
@@ -108,4 +109,12 @@ public abstract class PostingsEnum extends DocIdSetIterator {
    *  in the byte[]). */
   public abstract BytesRef getPayload() throws IOException;
 
+  /**
+   * Set the minimum competitive score. This gives this postings enum the
+   * ability to skip over documents that would produce a lower score.
+   * The default implementation does nothing.
+   * @see TermsEnum#topPostings
+   * @see Scorer#setMinCompetitiveScore
+   */
+  public void setMinCompetitiveScore(float minScore) {}
 }
diff --git a/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java b/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java
index c67b92d..ad60a94 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SegmentMerger.java
@@ -24,6 +24,7 @@ import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.DocValuesConsumer;
 import org.apache.lucene.codecs.FieldsConsumer;
 import org.apache.lucene.codecs.NormsConsumer;
+import org.apache.lucene.codecs.NormsProducer;
 import org.apache.lucene.codecs.PointsWriter;
 import org.apache.lucene.codecs.StoredFieldsWriter;
 import org.apache.lucene.codecs.TermVectorsWriter;
@@ -109,10 +110,33 @@ final class SegmentMerger {
 
     final SegmentWriteState segmentWriteState = new SegmentWriteState(mergeState.infoStream, directory, mergeState.segmentInfo,
                                                                       mergeState.mergeFieldInfos, null, context);
+    final SegmentReadState segmentReadState = new SegmentReadState(directory, mergeState.segmentInfo, mergeState.mergeFieldInfos,
+                                                                   IOContext.READ, segmentWriteState.segmentSuffix);
+
+    if (mergeState.mergeFieldInfos.hasNorms()) {
+      if (mergeState.infoStream.isEnabled("SM")) {
+        t0 = System.nanoTime();
+      }
+      mergeNorms(segmentWriteState);
+      if (mergeState.infoStream.isEnabled("SM")) {
+        long t1 = System.nanoTime();
+        mergeState.infoStream.message("SM", ((t1-t0)/1000000) + " msec to merge norms [" + numMerged + " docs]");
+      }
+    }
+
     if (mergeState.infoStream.isEnabled("SM")) {
       t0 = System.nanoTime();
     }
-    mergeTerms(segmentWriteState);
+    try (NormsProducer norms = mergeState.mergeFieldInfos.hasNorms()
+        ? codec.normsFormat().normsProducer(segmentReadState)
+        : null) {
+      NormsProducer normsMergeInstance = null;
+      if (norms != null) {
+        // Use the merge instance in order to reuse the same IndexInput for all terms
+        normsMergeInstance = norms.getMergeInstance();
+      }
+      mergeTerms(segmentWriteState, normsMergeInstance);
+    }
     if (mergeState.infoStream.isEnabled("SM")) {
       long t1 = System.nanoTime();
       mergeState.infoStream.message("SM", ((t1-t0)/1000000) + " msec to merge postings [" + numMerged + " docs]");
@@ -139,17 +163,6 @@ final class SegmentMerger {
       long t1 = System.nanoTime();
       mergeState.infoStream.message("SM", ((t1-t0)/1000000) + " msec to merge points [" + numMerged + " docs]");
     }
-    
-    if (mergeState.mergeFieldInfos.hasNorms()) {
-      if (mergeState.infoStream.isEnabled("SM")) {
-        t0 = System.nanoTime();
-      }
-      mergeNorms(segmentWriteState);
-      if (mergeState.infoStream.isEnabled("SM")) {
-        long t1 = System.nanoTime();
-        mergeState.infoStream.message("SM", ((t1-t0)/1000000) + " msec to merge norms [" + numMerged + " docs]");
-      }
-    }
 
     if (mergeState.mergeFieldInfos.hasVectors()) {
       if (mergeState.infoStream.isEnabled("SM")) {
@@ -225,9 +238,9 @@ final class SegmentMerger {
     }
   }
 
-  private void mergeTerms(SegmentWriteState segmentWriteState) throws IOException {
+  private void mergeTerms(SegmentWriteState segmentWriteState, NormsProducer norms) throws IOException {
     try (FieldsConsumer consumer = codec.postingsFormat().fieldsConsumer(segmentWriteState)) {
-      consumer.merge(mergeState);
+      consumer.merge(mergeState, norms);
     }
   }
 }
diff --git a/lucene/core/src/java/org/apache/lucene/index/SortingTermVectorsConsumer.java b/lucene/core/src/java/org/apache/lucene/index/SortingTermVectorsConsumer.java
index dff808e..054ca50 100644
--- a/lucene/core/src/java/org/apache/lucene/index/SortingTermVectorsConsumer.java
+++ b/lucene/core/src/java/org/apache/lucene/index/SortingTermVectorsConsumer.java
@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.util.Iterator;
 import java.util.Map;
 
+import org.apache.lucene.codecs.NormsProducer;
 import org.apache.lucene.codecs.TermVectorsReader;
 import org.apache.lucene.codecs.TermVectorsWriter;
 import org.apache.lucene.search.DocIdSetIterator;
@@ -37,8 +38,8 @@ final class SortingTermVectorsConsumer extends TermVectorsConsumer {
   }
 
   @Override
-  void flush(Map<String, TermsHashPerField> fieldsToFlush, final SegmentWriteState state, Sorter.DocMap sortMap) throws IOException {
-    super.flush(fieldsToFlush, state, sortMap);
+  void flush(Map<String, TermsHashPerField> fieldsToFlush, final SegmentWriteState state, Sorter.DocMap sortMap, NormsProducer norms) throws IOException {
+    super.flush(fieldsToFlush, state, sortMap, norms);
     if (tmpDirectory != null) {
       if (sortMap == null) {
         // we're lucky the index is already sorted, just rename the temporary file and return
diff --git a/lucene/core/src/java/org/apache/lucene/index/TermVectorsConsumer.java b/lucene/core/src/java/org/apache/lucene/index/TermVectorsConsumer.java
index 46dc63c..1ac20dd 100644
--- a/lucene/core/src/java/org/apache/lucene/index/TermVectorsConsumer.java
+++ b/lucene/core/src/java/org/apache/lucene/index/TermVectorsConsumer.java
@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.util.Arrays;
 import java.util.Map;
 
+import org.apache.lucene.codecs.NormsProducer;
 import org.apache.lucene.codecs.TermVectorsWriter;
 import org.apache.lucene.store.FlushInfo;
 import org.apache.lucene.store.IOContext;
@@ -53,7 +54,7 @@ class TermVectorsConsumer extends TermsHash {
   }
 
   @Override
-  void flush(Map<String, TermsHashPerField> fieldsToFlush, final SegmentWriteState state, Sorter.DocMap sortMap) throws IOException {
+  void flush(Map<String, TermsHashPerField> fieldsToFlush, final SegmentWriteState state, Sorter.DocMap sortMap, NormsProducer norms) throws IOException {
     if (writer != null) {
       int numDocs = state.segmentInfo.maxDoc();
       assert numDocs > 0;
diff --git a/lucene/core/src/java/org/apache/lucene/index/TermsEnum.java b/lucene/core/src/java/org/apache/lucene/index/TermsEnum.java
index 4b5755a..4cd3de3 100644
--- a/lucene/core/src/java/org/apache/lucene/index/TermsEnum.java
+++ b/lucene/core/src/java/org/apache/lucene/index/TermsEnum.java
@@ -19,6 +19,8 @@ package org.apache.lucene.index;
 
 import java.io.IOException;
 
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.similarities.Similarity.SimScorer;
 import org.apache.lucene.util.AttributeSource;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefIterator;
@@ -171,6 +173,21 @@ public abstract class TermsEnum implements BytesRefIterator {
   public abstract PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException;
 
   /**
+   * Return a {@link PostingsEnum} that optionally knows how to skip over
+   * documents that do not meet a
+   * {@link PostingsEnum#setMinCompetitiveScore minimum score requirement}.
+   * The returned {@link PostingsEnum} supports
+   * {@link PostingsEnum#FREQS frequencies}.
+   * The default implementation falls back to {@link
+   * #postings(PostingsEnum, int) postings(null, PostingsEnum.FREQS)} meaning
+   * it doesn't support skipping over low-scoring documents.
+   * @see Scorer#setMinCompetitiveScore(float)
+   */
+  public PostingsEnum topPostings(SimScorer scorer) throws IOException {
+    return postings(null, PostingsEnum.FREQS);
+  }
+  
+  /**
    * Expert: Returns the TermsEnums internal state to position the TermsEnum
    * without re-seeking the term dictionary.
    * <p>
@@ -228,7 +245,12 @@ public abstract class TermsEnum implements BytesRefIterator {
     public PostingsEnum postings(PostingsEnum reuse, int flags) {
       throw new IllegalStateException("this method should never be called");
     }
-      
+
+    @Override
+    public PostingsEnum topPostings(SimScorer scorer) throws IOException {
+      throw new IllegalStateException("this method should never be called");
+    }
+
     @Override
     public BytesRef next() {
       return null;
diff --git a/lucene/core/src/java/org/apache/lucene/index/TermsHash.java b/lucene/core/src/java/org/apache/lucene/index/TermsHash.java
index bede2f8..f420aca 100644
--- a/lucene/core/src/java/org/apache/lucene/index/TermsHash.java
+++ b/lucene/core/src/java/org/apache/lucene/index/TermsHash.java
@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.lucene.codecs.NormsProducer;
 import org.apache.lucene.util.ByteBlockPool;
 import org.apache.lucene.util.Counter;
 import org.apache.lucene.util.IntBlockPool;
@@ -76,13 +77,14 @@ abstract class TermsHash {
     bytePool.reset(false, false);
   }
 
-  void flush(Map<String,TermsHashPerField> fieldsToFlush, final SegmentWriteState state, Sorter.DocMap sortMap) throws IOException {
+  void flush(Map<String,TermsHashPerField> fieldsToFlush, final SegmentWriteState state,
+      Sorter.DocMap sortMap, NormsProducer norms) throws IOException {
     if (nextTermsHash != null) {
       Map<String,TermsHashPerField> nextChildFields = new HashMap<>();
       for (final Map.Entry<String,TermsHashPerField> entry : fieldsToFlush.entrySet()) {
         nextChildFields.put(entry.getKey(), entry.getValue().nextPerField);
       }
-      nextTermsHash.flush(nextChildFields, state, sortMap);
+      nextTermsHash.flush(nextChildFields, state, sortMap, norms);
     }
   }
 
diff --git a/lucene/core/src/java/org/apache/lucene/search/TermQuery.java b/lucene/core/src/java/org/apache/lucene/search/TermQuery.java
index 3fa465d..2c08f4f 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TermQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TermQuery.java
@@ -47,21 +47,21 @@ public class TermQuery extends Query {
     private final Similarity similarity;
     private final Similarity.SimScorer simScorer;
     private final TermContext termStates;
-    private final boolean needsScores;
+    private final ScoreMode scoreMode;
 
-    public TermWeight(IndexSearcher searcher, boolean needsScores,
+    public TermWeight(IndexSearcher searcher, ScoreMode scoreMode,
         float boost, TermContext termStates) throws IOException {
       super(TermQuery.this);
-      if (needsScores && termStates == null) {
+      if (scoreMode.needsScores() && termStates == null) {
         throw new IllegalStateException("termStates are required when scores are needed");
       }
-      this.needsScores = needsScores;
+      this.scoreMode = scoreMode;
       this.termStates = termStates;
       this.similarity = searcher.getSimilarity();
 
       final CollectionStatistics collectionStats;
       final TermStatistics termStats;
-      if (needsScores) {
+      if (scoreMode.needsScores()) {
         collectionStats = searcher.collectionStatistics(term.field());
         termStats = searcher.termStatistics(term, termStates);
       } else {
@@ -98,10 +98,15 @@ public class TermQuery extends Query {
           .getFieldInfos()
           .fieldInfo(getTerm().field())
           .getIndexOptions();
-      PostingsEnum docs = termsEnum.postings(null, needsScores ? PostingsEnum.FREQS : PostingsEnum.NONE);
+      PostingsEnum docs;
+      if (scoreMode == ScoreMode.TOP_SCORES) {
+        docs = termsEnum.topPostings(simScorer);
+      } else {
+        docs = termsEnum.postings(null, scoreMode.needsScores() ? PostingsEnum.FREQS : PostingsEnum.NONE);
+      }
       assert docs != null;
       float maxFreq = getMaxFreq(indexOptions, termsEnum.totalTermFreq(), termsEnum.docFreq());
-      return new TermScorer(this, docs, new LeafSimScorer(simScorer, context.reader(), needsScores, maxFreq));
+      return new TermScorer(this, docs, new LeafSimScorer(simScorer, context.reader(), scoreMode.needsScores(), maxFreq));
     }
 
     private long getMaxFreq(IndexOptions indexOptions, long ttf, long df) {
@@ -220,7 +225,7 @@ public class TermQuery extends Query {
       termState = this.perReaderTermState;
     }
 
-    return new TermWeight(searcher, scoreMode.needsScores(), boost, termState);
+    return new TermWeight(searcher, scoreMode, boost, termState);
   }
 
   /** Prints a user-readable version of this query. */
diff --git a/lucene/core/src/java/org/apache/lucene/search/TermScorer.java b/lucene/core/src/java/org/apache/lucene/search/TermScorer.java
index 653a60e..f9c8a07 100644
--- a/lucene/core/src/java/org/apache/lucene/search/TermScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/TermScorer.java
@@ -59,6 +59,11 @@ final class TermScorer extends Scorer {
   }
 
   @Override
+  public void setMinCompetitiveScore(float minScore) {
+    postingsEnum.setMinCompetitiveScore(minScore);
+  }
+
+  @Override
   public float score() throws IOException {
     assert docID() != DocIdSetIterator.NO_MORE_DOCS;
     return docScorer.score(postingsEnum.docID(), postingsEnum.freq());
diff --git a/lucene/core/src/java/org/apache/lucene/search/similarities/Similarity.java b/lucene/core/src/java/org/apache/lucene/search/similarities/Similarity.java
index a2ebe4a..2226f65 100644
--- a/lucene/core/src/java/org/apache/lucene/search/similarities/Similarity.java
+++ b/lucene/core/src/java/org/apache/lucene/search/similarities/Similarity.java
@@ -148,7 +148,17 @@ public abstract class Similarity {
     }
 
     /**
-     * Score a single document
+     * Score a single document. {@code freq} must be positive.
+     * <p>
+     * Score must not decrease when freq increases, ie. if {@code freq1 > freq2},
+     * then {@code score(freq1, norm) >= score(freq2, norm)} for any value of
+     * {@code norm} that may be produced by
+     * {@link Similarity#computeNorm(FieldInvertState)}.
+     * <p>
+     * Score must not increase when the unsigned norm increases, ie. if
+     * {@code Long.compareUnsigned(norm1, norm2) > 0} then
+     * {@code score(freq, norm1) <= score(freq, norm2)} for any legal
+     * {@code freq}.
      * @param freq sloppy term frequency
      * @param norm encoded normalization factor, as returned by {@link Similarity#computeNorm}, or {@code 1} if norms are disabled
      * @return document's score
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/TestCompetitiveFreqNormAccumulator.java b/lucene/core/src/test/org/apache/lucene/codecs/TestCompetitiveFreqNormAccumulator.java
new file mode 100644
index 0000000..a083f06
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/codecs/TestCompetitiveFreqNormAccumulator.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.codecs;
+
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.lucene.codecs.CompetitiveFreqNormAccumulator.FreqAndNorm;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class TestCompetitiveFreqNormAccumulator extends LuceneTestCase {
+
+  public void testBasics() {
+    CompetitiveFreqNormAccumulator acc = new CompetitiveFreqNormAccumulator();
+    Set<FreqAndNorm> expected = new HashSet<>();
+
+    acc.add(3, 5);
+    expected.add(new FreqAndNorm(3, 5));
+    assertEquals(expected, acc.getCompetitiveFreqNormPairs());
+
+    acc.add(6, 11);
+    expected.add(new FreqAndNorm(6, 11));
+    assertEquals(expected, acc.getCompetitiveFreqNormPairs());
+
+    acc.add(10, 13);
+    expected.add(new FreqAndNorm(10, 13));
+    assertEquals(expected, acc.getCompetitiveFreqNormPairs());
+    
+    acc.add(1, 2);
+    expected.add(new FreqAndNorm(1, 2));
+    assertEquals(expected, acc.getCompetitiveFreqNormPairs());
+
+    acc.add(7, 9);
+    expected.remove(new FreqAndNorm(6, 11));
+    expected.add(new FreqAndNorm(7, 9));
+    assertEquals(expected, acc.getCompetitiveFreqNormPairs());
+
+    acc.add(8, 2);
+    expected.clear();
+    expected.add(new FreqAndNorm(10, 13));
+    expected.add(new FreqAndNorm(8, 2));
+    assertEquals(expected, acc.getCompetitiveFreqNormPairs());
+  }
+
+  public void testOmitFreqs() {
+    CompetitiveFreqNormAccumulator acc = new CompetitiveFreqNormAccumulator();
+
+    acc.add(1, 5);
+    acc.add(1, 7);
+    acc.add(1, 4);
+
+    assertEquals(Collections.singleton(new FreqAndNorm(1, 4)), acc.getCompetitiveFreqNormPairs());
+  }
+
+}
diff --git a/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java b/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java
index 804f507..84544bc 100644
--- a/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java
+++ b/lucene/core/src/test/org/apache/lucene/codecs/perfield/TestPerFieldPostingsFormat2.java
@@ -28,6 +28,7 @@ import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.FieldsConsumer;
 import org.apache.lucene.codecs.FieldsProducer;
+import org.apache.lucene.codecs.NormsProducer;
 import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.codecs.asserting.AssertingCodec;
 import org.apache.lucene.codecs.blockterms.LuceneVarGapFixedInterval;
@@ -407,17 +408,17 @@ public class TestPerFieldPostingsFormat2 extends LuceneTestCase {
       final FieldsConsumer consumer = delegate.fieldsConsumer(state);
       return new FieldsConsumer() {
         @Override
-        public void write(Fields fields) throws IOException {
-          consumer.write(fields);
+        public void write(Fields fields, NormsProducer norms) throws IOException {
+          consumer.write(fields, norms);
         }
 
         @Override
-        public void merge(MergeState mergeState) throws IOException {
+        public void merge(MergeState mergeState, NormsProducer norms) throws IOException {
           nbMergeCalls++;
           for (FieldInfo fi : mergeState.mergeFieldInfos) {
             fieldNames.add(fi.name);
           }
-          consumer.merge(mergeState);
+          consumer.merge(mergeState, norms);
         }
 
         @Override
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java b/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
index 4625f73..5a71d05 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestCodecs.java
@@ -17,6 +17,7 @@
 package org.apache.lucene.index;
 
 
+import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
@@ -28,6 +29,7 @@ import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.FieldsConsumer;
 import org.apache.lucene.codecs.FieldsProducer;
+import org.apache.lucene.codecs.NormsProducer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field.Store;
 import org.apache.lucene.document.StringField;
@@ -752,9 +754,65 @@ public class TestCodecs extends LuceneTestCase {
 
     Arrays.sort(fields);
     FieldsConsumer consumer = codec.postingsFormat().fieldsConsumer(state);
+    NormsProducer fakeNorms = new NormsProducer() {
+      
+      @Override
+      public long ramBytesUsed() {
+        return 0;
+      }
+      
+      @Override
+      public void close() throws IOException {}
+      
+      @Override
+      public NumericDocValues getNorms(FieldInfo field) throws IOException {
+        return new NumericDocValues() {
+          
+          int doc = -1;
+          
+          @Override
+          public int nextDoc() throws IOException {
+            return advance(doc + 1);
+          }
+          
+          @Override
+          public int docID() {
+            return doc;
+          }
+          
+          @Override
+          public long cost() {
+            return si.maxDoc();
+          }
+          
+          @Override
+          public int advance(int target) throws IOException {
+            if (target >= si.maxDoc()) {
+              return doc = NO_MORE_DOCS;
+            } else {
+              return doc = target;
+            }
+          }
+          
+          @Override
+          public boolean advanceExact(int target) throws IOException {
+            doc = target;
+            return true;
+          }
+          
+          @Override
+          public long longValue() throws IOException {
+            return 1;
+          }
+        };
+      }
+      
+      @Override
+      public void checkIntegrity() throws IOException {}
+    };
     boolean success = false;
     try {
-      consumer.write(new DataFields(fields));
+      consumer.write(new DataFields(fields), fakeNorms);
       success = true;
     } finally {
       if (success) {
diff --git a/lucene/queries/src/java/org/apache/lucene/queries/function/FunctionScoreQuery.java b/lucene/queries/src/java/org/apache/lucene/queries/function/FunctionScoreQuery.java
index 09a592b..d264267 100644
--- a/lucene/queries/src/java/org/apache/lucene/queries/function/FunctionScoreQuery.java
+++ b/lucene/queries/src/java/org/apache/lucene/queries/function/FunctionScoreQuery.java
@@ -63,38 +63,6 @@ public final class FunctionScoreQuery extends Query {
     return in;
   }
 
-  /**
-   * Returns a FunctionScoreQuery where the scores of a wrapped query are multiplied by
-   * the value of a DoubleValuesSource.
-   *
-   * If the source has no value for a particular document, the score for that document
-   * is preserved as-is.
-   *
-   * @param in    the query to boost
-   * @param boost a {@link DoubleValuesSource} containing the boost values
-   */
-  public static FunctionScoreQuery boostByValue(Query in, DoubleValuesSource boost) {
-    return new FunctionScoreQuery(in, new MultiplicativeBoostValuesSource(boost));
-  }
-
-  /**
-   * Returns a FunctionScoreQuery where the scores of a wrapped query are multiplied by
-   * a boost factor if the document being scored also matches a separate boosting query.
-   *
-   * Documents that do not match the boosting query have their scores preserved.
-   *
-   * This may be used to 'demote' documents that match the boosting query, by passing in
-   * a boostValue between 0 and 1.
-   *
-   * @param in          the query to boost
-   * @param boostMatch  the boosting query
-   * @param boostValue  the amount to boost documents which match the boosting query
-   */
-  public static FunctionScoreQuery boostByQuery(Query in, Query boostMatch, float boostValue) {
-    return new FunctionScoreQuery(in,
-        new MultiplicativeBoostValuesSource(new QueryBoostValuesSource(DoubleValuesSource.fromQuery(boostMatch), boostValue)));
-  }
-
   @Override
   public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
     Weight inner = in.createWeight(searcher, scoreMode.needsScores() && source.needsScores() ? scoreMode : ScoreMode.COMPLETE_NO_SCORES, 1f);
@@ -221,123 +189,4 @@ public final class FunctionScoreQuery extends Query {
     }
 
   }
-
-  private static class MultiplicativeBoostValuesSource extends DoubleValuesSource {
-
-    private final DoubleValuesSource boost;
-
-    private MultiplicativeBoostValuesSource(DoubleValuesSource boost) {
-      this.boost = boost;
-    }
-
-    @Override
-    public DoubleValues getValues(LeafReaderContext ctx, DoubleValues scores) throws IOException {
-      DoubleValues in = DoubleValues.withDefault(boost.getValues(ctx, scores), 1);
-      return new DoubleValues() {
-        @Override
-        public double doubleValue() throws IOException {
-          return scores.doubleValue() * in.doubleValue();
-        }
-
-        @Override
-        public boolean advanceExact(int doc) throws IOException {
-          return in.advanceExact(doc);
-        }
-      };
-    }
-
-    @Override
-    public boolean needsScores() {
-      return true;
-    }
-
-    @Override
-    public DoubleValuesSource rewrite(IndexSearcher reader) throws IOException {
-      return new MultiplicativeBoostValuesSource(boost.rewrite(reader));
-    }
-
-    @Override
-    public boolean equals(Object o) {
-      if (this == o) return true;
-      if (o == null || getClass() != o.getClass()) return false;
-      MultiplicativeBoostValuesSource that = (MultiplicativeBoostValuesSource) o;
-      return Objects.equals(boost, that.boost);
-    }
-
-    @Override
-    public int hashCode() {
-      return Objects.hash(boost);
-    }
-
-    @Override
-    public String toString() {
-      return "boost(" + boost.toString() + ")";
-    }
-
-    @Override
-    public boolean isCacheable(LeafReaderContext ctx) {
-      return boost.isCacheable(ctx);
-    }
-  }
-
-  private static class QueryBoostValuesSource extends DoubleValuesSource {
-
-    private final DoubleValuesSource query;
-    private final float boost;
-
-    QueryBoostValuesSource(DoubleValuesSource query, float boost) {
-      this.query = query;
-      this.boost = boost;
-    }
-
-    @Override
-    public DoubleValues getValues(LeafReaderContext ctx, DoubleValues scores) throws IOException {
-      DoubleValues in = query.getValues(ctx, null);
-      return DoubleValues.withDefault(new DoubleValues() {
-        @Override
-        public double doubleValue() {
-          return boost;
-        }
-
-        @Override
-        public boolean advanceExact(int doc) throws IOException {
-          return in.advanceExact(doc);
-        }
-      }, 1);
-    }
-
-    @Override
-    public boolean needsScores() {
-      return false;
-    }
-
-    @Override
-    public DoubleValuesSource rewrite(IndexSearcher reader) throws IOException {
-      return new QueryBoostValuesSource(query.rewrite(reader), boost);
-    }
-
-    @Override
-    public boolean equals(Object o) {
-      if (this == o) return true;
-      if (o == null || getClass() != o.getClass()) return false;
-      QueryBoostValuesSource that = (QueryBoostValuesSource) o;
-      return Float.compare(that.boost, boost) == 0 &&
-          Objects.equals(query, that.query);
-    }
-
-    @Override
-    public int hashCode() {
-      return Objects.hash(query, boost);
-    }
-
-    @Override
-    public String toString() {
-      return "queryboost(" + query + ")^" + boost;
-    }
-
-    @Override
-    public boolean isCacheable(LeafReaderContext ctx) {
-      return query.isCacheable(ctx);
-    }
-  }
 }
diff --git a/lucene/queries/src/test/org/apache/lucene/queries/function/TestFunctionScoreQuery.java b/lucene/queries/src/test/org/apache/lucene/queries/function/TestFunctionScoreQuery.java
index 4e82eb2..c0560bc 100644
--- a/lucene/queries/src/test/org/apache/lucene/queries/function/TestFunctionScoreQuery.java
+++ b/lucene/queries/src/test/org/apache/lucene/queries/function/TestFunctionScoreQuery.java
@@ -80,13 +80,18 @@ public class TestFunctionScoreQuery extends FunctionTestSetup {
   // CustomScoreQuery and BoostedQuery equivalent
   public void testScoreModifyingSource() throws Exception {
 
+    SimpleBindings bindings = new SimpleBindings();
+    bindings.add("score", DoubleValuesSource.SCORES);
+    bindings.add("iii", DoubleValuesSource.fromIntField("iii"));
+    Expression expr = JavascriptCompiler.compile("score * iii");
+
     BooleanQuery bq = new BooleanQuery.Builder()
         .add(new TermQuery(new Term(TEXT_FIELD, "first")), BooleanClause.Occur.SHOULD)
         .add(new TermQuery(new Term(TEXT_FIELD, "text")), BooleanClause.Occur.SHOULD)
         .build();
     TopDocs plain = searcher.search(bq, 1);
 
-    FunctionScoreQuery fq = FunctionScoreQuery.boostByValue(bq, DoubleValuesSource.fromIntField("iii"));
+    FunctionScoreQuery fq = new FunctionScoreQuery(bq, expr.getDoubleValuesSource(bindings));
 
     QueryUtils.check(random(), fq, searcher, rarely());
 
@@ -103,16 +108,20 @@ public class TestFunctionScoreQuery extends FunctionTestSetup {
   // BoostingQuery equivalent
   public void testCombiningMultipleQueryScores() throws Exception {
 
+    SimpleBindings bindings = new SimpleBindings();
+    bindings.add("score", DoubleValuesSource.SCORES);
+    bindings.add("testquery", DoubleValuesSource.fromQuery(new TermQuery(new Term(TEXT_FIELD, "rechecking"))));
+    Expression expr = JavascriptCompiler.compile("score + (testquery * 100)");
+
     TermQuery q = new TermQuery(new Term(TEXT_FIELD, "text"));
     TopDocs plain = searcher.search(q, 1);
 
-    FunctionScoreQuery fq
-        = FunctionScoreQuery.boostByQuery(q, new TermQuery(new Term(TEXT_FIELD, "rechecking")), 100f);
+    FunctionScoreQuery fq = new FunctionScoreQuery(q, expr.getDoubleValuesSource(bindings));
 
     QueryUtils.check(random(), fq, searcher, rarely());
 
     int[] expectedDocs = new int[]{ 6, 1, 0, 2, 8 };
-    TopDocs docs = searcher.search(fq, 20);
+    TopDocs docs = searcher.search(fq, 5);
     assertEquals(plain.totalHits, docs.totalHits);
     for (int i = 0; i < expectedDocs.length; i++) {
       assertEquals(expectedDocs[i], docs.scoreDocs[i].doc);
diff --git a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionPostingsWriter.java b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionPostingsWriter.java
index fc643d2..30e1980 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionPostingsWriter.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/IDVersionPostingsWriter.java
@@ -23,6 +23,7 @@ import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.codecs.PushPostingsWriterBase;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.SegmentWriteState;
 import org.apache.lucene.store.DataOutput;
 import org.apache.lucene.store.IndexOutput;
@@ -78,7 +79,7 @@ final class IDVersionPostingsWriter extends PushPostingsWriterBase {
   }
 
   @Override
-  public void startTerm() {
+  public void startTerm(NumericDocValues norms) {
     lastDocID = -1;
   }
 
diff --git a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/VersionBlockTreeTermsWriter.java b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/VersionBlockTreeTermsWriter.java
index e9187af..d8abc6a 100644
--- a/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/VersionBlockTreeTermsWriter.java
+++ b/lucene/sandbox/src/java/org/apache/lucene/codecs/idversion/VersionBlockTreeTermsWriter.java
@@ -23,6 +23,7 @@ import java.util.List;
 import org.apache.lucene.codecs.BlockTermState;
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.codecs.FieldsConsumer;
+import org.apache.lucene.codecs.NormsProducer;
 import org.apache.lucene.codecs.PostingsWriterBase;
 import org.apache.lucene.codecs.blocktree.BlockTreeTermsWriter;
 import org.apache.lucene.index.FieldInfo;
@@ -221,7 +222,7 @@ public final class VersionBlockTreeTermsWriter extends FieldsConsumer {
   }
 
   @Override
-  public void write(Fields fields) throws IOException {
+  public void write(Fields fields, NormsProducer norms) throws IOException {
 
     String lastField = null;
     for(String field : fields) {
@@ -730,7 +731,7 @@ public final class VersionBlockTreeTermsWriter extends FieldsConsumer {
     
     /** Writes one term's worth of postings. */
     public void write(BytesRef text, TermsEnum termsEnum) throws IOException {
-      BlockTermState state = postingsWriter.writeTerm(text, termsEnum, docsSeen);
+      BlockTermState state = postingsWriter.writeTerm(text, termsEnum, docsSeen, null);
       // TODO: LUCENE-5693: we don't need this check if we fix IW to not send deleted docs to us on flush:
       if (state != null && ((IDVersionPostingsWriter) postingsWriter).lastDocID != -1) {
         assert state.docFreq != 0;
diff --git a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionFieldsConsumer.java b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionFieldsConsumer.java
index 9df9d60..5ab16a9 100644
--- a/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionFieldsConsumer.java
+++ b/lucene/suggest/src/java/org/apache/lucene/search/suggest/document/CompletionFieldsConsumer.java
@@ -22,6 +22,7 @@ import java.util.Map;
 
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.codecs.FieldsConsumer;
+import org.apache.lucene.codecs.NormsProducer;
 import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.Fields;
@@ -44,7 +45,7 @@ import static org.apache.lucene.search.suggest.document.CompletionPostingsFormat
 
 /**
  * <p>
- * Weighted FSTs for any indexed {@link SuggestField} is built on {@link #write(Fields)}.
+ * Weighted FSTs for any indexed {@link SuggestField} are built on {@link #write(Fields,NormsProducer)}.
  * A weighted FST maps the analyzed forms of a field to its
  * surface form and document id. FSTs are stored in the CompletionDictionary (.lkp).
  * </p>
@@ -80,8 +81,8 @@ final class CompletionFieldsConsumer extends FieldsConsumer {
   }
 
   @Override
-  public void write(Fields fields) throws IOException {
-    delegateFieldsConsumer.write(fields);
+  public void write(Fields fields, NormsProducer norms) throws IOException {
+    delegateFieldsConsumer.write(fields, norms);
 
     for (String field : fields) {
       CompletionTermWriter termWriter = new CompletionTermWriter();
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java
index a89b508..e71903d 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/asserting/AssertingPostingsFormat.java
@@ -22,6 +22,7 @@ import java.util.Iterator;
 
 import org.apache.lucene.codecs.FieldsConsumer;
 import org.apache.lucene.codecs.FieldsProducer;
+import org.apache.lucene.codecs.NormsProducer;
 import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.index.AssertingLeafReader;
 import org.apache.lucene.index.FieldInfo;
@@ -133,8 +134,8 @@ public final class AssertingPostingsFormat extends PostingsFormat {
     }
     
     @Override
-    public void write(Fields fields) throws IOException {
-      in.write(fields);
+    public void write(Fields fields, NormsProducer norms) throws IOException {
+      in.write(fields, norms);
 
       // TODO: more asserts?  can we somehow run a
       // "limited" CheckIndex here???  Or ... can we improve
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyPostingsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyPostingsFormat.java
index 2ca1bc7..00e168b 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyPostingsFormat.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/cranky/CrankyPostingsFormat.java
@@ -21,6 +21,7 @@ import java.util.Random;
 
 import org.apache.lucene.codecs.FieldsConsumer;
 import org.apache.lucene.codecs.FieldsProducer;
+import org.apache.lucene.codecs.NormsProducer;
 import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.SegmentReadState;
@@ -61,11 +62,11 @@ class CrankyPostingsFormat extends PostingsFormat {
     }
     
     @Override
-    public void write(Fields fields) throws IOException {
+    public void write(Fields fields, NormsProducer norms) throws IOException {
       if (random.nextInt(100) == 0) {
         throw new IOException("Fake IOException from FieldsConsumer.write()");
       }  
-      delegate.write(fields);
+      delegate.write(fields, norms);
     }
 
     @Override
diff --git a/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java b/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java
index 4b85f13..24c94a3 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/codecs/ramonly/RAMOnlyPostingsFormat.java
@@ -31,6 +31,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.codecs.FieldsConsumer;
 import org.apache.lucene.codecs.FieldsProducer;
+import org.apache.lucene.codecs.NormsProducer;
 import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.codecs.TermStats;
 import org.apache.lucene.index.PostingsEnum;
@@ -227,7 +228,7 @@ public final class RAMOnlyPostingsFormat extends PostingsFormat {
     }
 
     @Override
-    public void write(Fields fields) throws IOException {
+    public void write(Fields fields, NormsProducer norms) throws IOException {
       for(String field : fields) {
 
         Terms terms = fields.terms(field);
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java b/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java
index c87697b..22b3625 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/AssertingLeafReader.java
@@ -335,6 +335,7 @@ public class AssertingLeafReader extends FilterLeafReader {
     int positionCount = 0;
     int positionMax = 0;
     private int doc;
+    private float minScore;
 
     public AssertingPostingsEnum(PostingsEnum in) {
       super(in);
@@ -431,11 +432,18 @@ public class AssertingLeafReader extends FilterLeafReader {
       assert payload == null || payload.length > 0 : "getPayload() returned payload with invalid length!";
       return payload;
     }
-    
+
+    @Override
+    public void setMinCompetitiveScore(float minScore) {
+      assert minScore >= this.minScore : "decreasing min competitive score: " + this.minScore + " -> " + minScore;
+      this.minScore = minScore;
+    }
+
     void reset() {
       state = DocsEnumState.START;
       doc = in.docID();
       positionCount = positionMax = 0;
+      minScore = 0;
     }
   }
 
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java
index ab92946..f5b5223 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BaseIndexFileFormatTestCase.java
@@ -334,8 +334,30 @@ abstract class BaseIndexFileFormatTestCase extends LuceneTestCase {
     SegmentReadState readState = new SegmentReadState(dir, segmentInfo, fieldInfos, IOContext.READ);
 
     // PostingsFormat
+    NormsProducer fakeNorms = new NormsProducer() {
+
+      @Override
+      public void close() throws IOException {}
+
+      @Override
+      public long ramBytesUsed() {
+        return 0;
+      }
+
+      @Override
+      public NumericDocValues getNorms(FieldInfo field) throws IOException {
+        if (field.hasNorms() == false) {
+          return null;
+        }
+        return oneDocReader.getNormValues(field.name);
+      }
+
+      @Override
+      public void checkIntegrity() throws IOException {}
+      
+    };
     try (FieldsConsumer consumer = codec.postingsFormat().fieldsConsumer(writeState)) {
-      consumer.write(MultiFields.getFields(oneDocReader));
+      consumer.write(MultiFields.getFields(oneDocReader), fakeNorms);
       IOUtils.close(consumer);
       IOUtils.close(consumer);
     }
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java b/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java
index f69ca55..51b418e 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/BasePostingsFormatTestCase.java
@@ -32,6 +32,7 @@ import org.apache.lucene.analysis.Token;
 import org.apache.lucene.codecs.FieldsConsumer;
 import org.apache.lucene.codecs.FieldsProducer;
 import org.apache.lucene.codecs.FilterCodec;
+import org.apache.lucene.codecs.NormsProducer;
 import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
@@ -398,8 +399,8 @@ public abstract class BasePostingsFormatTestCase extends BaseIndexFileFormatTest
 
               return new FieldsConsumer() {
                 @Override
-                public void write(Fields fields) throws IOException {
-                  fieldsConsumer.write(fields);
+                public void write(Fields fields, NormsProducer norms) throws IOException {
+                  fieldsConsumer.write(fields, norms);
 
                   boolean isMerge = state.context.context == IOContext.Context.MERGE;
 
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/RandomPostingsTester.java b/lucene/test-framework/src/java/org/apache/lucene/index/RandomPostingsTester.java
index d5eb105..2a02076 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/RandomPostingsTester.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/RandomPostingsTester.java
@@ -32,10 +32,14 @@ import java.util.Random;
 import java.util.Set;
 import java.util.SortedMap;
 import java.util.TreeMap;
+import java.util.function.IntToLongFunction;
 
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.FieldsConsumer;
 import org.apache.lucene.codecs.FieldsProducer;
+import org.apache.lucene.codecs.NormsProducer;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.similarities.Similarity.SimScorer;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FlushInfo;
 import org.apache.lucene.store.IOContext;
@@ -60,6 +64,8 @@ import static org.junit.Assert.assertTrue;
 /** Helper class extracted from BasePostingsFormatTestCase to exercise a postings format. */
 public class RandomPostingsTester {
 
+  private static final IntToLongFunction DOC_TO_NORM = doc -> 1 + (doc & 0x0f);
+
   /** Which features to test. */
   public enum Option {
     // Sometimes use .advance():
@@ -653,10 +659,70 @@ public class RandomPostingsTester {
 
     Fields seedFields = new SeedFields(fields, newFieldInfos, maxAllowed, allowPayloads);
 
+    NormsProducer fakeNorms = new NormsProducer() {
+
+      @Override
+      public void close() throws IOException {}
+
+      @Override
+      public long ramBytesUsed() {
+        return 0;
+      }
+
+      @Override
+      public NumericDocValues getNorms(FieldInfo field) throws IOException {
+        if (newFieldInfos.fieldInfo(field.number).hasNorms()) {
+          return new NumericDocValues() {
+            
+            int doc = -1;
+            
+            @Override
+            public int nextDoc() throws IOException {
+              if (++doc == segmentInfo.maxDoc()) {
+                return doc = NO_MORE_DOCS;
+              }
+              return doc;
+            }
+            
+            @Override
+            public int docID() {
+              return doc;
+            }
+            
+            @Override
+            public long cost() {
+              return segmentInfo.maxDoc();
+            }
+            
+            @Override
+            public int advance(int target) throws IOException {
+              return doc = target >= segmentInfo.maxDoc() ? DocIdSetIterator.NO_MORE_DOCS : target;
+            }
+            
+            @Override
+            public boolean advanceExact(int target) throws IOException {
+              doc = target;
+              return true;
+            }
+            
+            @Override
+            public long longValue() throws IOException {
+              return DOC_TO_NORM.applyAsLong(doc);
+            }
+          };
+        } else {
+          return null;
+        }
+      }
+
+      @Override
+      public void checkIntegrity() throws IOException {}
+      
+    };
     FieldsConsumer consumer = codec.postingsFormat().fieldsConsumer(writeState);
     boolean success = false;
     try {
-      consumer.write(seedFields);
+      consumer.write(seedFields, fakeNorms);
       success = true;
     } finally {
       if (success) {
@@ -975,6 +1041,70 @@ public class RandomPostingsTester {
         }
       }
     }
+
+    if (options.contains(Option.SKIPPING)) {
+      final IntToLongFunction docToNorm;
+      if (fieldInfo.hasNorms()) {
+        docToNorm = DOC_TO_NORM;
+      } else {
+        docToNorm = doc -> 1L;
+      }
+      for (int s = 0; s < 3; ++s) {
+        final int scoreMode = s;
+        SimScorer scorer = new SimScorer(field) {
+          @Override
+          public float score(float freq, long norm) throws IOException {
+            switch (scoreMode) {
+              case 0:
+                return freq; // make sure the postings record the best freq
+              case 1:
+                return 1f / norm; // make sure the postings record the best norm
+              default:
+                return freq - norm + 1 + 0x0f; // now a combination that could make intermediate pairs more competitive
+            }
+          }
+          @Override
+          public float maxScore(float maxFreq) {
+            return Float.POSITIVE_INFINITY;
+          }
+        };
+        float minScore = 0;
+        float someScore = 0;
+        PostingsEnum topPostings = termsEnum.topPostings(scorer);
+        PostingsEnum postings = termsEnum.postings(null, PostingsEnum.FREQS);
+        while (true) {
+          int doc = topPostings.docID();
+          if (random.nextInt(8) == 0) {
+            minScore = Math.max(minScore, Math.nextUp(someScore));
+            topPostings.setMinCompetitiveScore(minScore);
+          }
+          if (random.nextBoolean()) {
+            doc = topPostings.nextDoc();
+            postings.nextDoc();
+          } else {
+            int delta = Math.min(1 + random.nextInt(512), DocIdSetIterator.NO_MORE_DOCS - doc);
+            int target = topPostings.docID() + delta;
+            doc = topPostings.advance(target);
+            postings.advance(target);
+          }
+          if (minScore == 0) {
+            assertEquals("Missed doc: " + postings.docID(), postings.docID(), doc);
+          }
+          while (postings.docID() < doc) {
+            float score = scorer.score(postings.freq(), docToNorm.applyAsLong(doc));
+            assertTrue("Score skipping skipped document " + postings.docID() + " even though its score=" + score + " >= minScore=" + minScore, score < minScore);
+            postings.nextDoc();
+          }
+          assertEquals("Found illegal doc: " + doc, doc, postings.docID());
+          if (doc == DocIdSetIterator.NO_MORE_DOCS) {
+            break;
+          }
+          if (random.nextBoolean()) {
+            someScore = scorer.score(postings.freq(), docToNorm.applyAsLong(doc));
+          }
+        }
+      }
+    }
   }
 
   private static class TestThread extends Thread {
@@ -1017,7 +1147,7 @@ public class RandomPostingsTester {
                         final IndexOptions maxIndexOptions,
                         final boolean alwaysTestMax) throws Exception {
 
     if (options.contains(Option.THREADS)) {
       int numThreads = TestUtil.nextInt(random, 2, 5);
       Thread[] threads = new Thread[numThreads];
       for(int threadUpto=0;threadUpto<numThreads;threadUpto++) {
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 61551e0..d30015e 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -57,9 +57,6 @@ Upgrade Notes
   Autoscaling API which is now a no-op. Use the 'triggerCooldownPeriodSeconds' instead to pause event
   processing.
 
-* SOLR-11798: The top-level <highlighting> syntax in solrconfig.xml is now formally
-  deprecated in favour of <searchComponent> equivalent syntax. See also SOLR-1696.
-
 New Features
 ----------------------
 * SOLR-11285: Simulation framework for autoscaling. (ab)
@@ -110,12 +107,6 @@ Other Changes
 
 * SOLR-11805: SolrJ's SolrResponse.getElaspedTime was sometimes a millisecond off. (David Smiley)
 
-* SOLR-11798: Formally deprecate top-level <highlighting> syntax in solrconfig.xml
-  in favour of <searchComponent> equivalent syntax. (Christine Poerschke)
-
-* SOLR-11801: Support customisation of the "highlighting" query response element.
-  (Ramsey Haddad, Pranav Murugappan, David Smiley, Christine Poerschke)
-
 ==================  7.2.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.
diff --git a/solr/core/src/java/org/apache/solr/handler/component/HighlightComponent.java b/solr/core/src/java/org/apache/solr/handler/component/HighlightComponent.java
index 0ee6855..f1c9680 100644
--- a/solr/core/src/java/org/apache/solr/handler/component/HighlightComponent.java
+++ b/solr/core/src/java/org/apache/solr/handler/component/HighlightComponent.java
@@ -130,9 +130,14 @@ public class HighlightComponent extends SearchComponent implements PluginInfoIni
   public void inform(SolrCore core) {
     List<PluginInfo> children = info.getChildren("highlighting");
     if(children.isEmpty()) {
-      DefaultSolrHighlighter defHighlighter = new DefaultSolrHighlighter(core);
-      defHighlighter.init(PluginInfo.EMPTY_INFO);
-      solrConfigHighlighter = defHighlighter;
+      PluginInfo pluginInfo = core.getSolrConfig().getPluginInfo(SolrHighlighter.class.getName()); //TODO deprecated configuration remove later
+      if (pluginInfo != null) {
+        solrConfigHighlighter = core.createInitInstance(pluginInfo, SolrHighlighter.class, null, DefaultSolrHighlighter.class.getName());
+      } else {
+        DefaultSolrHighlighter defHighlighter = new DefaultSolrHighlighter(core);
+        defHighlighter.init(PluginInfo.EMPTY_INFO);
+        solrConfigHighlighter = defHighlighter;
+      }
     } else {
       solrConfigHighlighter = core.createInitInstance(children.get(0),SolrHighlighter.class,null, DefaultSolrHighlighter.class.getName());
     }
@@ -175,7 +180,7 @@ public class HighlightComponent extends SearchComponent implements PluginInfoIni
         
         if(sumData != null) {
           // TODO ???? add this directly to the response?
-          rb.rsp.add(highlightingResponseField(), convertHighlights(sumData));
+          rb.rsp.add("highlighting", sumData);
         }
       }
     }
@@ -233,8 +238,7 @@ public class HighlightComponent extends SearchComponent implements PluginInfoIni
   public void finishStage(ResponseBuilder rb) {
     if (rb.doHighlights && rb.stage == ResponseBuilder.STAGE_GET_FIELDS) {
 
-      final Object[] objArr = newHighlightsArray(rb.resultIds.size());
-      final String highlightingResponseField = highlightingResponseField();
+      NamedList.NamedListEntry[] arr = new NamedList.NamedListEntry[rb.resultIds.size()];
 
       // TODO: make a generic routine to do automatic merging of id keyed data
       for (ShardRequest sreq : rb.finished) {
@@ -245,12 +249,13 @@ public class HighlightComponent extends SearchComponent implements PluginInfoIni
             // this should only happen when using shards.tolerant=true
             continue;
           }
-          Object hl = srsp.getSolrResponse().getResponse().get(highlightingResponseField);
-          addHighlights(objArr, hl, rb.resultIds);
+          NamedList hl = (NamedList)srsp.getSolrResponse().getResponse().get("highlighting");
+          SolrPluginUtils.copyNamedListIntoArrayByDocPosInResponse(hl, rb.resultIds, arr);
         }
       }
 
-      rb.rsp.add(highlightingResponseField, getAllHighlights(objArr));
+      // remove nulls in case not all docs were able to be retrieved
+      rb.rsp.add("highlighting", SolrPluginUtils.removeNulls(arr, new SimpleOrderedMap<>()));
     }
   }
 
@@ -267,33 +272,4 @@ public class HighlightComponent extends SearchComponent implements PluginInfoIni
   public Category getCategory() {
     return Category.HIGHLIGHTER;
   }
-
-  ////////////////////////////////////////////
-  ///  highlighting response collation
-  ////////////////////////////////////////////
-
-  protected String highlightingResponseField() {
-    return "highlighting";
-  }
-
-  protected Object convertHighlights(NamedList hl) {
-    return hl;
-  }
-
-  protected Object[] newHighlightsArray(int size) {
-    return new NamedList.NamedListEntry[size];
-  }
-
-  protected void addHighlights(Object[] objArr, Object obj, Map<Object, ShardDoc> resultIds) {
-    Map.Entry<String, Object>[] arr = (Map.Entry<String, Object>[])objArr;
-    NamedList hl = (NamedList)obj;
-    SolrPluginUtils.copyNamedListIntoArrayByDocPosInResponse(hl, resultIds, arr);
-  }
-
-  protected Object getAllHighlights(Object[] objArr) {
-      final Map.Entry<String, Object>[] arr = (Map.Entry<String, Object>[])objArr;
-      // remove nulls in case not all docs were able to be retrieved
-      return SolrPluginUtils.removeNulls(arr, new SimpleOrderedMap<>());
-  }
-
 }
diff --git a/solr/core/src/test/org/apache/solr/handler/component/CustomHighlightComponentTest.java b/solr/core/src/test/org/apache/solr/handler/component/CustomHighlightComponentTest.java
deleted file mode 100644
index 5f74064..0000000
--- a/solr/core/src/test/org/apache/solr/handler/component/CustomHighlightComponentTest.java
+++ /dev/null
@@ -1,310 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.solr.handler.component;
-
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import org.apache.solr.client.solrj.SolrQuery;
-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.client.solrj.request.UpdateRequest;
-import org.apache.solr.client.solrj.response.QueryResponse;
-import org.apache.solr.cloud.AbstractDistribZkTestBase;
-import org.apache.solr.cloud.SolrCloudTestCase;
-import org.apache.solr.cloud.TestCloudSearcherWarming.ConfigRequest;
-import org.apache.solr.common.util.NamedList;
-import org.apache.solr.common.util.SimpleOrderedMap;
-import org.apache.solr.highlight.SolrFragmentsBuilder;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-public class CustomHighlightComponentTest extends SolrCloudTestCase {
-
-  public static class CustomHighlightComponent extends HighlightComponent {
-
-    protected String id_key = "id";
-    protected String snippets_key = "snippets";
-
-    @Override
-    protected String highlightingResponseField() {
-      return "custom_highlighting";
-    }
-
-    @Override
-    protected Object convertHighlights(NamedList hl) {
-      final ArrayList<SimpleOrderedMap> hlMaps = new ArrayList<>();
-      for (int i=0; i<hl.size(); ++i) {
-          SimpleOrderedMap hlMap = new SimpleOrderedMap<Object>();
-          hlMap.add(id_key, hl.getName(i));
-          hlMap.add(snippets_key, hl.getVal(i));
-          hlMaps.add(hlMap);
-      }
-      return hlMaps;
-    }
-
-    @Override
-    protected Object[] newHighlightsArray(int size) {
-      return new SimpleOrderedMap[size];
-    }
-
-    @Override
-    protected void addHighlights(Object[] objArr, Object obj, Map<Object, ShardDoc> resultIds) {
-      SimpleOrderedMap[] mapArr = (SimpleOrderedMap[])objArr;
-      final ArrayList<SimpleOrderedMap> hlMaps = (ArrayList<SimpleOrderedMap>)obj;
-      for (SimpleOrderedMap hlMap : hlMaps) {
-        String id = (String)hlMap.get(id_key);
-        ShardDoc sdoc = resultIds.get(id);
-        int idx = sdoc.positionInResponse;
-        mapArr[idx] = hlMap;
-      }
-    }
-
-    @Override
-    protected Object getAllHighlights(Object[] objArr) {
-      final SimpleOrderedMap[] mapArr = (SimpleOrderedMap[])objArr;
-      // remove nulls in case not all docs were able to be retrieved
-      ArrayList<SimpleOrderedMap> mapList = new ArrayList<>();
-      for (SimpleOrderedMap map : mapArr) {
-        if (map != null) {
-          mapList.add((SimpleOrderedMap)map);
-        }
-      }
-      return mapList;
-    }
-
-  }
-
-  protected String customHighlightComponentClassName() {
-    return CustomHighlightComponent.class.getName();
-  }
-
-  protected String id_key = "id";
-  protected String snippets_key = "snippets";
-
-  private static String COLLECTION;
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-
-    // decide collection name ...
-    COLLECTION = "collection"+(1+random().nextInt(100)) ;
-    // ... and shard/replica/node numbers
-    final int numShards = 3;
-    final int numReplicas = 2;
-    final int maxShardsPerNode = 2;
-    final int nodeCount = (numShards*numReplicas + (maxShardsPerNode-1))/maxShardsPerNode;
-
-    // create and configure cluster
-    configureCluster(nodeCount)
-        .addConfig("conf", configset("cloud-dynamic"))
-        .configure();
-
-    // create an empty collection
-    CollectionAdminRequest
-    .createCollection(COLLECTION, "conf", numShards, numReplicas)
-    .setMaxShardsPerNode(maxShardsPerNode)
-    .processAndWait(cluster.getSolrClient(), DEFAULT_TIMEOUT);
-    AbstractDistribZkTestBase.waitForRecoveriesToFinish(COLLECTION, cluster.getSolrClient().getZkStateReader(), false, true, DEFAULT_TIMEOUT);
-  }
-
-  @Test
-  public void test() throws Exception {
-
-    // determine custom search handler name (the exact name should not matter)
-    final String customSearchHandlerName = "/custom_select"+random().nextInt();
-
-    final String defaultHighlightComponentName = HighlightComponent.COMPONENT_NAME;
-    final String highlightComponentName;
-
-    // add custom component (if needed) and handler
-    {
-      if (random().nextBoolean()) {
-        // default component
-        highlightComponentName = defaultHighlightComponentName;
-      } else {
-        // custom component
-        highlightComponentName = "customhighlight"+random().nextInt();
-        cluster.getSolrClient().request(
-            new ConfigRequest(
-                SolrRequest.METHOD.POST,
-                "/config",
-                "{\n" +
-                "  'add-searchcomponent': {\n" +
-                "    'name': '"+highlightComponentName+"',\n" +
-                "    'class': '"+customHighlightComponentClassName()+"'\n" +
-                "  }\n" +
-                "}"),
-            COLLECTION);
-      }
-      // handler
-      cluster.getSolrClient().request(
-          new ConfigRequest(
-              SolrRequest.METHOD.POST,
-              "/config",
-              "{\n" +
-              "  'add-requesthandler': {\n" +
-              "    'name' : '"+customSearchHandlerName+"',\n" +
-              "    'class' : 'org.apache.solr.handler.component.SearchHandler',\n" +
-              "    'components' : [ '"+QueryComponent.COMPONENT_NAME+"', '"+highlightComponentName+"' ]\n" +
-              "  }\n" +
-              "}"),
-          COLLECTION);
-    }
-
-    // add some documents
-    final String id = "id";
-    final String t1 = "a_t";
-    final String t2 = "b_t";
-    {
-      new UpdateRequest()
-          .add(sdoc(id, 1, t1, "bumble bee", t2, "bumble bee"))
-          .add(sdoc(id, 2, t1, "honey bee", t2, "honey bee"))
-          .add(sdoc(id, 3, t1, "solitary bee", t2, "solitary bee"))
-          .commit(cluster.getSolrClient(), COLLECTION);
-    }
-
-    // search for the documents
-    {
-      // compose the query
-      final SolrQuery solrQuery =  new SolrQuery(t1+":bee");
-      solrQuery.setRequestHandler(customSearchHandlerName);
-      solrQuery.setHighlight(true);
-      final boolean t1Highlights = random().nextBoolean();
-      if (t1Highlights) {
-        solrQuery.addHighlightField(t1);
-      }
-      final boolean t2Highlights = random().nextBoolean();
-      if (t2Highlights) {
-        solrQuery.addHighlightField(t2);
-      }
-
-      // make the query
-      final QueryResponse queryResponse = new QueryRequest(solrQuery)
-          .process(cluster.getSolrClient(), COLLECTION);
-
-      // analyse the response
-      final Map<String, Map<String, List<String>>> highlighting = queryResponse.getHighlighting();
-      final ArrayList<SimpleOrderedMap<Object>> custom_highlighting =
-          (ArrayList<SimpleOrderedMap<Object>>)queryResponse.getResponse().get("custom_highlighting");
-
-      if (defaultHighlightComponentName.equals(highlightComponentName)) {
-        // regular 'highlighting' ...
-        if (t1Highlights) {
-          checkHighlightingResponseMap(highlighting, t1);
-        }
-        if (t2Highlights) {
-          checkHighlightingResponseMap(highlighting, t2);
-        }
-        if (!t1Highlights && !t2Highlights) {
-          checkHighlightingResponseMap(highlighting, null);
-        }
-        // ... and no 'custom_highlighting'
-        assertNull(custom_highlighting);
-      } else {
-        // no regular 'highlighting' ...
-        assertNull(highlighting);
-        // ... but 'custom_highlighting'
-        assertNotNull(custom_highlighting);
-        if (t1Highlights) {
-          checkHighlightingResponseList(custom_highlighting, t1);
-        }
-        if (t2Highlights) {
-          checkHighlightingResponseList(custom_highlighting, t2);
-        }
-        if (!t1Highlights && !t2Highlights) {
-          checkHighlightingResponseList(custom_highlighting, null);
-        }
-      }
-    }
-  }
-
-  protected void checkHighlightingResponseMap(Map<String, Map<String, List<String>>> highlightingMap,
-      String highlightedField) throws Exception {
-    assertEquals("too few or too many keys: "+highlightingMap.keySet(),
-        3, highlightingMap.size());
-    checkHighlightingResponseMapElement(highlightingMap.get("1"), highlightedField, "bumble ", "bee");
-    checkHighlightingResponseMapElement(highlightingMap.get("2"), highlightedField, "honey ", "bee");
-    checkHighlightingResponseMapElement(highlightingMap.get("3"), highlightedField, "solitary ", "bee");
-  }
-
-  protected void checkHighlightingResponseMapElement(Map<String, List<String>> docHighlights,
-      String highlightedField, String preHighlightText, String highlightedText) throws Exception {
-    if (highlightedField == null) {
-      assertEquals(0, docHighlights.size());
-    } else {
-      List<String> docHighlightsList = docHighlights.get(highlightedField);
-      assertEquals(1, docHighlightsList.size());
-      assertEquals(preHighlightText
-          + SolrFragmentsBuilder.DEFAULT_PRE_TAGS
-          + highlightedText
-          + SolrFragmentsBuilder.DEFAULT_POST_TAGS, docHighlightsList.get(0));
-    }
-  }
-
-  protected void checkHighlightingResponseList(ArrayList<SimpleOrderedMap<Object>> highlightingList,
-      String highlightedField) throws Exception {
-    assertEquals("too few or too many elements: "+highlightingList.size(),
-        3, highlightingList.size());
-    final Set<String> seenDocIds = new HashSet<>();
-    for (SimpleOrderedMap<Object> highlightingListElementMap : highlightingList) {
-      final String expectedHighlightText;
-      final String actualHighlightText;
-      // two elements in total: id and snippets
-      assertEquals(highlightingList.toString(), 2, highlightingListElementMap.size());
-      // id element
-      {
-        final String docId = (String)highlightingListElementMap.get(id_key);
-        seenDocIds.add(docId);
-        final String preHighlightText;
-        final String highlightedText = "bee";
-        if ("1".equals(docId)) {
-          preHighlightText = "bumble ";
-        } else if ("2".equals(docId)) {
-          preHighlightText = "honey ";
-        } else if ("3".equals(docId)) {
-          preHighlightText = "solitary ";
-        } else  {
-          preHighlightText = null;
-          fail("unknown docId "+docId);
-        }
-        expectedHighlightText = preHighlightText
-            + SolrFragmentsBuilder.DEFAULT_PRE_TAGS
-            + highlightedText
-            + SolrFragmentsBuilder.DEFAULT_POST_TAGS;
-      }
-      // snippets element
-      {
-        SimpleOrderedMap<Object> snippets = (SimpleOrderedMap<Object>)highlightingListElementMap.get(snippets_key);
-        if (highlightedField == null) {
-          assertEquals(0, snippets.size());
-        } else {
-          ArrayList<String> docHighlights = (ArrayList<String>)(snippets).get(highlightedField);
-          assertEquals(1, docHighlights.size());
-          actualHighlightText = (String)docHighlights.get(0);
-          assertEquals(expectedHighlightText, actualHighlightText);
-        }
-      }
-    }
-    assertEquals(3, seenDocIds.size());
-  }
-
-}
