Index: src/test/org/apache/lucene/search/vectorhighlight/FastVectorHighlighterTest.java
===================================================================
--- src/test/org/apache/lucene/search/vectorhighlight/FastVectorHighlighterTest.java	(revision 1534731)
+++ src/test/org/apache/lucene/search/vectorhighlight/FastVectorHighlighterTest.java	(working copy)
@@ -412,6 +412,54 @@
       clause( "field_der_red", "red" ), clause( "field_der_red", "der" ), clause( "field_exact", "a", "cat" ) );
   }
 
+  public void testMultiValuedSortByScore() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter writer = new IndexWriter( dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer( random() ) ) );
+    Document doc = new Document();
+    FieldType type = new FieldType( TextField.TYPE_STORED );
+    type.setStoreTermVectorOffsets( true );
+    type.setStoreTermVectorPositions( true );
+    type.setStoreTermVectors( true );
+    type.freeze();
+    doc.add( new Field( "field", "zero if naught", type ) ); // The first two fields contain the best match
+    doc.add( new Field( "field", "hero of legend", type ) ); // but total a lower score (3) than the bottom
+    doc.add( new Field( "field", "naught of hero", type ) ); // two fields (4)
+    doc.add( new Field( "field", "naught of hero", type ) );
+    writer.addDocument(doc);
+
+    FastVectorHighlighter highlighter = new FastVectorHighlighter();
+    
+    ScoreOrderFragmentsBuilder fragmentsBuilder = new ScoreOrderFragmentsBuilder();    
+    fragmentsBuilder.setDiscreteMultiValueHighlighting( true );
+    IndexReader reader = DirectoryReader.open(writer, true );
+    String[] preTags = new String[] { "<b>" };
+    String[] postTags = new String[] { "</b>" };
+    Encoder encoder = new DefaultEncoder();
+    int docId = 0;
+    BooleanQuery query = new BooleanQuery();
+    query.add( clause( "field", "hero" ), Occur.SHOULD);
+    query.add( clause( "field", "of" ), Occur.SHOULD);
+    query.add( clause( "field", "legend" ), Occur.SHOULD);
+    FieldQuery fieldQuery = highlighter.getFieldQuery( query, reader );
+
+    for ( FragListBuilder fragListBuilder : new FragListBuilder[] {
+      new SimpleFragListBuilder(), new WeightedFragListBuilder() } ) {
+      String[] bestFragments = highlighter.getBestFragments( fieldQuery, reader, docId, "field", 20, 1,
+          fragListBuilder, fragmentsBuilder, preTags, postTags, encoder );
+      assertEquals("<b>hero</b> <b>of</b> <b>legend</b>", bestFragments[0]);
+      bestFragments = highlighter.getBestFragments( fieldQuery, reader, docId, "field", 28, 1,
+          fragListBuilder, fragmentsBuilder, preTags, postTags, encoder );
+      assertEquals("<b>hero</b> <b>of</b> <b>legend</b>", bestFragments[0]);
+      bestFragments = highlighter.getBestFragments( fieldQuery, reader, docId, "field", 30000, 1,
+          fragListBuilder, fragmentsBuilder, preTags, postTags, encoder );
+      assertEquals("<b>hero</b> <b>of</b> <b>legend</b>", bestFragments[0]);
+    }
+
+    reader.close();
+    writer.close();
+    dir.close();
+  }
+
   private void matchedFieldsTestCase( String fieldValue, String expected, Query... queryClauses ) throws IOException {
     matchedFieldsTestCase( true, true, fieldValue, expected, queryClauses );
   }
Index: src/test/org/apache/lucene/search/vectorhighlight/WeightedFragListBuilderTest.java
===================================================================
--- src/test/org/apache/lucene/search/vectorhighlight/WeightedFragListBuilderTest.java	(revision 1534731)
+++ src/test/org/apache/lucene/search/vectorhighlight/WeightedFragListBuilderTest.java	(working copy)
@@ -17,6 +17,9 @@
  * limitations under the License.
  */
 
+import org.apache.lucene.search.vectorhighlight.FieldFragList.WeightedFragInfo;
+import org.apache.lucene.search.vectorhighlight.FieldFragList.WeightedFragInfo.SubInfo;
+
 public class WeightedFragListBuilderTest extends AbstractTestCase {
   
   public void test2WeightedFragList() throws Exception {
@@ -29,7 +32,15 @@
     WeightedFragListBuilder wflb = new WeightedFragListBuilder();
     FieldFragList ffl = wflb.createFieldFragList( fpl, 100 );
     assertEquals( 1, ffl.getFragInfos().size() );
-    assertEquals( "subInfos=(theboth((195,203)))/0.86791086(149,249)", ffl.getFragInfos().get( 0 ).toString() );
+    assertEquals( "subInfos=(theboth((195,203)))/0.8679108(149,249)", ffl.getFragInfos().get( 0 ).toString() );
+
+    float totalSubInfoBoost = 0;
+    for ( WeightedFragInfo info : ffl.getFragInfos() ) {
+      for ( SubInfo subInfo : info.getSubInfos() ) {
+        totalSubInfoBoost += subInfo.getBoost();
+      }
+    }
+    assertEquals( 0.8679108, totalSubInfoBoost, .0000001 );
   }
 
 }
Index: src/java/org/apache/lucene/search/vectorhighlight/SimpleFieldFragList.java
===================================================================
--- src/java/org/apache/lucene/search/vectorhighlight/SimpleFieldFragList.java	(revision 1534731)
+++ src/java/org/apache/lucene/search/vectorhighlight/SimpleFieldFragList.java	(working copy)
@@ -45,7 +45,7 @@
     float totalBoost = 0;
     List<SubInfo> subInfos = new ArrayList<SubInfo>();
     for( WeightedPhraseInfo phraseInfo : phraseInfoList ){
-      subInfos.add( new SubInfo( phraseInfo.getText(), phraseInfo.getTermsOffsets(), phraseInfo.getSeqnum() ) );
+      subInfos.add( new SubInfo( phraseInfo.getText(), phraseInfo.getTermsOffsets(), phraseInfo.getSeqnum(), phraseInfo.getBoost() ) );
       totalBoost += phraseInfo.getBoost();
     }
     getFragInfos().add( new WeightedFragInfo( startOffset, endOffset, subInfos, totalBoost ) );
Index: src/java/org/apache/lucene/search/vectorhighlight/FieldFragList.java
===================================================================
--- src/java/org/apache/lucene/search/vectorhighlight/FieldFragList.java	(revision 1534731)
+++ src/java/org/apache/lucene/search/vectorhighlight/FieldFragList.java	(working copy)
@@ -107,12 +107,14 @@
       private final String text;  // unnecessary member, just exists for debugging purpose
       private final List<Toffs> termsOffsets;   // usually termsOffsets.size() == 1,
                               // but if position-gap > 1 and slop > 0 then size() could be greater than 1
-      private int seqnum;
+      private final int seqnum;
+      private final float boost; // used for scoring split WeightedPhraseInfos.
 
-      public SubInfo( String text, List<Toffs> termsOffsets, int seqnum ){
+      public SubInfo( String text, List<Toffs> termsOffsets, int seqnum, float boost ){
         this.text = text;
         this.termsOffsets = termsOffsets;
         this.seqnum = seqnum;
+        this.boost = boost;
       }
       
       public List<Toffs> getTermsOffsets(){
@@ -127,6 +129,10 @@
         return text;
       }
 
+      public float getBoost(){
+        return boost;
+      }
+
       @Override
       public String toString(){
         StringBuilder sb = new StringBuilder();
Index: src/java/org/apache/lucene/search/vectorhighlight/BaseFragmentsBuilder.java
===================================================================
--- src/java/org/apache/lucene/search/vectorhighlight/BaseFragmentsBuilder.java	(revision 1534731)
+++ src/java/org/apache/lucene/search/vectorhighlight/BaseFragmentsBuilder.java	(working copy)
@@ -258,9 +258,8 @@
 
 
         List<SubInfo> subInfos = new ArrayList<SubInfo>();
-        WeightedFragInfo weightedFragInfo = new WeightedFragInfo(fragStart, fragEnd, subInfos, fragInfo.getTotalBoost());
-
         Iterator<SubInfo> subInfoIterator = fragInfo.getSubInfos().iterator();
+        float boost = 0.0f;  //  The boost of the new info will be the sum of the boosts of its SubInfos
         while (subInfoIterator.hasNext()) {
           SubInfo subInfo = subInfoIterator.next();
           List<Toffs> toffsList = new ArrayList<Toffs>();
@@ -268,18 +267,21 @@
           while (toffsIterator.hasNext()) {
             Toffs toffs = toffsIterator.next();
             if (toffs.getStartOffset() >= fieldStart && toffs.getEndOffset() <= fieldEnd) {
+
               toffsList.add(toffs);
               toffsIterator.remove();
             }
           }
           if (!toffsList.isEmpty()) {
-            subInfos.add(new SubInfo(subInfo.getText(), toffsList, subInfo.getSeqnum()));
+            subInfos.add(new SubInfo(subInfo.getText(), toffsList, subInfo.getSeqnum(), subInfo.getBoost()));
+            boost += subInfo.getBoost();
           }
 
           if (subInfo.getTermsOffsets().isEmpty()) {
             subInfoIterator.remove();
           }
         }
+        WeightedFragInfo weightedFragInfo = new WeightedFragInfo(fragStart, fragEnd, subInfos, boost);
         fieldNameToFragInfos.get(field.name()).add(weightedFragInfo);
       }
     }
Index: src/java/org/apache/lucene/search/vectorhighlight/WeightedFieldFragList.java
===================================================================
--- src/java/org/apache/lucene/search/vectorhighlight/WeightedFieldFragList.java	(revision 1534731)
+++ src/java/org/apache/lucene/search/vectorhighlight/WeightedFieldFragList.java	(working copy)
@@ -44,33 +44,37 @@
    */ 
   @Override
   public void add( int startOffset, int endOffset, List<WeightedPhraseInfo> phraseInfoList ) {
-    
-    float totalBoost = 0;
-    
-    List<SubInfo> subInfos = new ArrayList<SubInfo>();
-    
-    HashSet<String> distinctTerms = new HashSet<String>();
-    
+    List<SubInfo> tempSubInfos = new ArrayList<SubInfo>();
+    List<SubInfo> realSubInfos = new ArrayList<SubInfo>();
+    HashSet<String> distinctTerms = new HashSet<String>();   
     int length = 0;
 
     for( WeightedPhraseInfo phraseInfo : phraseInfoList ){
-      
-      subInfos.add( new SubInfo( phraseInfo.getText(), phraseInfo.getTermsOffsets(), phraseInfo.getSeqnum() ) );
-      
+      float phraseTotalBoost = 0;
       for ( TermInfo ti :  phraseInfo.getTermsInfos()) {
         if ( distinctTerms.add( ti.getText() ) )
-          totalBoost += ti.getWeight() * phraseInfo.getBoost();
+          phraseTotalBoost += ti.getWeight() * phraseInfo.getBoost();
         length++;
       }
+      tempSubInfos.add( new SubInfo( phraseInfo.getText(), phraseInfo.getTermsOffsets(),
+        phraseInfo.getSeqnum(), phraseTotalBoost ) );
     }
     
     // We want that terms per fragment (length) is included into the weight. Otherwise a one-word-query
     // would cause an equal weight for all fragments regardless of how much words they contain.  
     // To avoid that fragments containing a high number of words possibly "outrank" more relevant fragments
-    // we "bend" the length with a standard-normalization a little bit.  
-    totalBoost *= length * ( 1 / Math.sqrt( length ) );
-    
-    getFragInfos().add( new WeightedFragInfo( startOffset, endOffset, subInfos, totalBoost ) );
+    // we "bend" the length with a standard-normalization a little bit.
+    float norm = length * ( 1 / (float)Math.sqrt( length ) );
+
+    float totalBoost = 0;
+    for ( SubInfo tempSubInfo : tempSubInfos ) {
+      float subInfoBoost = tempSubInfo.getBoost() * norm;
+      realSubInfos.add( new SubInfo( tempSubInfo.getText(), tempSubInfo.getTermsOffsets(),
+        tempSubInfo.getSeqnum(), subInfoBoost ));
+      totalBoost += subInfoBoost;
+    }
+
+    getFragInfos().add( new WeightedFragInfo( startOffset, endOffset, realSubInfos, totalBoost ) );
   }
   
 }
\ No newline at end of file
