Index: lucene/src/java/org/apache/lucene/index/values/VarSortedBytesImpl.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/values/VarSortedBytesImpl.java	(revision 1182584)
+++ lucene/src/java/org/apache/lucene/index/values/VarSortedBytesImpl.java	(revision )
@@ -58,6 +58,7 @@
       super(dir, id, CODEC_NAME, VERSION_CURRENT, bytesUsed, context);
       this.comp = comp;
       size = 0;
+      this.optimizePackedForSpeed = true;
     }
     @Override
     public void merge(MergeState mergeState, IndexDocValues[] docValues)
@@ -120,7 +121,7 @@
       // total bytes of data
       idxOut.writeLong(maxBytes);
       PackedInts.Writer offsetWriter = PackedInts.getWriter(idxOut, count+1,
-          PackedInts.bitsRequired(maxBytes));
+          bitsRequired(maxBytes));
       // first dump bytes data, recording index & write offset as
       // we go
       final BytesRef spare = new BytesRef();
Index: modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java
===================================================================
--- modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java	(revision 1182584)
+++ modules/grouping/src/test/org/apache/lucene/search/grouping/AllGroupsCollectorTest.java	(revision )
@@ -18,19 +18,20 @@
  */
 
 import org.apache.lucene.analysis.MockAnalyzer;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.FieldType;
-import org.apache.lucene.document.TextField;
+import org.apache.lucene.document.*;
+import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.index.values.ValueType;
 import org.apache.lucene.queries.function.ValueSource;
 import org.apache.lucene.queries.function.valuesource.BytesRefFieldSource;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.grouping.function.FunctionAllGroupsCollector;
+import org.apache.lucene.search.grouping.idv.IDVAllGroupsCollector;
 import org.apache.lucene.search.grouping.term.TermAllGroupsCollector;
 import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
 
 import java.io.IOException;
@@ -46,27 +47,29 @@
 
     Directory dir = newDirectory();
     RandomIndexWriter w = new RandomIndexWriter(
-                               random,
-                               dir,
-                               newIndexWriterConfig(TEST_VERSION_CURRENT,
-                                                    new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+        random,
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT,
+            new MockAnalyzer(random)).setMergePolicy(newLogMergePolicy()));
+    boolean canUseIDV = !"PreFlex".equals(w.w.getConfig().getCodecProvider().getFieldCodec(groupField));
+
     // 0
     Document doc = new Document();
-    doc.add(new Field(groupField, "author1", TextField.TYPE_STORED));
+    addGroupField(doc, groupField, "author1", canUseIDV);
     doc.add(new Field("content", "random text", TextField.TYPE_STORED));
     doc.add(new Field("id", "1", customType));
     w.addDocument(doc);
 
     // 1
     doc = new Document();
-    doc.add(new Field(groupField, "author1", TextField.TYPE_STORED));
+    addGroupField(doc, groupField, "author1", canUseIDV);
     doc.add(new Field("content", "some more random text blob", TextField.TYPE_STORED));
     doc.add(new Field("id", "2", customType));
     w.addDocument(doc);
 
     // 2
     doc = new Document();
-    doc.add(new Field(groupField, "author1", TextField.TYPE_STORED));
+    addGroupField(doc, groupField, "author1", canUseIDV);
     doc.add(new Field("content", "some more random textual data", TextField.TYPE_STORED));
     doc.add(new Field("id", "3", customType));
     w.addDocument(doc);
@@ -74,21 +77,21 @@
 
     // 3
     doc = new Document();
-    doc.add(new Field(groupField, "author2", TextField.TYPE_STORED));
+    addGroupField(doc, groupField, "author2", canUseIDV);
     doc.add(new Field("content", "some random text", TextField.TYPE_STORED));
     doc.add(new Field("id", "4", customType));
     w.addDocument(doc);
 
     // 4
     doc = new Document();
-    doc.add(new Field(groupField, "author3", TextField.TYPE_STORED));
+    addGroupField(doc, groupField, "author3", canUseIDV);
     doc.add(new Field("content", "some more random text", TextField.TYPE_STORED));
     doc.add(new Field("id", "5", customType));
     w.addDocument(doc);
 
     // 5
     doc = new Document();
-    doc.add(new Field(groupField, "author3", TextField.TYPE_STORED));
+    addGroupField(doc, groupField, "author3", canUseIDV);
     doc.add(new Field("content", "random blob", TextField.TYPE_STORED));
     doc.add(new Field("id", "6", customType));
     w.addDocument(doc);
@@ -102,15 +105,15 @@
     IndexSearcher indexSearcher = new IndexSearcher(w.getReader());
     w.close();
 
-    AbstractAllGroupsCollector c1 = createRandomCollector(groupField);
+    AbstractAllGroupsCollector c1 = createRandomCollector(groupField, canUseIDV);
     indexSearcher.search(new TermQuery(new Term("content", "random")), c1);
     assertEquals(4, c1.getGroupCount());
 
-    AbstractAllGroupsCollector c2 = createRandomCollector(groupField);
+    AbstractAllGroupsCollector c2 = createRandomCollector(groupField, canUseIDV);
     indexSearcher.search(new TermQuery(new Term("content", "some")), c2);
     assertEquals(3, c2.getGroupCount());
 
-    AbstractAllGroupsCollector c3 = createRandomCollector(groupField);
+    AbstractAllGroupsCollector c3 = createRandomCollector(groupField, canUseIDV);
     indexSearcher.search(new TermQuery(new Term("content", "blob")), c3);
     assertEquals(2, c3.getGroupCount());
 
@@ -118,13 +121,32 @@
     dir.close();
   }
 
-  private AbstractAllGroupsCollector createRandomCollector(String groupField) throws IOException {
-    if (random.nextBoolean()) {
-      return new TermAllGroupsCollector(groupField);
+  private void addGroupField(Document doc, String groupField, String value, boolean canUseIDV) {
+    doc.add(new Field(groupField, value, TextField.TYPE_STORED));
+    if (canUseIDV) {
+      IndexDocValuesField valuesField = new IndexDocValuesField(groupField);
+      valuesField.setBytes(new BytesRef(value), ValueType.BYTES_VAR_SORTED);
+      doc.add(valuesField);
+    }
+  }
+
+  private AbstractAllGroupsCollector createRandomCollector(String groupField, boolean canUseIDV) throws IOException {
+    AbstractAllGroupsCollector selected;
+    if (random.nextBoolean() && canUseIDV) {
+      boolean diskResident = random.nextBoolean();
+      selected = IDVAllGroupsCollector.create(groupField, ValueType.BYTES_VAR_SORTED, diskResident);
+    } else if (random.nextBoolean()) {
+      selected = new TermAllGroupsCollector(groupField);
     } else {
       ValueSource vs = new BytesRefFieldSource(groupField);
-      return new FunctionAllGroupsCollector(vs, new HashMap());
+      selected = new FunctionAllGroupsCollector(vs, new HashMap());
     }
+
+    if (VERBOSE) {
+      System.out.println("Select implementation: " + selected.getClass().getName());
-  }
+    }
 
+    return selected;
-}
+  }
+
+}
Index: modules/grouping/src/java/org/apache/lucene/search/grouping/idv/IDVFirstPassGroupingCollector.java
===================================================================
--- modules/grouping/src/java/org/apache/lucene/search/grouping/idv/IDVFirstPassGroupingCollector.java	(revision )
+++ modules/grouping/src/java/org/apache/lucene/search/grouping/idv/IDVFirstPassGroupingCollector.java	(revision )
@@ -0,0 +1,184 @@
+package org.apache.lucene.search.grouping.idv;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.values.IndexDocValues;
+import org.apache.lucene.index.values.ValueType;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.grouping.AbstractFirstPassGroupingCollector;
+import org.apache.lucene.util.BytesRef;
+
+import java.io.IOException;
+
+/**
+ * IDV-based implementations of {@link AbstractFirstPassGroupingCollector}.
+ */
+public abstract class IDVFirstPassGroupingCollector<GROUP_VALUE_TYPE> extends AbstractFirstPassGroupingCollector<GROUP_VALUE_TYPE> {
+
+  final String groupField;
+  final boolean diskResident;
+
+  public static IDVFirstPassGroupingCollector create(Sort groupSort, int topNGroups, String groupField, ValueType type, boolean diskResident) throws IOException {
+    switch (type) {
+      case VAR_INTS:
+      case FIXED_INTS_8:
+      case FIXED_INTS_16:
+      case FIXED_INTS_32:
+      case FIXED_INTS_64:
+        return new Lng(groupSort, topNGroups, groupField, diskResident);
+      case FLOAT_32:
+      case FLOAT_64:
+        return new Dbl(groupSort, topNGroups, groupField, diskResident);
+      case BYTES_FIXED_STRAIGHT:
+      case BYTES_FIXED_DEREF:
+      case BYTES_VAR_STRAIGHT:
+      case BYTES_VAR_DEREF:
+        return new BR(groupSort, topNGroups, groupField, diskResident);
+      case BYTES_VAR_SORTED:
+      case BYTES_FIXED_SORTED:
+        return new SortedBR(groupSort, topNGroups, groupField, diskResident);
+      default:
+        throw new IllegalArgumentException(String.format("ValueType %s not supported", type));
+    }
+  }
+
+  IDVFirstPassGroupingCollector(Sort groupSort, int topNGroups, String groupField, boolean diskResident) throws IOException {
+    super(groupSort, topNGroups);
+    this.groupField = groupField;
+    this.diskResident = diskResident;
+  }
+
+  IndexDocValues.Source getSource(IndexReader ir) throws IOException {
+    return diskResident ? ir.perDocValues().docValues(groupField).getDirectSource() :
+        ir.perDocValues().docValues(groupField).getSource();
+  }
+
+
+  static class Lng extends IDVFirstPassGroupingCollector<Long> {
+
+    private IndexDocValues.Source source;
+
+    Lng(Sort groupSort, int topNGroups, String groupField, boolean diskResident) throws IOException {
+      super(groupSort, topNGroups, groupField, diskResident);
+    }
+
+    protected Long getDocGroupValue(int doc) {
+      return source.getInt(doc);
+    }
+
+    protected Long copyDocGroupValue(Long groupValue, Long reuse) {
+      return groupValue;
+    }
+
+    @Override
+    public void setNextReader(IndexReader.AtomicReaderContext readerContext) throws IOException {
+      super.setNextReader(readerContext);
+      source = getSource(readerContext.reader);
+    }
+  }
+
+  static class Dbl extends IDVFirstPassGroupingCollector<Double> {
+
+    private IndexDocValues.Source source;
+
+    Dbl(Sort groupSort, int topNGroups, String groupField, boolean diskResident) throws IOException {
+      super(groupSort, topNGroups, groupField, diskResident);
+    }
+
+    protected Double getDocGroupValue(int doc) {
+      return source.getFloat(doc);
+    }
+
+    protected Double copyDocGroupValue(Double groupValue, Double reuse) {
+      return groupValue;
+    }
+
+    @Override
+    public void setNextReader(IndexReader.AtomicReaderContext readerContext) throws IOException {
+      super.setNextReader(readerContext);
+      source = getSource(readerContext.reader);
+    }
+  }
+
+  static class BR extends IDVFirstPassGroupingCollector<BytesRef> {
+
+    private IndexDocValues.Source source;
+    private final BytesRef spare = new BytesRef();
+
+    BR(Sort groupSort, int topNGroups, String groupField, boolean diskResident) throws IOException {
+      super(groupSort, topNGroups, groupField, diskResident);
+    }
+
+    protected BytesRef getDocGroupValue(int doc) {
+      return source.getBytes(doc, spare);
+    }
+
+    protected BytesRef copyDocGroupValue(BytesRef groupValue, BytesRef reuse) {
+      if (groupValue == null) {
+        return null;
+      } else if (reuse != null) {
+        reuse.copy(groupValue);
+        return reuse;
+      } else {
+        return new BytesRef(groupValue);
+      }
+    }
+
+    @Override
+    public void setNextReader(IndexReader.AtomicReaderContext readerContext) throws IOException {
+      super.setNextReader(readerContext);
+      source = getSource(readerContext.reader);
+
+    }
+  }
+
+  static class SortedBR extends IDVFirstPassGroupingCollector<BytesRef> {
+
+    private IndexDocValues.SortedSource sortedSource;
+    private final BytesRef spare = new BytesRef();
+
+    SortedBR(Sort groupSort, int topNGroups, String groupField, boolean diskResident) throws IOException {
+      super(groupSort, topNGroups, groupField, diskResident);
+    }
+
+    @Override
+    protected BytesRef getDocGroupValue(int doc) {
+      return sortedSource.getBytes(doc, spare);
+    }
+
+    @Override
+    protected BytesRef copyDocGroupValue(BytesRef groupValue, BytesRef reuse) {
+      if (groupValue == null) {
+        return null;
+      } else if (reuse != null) {
+        reuse.copy(groupValue);
+        return reuse;
+      } else {
+        return new BytesRef(groupValue);
+      }
+    }
+
+    @Override
+    public void setNextReader(IndexReader.AtomicReaderContext readerContext) throws IOException {
+      super.setNextReader(readerContext);
+      sortedSource = getSource(readerContext.reader).asSortedSource();
+    }
+  }
+
+}
Index: lucene/src/java/org/apache/lucene/index/values/FixedSortedBytesImpl.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/values/FixedSortedBytesImpl.java	(revision 1182584)
+++ lucene/src/java/org/apache/lucene/index/values/FixedSortedBytesImpl.java	(revision )
@@ -56,6 +56,7 @@
         Counter bytesUsed, IOContext context) throws IOException {
       super(dir, id, CODEC_NAME, VERSION_CURRENT, bytesUsed, context);
       this.comp = comp;
+      this.optimizePackedForSpeed = true;
     }
 
     @Override
Index: lucene/src/java/org/apache/lucene/index/values/Bytes.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/values/Bytes.java	(revision 1182584)
+++ lucene/src/java/org/apache/lucene/index/values/Bytes.java	(revision )
@@ -393,6 +393,7 @@
     protected int lastDocId = -1;
     protected int[] docToEntry;
     protected final BytesRefHash hash;
+    protected boolean optimizePackedForSpeed = false;
     protected long maxBytes = 0;
     
     protected DerefBytesWriterBase(Directory dir, String id, String codecName,
@@ -506,8 +507,7 @@
     
     protected void writeIndex(IndexOutput idxOut, int docCount,
         long maxValue, int[] addresses, int[] toEntry) throws IOException {
-      final PackedInts.Writer w = PackedInts.getWriter(idxOut, docCount,
-          PackedInts.bitsRequired(maxValue));
+      final PackedInts.Writer w = PackedInts.getWriter(idxOut, docCount, bitsRequired(maxValue));
       final int limit = docCount > docToEntry.length ? docToEntry.length
           : docCount;
       assert toEntry.length >= limit -1;
@@ -530,8 +530,7 @@
     
     protected void writeIndex(IndexOutput idxOut, int docCount,
         long maxValue, long[] addresses, int[] toEntry) throws IOException {
-      final PackedInts.Writer w = PackedInts.getWriter(idxOut, docCount,
-          PackedInts.bitsRequired(maxValue));
+      final PackedInts.Writer w = PackedInts.getWriter(idxOut, docCount, bitsRequired(maxValue));
       final int limit = docCount > docToEntry.length ? docToEntry.length
           : docCount;
       assert toEntry.length >= limit -1;
@@ -551,9 +550,14 @@
       }
       w.finish();
     }
-    
+
+    protected int bitsRequired(long maxValue){
+      return optimizePackedForSpeed ?
+          PackedInts.getNextFixedSize(PackedInts.bitsRequired(maxValue)) : PackedInts.bitsRequired(maxValue);
-  }
-  
+    }
+    
+  }
+  
   static abstract class BytesSortedSourceBase extends SortedSource {
     private final PagedBytes pagedBytes;
     
Index: modules/grouping/src/java/org/apache/lucene/search/grouping/idv/IDVAllGroupsCollector.java
===================================================================
--- modules/grouping/src/java/org/apache/lucene/search/grouping/idv/IDVAllGroupsCollector.java	(revision )
+++ modules/grouping/src/java/org/apache/lucene/search/grouping/idv/IDVAllGroupsCollector.java	(revision )
@@ -0,0 +1,221 @@
+package org.apache.lucene.search.grouping.idv;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.values.IndexDocValues;
+import org.apache.lucene.index.values.ValueType;
+import org.apache.lucene.search.grouping.AbstractAllGroupsCollector;
+import org.apache.lucene.search.grouping.SentinelIntSet;
+import org.apache.lucene.util.BytesRef;
+
+import java.io.IOException;
+import java.util.*;
+
+/**
+ * Implementation of {@link AbstractAllGroupsCollector} that groups documents based on
+ * {@link IndexDocValues} fields.
+ */
+public abstract class IDVAllGroupsCollector<GROUP_VALUE_TYPE> extends AbstractAllGroupsCollector<GROUP_VALUE_TYPE> {
+
+  private static final int DEFAULT_INITIAL_SIZE = 128;
+
+  /**
+   * Expert: Constructs a {@link IDVAllGroupsCollector}.
+   * Selects and constructs the most optimal all groups collector implementation for grouping by {@link IndexDocValues}.
+   * 
+   *
+   * @param groupField  The field to group by
+   * @param type The {@link ValueType} which is used to select a concrete implementation.
+   * @param diskResident Whether the values to group by should be disk resident
+   * @param initialSize The initial allocation size of the
+   *                    internal int set and group list
+   *                    which should roughly match the total
+   *                    number of expected unique groups. Be aware that the
+   *                    heap usage is 4 bytes * initialSize. Not all concrete implementations use this!
+   * @return the most optimal all groups collector implementation for grouping by {@link IndexDocValues}
+   */
+  public static IDVAllGroupsCollector create(String groupField, ValueType type, boolean diskResident, int initialSize) {
+    switch (type) {
+      case VAR_INTS:
+      case FIXED_INTS_8:
+      case FIXED_INTS_16:
+      case FIXED_INTS_32:
+      case FIXED_INTS_64:
+        return new Lng(groupField, diskResident);
+      case FLOAT_32:
+      case FLOAT_64:
+        return new Dbl(groupField, diskResident);
+      case BYTES_FIXED_STRAIGHT:
+      case BYTES_FIXED_DEREF:
+      case BYTES_VAR_STRAIGHT:
+      case BYTES_VAR_DEREF:
+        return new BR(groupField, diskResident);
+      case BYTES_VAR_SORTED:
+      case BYTES_FIXED_SORTED:
+        return new SortedBR(groupField, diskResident, initialSize);
+      default:
+        throw new IllegalArgumentException(String.format("ValueType %s not supported", type));
+    }
+  }
+
+  /**
+   * Constructs a {@link IDVAllGroupsCollector}.
+   * Selects and constructs the most optimal all groups collector implementation for grouping by {@link IndexDocValues}.
+   * If implementations require an initial allocation size then this will be set to 128.
+   *
+   *
+   * @param groupField  The field to group by
+   * @param type The {@link ValueType} which is used to select a concrete implementation.
+   * @param diskResident Whether the values to group by should be disk resident
+   * @return the most optimal all groups collector implementation for grouping by {@link IndexDocValues}
+   */
+  public static IDVAllGroupsCollector create(String groupField, ValueType type, boolean diskResident) {
+    return create(groupField, type, diskResident, DEFAULT_INITIAL_SIZE);
+  }
+
+  final String groupField;
+  final boolean diskResident;
+  final Collection<GROUP_VALUE_TYPE> groups;
+
+  IDVAllGroupsCollector(String groupField, boolean diskResident, Collection<GROUP_VALUE_TYPE> groups) {
+    this.groupField = groupField;
+    this.diskResident = diskResident;
+    this.groups = groups;
+  }
+
+  IndexDocValues.Source getSource(IndexReader ir) throws IOException {
+    return diskResident ? ir.perDocValues().docValues(groupField).getDirectSource() :
+        ir.perDocValues().docValues(groupField).getSource();
+  }
+
+  static class Lng extends IDVAllGroupsCollector<Long> {
+
+    private IndexDocValues.Source source;
+
+    Lng(String groupField, boolean diskResident) {
+      super(groupField, diskResident, new TreeSet<Long>());
+    }
+
+    public void collect(int doc) throws IOException {
+      long value = source.getInt(doc);
+      if (!groups.contains(value)) {
+        groups.add(value);
+      }
+    }
+
+    public Collection<Long> getGroups() {
+      return groups;
+    }
+
+    public void setNextReader(IndexReader.AtomicReaderContext context) throws IOException {
+      source = getSource(context.reader);
+    }
+
+  }
+
+  static class Dbl extends IDVAllGroupsCollector<Double> {
+
+    private IndexDocValues.Source source;
+
+    Dbl(String groupField, boolean diskResident) {
+      super(groupField, diskResident, new TreeSet<Double>());
+    }
+
+    public void collect(int doc) throws IOException {
+      double value = source.getFloat(doc);
+      if (!groups.contains(value)) {
+        groups.add(value);
+      }
+    }
+
+    public Collection<Double> getGroups() {
+      return groups;
+    }
+
+    public void setNextReader(IndexReader.AtomicReaderContext context) throws IOException {
+      source = getSource(context.reader);
+    }
+
+  }
+
+  static class BR extends IDVAllGroupsCollector<BytesRef> {
+
+    private final BytesRef spare = new BytesRef();
+
+    private IndexDocValues.Source source;
+
+    BR(String groupField, boolean diskResident) {
+      super(groupField, diskResident, new TreeSet<BytesRef>());
+    }
+
+    public void collect(int doc) throws IOException {
+      BytesRef value = source.getBytes(doc, spare);
+      if (!groups.contains(value)) {
+        groups.add(new BytesRef(value));
+      }
+    }
+
+    public Collection<BytesRef> getGroups() {
+      return groups;
+    }
+
+    public void setNextReader(IndexReader.AtomicReaderContext context) throws IOException {
+      source = getSource(context.reader);
+    }
+
+  }
+
+  static class SortedBR extends IDVAllGroupsCollector<BytesRef> {
+
+    private final SentinelIntSet ordSet;
+    private final BytesRef spare = new BytesRef();
+
+    private IndexDocValues.SortedSource source;
+
+    SortedBR(String groupField, boolean diskResident, int initialSize) {
+      super(groupField, diskResident, new ArrayList<BytesRef>(initialSize));
+      ordSet = new SentinelIntSet(initialSize, -1);
+    }
+
+    public void collect(int doc) throws IOException {
+      int ord = source.ord(doc);
+      if (!ordSet.exists(ord)) {
+        ordSet.put(ord);
+        BytesRef value = source.getBytes(doc, new BytesRef());
+        groups.add(value);
+      }
+    }
+
+    public Collection<BytesRef> getGroups() {
+      return groups;
+    }
+
+    public void setNextReader(IndexReader.AtomicReaderContext context) throws IOException {
+      source = getSource(context.reader).asSortedSource();
+      ordSet.clear();
+      for (BytesRef countedGroup : groups) {
+        int ord = source.getByValue(countedGroup, spare);
+        if (ord >= 0) {
+          ordSet.put(ord);
+        }
+      }
+    }
+  }
+
+}
