Index: lucene/src/java/org/apache/lucene/index/values/VarSortedBytesImpl.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/values/VarSortedBytesImpl.java	(revision 1181668)
+++ lucene/src/java/org/apache/lucene/index/values/VarSortedBytesImpl.java	(revision )
@@ -53,6 +53,7 @@
       super(dir, id, CODEC_NAME, VERSION_CURRENT, bytesUsed, context);
       this.comp = comp;
       size = 0;
+      this.optimizePackedForSpeed = true;
     }
 
     @Override
@@ -90,7 +91,7 @@
       writeIndex(idxOut, docCount, count, index, docToEntry);
       // next ord (0-based) -> offset
       PackedInts.Writer offsetWriter = PackedInts.getWriter(idxOut, count+1,
-          PackedInts.bitsRequired(offset));
+          bitsRequired(offset));
       for (int i = 0; i < count; i++) {
         offsetWriter.add(offsets[i]);
       }
Index: modules/grouping/src/java/org/apache/lucene/search/grouping/idv/IDVFirstPassGroupingCollector.java
===================================================================
--- modules/grouping/src/java/org/apache/lucene/search/grouping/idv/IDVFirstPassGroupingCollector.java	(revision )
+++ modules/grouping/src/java/org/apache/lucene/search/grouping/idv/IDVFirstPassGroupingCollector.java	(revision )
@@ -0,0 +1,184 @@
+package org.apache.lucene.search.grouping.idv;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.values.IndexDocValues;
+import org.apache.lucene.index.values.ValueType;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.grouping.AbstractFirstPassGroupingCollector;
+import org.apache.lucene.util.BytesRef;
+
+import java.io.IOException;
+
+/**
+ * IDV-based implementations of {@link AbstractFirstPassGroupingCollector}.
+ */
+public abstract class IDVFirstPassGroupingCollector<GROUP_VALUE_TYPE> extends AbstractFirstPassGroupingCollector<GROUP_VALUE_TYPE> {
+
+  final String groupField;
+  final boolean diskResident;
+
+  public static IDVFirstPassGroupingCollector create(Sort groupSort, int topNGroups, String groupField, ValueType type, boolean diskResident) throws IOException {
+    switch (type) {
+      case VAR_INTS:
+      case FIXED_INTS_8:
+      case FIXED_INTS_16:
+      case FIXED_INTS_32:
+      case FIXED_INTS_64:
+        return new Lng(groupSort, topNGroups, groupField, diskResident);
+      case FLOAT_32:
+      case FLOAT_64:
+        return new Dbl(groupSort, topNGroups, groupField, diskResident);
+      case BYTES_FIXED_STRAIGHT:
+      case BYTES_FIXED_DEREF:
+      case BYTES_VAR_STRAIGHT:
+      case BYTES_VAR_DEREF:
+        return new BR(groupSort, topNGroups, groupField, diskResident);
+      case BYTES_VAR_SORTED:
+      case BYTES_FIXED_SORTED:
+        return new SortedBR(groupSort, topNGroups, groupField, diskResident);
+      default:
+        throw new IllegalArgumentException(String.format("ValueType %s not supported", type));
+    }
+  }
+
+  IDVFirstPassGroupingCollector(Sort groupSort, int topNGroups, String groupField, boolean diskResident) throws IOException {
+    super(groupSort, topNGroups);
+    this.groupField = groupField;
+    this.diskResident = diskResident;
+  }
+
+  IndexDocValues.Source getSource(IndexReader ir) throws IOException {
+    return diskResident ? ir.perDocValues().docValues(groupField).getDirectSource() :
+        ir.perDocValues().docValues(groupField).getSource();
+  }
+
+
+  static class Lng extends IDVFirstPassGroupingCollector<Long> {
+
+    private IndexDocValues.Source source;
+
+    Lng(Sort groupSort, int topNGroups, String groupField, boolean diskResident) throws IOException {
+      super(groupSort, topNGroups, groupField, diskResident);
+    }
+
+    protected Long getDocGroupValue(int doc) {
+      return source.getInt(doc);
+    }
+
+    protected Long copyDocGroupValue(Long groupValue, Long reuse) {
+      return groupValue;
+    }
+
+    @Override
+    public void setNextReader(IndexReader.AtomicReaderContext readerContext) throws IOException {
+      super.setNextReader(readerContext);
+      source = getSource(readerContext.reader);
+    }
+  }
+
+  static class Dbl extends IDVFirstPassGroupingCollector<Double> {
+
+    private IndexDocValues.Source source;
+
+    Dbl(Sort groupSort, int topNGroups, String groupField, boolean diskResident) throws IOException {
+      super(groupSort, topNGroups, groupField, diskResident);
+    }
+
+    protected Double getDocGroupValue(int doc) {
+      return source.getFloat(doc);
+    }
+
+    protected Double copyDocGroupValue(Double groupValue, Double reuse) {
+      return groupValue;
+    }
+
+    @Override
+    public void setNextReader(IndexReader.AtomicReaderContext readerContext) throws IOException {
+      super.setNextReader(readerContext);
+      source = getSource(readerContext.reader);
+    }
+  }
+
+  static class BR extends IDVFirstPassGroupingCollector<BytesRef> {
+
+    private IndexDocValues.Source source;
+    private final BytesRef spare = new BytesRef();
+
+    BR(Sort groupSort, int topNGroups, String groupField, boolean diskResident) throws IOException {
+      super(groupSort, topNGroups, groupField, diskResident);
+    }
+
+    protected BytesRef getDocGroupValue(int doc) {
+      return source.getBytes(doc, spare);
+    }
+
+    protected BytesRef copyDocGroupValue(BytesRef groupValue, BytesRef reuse) {
+      if (groupValue == null) {
+        return null;
+      } else if (reuse != null) {
+        reuse.copy(groupValue);
+        return reuse;
+      } else {
+        return new BytesRef(groupValue);
+      }
+    }
+
+    @Override
+    public void setNextReader(IndexReader.AtomicReaderContext readerContext) throws IOException {
+      super.setNextReader(readerContext);
+      source = getSource(readerContext.reader);
+
+    }
+  }
+
+  static class SortedBR extends IDVFirstPassGroupingCollector<BytesRef> {
+
+    private IndexDocValues.SortedSource sortedSource;
+    private final BytesRef spare = new BytesRef();
+
+    SortedBR(Sort groupSort, int topNGroups, String groupField, boolean diskResident) throws IOException {
+      super(groupSort, topNGroups, groupField, diskResident);
+    }
+
+    @Override
+    protected BytesRef getDocGroupValue(int doc) {
+      return sortedSource.getBytes(doc, spare);
+    }
+
+    @Override
+    protected BytesRef copyDocGroupValue(BytesRef groupValue, BytesRef reuse) {
+      if (groupValue == null) {
+        return null;
+      } else if (reuse != null) {
+        reuse.copy(groupValue);
+        return reuse;
+      } else {
+        return new BytesRef(groupValue);
+      }
+    }
+
+    @Override
+    public void setNextReader(IndexReader.AtomicReaderContext readerContext) throws IOException {
+      super.setNextReader(readerContext);
+      sortedSource = getSource(readerContext.reader).asSortedSource();
+    }
+  }
+
+}
Index: lucene/src/java/org/apache/lucene/index/values/FixedSortedBytesImpl.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/values/FixedSortedBytesImpl.java	(revision 1181668)
+++ lucene/src/java/org/apache/lucene/index/values/FixedSortedBytesImpl.java	(revision )
@@ -51,6 +51,7 @@
         Counter bytesUsed, IOContext context) throws IOException {
       super(dir, id, CODEC_NAME, VERSION_CURRENT, bytesUsed, context);
       this.comp = comp;
+      this.optimizePackedForSpeed = true;
     }
 
     // Important that we get docCount, in case there were
Index: lucene/src/java/org/apache/lucene/index/values/Bytes.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/values/Bytes.java	(revision 1181668)
+++ lucene/src/java/org/apache/lucene/index/values/Bytes.java	(revision )
@@ -393,6 +393,7 @@
     protected int lastDocId = -1;
     protected int[] docToEntry;
     protected final BytesRefHash hash;
+    protected boolean optimizePackedForSpeed = false;
     
     protected DerefBytesWriterBase(Directory dir, String id, String codecName,
         int codecVersion, Counter bytesUsed, IOContext context)
@@ -502,8 +503,7 @@
     
     protected void writeIndex(IndexOutput idxOut, int docCount,
         long maxValue, int[] addresses, int[] toEntry) throws IOException {
-      final PackedInts.Writer w = PackedInts.getWriter(idxOut, docCount,
-          PackedInts.bitsRequired(maxValue));
+      final PackedInts.Writer w = PackedInts.getWriter(idxOut, docCount, bitsRequired(maxValue));
       final int limit = docCount > docToEntry.length ? docToEntry.length
           : docCount;
       assert toEntry.length >= limit -1;
@@ -526,8 +526,7 @@
     
     protected void writeIndex(IndexOutput idxOut, int docCount,
         long maxValue, long[] addresses, int[] toEntry) throws IOException {
-      final PackedInts.Writer w = PackedInts.getWriter(idxOut, docCount,
-          PackedInts.bitsRequired(maxValue));
+      final PackedInts.Writer w = PackedInts.getWriter(idxOut, docCount, bitsRequired(maxValue));
       final int limit = docCount > docToEntry.length ? docToEntry.length
           : docCount;
       assert toEntry.length >= limit -1;
@@ -548,8 +547,13 @@
       w.finish();
     }
     
+    protected int bitsRequired(long maxValue){
+      return optimizePackedForSpeed ?
+          PackedInts.getNextFixedSize(PackedInts.bitsRequired(maxValue)) : PackedInts.bitsRequired(maxValue);
-  }
-  
+    }
+    
+  }
+  
   static abstract class BytesSortedSourceBase extends SortedSource {
     private final PagedBytes pagedBytes;
     
Index: modules/grouping/src/java/org/apache/lucene/search/grouping/idv/IDVAllGroupsCollector.java
===================================================================
--- modules/grouping/src/java/org/apache/lucene/search/grouping/idv/IDVAllGroupsCollector.java	(revision )
+++ modules/grouping/src/java/org/apache/lucene/search/grouping/idv/IDVAllGroupsCollector.java	(revision )
@@ -0,0 +1,221 @@
+package org.apache.lucene.search.grouping.idv;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.values.IndexDocValues;
+import org.apache.lucene.index.values.ValueType;
+import org.apache.lucene.search.grouping.AbstractAllGroupsCollector;
+import org.apache.lucene.search.grouping.SentinelIntSet;
+import org.apache.lucene.util.BytesRef;
+
+import java.io.IOException;
+import java.util.*;
+
+/**
+ * Implementation of {@link AbstractAllGroupsCollector} that groups documents based on
+ * {@link IndexDocValues} fields.
+ */
+public abstract class IDVAllGroupsCollector<GROUP_VALUE_TYPE> extends AbstractAllGroupsCollector<GROUP_VALUE_TYPE> {
+
+  private static final int DEFAULT_INITIAL_SIZE = 128;
+
+  /**
+   * Expert: Constructs a {@link IDVAllGroupsCollector}.
+   * Selects and constructs the most optimal all groups collector implementation for grouping by {@link IndexDocValues}.
+   * 
+   *
+   * @param groupField  The field to group by
+   * @param type The {@link ValueType} which is used to select a concrete implementation.
+   * @param diskResident Whether the values to group by should be disk resident
+   * @param initialSize The initial allocation size of the
+   *                    internal int set and group list
+   *                    which should roughly match the total
+   *                    number of expected unique groups. Be aware that the
+   *                    heap usage is 4 bytes * initialSize. Not all concrete implementations use this!
+   * @return the most optimal all groups collector implementation for grouping by {@link IndexDocValues}
+   */
+  public static IDVAllGroupsCollector create(String groupField, ValueType type, boolean diskResident, int initialSize) {
+    switch (type) {
+      case VAR_INTS:
+      case FIXED_INTS_8:
+      case FIXED_INTS_16:
+      case FIXED_INTS_32:
+      case FIXED_INTS_64:
+        return new Lng(groupField, diskResident);
+      case FLOAT_32:
+      case FLOAT_64:
+        return new Dbl(groupField, diskResident);
+      case BYTES_FIXED_STRAIGHT:
+      case BYTES_FIXED_DEREF:
+      case BYTES_VAR_STRAIGHT:
+      case BYTES_VAR_DEREF:
+        return new BR(groupField, diskResident);
+      case BYTES_VAR_SORTED:
+      case BYTES_FIXED_SORTED:
+        return new SortedBR(groupField, diskResident, initialSize);
+      default:
+        throw new IllegalArgumentException(String.format("ValueType %s not supported", type));
+    }
+  }
+
+  /**
+   * Constructs a {@link IDVAllGroupsCollector}.
+   * Selects and constructs the most optimal all groups collector implementation for grouping by {@link IndexDocValues}.
+   * If implementations require an initial allocation size then this will be set to 128.
+   *
+   *
+   * @param groupField  The field to group by
+   * @param type The {@link ValueType} which is used to select a concrete implementation.
+   * @param diskResident Whether the values to group by should be disk resident
+   * @return the most optimal all groups collector implementation for grouping by {@link IndexDocValues}
+   */
+  public static IDVAllGroupsCollector create(String groupField, ValueType type, boolean diskResident) {
+    return create(groupField, type, diskResident, DEFAULT_INITIAL_SIZE);
+  }
+
+  final String groupField;
+  final boolean diskResident;
+  final Collection<GROUP_VALUE_TYPE> groups;
+
+  IDVAllGroupsCollector(String groupField, boolean diskResident, Collection<GROUP_VALUE_TYPE> groups) {
+    this.groupField = groupField;
+    this.diskResident = diskResident;
+    this.groups = groups;
+  }
+
+  IndexDocValues.Source getSource(IndexReader ir) throws IOException {
+    return diskResident ? ir.perDocValues().docValues(groupField).getDirectSource() :
+        ir.perDocValues().docValues(groupField).getSource();
+  }
+
+  static class Lng extends IDVAllGroupsCollector<Long> {
+
+    private IndexDocValues.Source source;
+
+    Lng(String groupField, boolean diskResident) {
+      super(groupField, diskResident, new TreeSet<Long>());
+    }
+
+    public void collect(int doc) throws IOException {
+      long value = source.getInt(doc);
+      if (!groups.contains(value)) {
+        groups.add(value);
+      }
+    }
+
+    public Collection<Long> getGroups() {
+      return groups;
+    }
+
+    public void setNextReader(IndexReader.AtomicReaderContext context) throws IOException {
+      source = getSource(context.reader);
+    }
+
+  }
+
+  static class Dbl extends IDVAllGroupsCollector<Double> {
+
+    private IndexDocValues.Source source;
+
+    Dbl(String groupField, boolean diskResident) {
+      super(groupField, diskResident, new TreeSet<Double>());
+    }
+
+    public void collect(int doc) throws IOException {
+      double value = source.getFloat(doc);
+      if (!groups.contains(value)) {
+        groups.add(value);
+      }
+    }
+
+    public Collection<Double> getGroups() {
+      return groups;
+    }
+
+    public void setNextReader(IndexReader.AtomicReaderContext context) throws IOException {
+      source = getSource(context.reader);
+    }
+
+  }
+
+  static class BR extends IDVAllGroupsCollector<BytesRef> {
+
+    private final BytesRef spare = new BytesRef();
+
+    private IndexDocValues.Source source;
+
+    BR(String groupField, boolean diskResident) {
+      super(groupField, diskResident, new TreeSet<BytesRef>());
+    }
+
+    public void collect(int doc) throws IOException {
+      BytesRef value = source.getBytes(doc, spare);
+      if (!groups.contains(value)) {
+        groups.add(new BytesRef(value));
+      }
+    }
+
+    public Collection<BytesRef> getGroups() {
+      return groups;
+    }
+
+    public void setNextReader(IndexReader.AtomicReaderContext context) throws IOException {
+      source = getSource(context.reader);
+    }
+
+  }
+
+  static class SortedBR extends IDVAllGroupsCollector<BytesRef> {
+
+    private final SentinelIntSet ordSet;
+    private final BytesRef spare = new BytesRef();
+
+    private IndexDocValues.SortedSource source;
+
+    SortedBR(String groupField, boolean diskResident, int initialSize) {
+      super(groupField, diskResident, new ArrayList<BytesRef>(initialSize));
+      ordSet = new SentinelIntSet(initialSize, -1);
+    }
+
+    public void collect(int doc) throws IOException {
+      int ord = source.ord(doc);
+      if (!ordSet.exists(ord)) {
+        ordSet.put(ord);
+        BytesRef value = source.getBytes(doc, new BytesRef());
+        groups.add(value);
+      }
+    }
+
+    public Collection<BytesRef> getGroups() {
+      return groups;
+    }
+
+    public void setNextReader(IndexReader.AtomicReaderContext context) throws IOException {
+      source = getSource(context.reader).asSortedSource();
+      ordSet.clear();
+      for (BytesRef countedGroup : groups) {
+        int ord = source.getByValue(countedGroup, spare);
+        if (ord >= 0) {
+          ordSet.put(ord);
+        }
+      }
+    }
+  }
+
+}
