Index: lucene/src/test/org/apache/lucene/index/TestCodecs.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestCodecs.java	(revision 1187902)
+++ lucene/src/test/org/apache/lucene/index/TestCodecs.java	(revision )
@@ -615,7 +615,7 @@
     final int termIndexInterval = _TestUtil.nextInt(random, 13, 27);
     final SegmentCodecs codecInfo = fieldInfos.buildSegmentCodecs(false);
-    final SegmentWriteState state = new SegmentWriteState(null, dir, SEGMENT, fieldInfos, 10000, termIndexInterval, codecInfo, null, newIOContext(random));
+    final SegmentWriteState state = new SegmentWriteState(null, dir, SEGMENT, fieldInfos, 10000, termIndexInterval, codecInfo, null, newIOContext(random), newIndexWriterConfig(Version.LUCENE_40, new MockAnalyzer(random)));
     final FieldsConsumer consumer = state.segmentCodecs.codec().fieldsConsumer(state);
     Arrays.sort(fields);
Index: lucene/src/test/org/apache/lucene/index/values/TestDocValues.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/values/TestDocValues.java	(revision 1187902)
+++ lucene/src/test/org/apache/lucene/index/values/TestDocValues.java	(revision )
@@ -56,7 +56,7 @@
     Directory dir = newDirectory();
     final Counter trackBytes = Counter.newCounter();
-    Writer w = Bytes.getWriter(dir, "test", mode, fixedSize, COMP, trackBytes, newIOContext(random));
+    Writer w = Bytes.getWriter(dir, "test", mode, fixedSize, COMP, trackBytes, newIOContext(random), random.nextBoolean());
     int maxDoc = 220;
     final String[] values = new String[maxDoc];
     final int fixedLength = 1 + atLeast(50);
Index: lucene/src/java/org/apache/lucene/index/IndexWriterConfig.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/IndexWriterConfig.java	(revision 1187902)
+++ lucene/src/java/org/apache/lucene/index/IndexWriterConfig.java	(revision )
@@ -128,6 +128,7 @@
   private volatile int readerTermsIndexDivisor;
   private volatile FlushPolicy flushPolicy;
   private volatile int perThreadHardLimitMB;
+  private volatile boolean optimizeBytesDocValuesForSpeed = true;
 
   private Version matchVersion;
 
@@ -670,7 +671,30 @@
   public int getRAMPerThreadHardLimitMB() {
     return perThreadHardLimitMB;
   }
+
+  /**
+   * Sets whether indexed docvalues of type bytes should be optimized for speed by rounding up the bytes used for a value
+   * to either 8, 16, 32 or 64 bytes. A change to this option only affects new documents added to the index.
+   *
+   * @param optimizeBytesDocValuesForSpeed whether indexed docvalues should be optimized for speed by
+   *          rounding up the bytes used for a value
+   * @return this
+   */
+  public IndexWriterConfig setOptimizeBytesDocValuesForSpeed(boolean optimizeBytesDocValuesForSpeed) {
+    this.optimizeBytesDocValuesForSpeed = optimizeBytesDocValuesForSpeed;
+    return this;
+  }
+
+  /**
+   * @see #setOptimizeBytesDocValuesForSpeed(boolean)
+   *
+   * @return whether indexed docvalues of type bytes should be optimized for speed by rounding up the bytes used for a value
+   */
+  public boolean isOptimizeBytesDocValuesForSpeed() {
+    return optimizeBytesDocValuesForSpeed;
+  }
+
   /**
    * @see #setFlushPolicy(FlushPolicy)
    */
   public FlushPolicy getFlushPolicy() {
@@ -701,6 +725,7 @@
     sb.append("readerTermsIndexDivisor=").append(readerTermsIndexDivisor).append("\n");
     sb.append("flushPolicy=").append(flushPolicy).append("\n");
     sb.append("perThreadHardLimitMB=").append(perThreadHardLimitMB).append("\n");
+    sb.append("optimizeBytesDocValuesForSpeed=").append(optimizeBytesDocValuesForSpeed).append("\n");
 
     return sb.toString();
   }
Index: lucene/src/java/org/apache/lucene/index/values/Writer.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/values/Writer.java	(revision 1187902)
+++ lucene/src/java/org/apache/lucene/index/values/Writer.java	(revision )
@@ -172,11 +172,12 @@
    *          the {@link Directory} to create the files from.
    * @param bytesUsed
    *          a byte-usage tracking reference
+   * @param optimizeBytesDocValuesForSpeed Whether the space used for DV should be rounded up for higher lookup performance.
    * @return a new {@link Writer} instance for the given {@link ValueType}
    * @throws IOException
    */
   public static Writer create(ValueType type, String id, Directory directory,
-      Comparator<BytesRef> comp, Counter bytesUsed, IOContext context) throws IOException {
+      Comparator<BytesRef> comp, Counter bytesUsed, IOContext context, boolean optimizeBytesDocValuesForSpeed) throws IOException {
     if (comp == null) {
       comp = BytesRef.getUTF8SortedAsUnicodeComparator();
     }
@@ -193,22 +194,22 @@
       return Floats.getWriter(directory, id, bytesUsed, context, type);
     case BYTES_FIXED_STRAIGHT:
       return Bytes.getWriter(directory, id, Bytes.Mode.STRAIGHT, true, comp,
-          bytesUsed, context);
+          bytesUsed, context, optimizeBytesDocValuesForSpeed);
     case BYTES_FIXED_DEREF:
       return Bytes.getWriter(directory, id, Bytes.Mode.DEREF, true, comp,
-          bytesUsed, context);
+          bytesUsed, context, optimizeBytesDocValuesForSpeed);
     case BYTES_FIXED_SORTED:
       return Bytes.getWriter(directory, id, Bytes.Mode.SORTED, true, comp,
-          bytesUsed, context);
+          bytesUsed, context, optimizeBytesDocValuesForSpeed);
     case BYTES_VAR_STRAIGHT:
       return Bytes.getWriter(directory, id, Bytes.Mode.STRAIGHT, false, comp,
-          bytesUsed, context);
+          bytesUsed, context, optimizeBytesDocValuesForSpeed);
     case BYTES_VAR_DEREF:
       return Bytes.getWriter(directory, id, Bytes.Mode.DEREF, false, comp,
-          bytesUsed, context);
+          bytesUsed, context, optimizeBytesDocValuesForSpeed);
     case BYTES_VAR_SORTED:
       return Bytes.getWriter(directory, id, Bytes.Mode.SORTED, false, comp,
-          bytesUsed, context);
+          bytesUsed, context, optimizeBytesDocValuesForSpeed);
     default:
       throw new IllegalArgumentException("Unknown Values: " + type);
     }
Index: lucene/src/java/org/apache/lucene/index/values/FixedSortedBytesImpl.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/values/FixedSortedBytesImpl.java	(revision 1187902)
+++ lucene/src/java/org/apache/lucene/index/values/FixedSortedBytesImpl.java	(revision )
@@ -53,9 +53,10 @@
     private final Comparator<BytesRef> comp;
 
     public Writer(Directory dir, String id, Comparator<BytesRef> comp,
-        Counter bytesUsed, IOContext context) throws IOException {
+        Counter bytesUsed, IOContext context, boolean optimizePackedForSpeed) throws IOException {
       super(dir, id, CODEC_NAME, VERSION_CURRENT, bytesUsed, context);
       this.comp = comp;
+      this.optimizePackedForSpeed = optimizePackedForSpeed;
     }
 
     @Override
Index: lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java	(revision 1187902)
+++ lucene/src/test/org/apache/lucene/index/TestSegmentMerger.java	(revision )
@@ -24,6 +24,7 @@
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.Version;
 
 import java.io.IOException;
 import java.util.Collection;
@@ -74,7 +75,8 @@
   }
 
   public void testMerge() throws IOException {
-    SegmentMerger merger = new SegmentMerger(mergedDir, IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL, mergedSegment, null, null, new FieldInfos(), newIOContext(random));
+    SegmentMerger merger = new SegmentMerger(mergedDir, IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL, mergedSegment,
+        null, null, new FieldInfos(), newIOContext(random), newIndexWriterConfig(Version.LUCENE_40, new MockAnalyzer(random)));
     merger.add(reader1);
     merger.add(reader2);
     int docsMerged = merger.merge();
@@ -145,7 +147,8 @@
     w.close();
 
     // Assert that SM fails if .del exists
-    SegmentMerger sm = new SegmentMerger(dir, 1, "a", null, null, null, newIOContext(random));
+    SegmentMerger sm = new SegmentMerger(dir, 1, "a", null, null, null, newIOContext(random),
+        newIndexWriterConfig(Version.LUCENE_40, new MockAnalyzer(random)));
     try {
       sm.createCompoundFile("b1", w.segmentInfos.info(0), newIOContext(random));
       fail("should not have been able to create a .cfs with .del and .s* files");
Index: lucene/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java	(revision 1187902)
+++ lucene/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java	(revision )
@@ -443,7 +443,8 @@
     assert deleteSlice == null : "all deletes must be applied in prepareFlush";
     flushState = new SegmentWriteState(infoStream, directory, segment, fieldInfos,
         numDocsInRAM, writer.getConfig().getTermIndexInterval(),
-        fieldInfos.buildSegmentCodecs(true), pendingDeletes, new IOContext(new FlushInfo(numDocsInRAM, bytesUsed())));
+        fieldInfos.buildSegmentCodecs(true), pendingDeletes, new IOContext(new FlushInfo(numDocsInRAM, bytesUsed())),
+        writer.getConfig());
 
     final double startMBUsed = parent.flushControl.netBytes() / 1024. / 1024.;
 
     // Apply delete-by-docID now (delete-byDocID only
     // happens when an exception is hit processing that
@@ -558,7 +559,7 @@
 
   PerDocWriteState newPerDocWriteState(int codecId) {
     assert segment != null;
-    return new PerDocWriteState(infoStream, directory, segment, fieldInfos, bytesUsed, codecId, IOContext.DEFAULT);
+    return new PerDocWriteState(infoStream, directory, segment, fieldInfos, bytesUsed, codecId, IOContext.DEFAULT, writer.getConfig());
   }
 
   void setInfoStream(PrintStream infoStream) {
Index: lucene/src/java/org/apache/lucene/index/values/VarSortedBytesImpl.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/values/VarSortedBytesImpl.java	(revision 1187902)
+++ lucene/src/java/org/apache/lucene/index/values/VarSortedBytesImpl.java	(revision )
@@ -54,10 +54,11 @@
     private final Comparator<BytesRef> comp;
 
     public Writer(Directory dir, String id, Comparator<BytesRef> comp,
-        Counter bytesUsed, IOContext context) throws IOException {
+        Counter bytesUsed, IOContext context, boolean optimizePackedForSpeed) throws IOException {
       super(dir, id, CODEC_NAME, VERSION_CURRENT, bytesUsed, context);
       this.comp = comp;
       size = 0;
+      this.optimizePackedForSpeed = optimizePackedForSpeed;
     }
 
     @Override
     public void merge(MergeState mergeState, IndexDocValues[] docValues)
@@ -119,8 +120,7 @@
       final int[] sortedEntries = hash.sort(comp);
       // total bytes of data
       idxOut.writeLong(maxBytes);
-      PackedInts.Writer offsetWriter = PackedInts.getWriter(idxOut, count+1,
-          PackedInts.bitsRequired(maxBytes));
+      PackedInts.Writer offsetWriter = PackedInts.getWriter(idxOut, count+1, bitsRequired(maxBytes));
       // first dump bytes data, recording index & write offset as
       // we go
       final BytesRef spare = new BytesRef();
Index: lucene/src/java/org/apache/lucene/index/IndexWriter.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/IndexWriter.java	(revision 1187902)
+++ lucene/src/java/org/apache/lucene/index/IndexWriter.java	(revision )
@@ -2567,7 +2567,8 @@
       // abortable so that IW.close(false) is able to stop it
       SegmentMerger merger = new SegmentMerger(directory, config.getTermIndexInterval(),
           mergedName, null, payloadProcessorProvider,
-          globalFieldNumberMap.newFieldInfos(SegmentCodecsBuilder.create(codecs)), context);
+          globalFieldNumberMap.newFieldInfos(SegmentCodecsBuilder.create(codecs)),
+          context, config);
 
       for (IndexReader reader : readers)      // add new indexes
         merger.add(reader);
@@ -3633,7 +3634,7 @@
     IOContext context = new IOContext(merge.getMergeInfo());
 
     SegmentMerger merger = new SegmentMerger(directory, config.getTermIndexInterval(), mergedName, merge,
-        payloadProcessorProvider, merge.info.getFieldInfos(), context);
+        payloadProcessorProvider, merge.info.getFieldInfos(), context, config);
 
     if (infoStream != null) {
       message("merging " + merge.segString(directory) + " mergeVectors=" + merge.info.getFieldInfos().hasVectors());
Index: lucene/src/java/org/apache/lucene/index/PerDocWriteState.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/PerDocWriteState.java	(revision 1187902)
+++ lucene/src/java/org/apache/lucene/index/PerDocWriteState.java	(revision )
@@ -37,10 +37,11 @@
   public final SegmentCodecs segmentCodecs;
   public final int codecId;
   public final IOContext context;
+  public final IndexWriterConfig config;
 
   PerDocWriteState(PrintStream infoStream, Directory directory,
       String segmentName, FieldInfos fieldInfos, Counter bytesUsed,
-      int codecId, IOContext context) {
+      int codecId, IOContext context, IndexWriterConfig config) {
    this.infoStream = infoStream;
    this.directory = directory;
    this.segmentName = segmentName;
@@ -49,6 +50,7 @@
     this.codecId = codecId;
     this.bytesUsed = bytesUsed;
     this.context = context;
+    this.config = config;
   }
 
   PerDocWriteState(SegmentWriteState state) {
@@ -60,6 +62,7 @@
     codecId = state.codecId;
     bytesUsed = Counter.newCounter();
     context = state.context;
+    config = state.config;
   }
 
   PerDocWriteState(PerDocWriteState state, int codecId) {
@@ -71,5 +74,6 @@
     this.codecId = codecId;
     this.bytesUsed = state.bytesUsed;
     this.context = state.context;
+    this.config = state.config;
   }
 }
Index: lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java
===================================================================
--- lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java	(revision 1187902)
+++ lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java	(revision )
@@ -973,6 +973,7 @@
   public static IndexWriterConfig newIndexWriterConfig(Random r, Version v, Analyzer a) {
     IndexWriterConfig c = new IndexWriterConfig(v, a);
     c.setSimilarityProvider(similarityProvider);
+    c.setOptimizeBytesDocValuesForSpeed(r.nextBoolean());
     if (r.nextBoolean()) {
       c.setMergeScheduler(new SerialMergeScheduler());
     }
Index: lucene/src/java/org/apache/lucene/index/SegmentWriteState.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/SegmentWriteState.java	(revision 1187902)
+++ lucene/src/java/org/apache/lucene/index/SegmentWriteState.java	(revision )
@@ -54,9 +54,11 @@
   public int termIndexInterval;                   // TODO: this should be private to the codec, not settable here or in IWC
 
   public final IOContext context;
+  public final IndexWriterConfig config;
 
   public SegmentWriteState(PrintStream infoStream, Directory directory, String segmentName, FieldInfos fieldInfos,
-      int numDocs, int termIndexInterval, SegmentCodecs segmentCodecs, BufferedDeletes segDeletes, IOContext context) {
+      int numDocs, int termIndexInterval, SegmentCodecs segmentCodecs, BufferedDeletes segDeletes, IOContext context,
+      IndexWriterConfig config) {
     this.infoStream = infoStream;
     this.segDeletes = segDeletes;
     this.directory = directory;
@@ -67,6 +69,7 @@
     this.segmentCodecs = segmentCodecs;
     codecId = -1;
     this.context = context;
+    this.config = config;
   }
 
   /**
@@ -83,5 +86,6 @@
     context = state.context;
     this.codecId = codecId;
     segDeletes = state.segDeletes;
+    config = state.config;
   }
 }
Index: lucene/src/test/org/apache/lucene/index/TestDoc.java
===================================================================
--- lucene/src/test/org/apache/lucene/index/TestDoc.java	(revision 1187902)
+++ lucene/src/test/org/apache/lucene/index/TestDoc.java	(revision )
@@ -37,6 +37,7 @@
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.Version;
 import org.apache.lucene.util._TestUtil;
 
@@ -197,7 +198,8 @@
       SegmentReader r1 = SegmentReader.get(true, si1, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, context);
       SegmentReader r2 = SegmentReader.get(true, si2, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR, context);
 
-      SegmentMerger merger = new SegmentMerger(si1.dir, IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL, merged, null, null, new FieldInfos(), context);
+      SegmentMerger merger = new SegmentMerger(si1.dir, IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL,
+          merged, null, null, new FieldInfos(), context, newIndexWriterConfig(Version.LUCENE_40, new MockAnalyzer(random)));
 
       merger.add(r1);
       merger.add(r2);
Index: lucene/src/java/org/apache/lucene/index/codecs/DocValuesWriterBase.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/codecs/DocValuesWriterBase.java	(revision 1187902)
+++ lucene/src/java/org/apache/lucene/index/codecs/DocValuesWriterBase.java	(revision )
@@ -21,6 +21,7 @@
 import java.util.Comparator;
 
 import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.PerDocWriteState;
 import org.apache.lucene.index.values.Writer;
 import org.apache.lucene.store.Directory;
@@ -37,12 +38,14 @@
   private final int codecId;
   private final Counter bytesUsed;
   private final IOContext context;
+  private final IndexWriterConfig config;
 
   protected DocValuesWriterBase(PerDocWriteState state) {
     this.segmentName = state.segmentName;
     this.codecId = state.codecId;
     this.bytesUsed = state.bytesUsed;
     this.context = state.context;
+    this.config = state.config;
   }
 
   protected abstract Directory getDirectory();
@@ -55,7 +58,7 @@
   public DocValuesConsumer addValuesField(FieldInfo field) throws IOException {
     return Writer.create(field.getDocValues(),
         docValuesId(segmentName, codecId, field.number),
-        getDirectory(), getComparator(), bytesUsed, context);
+        getDirectory(), getComparator(), bytesUsed, context, config.isOptimizeBytesDocValuesForSpeed());
   }
 
   public static String docValuesId(String segmentsName, int codecID, int fieldId) {
Index: lucene/src/java/org/apache/lucene/index/SegmentMerger.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/SegmentMerger.java	(revision 1187902)
+++ lucene/src/java/org/apache/lucene/index/SegmentMerger.java	(revision )
@@ -72,8 +72,9 @@
   private PayloadProcessorProvider payloadProcessorProvider;
 
   private IOContext context;
+  private IndexWriterConfig config;
 
-  SegmentMerger(Directory dir, int termIndexInterval, String name, MergePolicy.OneMerge merge, PayloadProcessorProvider payloadProcessorProvider, FieldInfos fieldInfos, IOContext context) {
+  SegmentMerger(Directory dir, int termIndexInterval, String name, MergePolicy.OneMerge merge, PayloadProcessorProvider payloadProcessorProvider, FieldInfos fieldInfos, IOContext context, IndexWriterConfig config) {
     this.payloadProcessorProvider = payloadProcessorProvider;
     directory = dir;
     segment = name;
@@ -90,6 +91,7 @@
     }
     this.termIndexInterval = termIndexInterval;
     this.context = context;
+    this.config = config;
   }
 
   public FieldInfos fieldInfos() {
@@ -293,7 +295,8 @@
       // entering the index.  See LUCENE-1282 for
      // details.
       throw new RuntimeException("mergeFields produced an invalid result: docCount is " + docCount + " but fdx file size is " + fdxFileLength + " file=" + fileName + " file exists?=" + directory.fileExists(fileName) + "; now aborting this merge to prevent index corruption");
 
-    segmentWriteState = new SegmentWriteState(null, directory, segment, fieldInfos, docCount, termIndexInterval, codecInfo, null, context);
+    segmentWriteState = new SegmentWriteState(null, directory, segment, fieldInfos, docCount, termIndexInterval,
+        codecInfo, null, context, config);
     return docCount;
   }
Index: lucene/src/java/org/apache/lucene/index/values/Bytes.java
===================================================================
--- lucene/src/java/org/apache/lucene/index/values/Bytes.java	(revision 1187902)
+++ lucene/src/java/org/apache/lucene/index/values/Bytes.java	(revision )
@@ -32,17 +32,17 @@
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.ByteBlockPool.Allocator;
+import org.apache.lucene.util.ByteBlockPool.DirectTrackingAllocator;
 import org.apache.lucene.util.ByteBlockPool;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefHash.TrackingDirectBytesStartArray;
 import org.apache.lucene.util.BytesRefHash;
 import org.apache.lucene.util.CodecUtil;
 import org.apache.lucene.util.Counter;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.PagedBytes;
 import org.apache.lucene.util.RamUsageEstimator;
-import org.apache.lucene.util.ByteBlockPool.Allocator;
-import org.apache.lucene.util.ByteBlockPool.DirectTrackingAllocator;
-import org.apache.lucene.util.BytesRefHash.TrackingDirectBytesStartArray;
 import org.apache.lucene.util.packed.PackedInts;
 
 /**
@@ -109,13 +109,15 @@
    * {@link Writer}. A call to {@link Writer#finish(int)} will release
    * all internally used resources and frees the memory tracking
    * reference.
+   * @param optimizeBytesDocValuesForSpeed Whether the space used for DV should be rounded up for higher lookup performance.
    * @param context
    * @return a new {@link Writer} instance
    * @throws IOException
    *           if the files for the writer can not be created.
    */
   public static Writer getWriter(Directory dir, String id, Mode mode,
-      boolean fixedSize, Comparator<BytesRef> sortComparator, Counter bytesUsed, IOContext context)
+      boolean fixedSize, Comparator<BytesRef> sortComparator,
+      Counter bytesUsed, IOContext context, boolean optimizeBytesDocValuesForSpeed)
       throws IOException {
     // TODO -- i shouldn't have to specify fixed?  can
     // track itself & do the write thing at write time?
@@ -129,7 +131,7 @@
       } else if (mode == Mode.DEREF) {
         return new FixedDerefBytesImpl.Writer(dir, id, bytesUsed, context);
       } else if (mode == Mode.SORTED) {
-        return new FixedSortedBytesImpl.Writer(dir, id, sortComparator, bytesUsed, context);
+        return new FixedSortedBytesImpl.Writer(dir, id, sortComparator, bytesUsed, context, optimizeBytesDocValuesForSpeed);
       }
     } else {
       if (mode == Mode.STRAIGHT) {
@@ -137,7 +139,7 @@
       } else if (mode == Mode.DEREF) {
         return new VarDerefBytesImpl.Writer(dir, id, bytesUsed, context);
       } else if (mode == Mode.SORTED) {
-        return new VarSortedBytesImpl.Writer(dir, id, sortComparator, bytesUsed, context);
+        return new VarSortedBytesImpl.Writer(dir, id, sortComparator, bytesUsed, context, optimizeBytesDocValuesForSpeed);
       }
     }
 
@@ -393,6 +395,7 @@
     protected int lastDocId = -1;
     protected int[] docToEntry;
     protected final BytesRefHash hash;
+    protected boolean optimizePackedForSpeed = false;
     protected long maxBytes = 0;
 
     protected DerefBytesWriterBase(Directory dir, String id, String codecName,
@@ -506,8 +509,7 @@
     protected void writeIndex(IndexOutput idxOut, int docCount,
         long maxValue, int[] addresses, int[] toEntry) throws IOException {
-      final PackedInts.Writer w = PackedInts.getWriter(idxOut, docCount,
-          PackedInts.bitsRequired(maxValue));
+      final PackedInts.Writer w = PackedInts.getWriter(idxOut, docCount, bitsRequired(maxValue));
       final int limit = docCount > docToEntry.length ? docToEntry.length : docCount;
       assert toEntry.length >= limit -1;
@@ -530,8 +532,7 @@
     protected void writeIndex(IndexOutput idxOut, int docCount,
         long maxValue, long[] addresses, int[] toEntry) throws IOException {
-      final PackedInts.Writer w = PackedInts.getWriter(idxOut, docCount,
-          PackedInts.bitsRequired(maxValue));
+      final PackedInts.Writer w = PackedInts.getWriter(idxOut, docCount, bitsRequired(maxValue));
       final int limit = docCount > docToEntry.length ? docToEntry.length : docCount;
       assert toEntry.length >= limit -1;
@@ -551,9 +552,14 @@
       }
       w.finish();
     }
 
+    protected int bitsRequired(long maxValue) {
+      return optimizePackedForSpeed
+          ? PackedInts.getNextFixedSize(PackedInts.bitsRequired(maxValue))
+          : PackedInts.bitsRequired(maxValue);
+    }
+
   }
 
   static abstract class BytesSortedSourceBase extends SortedSource {
     private final PagedBytes pagedBytes;
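
For reviewers: a quick sketch of how the new knob is used from application code once the patch is applied. This is illustrative only (the OptimizeBytesDocValuesExample class and its openWriter helper are not part of the patch); it assumes the trunk IndexWriter(Directory, IndexWriterConfig) constructor.

import java.io.IOException;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Version;

public class OptimizeBytesDocValuesExample {
  // Opens a writer that trades index size for faster bytes docvalues lookups.
  // true is already the default in this patch; it is spelled out for clarity.
  // The setting only affects documents indexed after the change.
  public static IndexWriter openWriter(Directory dir, Analyzer analyzer) throws IOException {
    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_40, analyzer)
        .setOptimizeBytesDocValuesForSpeed(true);
    return new IndexWriter(dir, conf);
  }
}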
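The core of the change is the bitsRequired helper added to DerefBytesWriterBase: with the option enabled, the bit width handed to PackedInts.getWriter is rounded up to the next fixed size via PackedInts.getNextFixedSize, so the packed offsets can be backed by plain array implementations instead of values packed across word boundaries; reads skip shift/mask work at the cost of some wasted space. A standalone illustration, assuming trunk's PackedInts behavior at this revision (getNextFixedSize rounding to 8, 16, 32 or 64):

import org.apache.lucene.util.packed.PackedInts;

public class NextFixedSizeDemo {
  public static void main(String[] args) {
    // e.g. the largest byte offset written to the docvalues data file
    long maxValue = 100000L;
    int exact = PackedInts.bitsRequired(maxValue);  // 17: minimal bits for 100000
    int fixed = PackedInts.getNextFixedSize(exact); // 32: next fixed size after 17
    // optimizePackedForSpeed == true  -> getWriter is called with 32 (faster lookups)
    // optimizePackedForSpeed == false -> getWriter is called with 17 (smaller index)
    System.out.println("exact=" + exact + ", fixed=" + fixed);
  }
}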