Index: hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java	(revision 1510612)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java	(working copy)
@@ -241,29 +241,17 @@
     output.writeLong(loadOnOpenDataOffset);
     output.writeInt(dataIndexCount);
 
-    if (majorVersion == 1) {
-      // This used to be metaIndexOffset, but it was not used in version 1.
-      output.writeLong(0);
-    } else {
-      output.writeLong(uncompressedDataIndexSize);
-    }
+    output.writeLong(uncompressedDataIndexSize);
 
     output.writeInt(metaIndexCount);
     output.writeLong(totalUncompressedBytes);
-    if (majorVersion == 1) {
-      output.writeInt((int) Math.min(Integer.MAX_VALUE, entryCount));
-    } else {
-      // This field is long from version 2 onwards.
-      output.writeLong(entryCount);
-    }
+    output.writeLong(entryCount);
     output.writeInt(compressionCodec.ordinal());
 
-    if (majorVersion > 1) {
-      output.writeInt(numDataIndexLevels);
-      output.writeLong(firstDataBlockOffset);
-      output.writeLong(lastDataBlockOffset);
-      Bytes.writeStringFixedSize(output, comparatorClassName, MAX_COMPARATOR_NAME_LENGTH);
-    }
+    output.writeInt(numDataIndexLevels);
+    output.writeLong(firstDataBlockOffset);
+    output.writeLong(lastDataBlockOffset);
+    Bytes.writeStringFixedSize(output, comparatorClassName, MAX_COMPARATOR_NAME_LENGTH);
   }
 
   /**
@@ -354,23 +342,17 @@
     fileInfoOffset = input.readLong();
     loadOnOpenDataOffset = input.readLong();
     dataIndexCount = input.readInt();
-    if (majorVersion == 1) {
-      input.readLong(); // Read and skip metaIndexOffset.
-    } else {
-      uncompressedDataIndexSize = input.readLong();
-    }
+    uncompressedDataIndexSize = input.readLong();
     metaIndexCount = input.readInt();
 
     totalUncompressedBytes = input.readLong();
-    entryCount = majorVersion == 1 ? input.readInt() : input.readLong();
+    entryCount = input.readLong();
     compressionCodec = Compression.Algorithm.values()[input.readInt()];
-    if (majorVersion > 1) {
-      numDataIndexLevels = input.readInt();
-      firstDataBlockOffset = input.readLong();
-      lastDataBlockOffset = input.readLong();
-      setComparatorClass(getComparatorClass(Bytes.readStringFixedSize(input,
+    numDataIndexLevels = input.readInt();
+    firstDataBlockOffset = input.readLong();
+    lastDataBlockOffset = input.readLong();
+    setComparatorClass(getComparatorClass(Bytes.readStringFixedSize(input,
         MAX_COMPARATOR_NAME_LENGTH)));
-    }
   }
 
   private void append(StringBuilder sb, String s) {
@@ -389,13 +371,11 @@
     append(sb, "totalUncomressedBytes=" + totalUncompressedBytes);
     append(sb, "entryCount=" + entryCount);
     append(sb, "compressionCodec=" + compressionCodec);
-    if (majorVersion == 2) {
-      append(sb, "uncompressedDataIndexSize=" + uncompressedDataIndexSize);
-      append(sb, "numDataIndexLevels=" + numDataIndexLevels);
-      append(sb, "firstDataBlockOffset=" + firstDataBlockOffset);
-      append(sb, "lastDataBlockOffset=" + lastDataBlockOffset);
-      append(sb, "comparatorClassName=" + comparatorClassName);
-    }
+    append(sb, "uncompressedDataIndexSize=" + uncompressedDataIndexSize);
+    append(sb, "numDataIndexLevels=" + numDataIndexLevels);
+    append(sb, "firstDataBlockOffset=" + firstDataBlockOffset);
+    append(sb, "lastDataBlockOffset=" + lastDataBlockOffset);
+    append(sb, "comparatorClassName=" + comparatorClassName);
     append(sb, "majorVersion=" + majorVersion);
     append(sb, "minorVersion=" + minorVersion);
 
@@ -516,15 +496,6 @@
   }
 
   public void setEntryCount(long newEntryCount) {
-    if (majorVersion == 1) {
-      int intEntryCount = (int) Math.min(Integer.MAX_VALUE, newEntryCount);
-      if (intEntryCount != newEntryCount) {
-        LOG.info("Warning: entry count is " + newEntryCount + " but writing "
-            + intEntryCount + " into the version " + majorVersion + " trailer");
-      }
-      entryCount = intEntryCount;
-      return;
-    }
     entryCount = newEntryCount;
   }
 
@@ -626,8 +597,6 @@
   }
 
   public long getUncompressedDataIndexSize() {
-    if (majorVersion == 1)
-      return 0;
     return uncompressedDataIndexSize;
   }
 
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java	(revision 1510612)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java	(working copy)
@@ -62,7 +62,6 @@
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
 import org.apache.hadoop.hbase.protobuf.generated.HFileProtos;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
-import org.apache.hadoop.hbase.regionserver.StoreFile.WriterBuilder;
 import org.apache.hadoop.hbase.util.BloomFilterWriter;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ChecksumType;
@@ -153,7 +152,7 @@
       Compression.Algorithm.NONE;
 
   /** Minimum supported HFile format version */
-  public static final int MIN_FORMAT_VERSION = 1;
+  public static final int MIN_FORMAT_VERSION = 2;
 
   /** Maximum supported HFile format version */
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java	(revision 1510612)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java	(working copy)
@@ -197,35 +197,12 @@
         MAX_ALLOWED_FOLD_FACTOR);
 
     // Do we support compound bloom filters?
-    if (HFile.getFormatVersion(conf) > HFile.MIN_FORMAT_VERSION) {
-      // In case of compound Bloom filters we ignore the maxKeys hint.
-      CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(
-          getBloomBlockSize(conf), err, Hash.getHashType(conf), maxFold,
-          cacheConf.shouldCacheBloomsOnWrite(), bloomType == BloomType.ROWCOL
-              ? KeyValue.KEY_COMPARATOR : Bytes.BYTES_RAWCOMPARATOR);
-      writer.addInlineBlockWriter(bloomWriter);
-      return bloomWriter;
-    } else {
-      // A single-block Bloom filter. Only used when testing HFile format
-      // version 1.
-      int tooBig = conf.getInt(IO_STOREFILE_BLOOM_MAX_KEYS,
-          128 * 1000 * 1000);
-
-      if (maxKeys <= 0) {
-        LOG.warn("Invalid maximum number of keys specified: " + maxKeys
-            + ", not using Bloom filter");
-        return null;
-      } else if (maxKeys < tooBig) {
-        BloomFilterWriter bloom = new ByteBloomFilter((int) maxKeys, err,
-            Hash.getHashType(conf), maxFold);
-        bloom.allocBloom();
-        return bloom;
-      } else {
-        LOG.debug("Skipping bloom filter because max keysize too large: "
-            + maxKeys);
-      }
-    }
-    return null;
+    // In case of compound Bloom filters we ignore the maxKeys hint.
+    CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(getBloomBlockSize(conf),
+        err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(),
+        bloomType == BloomType.ROWCOL ? KeyValue.KEY_COMPARATOR : Bytes.BYTES_RAWCOMPARATOR);
+    writer.addInlineBlockWriter(bloomWriter);
+    return bloomWriter;
   }
 
   /**
@@ -250,18 +227,12 @@
 
     float err = getErrorRate(conf);
 
-    if (HFile.getFormatVersion(conf) > HFile.MIN_FORMAT_VERSION) {
-      int maxFold = getMaxFold(conf);
-      // In case of compound Bloom filters we ignore the maxKeys hint.
-      CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(
-          getBloomBlockSize(conf), err, Hash.getHashType(conf),
-          maxFold,
-          cacheConf.shouldCacheBloomsOnWrite(), Bytes.BYTES_RAWCOMPARATOR);
-      writer.addInlineBlockWriter(bloomWriter);
-      return bloomWriter;
-    } else {
-      LOG.info("Delete Family Bloom filter is not supported in HFile V1");
-      return null;
-    }
+    int maxFold = getMaxFold(conf);
+    // In case of compound Bloom filters we ignore the maxKeys hint.
+    CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(getBloomBlockSize(conf),
+        err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(),
+        Bytes.BYTES_RAWCOMPARATOR);
+    writer.addInlineBlockWriter(bloomWriter);
+    return bloomWriter;
   }
 };
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java	(revision 1510612)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestFixedFileTrailer.java	(working copy)
@@ -50,8 +50,11 @@
 
   private static final Log LOG = LogFactory.getLog(TestFixedFileTrailer.class);
 
-  /** The number of used fields by version. Indexed by version minus one. */
-  private static final int[] NUM_FIELDS_BY_VERSION = new int[] { 9, 14 };
+  /**
+   * The number of used fields by version. Indexed by version minus two.
+   * The minimum version we support is V2.
+   */
+  private static final int[] NUM_FIELDS_BY_VERSION = new int[] { 14 };
 
   private HBaseTestingUtility util = new HBaseTestingUtility();
   private FileSystem fs;
@@ -87,18 +90,12 @@
     t.setDataIndexCount(3);
     t.setEntryCount(((long) Integer.MAX_VALUE) + 1);
 
-    if (version == 1) {
-      t.setFileInfoOffset(876);
-    }
+    t.setLastDataBlockOffset(291);
+    t.setNumDataIndexLevels(3);
+    t.setComparatorClass(KeyValue.KEY_COMPARATOR.getClass());
+    t.setFirstDataBlockOffset(9081723123L); // Completely unrealistic.
+    t.setUncompressedDataIndexSize(827398717L); // Something random.
 
-    if (version == 2) {
-      t.setLastDataBlockOffset(291);
-      t.setNumDataIndexLevels(3);
-      t.setComparatorClass(KeyValue.KEY_COMPARATOR.getClass());
-      t.setFirstDataBlockOffset(9081723123L); // Completely unrealistic.
-      t.setUncompressedDataIndexSize(827398717L); // Something random.
-    }
-
     t.setLoadOnOpenOffset(128);
 
     t.setMetaIndexCount(7);
@@ -162,7 +159,7 @@
 
     String trailerStr = t.toString();
     assertEquals("Invalid number of fields in the string representation "
-        + "of the trailer: " + trailerStr, NUM_FIELDS_BY_VERSION[version - 1],
+        + "of the trailer: " + trailerStr, NUM_FIELDS_BY_VERSION[version - 2],
         trailerStr.split(", ").length);
     assertEquals(trailerStr, t4.toString());
   }
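Note for reviewers (not part of the patch): the deleted v1 write path clamped the
64-bit entry count to an int, which is why TestFixedFileTrailer sets the count to
Integer.MAX_VALUE + 1 to exercise the full long field that the v2 path always
writes. A minimal standalone sketch of the difference; the class name and printed
labels are illustrative only:

public class EntryCountDemo {
  public static void main(String[] args) {
    // Same value the test uses: one past the int range.
    long entryCount = ((long) Integer.MAX_VALUE) + 1; // 2147483648
    // The removed v1 branch wrote the clamped int value.
    int v1Stored = (int) Math.min(Integer.MAX_VALUE, entryCount);
    // The v2 path (now unconditional) writes the long directly.
    long v2Stored = entryCount;
    System.out.println("v1 would have stored: " + v1Stored); // 2147483647 (lossy)
    System.out.println("v2 stores:            " + v2Stored); // 2147483648 (exact)
  }
}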