Index: hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java	(revision 1449586)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java	(working copy)
@@ -185,7 +185,6 @@
     LOG.info("Total number of keys: " + keys.size());
     for (byte[] key : keys) {
       assertTrue(key != null);
-      assertTrue(indexReader != null);
       HFileBlock b = indexReader.seekToDataBlock(key, 0, key.length, null,
           true, true, false);
       if (Bytes.BYTES_RAWCOMPARATOR.compare(key, firstKeyInFile) < 0) {
@@ -197,7 +196,7 @@
 
       String keyStr = "key #" + i + ", " + Bytes.toStringBinary(key);
       assertTrue("seekToDataBlock failed for " + keyStr, b != null);
-
+      assertTrue(b != null);
       if (prevOffset == b.getOffset()) {
         assertEquals(++expectedHitCount, brw.hitCount);
       } else {
@@ -435,7 +434,7 @@
 
   /** Checks if the HeapSize calculator is within reason */
   @Test
-  public void testHeapSizeForBlockIndex() throws IOException {
+  public void testHeapSizeForBlockIndex() {
     Class cl = HFileBlockIndex.BlockIndexReader.class;
     long expected = ClassSize.estimateBase(cl, false);
 
@@ -479,7 +478,7 @@
     conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, indexBlockSize);
 
     Set keyStrSet = new HashSet();
-    byte[][] keys = new byte[NUM_KV][];
+    byte[][] keyArray = new byte[NUM_KV][];
    byte[][] values = new byte[NUM_KV][];
 
     // Write the HFile
@@ -491,24 +490,24 @@
           .withCompression(compr)
           .withComparator(KeyValue.KEY_COMPARATOR)
           .create();
-      Random rand = new Random(19231737);
+      Random random = new Random(19231737);
       for (int i = 0; i < NUM_KV; ++i) {
-        byte[] row = TestHFileWriterV2.randomOrderedKey(rand, i);
+        byte[] row = TestHFileWriterV2.randomOrderedKey(random, i);
 
         // Key will be interpreted by KeyValue.KEY_COMPARATOR
         byte[] k = KeyValue.createFirstOnRow(row, 0, row.length, row, 0, 0,
             row, 0, 0).getKey();
-        byte[] v = TestHFileWriterV2.randomValue(rand);
+        byte[] v = TestHFileWriterV2.randomValue(random);
         writer.append(k, v);
-        keys[i] = k;
+        keyArray[i] = k;
         values[i] = v;
         keyStrSet.add(Bytes.toStringBinary(k));
 
         if (i > 0) {
-          assertTrue(KeyValue.KEY_COMPARATOR.compare(keys[i - 1],
-              keys[i]) < 0);
+          assertTrue(KeyValue.KEY_COMPARATOR.compare(keyArray[i - 1],
+              keyArray[i]) < 0);
         }
       }
 
@@ -520,21 +519,21 @@
     assertEquals(expectedNumLevels,
         reader.getTrailer().getNumDataIndexLevels());
 
-    assertTrue(Bytes.equals(keys[0], reader.getFirstKey()));
-    assertTrue(Bytes.equals(keys[NUM_KV - 1], reader.getLastKey()));
-    LOG.info("Last key: " + Bytes.toStringBinary(keys[NUM_KV - 1]));
+    assertTrue(Bytes.equals(keyArray[0], reader.getFirstKey()));
+    assertTrue(Bytes.equals(keyArray[NUM_KV - 1], reader.getLastKey()));
+    LOG.info("Last key: " + Bytes.toStringBinary(keyArray[NUM_KV - 1]));
 
     for (boolean pread : new boolean[] { false, true }) {
       HFileScanner scanner = reader.getScanner(true, pread);
       for (int i = 0; i < NUM_KV; ++i) {
-        checkSeekTo(keys, scanner, i);
-        checkKeyValue("i=" + i, keys[i], values[i], scanner.getKey(),
+        checkSeekTo(keyArray, scanner, i);
+        checkKeyValue("i=" + i, keyArray[i], values[i], scanner.getKey(),
            scanner.getValue());
       }
 
       assertTrue(scanner.seekTo());
       for (int i = NUM_KV - 1; i >= 0; --i) {
-        checkSeekTo(keys, scanner, i);
-        checkKeyValue("i=" + i, keys[i], values[i], scanner.getKey(),
+        checkSeekTo(keyArray, scanner, i);
+        checkKeyValue("i=" + i, keyArray[i], values[i], scanner.getKey(),
            scanner.getValue());
       }
     }
@@ -587,10 +586,10 @@
     }
   }
 
-  private void checkSeekTo(byte[][] keys, HFileScanner scanner, int i)
+  private void checkSeekTo(byte[][] keyArray, HFileScanner scanner, int i)
       throws IOException {
     assertEquals("Failed to seek to key #" + i + " ("
-        + Bytes.toStringBinary(keys[i]) + ")", 0, scanner.seekTo(keys[i]));
+        + Bytes.toStringBinary(keyArray[i]) + ")", 0, scanner.seekTo(keyArray[i]));
   }
 
   private void assertArrayEqualsBuffer(String msgPrefix, byte[] arr,
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java	(revision 1449586)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java	(working copy)
@@ -178,12 +178,11 @@
         boolean pread, boolean isCompaction)
         throws IOException {
       BlockWithScanInfo blockWithScanInfo = loadDataBlockWithScanInfo(key, keyOffset, keyLength,
-          currentBlock, cacheBlocks, pread, isCompaction);
+        currentBlock, cacheBlocks, pread, isCompaction);
       if (blockWithScanInfo == null) {
         return null;
-      } else {
-        return blockWithScanInfo.getHFileBlock();
       }
+      return blockWithScanInfo.getHFileBlock();
     }
 
     /**
@@ -316,9 +315,9 @@
       if (rootCount == 0)
         throw new IOException("HFile empty");
 
-      byte[] midKey = this.midKey.get();
-      if (midKey != null)
-        return midKey;
+      byte[] targetMidKey = this.midKey.get();
+      if (targetMidKey != null)
+        return targetMidKey;
 
       if (midLeafBlockOffset >= 0) {
         if (cachingBlockReader == null) {
@@ -339,14 +338,14 @@
         int keyOffset = b.arrayOffset() +
             Bytes.SIZEOF_INT * (numDataBlocks + 2) + keyRelOffset +
             SECONDARY_INDEX_ENTRY_OVERHEAD;
-        midKey = Arrays.copyOfRange(b.array(), keyOffset, keyOffset + keyLen);
+        targetMidKey = Arrays.copyOfRange(b.array(), keyOffset, keyOffset + keyLen);
       } else {
         // The middle of the root-level index.
-        midKey = blockKeys[rootCount / 2];
+        targetMidKey = blockKeys[rootCount / 2];
       }
 
-      this.midKey.set(midKey);
-      return midKey;
+      this.midKey.set(targetMidKey);
+      return targetMidKey;
     }
 
     /**
@@ -416,13 +415,14 @@
      *
      * @param key Last key in the block
      * @param offset file offset where the block is stored
-     * @param dataSize the uncompressed data size
+     * @param dataSize the on-disk size of the root-level block for version 2, or the uncompressed
+     *          size for version 1
      */
     private void add(final byte[] key, final long offset, final int dataSize) {
       blockOffsets[rootCount] = offset;
       blockKeys[rootCount] = key;
       blockDataSizes[rootCount] = dataSize;
       rootCount++;
       rootByteSize += SECONDARY_INDEX_ENTRY_OVERHEAD + key.length;
     }
@@ -1029,9 +1029,8 @@
       }
 
       return true;
-    } else {
-      return curInlineChunk.getNonRootSize() >= maxChunkSize;
     }
+    return curInlineChunk.getNonRootSize() >= maxChunkSize;
   }
 
   /**
@@ -1098,15 +1097,15 @@
     * Add one index entry to the current leaf-level block. When the leaf-level
     * block gets large enough, it will be flushed to disk as an inline block.
     *
-    * @param firstKey the first key of the data block
+    * @param targetFirstKey the first key of the data block to be added
     * @param blockOffset the offset of the data block
    * @param blockDataSize the on-disk size of the data block ({@link HFile}
    *          format version 2), or the uncompressed size of the data block (
    *          {@link HFile} format version 1).
    */
-    public void addEntry(byte[] firstKey, long blockOffset, int blockDataSize)
+    public void addEntry(byte[] targetFirstKey, long blockOffset, int blockDataSize)
     {
-      curInlineChunk.add(firstKey, blockOffset, blockDataSize);
+      curInlineChunk.add(targetFirstKey, blockOffset, blockDataSize);
       ++totalNumEntries;
     }
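
For reference on the add() hunk above, here is a minimal, self-contained Java sketch that mirrors the root-level bookkeeping it touches. This is not the HBase class: the sketch's class name, array capacity, constant value, and main() driver are illustrative assumptions; only the field and parameter names are taken from the hunk. It shows why rootCount must advance on every call, since otherwise each new root-index entry would overwrite slot 0 instead of being appended.

import java.util.Arrays;

public class RootIndexSketch {

  // Illustrative stand-in for HFileBlockIndex.SECONDARY_INDEX_ENTRY_OVERHEAD (per-entry
  // offset/length overhead); the exact value does not matter for this demo.
  private static final int SECONDARY_INDEX_ENTRY_OVERHEAD = 12;

  // Same field names as the hunk; a fixed capacity of 4 is an assumption for the demo.
  private final byte[][] blockKeys = new byte[4][];
  private final long[] blockOffsets = new long[4];
  private final int[] blockDataSizes = new int[4];
  private int rootCount = 0;
  private long rootByteSize = 0;

  // Mirrors the shape of add(key, offset, dataSize) from the hunk above.
  void add(final byte[] key, final long offset, final int dataSize) {
    blockOffsets[rootCount] = offset;
    blockKeys[rootCount] = key;
    blockDataSizes[rootCount] = dataSize;
    rootCount++; // without this increment, every entry would overwrite slot 0
    rootByteSize += SECONDARY_INDEX_ENTRY_OVERHEAD + key.length;
  }

  public static void main(String[] args) {
    RootIndexSketch idx = new RootIndexSketch();
    idx.add("row-000".getBytes(), 0L, 512);
    idx.add("row-100".getBytes(), 512L, 480);
    // Two distinct root entries, each key kept in its own slot.
    System.out.println("rootCount=" + idx.rootCount
        + ", keys=" + Arrays.asList(new String(idx.blockKeys[0]), new String(idx.blockKeys[1]))
        + ", rootByteSize=" + idx.rootByteSize);
  }
}

Running main() prints two root entries with both keys retained, which is the invariant the rootCount++ line in the hunk preserves.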