Index: src/java/org/apache/hadoop/hbase/io/hfile/HFile.java
===================================================================
--- src/java/org/apache/hadoop/hbase/io/hfile/HFile.java	(revision 813746)
+++ src/java/org/apache/hadoop/hbase/io/hfile/HFile.java	(working copy)
@@ -168,7 +168,7 @@
     protected String name;
 
     // Total uncompressed bytes, maybe calculate a compression ratio later.
-    private int totalBytes = 0;
+    private long totalBytes = 0;
 
     // Total # of key/value entries, ie: how many times add() was called.
     private int entryCount = 0;
@@ -320,13 +320,12 @@
      */
     private void finishBlock() throws IOException {
       if (this.out == null) return;
-      long size = releaseCompressingStream(this.out);
+      int size = releaseCompressingStream(this.out);
       this.out = null;
       blockKeys.add(firstKey);
-      int written = longToInt(size);
       blockOffsets.add(Long.valueOf(blockBegin));
-      blockDataSizes.add(Integer.valueOf(written));
-      this.totalBytes += written;
+      blockDataSizes.add(Integer.valueOf(size));
+      this.totalBytes += size;
     }
 
     /*
@@ -513,7 +512,7 @@
       }
       if (this.lastKeyBuffer != null) {
         if (this.comparator.compare(this.lastKeyBuffer, this.lastKeyOffset,
-            this.lastKeyLength, key, offset, length) > 0) {
+            this.lastKeyLength, key, offset, length) >= 0) {
           throw new IOException("Added a key not lexically larger than" +
             " previous key=" + Bytes.toString(key, offset, length) +
             ", lastkey=" + Bytes.toString(this.lastKeyBuffer, this.lastKeyOffset,
@@ -620,7 +619,7 @@
       appendFileInfo(this.fileinfo, FileInfo.AVG_KEY_LEN,
         Bytes.toBytes(avgKeyLen), false);
       int avgValueLen = this.entryCount == 0? 0:
-        (int)(this.keylength/this.entryCount);
+        (int)(this.valuelength/this.entryCount);
       appendFileInfo(this.fileinfo, FileInfo.AVG_VALUE_LEN,
         Bytes.toBytes(avgValueLen), false);
       appendFileInfo(this.fileinfo, FileInfo.COMPARATOR,
@@ -860,7 +859,7 @@
       if (trailer.metaIndexCount == 0) {
         return null; // there are no meta blocks
       }
-      if (metaIndex == null) {
+      if ((metaIndex == null) || (metaIndex.count == 0)) {
        throw new IOException("Meta index not loaded");
       }
       byte [] mbname = Bytes.toBytes(metaBlockName);
@@ -876,6 +875,8 @@
       ByteBuffer buf = decompress(metaIndex.blockOffsets[block],
         longToInt(blockSize), metaIndex.blockDataSizes[block]);
+      if (buf == null)
+        return null;
 
       byte [] magic = new byte[METABLOCKMAGIC.length];
       buf.get(magic, 0, magic.length);
 
@@ -898,7 +899,7 @@
       if (blockIndex == null) {
         throw new IOException("Block index not loaded");
       }
-      if (block < 0 || block > blockIndex.count) {
+      if (block < 0 || block >= blockIndex.count) {
         throw new IOException("Requested block is out of range: " + block +
           ", max: " + blockIndex.count);
       }
@@ -935,6 +936,9 @@
       }
       ByteBuffer buf = decompress(blockIndex.blockOffsets[block],
         longToInt(onDiskBlockSize), this.blockIndex.blockDataSizes[block]);
+      if (buf == null) {
+        throw new IOException("Decompress block failure " + block);
+      }
 
       byte [] magic = new byte[DATABLOCKMAGIC.length];
       buf.get(magic, 0, magic.length);
@@ -1251,6 +1255,7 @@
         block.rewind();
         currKeyLen = block.getInt();
         currValueLen = block.getInt();
+        return true;
       }
       currBlock = 0;
       block = reader.readBlock(currBlock, cacheBlocks);
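
Note on the first hunk: here is a minimal standalone sketch (not HBase code; the class name, block size, and block count are illustrative assumptions) of why totalBytes moves from int to long. Accumulating per-block sizes for a file larger than 2 GB silently wraps a 32-bit int negative, which would corrupt the uncompressed-size accounting the comment above describes:

public class TotalBytesOverflow {
  public static void main(String[] args) {
    final int blockSize = 64 * 1024; // 64 KB per block
    final int blocks = 40000;        // ~2.4 GB in total
    int totalInt = 0;
    long totalLong = 0;
    for (int i = 0; i < blocks; i++) {
      totalInt += blockSize;   // wraps past Integer.MAX_VALUE
      totalLong += blockSize;  // stays correct
    }
    System.out.println("int total:  " + totalInt);   // prints a negative value
    System.out.println("long total: " + totalLong);  // prints 2621440000
  }
}

Relatedly, the comparator change in the third hunk from > 0 to >= 0 tightens the writer's ordering check: it now rejects a key equal to the previous key, not just one that sorts before it.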