Index: hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java	(revision 1460518)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java	(working copy)
@@ -109,7 +109,7 @@
   }
 
   // pick an split point (roughly halfway)
-  byte[] SPLITKEY = new byte[] { (LAST_CHAR-FIRST_CHAR)/2, FIRST_CHAR};
+  byte[] SPLITKEY = new byte[] { (LAST_CHAR + FIRST_CHAR)/2, FIRST_CHAR};
 
   /*
    * Writes HStoreKey and ImmutableBytes data to passed writer and
@@ -280,6 +280,9 @@
     // Now confirm that I can read from the ref to link
     HFileScanner sB = hsfB.createReader().getScanner(false, false);
     sB.seekTo();
+
+    //count++ as seekTo() will advance the scanner
+    count++;
     while (sB.next()) {
       count++;
     }
@@ -358,18 +361,11 @@
       assertTrue(fs.exists(f.getPath()));
       topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, badmidkey, true);
       bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, badmidkey, false);
+
+      assertNull(bottomPath);
+
       top = new StoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE,
           NoOpDataBlockEncoder.INSTANCE).createReader();
-      bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf, BloomType.NONE,
-          NoOpDataBlockEncoder.INSTANCE).createReader();
-      bottomScanner = bottom.getScanner(false, false);
-      int count = 0;
-      while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) ||
-          bottomScanner.next()) {
-        count++;
-      }
-      // When badkey is < than the bottom, should return no values.
-      assertTrue(count == 0);
       // Now read from the top.
       first = true;
       topScanner = top.getScanner(false, false);
@@ -402,8 +398,7 @@
       badmidkey = Bytes.toBytes("|||");
       topPath = splitStoreFile(regionFs,topHri, TEST_FAMILY, f, badmidkey, true);
       bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, badmidkey, false);
-      top = new StoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE,
-          NoOpDataBlockEncoder.INSTANCE).createReader();
+      assertNull(topPath);
       bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf, BloomType.NONE,
           NoOpDataBlockEncoder.INSTANCE).createReader();
       first = true;
@@ -426,14 +421,6 @@
       for (int i = 0; i < tmp.length(); i++) {
         assertTrue(Bytes.toString(keyKV.getRow()).charAt(i) == 'z');
       }
-      count = 0;
-      topScanner = top.getScanner(false, false);
-      while ((!topScanner.isSeeked() && topScanner.seekTo()) ||
-          (topScanner.isSeeked() && topScanner.next())) {
-        count++;
-      }
-      // When badkey is < than the bottom, should return no values.
-      assertTrue(count == 0);
     } finally {
       if (top != null) {
         top.close(true); // evict since we are about to delete the file
       }
       if (bottom != null) {
         bottom.close(true); // evict since we are about to delete the file
       }
       fs.delete(f.getPath(), true);
     }
@@ -929,6 +916,9 @@
       throws IOException {
     FileSystem fs = regionFs.getFileSystem();
     Path path = regionFs.splitStoreFile(hri, family, sf, splitKey, isTopRef);
+    if (null == path) {
+      return null;
+    }
     Path regionDir = regionFs.commitDaughterRegion(hri);
     return new Path(new Path(regionDir, family), path.getName());
   }
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java	(revision 1462235)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java	(working copy)
@@ -43,6 +43,7 @@
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.Reference;
@@ -519,6 +520,27 @@
    */
   Path splitStoreFile(final HRegionInfo hri, final String familyName, final StoreFile f,
       final byte[] splitRow, final boolean top) throws IOException {
+
+    // Check whether the split row lies in the range of the store file
+    // If it is outside the range, return directly.
+    if (top) {
+      //check if larger than last key.
+      KeyValue splitKey = KeyValue.createFirstOnRow(splitRow);
+      byte[] lastKey = f.createReader().getLastKey();
+      if (f.getReader().getComparator().compare(splitKey.getBuffer(),
+          splitKey.getKeyOffset(), splitKey.getKeyLength(), lastKey, 0, lastKey.length) > 0) {
+        return null;
+      }
+    } else {
+      //check if smaller than first key
+      KeyValue splitKey = KeyValue.createLastOnRow(splitRow);
+      byte[] firstKey = f.createReader().getFirstKey();
+      if (f.getReader().getComparator().compare(splitKey.getBuffer(),
+          splitKey.getKeyOffset(), splitKey.getKeyLength(), firstKey, 0, firstKey.length) < 0) {
+        return null;
+      }
+    }
+
     Path splitDir = new Path(getSplitsDir(hri), familyName);
     // A reference to the bottom half of the hsf store file.
     Reference r =