diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index c066803..70f9132 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -926,8 +926,9 @@ LOG.info("Min max keys are : [" + Bytes.toString(orphanRegionRange.getFirst()) + ", " + Bytes.toString(orphanRegionRange.getSecond()) + ")"); - // create new region on hdfs. move data into place. - HRegionInfo hri = new HRegionInfo(template.getTableName(), orphanRegionRange.getFirst(), orphanRegionRange.getSecond()); + // create new region on hdfs. move data into place. + HRegionInfo hri = new HRegionInfo(template.getTableName(), orphanRegionRange.getFirst(), + Bytes.add(orphanRegionRange.getSecond(), new byte[1])); LOG.info("Creating new region : " + hri); HRegion region = HBaseFsckRepair.createHDFSRegionDir(getConf(), hri, template); Path target = region.getRegionFileSystem().getRegionDir(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java index bbb6b53..742cc0b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java @@ -307,6 +307,19 @@ } /** + * Counts the number of rows to verify data loss or non-dataloss. 
+ */ + int countRows(byte[] start, byte[] end) throws IOException { + Scan s = new Scan(start, end); + ResultScanner rs = tbl.getScanner(s); + int i = 0; + while (rs.next() != null) { + i++; + } + return i; + } + + /** * delete table in preparation for next test * * @param tablename diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java index 21935f3..056c97e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java @@ -1576,6 +1576,52 @@ } /** + * This creates and fixes a bad table with a missing region -- hole in meta and data present but + * .regioninfo missing (an orphan hdfs region) in the fs. Finally we check that every row is present + * in the correct region. + */ + @Test(timeout = 180000) + public void testHDFSRegioninfoMissingAndCheckRegionBoundary() throws Exception { + TableName table = TableName.valueOf("tableHDFSRegioninfoMissing"); + try { + setupTable(table); + assertEquals(ROWKEYS.length, countRows()); + + // Mess it up by leaving a hole in the meta data + admin.disableTable(table); + deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"), true, + true, false, true, HRegionInfo.DEFAULT_REPLICA_ID); + admin.enableTable(table); + + HBaseFsck hbck = doFsck(conf, false); + assertErrors(hbck, + new HBaseFsck.ErrorReporter.ERROR_CODE[] { + HBaseFsck.ErrorReporter.ERROR_CODE.ORPHAN_HDFS_REGION, + HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED, + HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN }); + // holes are separate from overlap groups + assertEquals(0, hbck.getOverlapGroups(table).size()); + + // fix hole + doFsck(conf, true); + + // check that hole fixed + assertNoErrors(doFsck(conf, false)); + + // check that data belongs to the correct region; every scan should get one row. 
+ for (int i = 0; i < ROWKEYS.length; i++) { + if (i != ROWKEYS.length - 1) { + assertEquals(1, countRows(ROWKEYS[i], ROWKEYS[i + 1])); + } else { + assertEquals(1, countRows(ROWKEYS[i], null)); + } + } + + } finally { + cleanupTable(table); + } + } + /** * This creates and fixes a bad table with a region that is missing meta and * not assigned to a region server. */