Index: src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java	(revision 1354813)
+++ src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java	(working copy)
@@ -1092,3 +1092,85 @@
       deleteTable(table);
     }
   }
+
+  @Test
+  public void testTwoRegionsOfDisabledTableDeployed() throws Exception {
+    String table = "testTwoRegionsOfDisabledTableDeployed";
+    try {
+      MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+      assertTrue(cluster.waitForActiveAndReadyMaster());
+      // Create a ZKW to use in the test
+      ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL);
+
+      FileSystem filesystem = FileSystem.get(conf);
+      Path rootdir = filesystem.makeQualified(new Path(conf
+          .get(HConstants.HBASE_DIR)));
+
+      byte[][] SPLIT_KEYS = new byte[][] { new byte[0], Bytes.toBytes("aaa"),
+          Bytes.toBytes("bbb"), Bytes.toBytes("ccc"), Bytes.toBytes("ddd") };
+      HTableDescriptor htdDisabled = new HTableDescriptor(Bytes.toBytes(table));
+      htdDisabled.addFamily(new HColumnDescriptor(FAM));
+
+      // Write the .tableinfo
+      FSTableDescriptors
+          .createTableDescriptor(filesystem, rootdir, htdDisabled);
+      // Typed list: a raw List would make the remove() calls below fail to compile.
+      List<HRegionInfo> disabledRegions = TEST_UTIL.createMultiRegionsInMeta(
+          TEST_UTIL.getConfiguration(), htdDisabled, SPLIT_KEYS);
+
+      // Let's just assign everything to first RS
+      HRegionServer hrs = cluster.getRegionServer(0);
+      ServerName serverName = hrs.getServerName();
+
+      // Create region files by cycling the table through disable/enable.
+      TEST_UTIL.getHBaseAdmin().disableTable(table);
+      TEST_UTIL.getHBaseAdmin().enableTable(table);
+
+      // Region of disabled table was opened on RS
+      TEST_UTIL.getHBaseAdmin().disableTable(table);
+      // Make the region [,aaa) to be deployed in one RS
+      HRegionInfo region = disabledRegions.remove(0);
+      ZKAssign.createNodeOffline(zkw, region, serverName);
+      hrs.openRegion(region);
+      assertTrue(waitForRegionOpenedInZK(zkw, region));
+
+      // Make the region [ddd,) to be deployed in one RS
+      region = disabledRegions.remove(disabledRegions.size() - 1);
+      ZKAssign.createNodeOffline(zkw, region, serverName);
+      hrs.openRegion(region);
+      assertTrue(waitForRegionOpenedInZK(zkw, region));
+
+      HBaseFsck hbck = doFsck(conf, false);
+      // We expect there to be 2 errors. ie. 2 regions unwantedly in deployed state.
+      // But HBCK giving 3 errors.
+      // [SHOULD_NOT_BE_DEPLOYED, SHOULD_NOT_BE_DEPLOYED, HOLE_IN_REGION_CHAIN]
+      // The 3rd one coming when the HBCK doing onlineConsistencyRepair.
+      // That time the regions from table comes as [,aaa) [ddd,)
+      // This one will be reported as hole.
+      assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.SHOULD_NOT_BE_DEPLOYED,
+          ERROR_CODE.SHOULD_NOT_BE_DEPLOYED });
+    } finally {
+      TEST_UTIL.getHBaseAdmin().enableTable(table);
+      deleteTable(table);
+    }
+  }
+
+  /**
+   * Polls ZK until the region's transition node reports RS_ZK_REGION_OPENED.
+   * @return true if the region opened within REGION_ONLINE_TIMEOUT polls
+   *         (100ms apart), false on timeout.
+   */
+  private boolean waitForRegionOpenedInZK(ZooKeeperWatcher zkw,
+      HRegionInfo region) throws Exception {
+    for (int i = 0; i < REGION_ONLINE_TIMEOUT; i++) {
+      RegionTransitionData rtd = ZKAssign.getData(zkw,
+          region.getEncodedName());
+      if (rtd != null && rtd.getEventType() == EventType.RS_ZK_REGION_OPENED) {
+        return true;
+      }
+      Thread.sleep(100);
+    }
+    return false;
+  }
+
+  @org.junit.Rule public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu = new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();