Index: src/main/java/org/apache/hadoop/hbase/util/Bytes.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/util/Bytes.java (revision 1596187) +++ src/main/java/org/apache/hadoop/hbase/util/Bytes.java (working copy) @@ -1587,6 +1587,47 @@ return binaryIncrementPos(val, amount); } + /** + * Increment the key to the next key + * @param key the key to increment + * @return a new byte array with the next key or null if the key could not be incremented because + * it's already at its max value. + */ + public static byte[] nextKey(byte[] key) { + byte[] nextStartRow = new byte[key.length]; + System.arraycopy(key, 0, nextStartRow, 0, key.length); + if (!nextKey(nextStartRow, nextStartRow.length)) { + return null; + } + return nextStartRow; + } + + /** + * Increment the key in-place to the next key + * @param key the key to increment + * @param length the length of the key + * @return true if the key can be incremented and false otherwise if the key is at its max value. 
+ */ + public static boolean nextKey(byte[] key, int length) { + return nextKey(key, 0, length); + } + + public static boolean nextKey(byte[] key, int offset, int length) { + if (length == 0) { + return false; + } + int i = offset + length - 1; + while (key[i] == -1) { + key[i] = 0; + i--; + if (i < offset) { + return false; + } + } + key[i] = (byte) (key[i] + 1); + return true; + } + /* increment/deincrement for positive value */ private static byte [] binaryIncrementPos(byte [] value, long amount) { long amo = amount; Index: src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java (revision 1596187) +++ src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java (working copy) @@ -597,14 +597,14 @@ FileSystem fs = p.getFileSystem(getConf()); FileStatus[] dirs = fs.listStatus(p); if (dirs == null) { - LOG.warn("Attempt to adopt ophan hdfs region skipped becuase no files present in " + + LOG.warn("Attempt to adopt orphan hdfs region skipped because no files present in " + p + ". 
This dir could probably be deleted."); return ; } String tableName = Bytes.toString(hi.getTableName()); TableInfo tableInfo = tablesInfo.get(tableName); - Preconditions.checkNotNull("Table " + tableName + "' not present!", tableInfo); + Preconditions.checkNotNull(tableInfo, "Table " + tableName + "' not present!"); HTableDescriptor template = tableInfo.getHTD(); // find min and max key values @@ -638,6 +638,11 @@ } } + // Start key and end key must not be same except when both are empty + // Otherwise, invalid region hri with same startkey and endkey will be created + if (!Bytes.equals(end, HConstants.EMPTY_END_ROW) && Bytes.equals(start, end)) { + end = Bytes.nextKey(start); + } // expand the range to include the range of all hfiles if (orphanRegionRange == null) { // first range @@ -938,6 +943,8 @@ } } + loadTableInfosForTablesWithNoRegion(); + return tablesInfo; } @@ -1940,6 +1947,8 @@ tablesInfo.put(tableName, modTInfo); } + loadTableInfosForTablesWithNoRegion(); + for (TableInfo tInfo : tablesInfo.values()) { TableIntegrityErrorHandler handler = tInfo.new IntegrityFixSuggester(tInfo, errors); if (!tInfo.checkRegionChain(handler)) { @@ -1949,6 +1958,28 @@ return tablesInfo; } + /** Loads table info's for tables that may not have been included, since there are no + * regions reported for the table, but table dir is there in hdfs + */ + private void loadTableInfosForTablesWithNoRegion() throws IOException { + Configuration conf = getConf(); + Path hbaseRoot = FSUtils.getRootDir(conf); + FileSystem fs = hbaseRoot.getFileSystem(conf); + Map allTables = new FSTableDescriptors(fs, hbaseRoot).getAll(); + for (HTableDescriptor htd : allTables.values()) { + if (checkMetaOnly && !htd.isMetaTable()) { + continue; + } + + String tableName = htd.getNameAsString(); + if (isTableIncluded(tableName) && !tablesInfo.containsKey(tableName)) { + TableInfo tableInfo = new TableInfo(tableName); + tableInfo.htds.add(htd); + tablesInfo.put(tableName, tableInfo); + } + } + } + /** * 
Merge hdfs data by moving from contained HbckInfo into targetRegionDir. * @return number of file move fixes done to merge regions. @@ -2230,9 +2261,8 @@ "Last region should end with an empty key. Creating a new " + "region and regioninfo in HDFS to plug the hole.", getTableInfo()); HTableDescriptor htd = getTableInfo().getHTD(); - // from curEndKey to EMPTY_START_ROW - HRegionInfo newRegion = new HRegionInfo(htd.getName(), curEndKey, - HConstants.EMPTY_START_ROW); + // from curEndKey to EMPTY_END_ROW + HRegionInfo newRegion = new HRegionInfo(htd.getName(), curEndKey, HConstants.EMPTY_END_ROW); HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd); LOG.info("Table region end key was not empty. Created new empty region: " + newRegion @@ -2431,6 +2461,12 @@ byte[] prevKey = null; byte[] problemKey = null; + + if (splits.size() == 0) { + // no region for this table + handler.handleHoleInRegionChain(HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW); + } + for (byte[] key : splits) { Collection ranges = regions.get(key); if (prevKey == null && !Bytes.equals(key, HConstants.EMPTY_BYTE_ARRAY)) { Index: src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java (revision 1596187) +++ src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildHole.java (working copy) @@ -87,7 +87,7 @@ assertEquals(1, htbls.length); assertErrors(doFsck(conf, false), new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.NOT_IN_META_OR_DEPLOYED, - ERROR_CODE.NOT_IN_META_OR_DEPLOYED, }); + ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.HOLE_IN_REGION_CHAIN}); } @org.junit.Rule Index: src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildOverlap.java =================================================================== --- 
src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildOverlap.java (revision 1596187) +++ src/test/java/org/apache/hadoop/hbase/util/hbck/TestOfflineMetaRebuildOverlap.java (working copy) @@ -97,7 +97,8 @@ new ERROR_CODE[] { ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.NOT_IN_META_OR_DEPLOYED, ERROR_CODE.NOT_IN_META_OR_DEPLOYED, - ERROR_CODE.NOT_IN_META_OR_DEPLOYED, }); + ERROR_CODE.NOT_IN_META_OR_DEPLOYED, + ERROR_CODE.HOLE_IN_REGION_CHAIN}); } @org.junit.Rule Index: src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java (revision 1596187) +++ src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java (working copy) @@ -111,6 +111,8 @@ private final static byte[][] ROWKEYS= new byte[][] { Bytes.toBytes("00"), Bytes.toBytes("50"), Bytes.toBytes("A0"), Bytes.toBytes("A5"), Bytes.toBytes("B0"), Bytes.toBytes("B5"), Bytes.toBytes("C0"), Bytes.toBytes("C5") }; + private final static byte[][] SINGLE_ROWKEY = new byte[][] { Bytes.toBytes("00") }; + private final static byte[][] EMPTY_ROWKEY = new byte[][] {}; @BeforeClass public static void setUpBeforeClass() throws Exception { @@ -296,6 +298,11 @@ dumpMeta(htd.getName()); } + //setupTable default with SPLITS + HTable setupTable(String tablename) throws Exception { + return setupTable(tablename, true, ROWKEYS); + } + /** * Setup a clean table before we start mucking with it. 
* @@ -303,15 +310,19 @@ * @throws InterruptedException * @throws KeeperException */ - HTable setupTable(String tablename) throws Exception { + HTable setupTable(String tablename, boolean withSplits, byte[][] rowkeys) throws Exception { HTableDescriptor desc = new HTableDescriptor(tablename); HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM)); desc.addFamily(hcd); // If a table has no CF's it doesn't get checked - TEST_UTIL.getHBaseAdmin().createTable(desc, SPLITS); + if (withSplits) { + TEST_UTIL.getHBaseAdmin().createTable(desc, SPLITS); + } else { + TEST_UTIL.getHBaseAdmin().createTable(desc); + } tbl = new HTable(TEST_UTIL.getConfiguration(), tablename); List puts = new ArrayList(); - for (byte[] row : ROWKEYS) { + for (byte[] row : rowkeys) { Put p = new Put(row); p.add(FAM, Bytes.toBytes("val"), row); puts.add(p); @@ -871,7 +882,7 @@ /** * This creates and fixes a bad table with a missing region -- hole in meta - * and data present but .regioinfino missing (an orphan hdfs region)in the fs. + * and data present but .regioninfo missing (an orphan hdfs region) in the fs. */ @Test public void testHDFSRegioninfoMissing() throws Exception { @@ -906,6 +917,136 @@ } /** + * This creates and fixes a bad table with single region with .regioninfo missing -- hole in meta + * and data present but .regioninfo missing (an orphan hdfs region) in the fs. 
+ */ + @Test + public void testHDFSRegioninfoMissingInSingleRegion() throws Exception { + String table = name.getMethodName(); + try { + setupTable(table, false, ROWKEYS); + assertEquals(ROWKEYS.length, countRows()); + // Flush the data for HFiles to be available + TEST_UTIL.getHBaseAdmin().flush(table); + // Mess it up by leaving a hole in the meta data + deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes(""), + Bytes.toBytes(""), false, false, false, true); + + HBaseFsck hbck = doFsck(conf, false); + assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.ORPHAN_HDFS_REGION }); + // holes are separate from overlap groups + assertEquals(0, hbck.getOverlapGroups(table).size()); + + // fix hole + doFsck(conf, true); + + // check that hole fixed + assertNoErrors(doFsck(conf, false)); + + assertEquals(ROWKEYS.length, countRows()); + } finally { + deleteTable(table); + } + } + + /** + * This creates and fixes a bad table with single region with .regioninfo missing -- hole in meta + * and no data and .regioninfo missing (an orphan hdfs region) in the fs. 
+ */ + @Test + public void testHDFSRegioninfoMissingSingleRegionNoData() throws Exception { + String table = name.getMethodName(); + try { + setupTable(table, false, EMPTY_ROWKEY); + assertEquals(EMPTY_ROWKEY.length, countRows()); + + // Mess it up by leaving a hole in the meta data + deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes(""), Bytes.toBytes(""), false, + false, false, true); + + HBaseFsck hbck = doFsck(conf, false); + assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.ORPHAN_HDFS_REGION }); + // holes are separate from overlap groups + assertEquals(0, hbck.getOverlapGroups(table).size()); + + // fix hole + doFsck(conf, true); + + // check that hole fixed + assertNoErrors(doFsck(conf, false)); + + assertEquals(EMPTY_ROWKEY.length, countRows()); + } finally { + deleteTable(table); + } + } + + /** + * This creates and fixes a bad table with splits/multiple regions with .regioninfo missing -- + * hole in meta and no data and .regioninfo missing (an orphan hdfs region) in the fs. 
+ */ + @Test + public void testHDFSRegioninfoMissingMultipleRegionNoData() throws Exception { + String table = name.getMethodName(); + try { + setupTable(table, true, EMPTY_ROWKEY); + assertEquals(EMPTY_ROWKEY.length, countRows()); + + // Mess it up by leaving a hole in the meta data + deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("C"), false, + false, false, true); + + HBaseFsck hbck = doFsck(conf, false); + assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.ORPHAN_HDFS_REGION }); + // holes are separate from overlap groups + assertEquals(0, hbck.getOverlapGroups(table).size()); + + // fix hole + doFsck(conf, true); + + // check that hole fixed + assertNoErrors(doFsck(conf, false)); + + assertEquals(EMPTY_ROWKEY.length, countRows()); + } finally { + deleteTable(table); + } + } + + /** + * This creates and fixes a bad table with single region and single keyvalue with .regioninfo missing + * -- hole in meta and data present but .regioninfo missing (an orphan hdfs region) in the fs. 
+ */ + @Test + public void testHDFSRegioninfoMissingSingleRegionSingleKV() throws Exception { + String table = name.getMethodName(); + try { + setupTable(table, false, SINGLE_ROWKEY); + assertEquals(SINGLE_ROWKEY.length, countRows()); + // Flush the data for HFiles to be available + TEST_UTIL.getHBaseAdmin().flush(table); + // Mess it up by leaving a hole in the meta data + deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes(""), Bytes.toBytes(""), false, + false, false, true); + + HBaseFsck hbck = doFsck(conf, false); + assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.ORPHAN_HDFS_REGION }); + // holes are separate from overlap groups + assertEquals(0, hbck.getOverlapGroups(table).size()); + + // fix hole + doFsck(conf, true); + + // check that hole fixed + assertNoErrors(doFsck(conf, false)); + + assertEquals(SINGLE_ROWKEY.length, countRows()); + } finally { + deleteTable(table); + } + } + + /** * This creates and fixes a bad table with a region that is missing meta and * not assigned to a region server. */ @Test @@ -1031,6 +1172,9 @@ deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("C"), Bytes.toBytes(""), false, false, true); // don't rm meta + // also remove the table directory in hdfs + deleteTableDir(table); + HBaseFsck hbck = doFsck(conf, false); assertErrors(hbck, new ERROR_CODE[] {ERROR_CODE.NOT_IN_HDFS, ERROR_CODE.NOT_IN_HDFS, ERROR_CODE.NOT_IN_HDFS, @@ -1047,6 +1191,16 @@ TEST_UTIL.getHBaseAdmin().tableExists(table)); } + public void deleteTableDir(String table) throws IOException { + Path rootDir = new Path(conf.get(HConstants.HBASE_DIR)); + FileSystem fs = rootDir.getFileSystem(conf); + Path p = new Path(rootDir, table); + HBaseFsck.debugLsr(conf, p); + boolean success = fs.delete(p, true); + LOG.info("Deleted " + p + " successfully? " + success); + HBaseFsck.debugLsr(conf, p); + } + /** * when the hbase.version file missing, It is fix the fault. */