Index: hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java	(revision 1577831)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java	(working copy)
@@ -490,6 +490,7 @@
    * LQI's corresponding to the resultant hfiles.
    *
    * protected for testing
+   * @throws IOException if the region boundaries found in hbase:meta are inconsistent (hole)
    */
   protected List<LoadQueueItem> groupOrSplit(Multimap<ByteBuffer, LoadQueueItem> regionGroups,
       final LoadQueueItem item, final HTable table,
@@ -530,6 +531,30 @@
       idx = -(idx + 1) - 1;
     }
     final int indexForCallable = idx;
+
+    /*
+     * We can consider there is a region hole under the following conditions: 1) if idx < 0, then
+     * the first region info is lost. 2) if the endkey of a region is not equal to the startkey of
+     * the next region. 3) if the endkey of the last region is not empty.
+     */
+    if (indexForCallable < 0) {
+      throw new IOException("The first region info for table "
+          + Bytes.toString(table.getTableName())
+          + " can't be found in hbase:meta. Please use hbck tool to fix it first.");
+    } else if ((indexForCallable == startEndKeys.getFirst().length - 1)
+        && !Bytes.equals(startEndKeys.getSecond()[indexForCallable], HConstants.EMPTY_BYTE_ARRAY)) {
+      throw new IOException("The last region info for table "
+          + Bytes.toString(table.getTableName())
+          + " can't be found in hbase:meta. Please use hbck tool to fix it first.");
+    } else if (indexForCallable + 1 < startEndKeys.getFirst().length
+        && !(Bytes.compareTo(startEndKeys.getSecond()[indexForCallable],
+          startEndKeys.getFirst()[indexForCallable + 1]) == 0)) {
+      throw new IOException("The endkey of one region for table "
+          + Bytes.toString(table.getTableName())
+          + " is not equal to the startkey of the next region in hbase:meta."
+          + " Please use hbck tool to fix it first.");
+    }
+
     boolean lastKeyInRange =
       Bytes.compareTo(last, startEndKeys.getSecond()[idx]) < 0 ||
        Bytes.equals(startEndKeys.getSecond()[idx], HConstants.EMPTY_BYTE_ARRAY);
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java	(revision 1577831)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java	(working copy)
@@ -35,6 +35,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -44,6 +45,9 @@
 import org.apache.hadoop.hbase.LargeTests;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.catalog.CatalogTracker;
+import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
@@ -119,7 +123,7 @@
     try {
       LOG.info("Creating table " + table);
       HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
-      for (int i = 0; i < 10; i++) {
+      for (int i = 0; i < cfs; i++) {
         htd.addFamily(new HColumnDescriptor(family(i)));
       }
 
@@ -129,6 +133,28 @@
     }
   }
 
+  /**
+   * Creates a table with the given table name, the specified number of column families
+   * and the given split keys, if the table does not already exist.
+   * @param table the table name
+   * @param cfs the number of column families to create
+   * @param splitKeys the keys used to pre-split the table
+   */
+  private void setupTableWithSplitkeys(String table, int cfs, byte[][] splitKeys)
+      throws IOException {
+    try {
+      LOG.info("Creating table " + table);
+      HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
+      for (int i = 0; i < cfs; i++) {
+        htd.addFamily(new HColumnDescriptor(family(i)));
+      }
+
+      util.getHBaseAdmin().createTable(htd, splitKeys);
+    } catch (TableExistsException tee) {
+      LOG.info("Table " + table + " already exists");
+    }
+  }
+
   private Path buildBulkFiles(String table, int value) throws Exception {
     Path dir = util.getDataTestDirOnTestFS(table);
     Path bulk1 = new Path(dir, table+value);
@@ -199,7 +225,7 @@
   /**
    * Checks that all columns have the expected value and that there is the
    * expected number of rows.
-   * @throws IOException 
+   * @throws IOException
    */
   void assertExpectedTable(String table, int count, int value) throws IOException {
     HTable t = null;
@@ -403,5 +429,61 @@
     fail("doBulkLoad should have thrown an exception");
   }
 
+  @Test
+  public void testGroupOrSplitWhenRegionHoleExistsInMeta() throws Exception {
+    String tableName = "testGroupOrSplitWhenRegionHoleExistsInMeta";
+    byte[][] splitKeys = new byte[][] { Bytes.toBytes("row_00000100") };
+
+    setupTableWithSplitkeys(tableName, 10, splitKeys);
+    HTable table = new HTable(util.getConfiguration(), Bytes.toBytes(tableName));
+    Path dir = buildBulkFiles(tableName, 2);
+
+    final AtomicInteger countedLqis = new AtomicInteger();
+    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration()) {
+
+      @Override
+      protected List<LoadQueueItem>
+          groupOrSplit(Multimap<ByteBuffer, LoadQueueItem> regionGroups, final LoadQueueItem item,
+          final HTable htable, final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
+        List<LoadQueueItem> lqis = super.groupOrSplit(regionGroups, item, htable, startEndKeys);
+        if (lqis != null) {
+          countedLqis.addAndGet(lqis.size());
+        }
+        return lqis;
+      }
+    };
+
+    // do bulkload when there is no region hole in hbase:meta.
+    try {
+      loader.doBulkLoad(dir, table);
+    } catch (Exception e) {
+      LOG.error("exception=", e);
+    }
+    // check if all the data are loaded into the table.
+    this.assertExpectedTable(tableName, ROWCOUNT, 2);
+
+    dir = buildBulkFiles(tableName, 3);
+
+    // Mess it up by leaving a hole in the hbase:meta
+    CatalogTracker ct = new CatalogTracker(util.getConfiguration());
+    List<HRegionInfo> regionInfos = MetaReader.getTableRegions(ct, TableName.valueOf(tableName));
+    for (HRegionInfo regionInfo : regionInfos) {
+      if (Bytes.equals(regionInfo.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
+        MetaEditor.deleteRegion(ct, regionInfo);
+        break;
+      }
+    }
+
+    try {
+      loader.doBulkLoad(dir, table);
+    } catch (Exception e) {
+      LOG.error("exception=", e);
+      assertTrue("IOException expected", e instanceof IOException);
+    }
+
+    table.close();
+
+    this.assertExpectedTable(tableName, ROWCOUNT, 2);
+  }
 }