Index: src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (revision 1138018)
+++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (working copy)
@@ -2795,12 +2795,47 @@
    * @param rootDir Root directory for HBase instance
    * @param conf
    * @param hTableDescriptor
+   * @param hlog shared HLog
    * @return new HRegion
    *
    * @throws IOException
    */
   public static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
       final Configuration conf,
+      final HTableDescriptor hTableDescriptor,
+      final HLog hlog)
+  throws IOException {
+    LOG.info("creating HRegion " + info.getTableNameAsString()
+        + " HTD == " + hTableDescriptor + " RootDir = " + rootDir
+        + " Table name == " + info.getTableNameAsString());
+
+    Path tableDir =
+        HTableDescriptor.getTableDir(rootDir, info.getTableName());
+    Path regionDir = HRegion.getRegionDir(tableDir, info.getEncodedName());
+    FileSystem fs = FileSystem.get(conf);
+    fs.mkdirs(regionDir);
+    FSUtils.createTableDescriptor(fs, hTableDescriptor, tableDir);
+    HRegion region = HRegion.newHRegion(tableDir, hlog, fs, conf, info, null);
+    region.initialize();
+    return region;
+  }
+
+  /**
+   * Convenience method creating new HRegions. Used by createTable and by the
+   * bootstrap code in the HMaster constructor.
+   * Note, this method creates an {@link HLog} for the created region. It
+   * needs to be closed explicitly. Use {@link HRegion#getLog()} to get
+   * access.
+   * @param info Info for region to create.
+   * @param rootDir Root directory for HBase instance
+   * @param conf
+   * @param hTableDescriptor
+   * @return new HRegion
+   *
+   * @throws IOException
+   */
+  public static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
+      final Configuration conf, final HTableDescriptor hTableDescriptor)
   throws IOException {
     LOG.info("creating HRegion " + info.getTableNameAsString()
Index: src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java (revision 1138018)
+++ src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java (working copy)
@@ -64,6 +64,28 @@
   }
 
   /**
+   * Adds a META row for each of the specified new regions.
+   * @param catalogTracker CatalogTracker
+   * @param regionInfos region information list
+   * @throws IOException if problem connecting or updating meta
+   */
+  public static void addRegionsToMeta(CatalogTracker catalogTracker,
+      List<HRegionInfo> regionInfos)
+  throws IOException {
+    List<Put> puts = new ArrayList<Put>();
+    for (HRegionInfo regionInfo : regionInfos) {
+      Put put = new Put(regionInfo.getRegionName());
+      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
+          Writables.getBytes(regionInfo));
+      puts.add(put);
+      LOG.debug("Added region " + regionInfo.getRegionNameAsString() + " to META");
+    }
+    catalogTracker.waitForMetaServerConnectionDefault().put(
+        CatalogTracker.META_REGION, puts);
+    LOG.info("Added " + puts.size() + " regions to META");
+  }
+
+  /**
    * Offline parent in meta.
    * Used when splitting.
    * @param catalogTracker
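For context, a minimal usage sketch of the new batched helper, not part of the patch itself: it collects the HRegionInfo of freshly created regions and writes them to .META. with a single multi-row put instead of one MetaEditor.addRegionToMeta() call (and round trip) per region. The class and method names below (BatchedMetaInsertExample, registerRegions, createdRegions) are illustrative only.

// Illustrative sketch only -- not part of the patch. Names here are made up.
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.regionserver.HRegion;

public class BatchedMetaInsertExample {
  /**
   * Registers every given region in .META. with one call to the new
   * MetaEditor.addRegionsToMeta() instead of one addRegionToMeta() per region.
   */
  public static void registerRegions(CatalogTracker catalogTracker,
      List<HRegion> createdRegions) throws IOException {
    List<HRegionInfo> regionInfos = new ArrayList<HRegionInfo>();
    for (HRegion region : createdRegions) {
      regionInfos.add(region.getRegionInfo());
    }
    // A single multi-row put against .META. covers all of the new regions.
    MetaEditor.addRegionsToMeta(catalogTracker, regionInfos);
  }
}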
Index: src/main/java/org/apache/hadoop/hbase/master/HMaster.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/master/HMaster.java (revision 1138018)
+++ src/main/java/org/apache/hadoop/hbase/master/HMaster.java (working copy)
@@ -1,5 +1,5 @@
 /**
- * Copyright 2010 The Apache Software Foundation
+ * Copyright 2011 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
@@ -26,6 +26,7 @@
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicReference;
@@ -39,7 +40,6 @@
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerLoad;
-
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
@@ -54,8 +54,8 @@
 import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.MetaScanner;
+import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
-import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.executor.ExecutorService.ExecutorType;
 import org.apache.hadoop.hbase.ipc.HBaseRPC;
@@ -75,15 +75,16 @@
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.replication.regionserver.Replication;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.InfoServer;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Sleeper;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.VersionInfo;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.zookeeper.ClusterId;
 import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker;
 import org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
@@ -963,26 +964,44 @@
     }
 
     storeTableDescriptor(hTableDescriptor);
 
-    for (HRegionInfo newRegion : newRegions) {
-      // 1. Set table enabling flag up in zk.
-      try {
-        assignmentManager.getZKTable().setEnabledTable(tableName);
-      } catch (KeeperException e) {
-        throw new IOException("Unable to ensure that the table will be" +
+    // 1. Set table enabling flag up in zk.
+    try {
+      assignmentManager.getZKTable().setEnabledTable(tableName);
+    } catch (KeeperException e) {
+      throw new IOException("Unable to ensure that the table will be" +
           " enabled because of a ZooKeeper issue", e);
-      }
+    }
+    HLog hlog = null;
+    List<HRegionInfo> regionInfos = new ArrayList<HRegionInfo>();
+    final int batchSize = 10;
+    for (int regionIdx = 0; regionIdx < newRegions.length; regionIdx++) {
+      HRegionInfo newRegion = newRegions[regionIdx];
       // 2. Create HRegion
-      HRegion region = HRegion.createHRegion(newRegion,
-        fileSystemManager.getRootDir(), conf, hTableDescriptor);
+      HRegion region;
+      if (hlog == null) {
+        region = HRegion.createHRegion(newRegion,
+          fileSystemManager.getRootDir(), conf, hTableDescriptor);
+        hlog = region.getLog();
+      } else {
+        region = HRegion.createHRegion(newRegion,
+          fileSystemManager.getRootDir(), conf, hTableDescriptor, hlog);
+      }
 
-      // 3. Insert into META
-      MetaEditor.addRegionToMeta(catalogTracker, region.getRegionInfo());
+      regionInfos.add(region.getRegionInfo());
+      if (regionIdx % batchSize == 0) {
+        // 3. Insert into META
+        MetaEditor.addRegionsToMeta(catalogTracker, regionInfos);
+        regionInfos.clear();
+      }
 
       // 4. Close the new region to flush to disk. Close log file too.
       region.close();
-      region.getLog().closeAndDelete();
     }
+    hlog.closeAndDelete();
+    if (regionInfos.size() > 0) {
+      MetaEditor.addRegionsToMeta(catalogTracker, regionInfos);
+    }
 
     // 5. Trigger immediate assignment of the regions in round-robin fashion
     List<ServerName> servers = serverManager.getOnlineServersList();
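The createTable() change above boils down to a simple lifecycle: the first createHRegion() call builds the HLog, every later call reuses it through the new overload, META rows are queued and flushed in batches, and the shared log is closed exactly once after the loop. A self-contained sketch of the shared-HLog part of that pattern follows; it is illustrative only (SharedHLogExample and createRegionsSharingLog are hypothetical names, and the try/finally is added here for safety; the patched HMaster.createTable() does not use one).

// Illustrative sketch only -- not part of the patch.
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.wal.HLog;

public class SharedHLogExample {
  public static List<HRegionInfo> createRegionsSharingLog(HRegionInfo[] newRegions,
      Path rootDir, Configuration conf, HTableDescriptor htd) throws IOException {
    List<HRegionInfo> created = new ArrayList<HRegionInfo>();
    HLog hlog = null;
    try {
      for (HRegionInfo info : newRegions) {
        // First region: let createHRegion() build the HLog. Later regions:
        // pass that HLog back in via the new overload so no extra logs are created.
        HRegion region = (hlog == null)
            ? HRegion.createHRegion(info, rootDir, conf, htd)
            : HRegion.createHRegion(info, rootDir, conf, htd, hlog);
        if (hlog == null) {
          hlog = region.getLog();
        }
        created.add(region.getRegionInfo());
        region.close();            // flush the (empty) region to disk
      }
    } finally {
      if (hlog != null) {
        hlog.closeAndDelete();     // the shared log is closed exactly once
      }
    }
    return created;
  }
}

With N regions this presumably opens one log and one set of log files instead of N, which together with the batched META puts is where the createTable() speed-up comes from.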