Index: src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java (revision 1136656) +++ src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java (working copy) @@ -173,7 +173,7 @@ throws IOException { HRegion r = new HRegion(closedRegion.getTableDir(), closedRegion.getLog(), closedRegion.getFilesystem(), closedRegion.getConf(), - closedRegion.getRegionInfo(), null); + closedRegion.getRegionInfo(), closedRegion.getTableDesc(), null); r.initialize(); return r; } Index: src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java (revision 1136656) +++ src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java (working copy) @@ -154,7 +154,7 @@ for (HRegion r: daughters) { // Open so can count its content. HRegion openRegion = HRegion.openHRegion(this.testdir, r.getRegionInfo(), - r.getLog(), r.getConf()); + r.getTableDesc(), r.getLog(), r.getConf()); try { int count = countRows(openRegion); assertTrue(count > 0 && count != rowcount); @@ -209,7 +209,7 @@ for (HRegion r: daughters) { // Open so can count its content. 
HRegion openRegion = HRegion.openHRegion(this.testdir, r.getRegionInfo(), - r.getLog(), r.getConf()); + r.getTableDesc(), r.getLog(), r.getConf()); try { int count = countRows(openRegion); assertTrue(count > 0 && count != rowcount); @@ -254,6 +254,7 @@ htd.addFamily(hcd); HRegionInfo hri = new HRegionInfo(htd.getName(), STARTROW, ENDROW); HRegion.createHRegion(hri, testdir, TEST_UTIL.getConfiguration(), htd); - return HRegion.openHRegion(testdir, hri, wal, TEST_UTIL.getConfiguration()); + return HRegion.openHRegion(testdir, hri, htd, wal, + TEST_UTIL.getConfiguration()); } } \ No newline at end of file Index: src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java (revision 1136656) +++ src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java (working copy) @@ -87,7 +87,7 @@ HLog hlog = new HLog(fs, logdir, oldLogDir, conf); HRegion.createHRegion(info, basedir, conf, htd); Path tableDir = new Path(basedir, Bytes.toString(htd.getName())); - HRegion region = new HRegion(tableDir, hlog, fs, conf, info, null); + HRegion region = new HRegion(tableDir, hlog, fs, conf, info, htd, null); store = new Store(basedir, region, hcd, fs, conf); TEST_FILE = StoreFile.getRandomFilename(fs, store.getHomedir()); Index: src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java (revision 1136656) +++ src/test/java/org/apache/hadoop/hbase/regionserver/handler/TestOpenRegionHandler.java (working copy) @@ -238,7 +238,7 @@ HRegion region = HRegion.createHRegion(hri, HBaseTestingUtility.getTestDir(), HTU .getConfiguration(), htd); - OpenRegionHandler handler = new OpenRegionHandler(server, rss, hri) { + OpenRegionHandler 
handler = new OpenRegionHandler(server, rss, hri, htd) { HRegion openRegion() { // Open region first, then remove znode as though it'd been hijacked. //HRegion region = super.openRegion(); Index: src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java (revision 1136656) +++ src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java (working copy) @@ -165,7 +165,7 @@ wal3.setSequenceNumber(wal2.getSequenceNumber()); try { final HRegion region = new HRegion(basedir, wal3, this.fs, this.conf, hri, - null); + htd, null); long seqid = region.initialize(); assertTrue(seqid > wal3.getSequenceNumber()); @@ -193,11 +193,11 @@ final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableNameStr); final Path basedir = new Path(this.hbaseRootDir, tableNameStr); deleteDir(basedir); - HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr); + final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr); HRegion region2 = HRegion.createHRegion(hri, hbaseRootDir, this.conf, htd); HLog wal = createWAL(this.conf); - HRegion region = HRegion.openHRegion(hri, wal, this.conf); + HRegion region = HRegion.openHRegion(hri, htd, wal, this.conf); Path f = new Path(basedir, "hfile"); HFile.Writer writer = new HFile.Writer(this.fs, f); byte [] family = htd.getFamilies().iterator().next().getName(); @@ -218,7 +218,7 @@ runWALSplit(newConf); HLog wal2 = createWAL(newConf); HRegion region2 = new HRegion(basedir, wal2, FileSystem.get(newConf), - newConf, hri, null); + newConf, hri, htd, null); long seqid2 = region2.initialize(); assertTrue(seqid2 > -1); @@ -257,7 +257,7 @@ // of the families during the load of edits so its seqid is not same as // others to test we do right thing when different seqids. 
HLog wal = createWAL(this.conf); - HRegion region = new HRegion(basedir, wal, this.fs, this.conf, hri, null); + HRegion region = new HRegion(basedir, wal, this.fs, this.conf, hri, htd, null); long seqid = region.initialize(); // HRegionServer usually does this. It knows the largest seqid across all regions. wal.setSequenceNumber(seqid); @@ -282,7 +282,7 @@ wal.close(); runWALSplit(this.conf); HLog wal2 = createWAL(this.conf); - HRegion region2 = new HRegion(basedir, wal2, this.fs, this.conf, hri, null) { + HRegion region2 = new HRegion(basedir, wal2, this.fs, this.conf, hri, htd, null) { @Override protected boolean restoreEdit(Store s, KeyValue kv) { super.restoreEdit(s, kv); @@ -317,7 +317,7 @@ // Make a new wal for new region open. HLog wal3 = createWAL(newConf); final AtomicInteger countOfRestoredEdits = new AtomicInteger(0); - HRegion region3 = new HRegion(basedir, wal3, newFS, newConf, hri, null) { + HRegion region3 = new HRegion(basedir, wal3, newFS, newConf, hri, htd, null) { @Override protected boolean restoreEdit(Store s, KeyValue kv) { boolean b = super.restoreEdit(s, kv); @@ -409,7 +409,7 @@ final AtomicInteger flushcount = new AtomicInteger(0); try { final HRegion region = - new HRegion(basedir, newWal, newFS, newConf, hri, null) { + new HRegion(basedir, newWal, newFS, newConf, hri, htd, null) { protected boolean internalFlushcache( final HLog wal, final long myseqid, MonitoredTask status) throws IOException { Index: src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java (revision 1136656) +++ src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java (working copy) @@ -56,7 +56,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; -import 
org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge; import org.apache.hadoop.hbase.util.ManualEnvironmentEdge; import org.mockito.Mockito; @@ -129,7 +128,7 @@ htd.addFamily(hcd); HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false); HLog hlog = new HLog(fs, logdir, oldLogDir, conf); - HRegion region = new HRegion(basedir, hlog, fs, conf, info, null); + HRegion region = new HRegion(basedir, hlog, fs, conf, info, htd, null); store = new Store(basedir, region, hcd, fs, conf); } Index: src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java (revision 1136656) +++ src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java (working copy) @@ -183,7 +183,7 @@ //HRegionInfo info = new HRegionInfo(tableName, null, null, false); HRegion r = new HRegion(closedRegion.getTableDir(), closedRegion.getLog(), closedRegion.getFilesystem(), closedRegion.getConf(), - closedRegion.getRegionInfo(), null); + closedRegion.getRegionInfo(), closedRegion.getTableDesc(), null); r.initialize(); // this following piece is a hack. currently a coprocessorHost Index: src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java (revision 1136656) +++ src/test/java/org/apache/hadoop/hbase/coprocessor/TestWALObserver.java (working copy) @@ -273,9 +273,9 @@ // Make a new wal for new region open. 
HLog wal2 = createWAL(newConf); Path tableDir = - HTableDescriptor.getTableDir(hbaseRootDir, hri.getTableName()); + HTableDescriptor.getTableDir(hbaseRootDir, hri.getTableName()); HRegion region = new HRegion(tableDir, wal2, FileSystem.get(newConf), - newConf, hri, TEST_UTIL.getHBaseCluster().getRegionServer(0)); + newConf, hri, htd, TEST_UTIL.getHBaseCluster().getRegionServer(0)); long seqid2 = region.initialize(); SampleRegionWALObserver cp2 = Index: src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java (revision 1136656) +++ src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java (working copy) @@ -190,7 +190,7 @@ // Now verify that we can read all the rows from regions 0, 1 // in the new merged region. - HRegion merged = HRegion.openHRegion(mergedInfo, log, this.conf); + HRegion merged = HRegion.openHRegion(mergedInfo, this.desc, log, this.conf); verifyMerge(merged, upperbound); merged.close(); LOG.info("Verified " + msg); Index: src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java (revision 1136656) +++ src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java (working copy) @@ -567,7 +567,7 @@ this.splitdir, hri); HRegion r = HRegion.newHRegion(this.parent.getTableDir(), this.parent.getLog(), fs, this.parent.getConf(), - hri, rsServices); + hri, this.parent.getTableDesc(), rsServices); r.readRequestsCount.set(this.parent.getReadRequestsCount() / 2); r.writeRequestsCount.set(this.parent.getWriteRequestsCount() / 2); HRegion.moveInitialFilesIntoPlace(fs, regionDir, r.getRegionDir()); Index: src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java =================================================================== --- 
src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (revision 1136686) +++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (working copy) @@ -63,6 +63,7 @@ import org.apache.hadoop.hbase.HServerAddress; import org.apache.hadoop.hbase.HServerInfo; import org.apache.hadoop.hbase.HServerLoad; +import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MasterAddressTracker; import org.apache.hadoop.hbase.NotServingRegionException; @@ -2268,15 +2269,47 @@ LOG.info("Received request to open region: " + region.getRegionNameAsString()); if (this.stopped) throw new RegionServerStoppedException(); + HTableDescriptor htd = getHTableDescriptor(region.getTableName()); if (region.isRootRegion()) { - this.service.submit(new OpenRootHandler(this, this, region)); + this.service.submit(new OpenRootHandler(this, this, region, htd)); } else if(region.isMetaRegion()) { - this.service.submit(new OpenMetaHandler(this, this, region)); + this.service.submit(new OpenMetaHandler(this, this, region, htd)); } else { - this.service.submit(new OpenRegionHandler(this, this, region)); + this.service.submit(new OpenRegionHandler(this, this, region, htd)); } } + // Keep cache of htds so we can avoid going to fs every time. Have entries + // expire after ten seconds if not used so we don't miss schema edits. + private final TreeMap cacheOfHTDs = + new TreeMap(Bytes.BYTES_COMPARATOR); + private static final byte [] CACHE_OF_HTDS_INSERTION_TIME = + Bytes.toBytes("CACHE_OF_HTDS_INSERTION_TIME"); + + synchronized HTableDescriptor getHTableDescriptor(final byte [] tablename) + throws IOException { + // Look in cache of descriptors. + // TODO: Break out this code and make unit test. 
+ HTableDescriptor htd = this.cacheOfHTDs.get(tablename); + long now = System.currentTimeMillis(); + if (htd != null) { + byte [] bytes = htd.getValue(CACHE_OF_HTDS_INSERTION_TIME); + if (bytes != null) { + long insertionTime = Bytes.toLong(bytes); + long diff = now - insertionTime; + // If descriptor is < 10 seconds old, then use it, else reget. + if (diff < (10 * 1000)) { + htd.setValue(CACHE_OF_HTDS_INSERTION_TIME, Bytes.toBytes(now)); + return htd; + } + } + } + htd = FSUtils.getTableDescriptor(this.fs, this.rootDir, tablename); + this.cacheOfHTDs.put(tablename, htd); + htd.setValue(CACHE_OF_HTDS_INSERTION_TIME, Bytes.toBytes(now)); + return htd; + } + @Override @QosPriority(priority=HIGH_QOS) public void openRegions(List regions) Index: src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java (revision 1136656) +++ src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java (working copy) @@ -26,6 +26,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.regionserver.HRegion; @@ -45,6 +46,7 @@ private final RegionServerServices rsServices; private final HRegionInfo regionInfo; + private final HTableDescriptor htd; // We get version of our znode at start of open process and monitor it across // the total open. 
We'll fail the open if someone hijacks our znode; we can @@ -52,16 +54,18 @@ private volatile int version = -1; public OpenRegionHandler(final Server server, - final RegionServerServices rsServices, HRegionInfo regionInfo) { - this(server, rsServices, regionInfo, EventType.M_RS_OPEN_REGION); + final RegionServerServices rsServices, HRegionInfo regionInfo, + HTableDescriptor htd) { + this (server, rsServices, regionInfo, htd, EventType.M_RS_OPEN_REGION); } protected OpenRegionHandler(final Server server, final RegionServerServices rsServices, final HRegionInfo regionInfo, - EventType eventType) { + final HTableDescriptor htd, EventType eventType) { super(server, eventType); this.rsServices = rsServices; this.regionInfo = regionInfo; + this.htd = htd; } public HRegionInfo getRegionInfo() { @@ -184,7 +188,7 @@ // Was there an exception opening the region? This should trigger on // InterruptedException too. If so, we failed. - return !t.interrupted() && t.getException() == null; + return !Thread.interrupted() && t.getException() == null; } /** @@ -269,8 +273,9 @@ try { // Instantiate the region. This also periodically tickles our zk OPENING // state so master doesn't timeout this region in transition. - region = HRegion.openHRegion(tableDir, this.regionInfo, this.rsServices.getWAL(), - this.server.getConfiguration(), this.rsServices, + region = HRegion.openHRegion(tableDir, this.regionInfo, this.htd, + this.rsServices.getWAL(), this.server.getConfiguration(), + this.rsServices, new CancelableProgressable() { public boolean progress() { // We may lose the znode ownership during the open. Currently its @@ -296,8 +301,9 @@ try { // Instantiate the region. This also periodically tickles our zk OPENING // state so master doesn't timeout this region in transition. 
- region = HRegion.openHRegion(this.regionInfo, this.rsServices.getWAL(), - this.server.getConfiguration(), this.rsServices, + region = HRegion.openHRegion(this.regionInfo, this.htd, + this.rsServices.getWAL(), this.server.getConfiguration(), + this.rsServices, new CancelableProgressable() { public boolean progress() { // We may lose the znode ownership during the open. Currently its @@ -375,4 +381,4 @@ private boolean isGoodVersion() { return this.version != -1; } -} +} \ No newline at end of file Index: src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRootHandler.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRootHandler.java (revision 1136656) +++ src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRootHandler.java (working copy) @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.regionserver.handler; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.regionserver.RegionServerServices; @@ -30,7 +31,8 @@ */ public class OpenRootHandler extends OpenRegionHandler { public OpenRootHandler(final Server server, - final RegionServerServices rsServices, HRegionInfo regionInfo) { - super(server, rsServices, regionInfo, EventType.M_RS_OPEN_ROOT); + final RegionServerServices rsServices, HRegionInfo regionInfo, + final HTableDescriptor htd) { + super(server, rsServices, regionInfo, htd, EventType.M_RS_OPEN_ROOT); } } Index: src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java (revision 1136656) +++ src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenMetaHandler.java (working copy) @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.regionserver.handler; import 
org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.regionserver.RegionServerServices; @@ -30,7 +31,8 @@ */ public class OpenMetaHandler extends OpenRegionHandler { public OpenMetaHandler(final Server server, - final RegionServerServices rsServices, HRegionInfo regionInfo) { - super(server,rsServices, regionInfo, EventType.M_RS_OPEN_META); + final RegionServerServices rsServices, HRegionInfo regionInfo, + final HTableDescriptor htd) { + super(server,rsServices, regionInfo, htd, EventType.M_RS_OPEN_META); } -} +} \ No newline at end of file Index: src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (revision 1136656) +++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (working copy) @@ -199,8 +199,6 @@ final Path regiondir; KeyValue.KVComparator comparator; - private Pair lastCompactInfo = null; - /* * Data structure of write state flags used coordinating flushes, * compactions and closes. 
@@ -282,6 +280,7 @@ this.log = null; this.regiondir = null; this.regionInfo = null; + this.htableDescriptor = null; this.threadWakeFrequency = 0L; this.coprocessorHost = null; } @@ -310,26 +309,22 @@ * @see HRegion#newHRegion(Path, HLog, FileSystem, Configuration, org.apache.hadoop.hbase.HRegionInfo, FlushRequester) */ public HRegion(Path tableDir, HLog log, FileSystem fs, Configuration conf, - HRegionInfo regionInfo, RegionServerServices rsServices) { + HRegionInfo regionInfo, final HTableDescriptor htd, + RegionServerServices rsServices) { this.tableDir = tableDir; this.comparator = regionInfo.getComparator(); this.log = log; this.fs = fs; this.conf = conf; this.regionInfo = regionInfo; + this.htableDescriptor = htd; this.rsServices = rsServices; this.threadWakeFrequency = conf.getLong(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000); String encodedNameStr = this.regionInfo.getEncodedName(); + setHTableSpecificConf(); this.regiondir = getRegionDir(this.tableDir, encodedNameStr); - try { - LOG.info("Setting table desc from HDFS. Region = " - + this.regionInfo.getTableNameAsString()); - loadHTableDescriptor(tableDir); - LOG.info(" This HTD from HDFS == " + this.htableDescriptor); - } catch (IOException ioe) { - LOG.error("Could not instantiate region as error loading HTableDescriptor"); - } + // don't initialize coprocessors if not running within a regionserver // TODO: revisit if coprocessors should load in other cases if (rsServices != null) { @@ -341,40 +336,19 @@ } } - private void loadHTableDescriptor(Path tableDir) throws IOException { - LOG.debug("Assigning tabledesc from .tableinfo for region = " - + this.regionInfo.getRegionNameAsString()); - // load HTableDescriptor - this.htableDescriptor = FSUtils.getTableDescriptor(tableDir, fs); - - if (this.htableDescriptor != null) { - setHTableSpecificConf(); - } else { - throw new IOException("Table description missing in " + - ".tableinfo. Cannot create new region." 
- + " current region is == " + this.regionInfo.toString()); + void setHTableSpecificConf() { + if (this.htableDescriptor == null) return; + LOG.info("Setting up tabledescriptor config now ..."); + long flushSize = this.htableDescriptor.getMemStoreFlushSize(); + if (flushSize == HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE) { + flushSize = conf.getLong("hbase.hregion.memstore.flush.size", + HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE); } - + this.memstoreFlushSize = flushSize; + this.blockingMemStoreSize = this.memstoreFlushSize * + conf.getLong("hbase.hregion.memstore.block.multiplier", 2); } - private void setHTableSpecificConf() { - if (this.htableDescriptor != null) { - LOG.info("Setting up tabledescriptor config now ..."); - long flushSize = this.htableDescriptor.getMemStoreFlushSize(); - if (flushSize == HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE) { - flushSize = conf.getLong("hbase.hregion.memstore.flush.size", - HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE); - } - this.memstoreFlushSize = flushSize; - this.blockingMemStoreSize = this.memstoreFlushSize * - conf.getLong("hbase.hregion.memstore.block.multiplier", 2); - } - } - - public void setHtableDescriptor(HTableDescriptor htableDescriptor) { - this.htableDescriptor = htableDescriptor; - } - /** * Initialize this region. * @return What the next sequence (edit) id should be. @@ -2763,11 +2737,12 @@ * @param conf is global configuration settings. * @param regionInfo - HRegionInfo that describes the region * is new), then read them from the supplied path. 
+ * @param htd * @param rsServices * @return the new instance */ public static HRegion newHRegion(Path tableDir, HLog log, FileSystem fs, - Configuration conf, HRegionInfo regionInfo, + Configuration conf, HRegionInfo regionInfo, final HTableDescriptor htd, RegionServerServices rsServices) { try { @SuppressWarnings("unchecked") @@ -2776,9 +2751,10 @@ Constructor c = regionClass.getConstructor(Path.class, HLog.class, FileSystem.class, - Configuration.class, HRegionInfo.class, RegionServerServices.class); + Configuration.class, HRegionInfo.class, HTableDescriptor.class, + RegionServerServices.class); - return c.newInstance(tableDir, log, fs, conf, regionInfo, rsServices); + return c.newInstance(tableDir, log, fs, conf, regionInfo, htd, rsServices); } catch (Throwable e) { // todo: what should I throw here? throw new IllegalStateException("Could not instantiate a region instance.", e); @@ -2800,9 +2776,8 @@ * @throws IOException */ public static HRegion createHRegion(final HRegionInfo info, final Path rootDir, - final Configuration conf, - final HTableDescriptor hTableDescriptor) - throws IOException { + final Configuration conf, final HTableDescriptor hTableDescriptor) + throws IOException { LOG.info("creating HRegion " + info.getTableNameAsString() + " HTD == " + hTableDescriptor + " RootDir = " + rootDir + " Table name == " + info.getTableNameAsString()); @@ -2816,7 +2791,7 @@ HRegion region = HRegion.newHRegion(tableDir, new HLog(fs, new Path(regionDir, HConstants.HREGION_LOGDIR_NAME), new Path(regionDir, HConstants.HREGION_OLDLOGDIR_NAME), conf), - fs, conf, info, null); + fs, conf, info, hTableDescriptor, null); region.initialize(); return region; } @@ -2833,10 +2808,11 @@ * * @throws IOException */ - public static HRegion openHRegion(final HRegionInfo info, final HLog wal, + public static HRegion openHRegion(final HRegionInfo info, + final HTableDescriptor htd, final HLog wal, final Configuration conf) throws IOException { - return openHRegion(info, wal, conf, 
null, null); + return openHRegion(info, htd, wal, conf, null, null); } /** @@ -2853,8 +2829,9 @@ * * @throws IOException */ - public static HRegion openHRegion(final HRegionInfo info, final HLog wal, - final Configuration conf, final RegionServerServices rsServices, + public static HRegion openHRegion(final HRegionInfo info, + final HTableDescriptor htd, final HLog wal, final Configuration conf, + final RegionServerServices rsServices, final CancelableProgressable reporter) throws IOException { if (LOG.isDebugEnabled()) { @@ -2866,14 +2843,14 @@ Path dir = HTableDescriptor.getTableDir(FSUtils.getRootDir(conf), info.getTableName()); HRegion r = HRegion.newHRegion(dir, wal, FileSystem.get(conf), conf, info, - rsServices); + htd, rsServices); return r.openHRegion(reporter); } public static HRegion openHRegion(Path tableDir, final HRegionInfo info, - final HLog wal, final Configuration conf) - throws IOException { - return openHRegion(tableDir, info, wal, conf, null, null); + final HTableDescriptor htd, final HLog wal, final Configuration conf) + throws IOException { + return openHRegion(tableDir, info, htd, wal, conf, null, null); } /** @@ -2891,21 +2868,19 @@ * @throws IOException */ public static HRegion openHRegion(final Path tableDir, final HRegionInfo info, - final HLog wal, final Configuration conf, - final RegionServerServices rsServices, - final CancelableProgressable reporter) - throws IOException { + final HTableDescriptor htd, final HLog wal, final Configuration conf, + final RegionServerServices rsServices, + final CancelableProgressable reporter) + throws IOException { + if (info == null) throw new NullPointerException("Passed region info is null"); LOG.info("HRegion.openHRegion Region name ==" + info.getRegionNameAsString()); if (LOG.isDebugEnabled()) { LOG.debug("Opening region: " + info); } - if (info == null) { - throw new NullPointerException("Passed region info is null"); - } Path dir = HTableDescriptor.getTableDir(tableDir, info.getTableName()); 
HRegion r = HRegion.newHRegion(dir, wal, FileSystem.get(conf), conf, info, - rsServices); + htd, rsServices); return r.openHRegion(reporter); } @@ -3077,7 +3052,8 @@ * @return new merged region * @throws IOException */ - public static HRegion merge(HRegion a, HRegion b) throws IOException { + public static HRegion merge(HRegion a, HRegion b) + throws IOException { if (!a.getRegionInfo().getTableNameAsString().equals( b.getRegionInfo().getTableNameAsString())) { throw new IOException("Regions do not belong to the same table"); @@ -3179,7 +3155,8 @@ LOG.debug("Files for new region"); listPaths(fs, newRegionDir); } - HRegion dstRegion = HRegion.newHRegion(tableDir, log, fs, conf, newRegionInfo, null); + HRegion dstRegion = HRegion.newHRegion(tableDir, log, fs, conf, + newRegionInfo, a.getTableDesc(), null); dstRegion.readRequestsCount.set(a.readRequestsCount.get() + b.readRequestsCount.get()); dstRegion.writeRequestsCount.set(a.writeRequestsCount.get() + b.writeRequestsCount.get()); dstRegion.initialize(); @@ -3745,10 +3722,11 @@ String metaStr = Bytes.toString(HConstants.META_TABLE_NAME); // Currently expects tables have one region only. 
if (p.getName().startsWith(rootStr)) { - region = HRegion.newHRegion(p, log, fs, c, HRegionInfo.ROOT_REGIONINFO, null); + region = HRegion.newHRegion(p, log, fs, c, HRegionInfo.ROOT_REGIONINFO, + HTableDescriptor.ROOT_TABLEDESC, null); } else if (p.getName().startsWith(metaStr)) { - region = HRegion.newHRegion(p, log, fs, c, HRegionInfo.FIRST_META_REGIONINFO, - null); + region = HRegion.newHRegion(p, log, fs, c, + HRegionInfo.FIRST_META_REGIONINFO, HTableDescriptor.META_TABLEDESC, null); } else { throw new IOException("Not a known catalog table: " + p.toString()); } Index: src/main/java/org/apache/hadoop/hbase/master/HMaster.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/master/HMaster.java (revision 1136656) +++ src/main/java/org/apache/hadoop/hbase/master/HMaster.java (working copy) @@ -939,20 +939,19 @@ return hRegionInfos; } - private void storeTableDescriptor(HTableDescriptor hTableDescriptor) - throws IOException { - FSUtils.createTableDescriptor(hTableDescriptor, conf); - } - private synchronized void createTable(final HTableDescriptor hTableDescriptor, final HRegionInfo [] newRegions, final boolean sync) throws IOException { String tableName = newRegions[0].getTableNameAsString(); - if(MetaReader.tableExists(catalogTracker, tableName)) { + if (MetaReader.tableExists(catalogTracker, tableName)) { throw new TableExistsException(tableName); } - storeTableDescriptor(hTableDescriptor); + // TODO: Currently we make the table descriptor and as side-effect the + // tableDir is created. Should we change below method to be createTable + // where we create table in tmp dir with its table descriptor file and then + // do rename to move it into place? + FSUtils.createTableDescriptor(hTableDescriptor, conf); for (HRegionInfo newRegion : newRegions) { // 1. Set table enabling flag up in zk. 
@@ -1381,22 +1380,6 @@ } /** - * Get a HTD for a given table name - * @param tableName - * @return HTableDescriptor - */ -/* - public HTableDescriptor getHTableDescriptor(byte[] tableName) { - if (tableName != null && tableName.length > 0) { - return this.assignmentManager.getTableDescriptor( - Bytes.toString(tableName)); - } - return null; - } -*/ - - - /** * Compute the average load across all region servers. * Currently, this uses a very naive computation - just uses the number of * regions being served, ignoring stats about number of requests. Index: src/main/java/org/apache/hadoop/hbase/util/Merge.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/util/Merge.java (revision 1136656) +++ src/main/java/org/apache/hadoop/hbase/util/Merge.java (working copy) @@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.ZooKeeperConnectionException; @@ -153,7 +154,7 @@ get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); List cells2 = rootRegion.get(get, null).list(); HRegionInfo info2 = Writables.getHRegionInfo((cells2 == null)? null: cells2.get(0).getValue()); - HRegion merged = merge(info1, rootRegion, info2, rootRegion); + HRegion merged = merge(HTableDescriptor.META_TABLEDESC, info1, rootRegion, info2, rootRegion); LOG.info("Adding " + merged.getRegionInfo() + " to " + rootRegion.getRegionInfo()); HRegion.addRegionToMETA(rootRegion, merged); @@ -216,8 +217,9 @@ Get get = new Get(region1); get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER); List cells1 = metaRegion1.get(get, null).list(); - HRegionInfo info1 = Writables.getHRegionInfo((cells1 == null)? 
null: cells1.get(0).getValue()); - if (info1== null) { + HRegionInfo info1 = + Writables.getHRegionInfo((cells1 == null)? null: cells1.get(0).getValue()); + if (info1 == null) { throw new NullPointerException("info1 is null using key " + Bytes.toStringBinary(region1) + " in " + meta1); } @@ -235,7 +237,9 @@ if (info2 == null) { throw new NullPointerException("info2 is null using key " + meta2); } - HRegion merged = merge(info1, metaRegion1, info2, metaRegion2); + HTableDescriptor htd = FSUtils.getTableDescriptor(FileSystem.get(getConf()), + this.rootdir, this.tableName); + HRegion merged = merge(htd, info1, metaRegion1, info2, metaRegion2); // Now find the meta region which will contain the newly merged region @@ -267,8 +271,8 @@ * to scan the meta if the resulting merged region does not go in either) * Returns HRegion object for newly merged region */ - private HRegion merge(HRegionInfo info1, HRegion meta1, HRegionInfo info2, - HRegion meta2) + private HRegion merge(final HTableDescriptor htd, HRegionInfo info1, + HRegion meta1, HRegionInfo info2, HRegion meta2) throws IOException { if (info1 == null) { throw new IOException("Could not find " + Bytes.toStringBinary(region1) + " in " + @@ -280,9 +284,9 @@ } HRegion merged = null; HLog log = utils.getLog(); - HRegion r1 = HRegion.openHRegion(info1, log, getConf()); + HRegion r1 = HRegion.openHRegion(info1, htd, log, getConf()); try { - HRegion r2 = HRegion.openHRegion(info2, log, getConf()); + HRegion r2 = HRegion.openHRegion(info2, htd, log, getConf()); try { merged = HRegion.merge(r1, r2); } finally { Index: src/main/java/org/apache/hadoop/hbase/util/HMerge.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/util/HMerge.java (revision 1136656) +++ src/main/java/org/apache/hadoop/hbase/util/HMerge.java (working copy) @@ -55,6 +55,7 @@ * a table by merging adjacent regions. */ class HMerge { + // TODO: Where is this class used? 
How does it relate to Merge in same package? static final Log LOG = LogFactory.getLog(HMerge.class); static final Random rand = new Random(); @@ -135,12 +136,12 @@ protected final Configuration conf; protected final FileSystem fs; protected final Path tabledir; + protected final HTableDescriptor htd; protected final HLog hlog; private final long maxFilesize; - protected Merger(Configuration conf, FileSystem fs, - final byte [] tableName) + protected Merger(Configuration conf, FileSystem fs, final byte [] tableName) throws IOException { this.conf = conf; this.fs = fs; @@ -151,6 +152,7 @@ fs.makeQualified(new Path(conf.get(HConstants.HBASE_DIR))), Bytes.toString(tableName) ); + this.htd = FSUtils.getTableDescriptor(this.tabledir, this.fs); Path logdir = new Path(tabledir, "merge_" + System.currentTimeMillis() + HConstants.HREGION_LOGDIR_NAME); Path oldLogDir = new Path(tabledir, HConstants.HREGION_OLDLOGDIR_NAME); @@ -188,13 +190,13 @@ long nextSize = 0; for (int i = 0; i < info.length - 1; i++) { if (currentRegion == null) { - currentRegion = - HRegion.newHRegion(tabledir, hlog, fs, conf, info[i], null); + currentRegion = HRegion.newHRegion(tabledir, hlog, fs, conf, info[i], + this.htd, null); currentRegion.initialize(); currentSize = currentRegion.getLargestHStoreSize(); } - nextRegion = - HRegion.newHRegion(tabledir, hlog, fs, conf, info[i + 1], null); + nextRegion = HRegion.newHRegion(tabledir, hlog, fs, conf, info[i + 1], + this.htd, null); nextRegion.initialize(); nextSize = nextRegion.getLargestHStoreSize(); @@ -357,7 +359,7 @@ // Scan root region to find all the meta regions root = HRegion.newHRegion(rootTableDir, hlog, fs, conf, - HRegionInfo.ROOT_REGIONINFO, null); + HRegionInfo.ROOT_REGIONINFO, HTableDescriptor.ROOT_TABLEDESC, null); root.initialize(); Scan scan = new Scan(); Index: src/main/java/org/apache/hadoop/hbase/util/FSUtils.java 
=================================================================== --- src/main/java/org/apache/hadoop/hbase/util/FSUtils.java (revision 1136656) +++ src/main/java/org/apache/hadoop/hbase/util/FSUtils.java (working copy) @@ -947,30 +947,26 @@ } - public static HTableDescriptor getTableDescriptor(Path tableDir, FileSystem fs) { - try { - LOG.info("Reading table descriptor from .tableinfo. current path = " - + tableDir); - if (tableDir == null) { - LOG.info("Reading table descriptor from .tableinfo current tablename is NULL "); - return null; - } + public static HTableDescriptor getTableDescriptor(Path tableDir, FileSystem fs) + throws IOException { + LOG.info("Reading table descriptor from .tableinfo. current path = " + + tableDir); + if (tableDir == null) { + LOG.info("Reading table descriptor from .tableinfo current tablename is NULL "); + return null; + } - FSDataInputStream fsDataInputStream = - fs.open(new Path(tableDir, HConstants.TABLEINFO_NAME)); - HTableDescriptor hTableDescriptor = new HTableDescriptor(); - hTableDescriptor.readFields(fsDataInputStream); - LOG.info("Current tabledescriptor from .tableinfo is " + hTableDescriptor.toString()); - fsDataInputStream.close(); - return hTableDescriptor; - } catch (IOException ioe) { - LOG.info("Exception during getTableDescriptor ", ioe); - } - return null; + FSDataInputStream fsDataInputStream = + fs.open(new Path(tableDir, HConstants.TABLEINFO_NAME)); + HTableDescriptor hTableDescriptor = new HTableDescriptor(); + hTableDescriptor.readFields(fsDataInputStream); + LOG.info("Current tabledescriptor from .tableinfo is " + hTableDescriptor.toString()); + fsDataInputStream.close(); + return hTableDescriptor; } /** - * Create new HTableDescriptor in HDFS. + * Create new HTableDescriptor in HDFS. Happens when we are creating table. 
* @param htableDescriptor */ public static void createTableDescriptor(HTableDescriptor htableDescriptor, @@ -985,8 +981,7 @@ } public static void createTableDescriptor(FileSystem fs, - HTableDescriptor htableDescriptor, - Path tableDir) { + HTableDescriptor htableDescriptor, Path tableDir) { try { Path tableInfoPath = new Path(tableDir, HConstants.TABLEINFO_NAME); LOG.info("Current tableInfoPath = " + tableInfoPath @@ -1002,16 +997,22 @@ } } + /** + * Called when we are creating a table. + * @param fs filesystem the descriptor file is written to + * @param hTableDescriptor the table descriptor to serialize + * @param tableDir directory of the table under the hbase root dir + * @throws IOException if the descriptor file cannot be created or written + */ private static void writeTableDescriptor(FileSystem fs, - HTableDescriptor hTableDescriptor, - Path tableDir) throws IOException { + HTableDescriptor hTableDescriptor, Path tableDir) + throws IOException { // Create in tmpdir and then move into place in case we crash after // create but before close. If we don't successfully close the file, // subsequent region reopens will fail the below because create is // registered in NN. 
Path tableInfoPath = new Path(tableDir, HConstants.TABLEINFO_NAME); - Path tmpPath = new Path(new Path(tableDir,".tmp"), - HConstants.TABLEINFO_NAME); + Path tmpPath = new Path(new Path(tableDir,".tmp"), HConstants.TABLEINFO_NAME); LOG.info("TableInfoPath = " + tableInfoPath + " tmpPath = " + tmpPath); FSDataOutputStream out = fs.create(tmpPath, true); try { Index: src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java (revision 1136656) +++ src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java (working copy) @@ -20,14 +20,23 @@ package org.apache.hadoop.hbase.util; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HTable; @@ -36,18 +45,8 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.InternalScanner; -import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.wal.HLog; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.KeyValue; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import 
java.util.Map; -import java.util.TreeMap; - /** * Contains utility methods for manipulating HBase meta tables. * Be sure to call {@link #shutdown()} when done with this class so it closes @@ -59,7 +58,6 @@ private static final Log LOG = LogFactory.getLog(MetaUtils.class); private final Configuration conf; private FileSystem fs; - private Path rootdir; private HLog log; private HRegion rootRegion; private Map metaRegions = Collections.synchronizedSortedMap( @@ -89,8 +87,6 @@ */ private void initialize() throws IOException { this.fs = FileSystem.get(this.conf); - // Get root directory of HBase installation - this.rootdir = FSUtils.getRootDir(this.conf); } /** @@ -266,14 +262,16 @@ if (this.rootRegion != null) { return this.rootRegion; } - this.rootRegion = HRegion.openHRegion(HRegionInfo.ROOT_REGIONINFO, getLog(), + this.rootRegion = HRegion.openHRegion(HRegionInfo.ROOT_REGIONINFO, + HTableDescriptor.ROOT_TABLEDESC, getLog(), this.conf); this.rootRegion.compactStores(); return this.rootRegion; } private HRegion openMetaRegion(HRegionInfo metaInfo) throws IOException { - HRegion meta = HRegion.openHRegion(metaInfo, getLog(), this.conf); + HRegion meta = HRegion.openHRegion(metaInfo, HTableDescriptor.META_TABLEDESC, + getLog(), this.conf); meta.compactStores(); return meta; }