.META. or -ROOT-
*/
public boolean isMetaTable() {
- return this.tableDesc.isMetaTable();
+ return Bytes.equals(tableName, HRegionInfo.FIRST_META_REGIONINFO.getTableName());
}
/** @return true if this region is a meta region */
public boolean isMetaRegion() {
- return this.tableDesc.isMetaRegion();
+ return isMetaTable();
}
/**
@@ -564,14 +614,14 @@
@Override
public String toString() {
return "REGION => {" + HConstants.NAME + " => '" +
- this.regionNameStr +
- "', STARTKEY => '" +
+ this.regionNameStr
+ + " TableName => " + this.tableName
+ + "', STARTKEY => '" +
Bytes.toStringBinary(this.startKey) + "', ENDKEY => '" +
Bytes.toStringBinary(this.endKey) +
"', ENCODED => " + getEncodedName() + "," +
(isOffline()? " OFFLINE => true,": "") +
- (isSplit()? " SPLIT => true,": "") +
- " TABLE => {" + this.tableDesc.toString() + "}";
+ (isSplit()? " SPLIT => true,": "") + "}";
}
/**
@@ -618,7 +668,7 @@
Bytes.writeByteArray(out, regionName);
out.writeBoolean(split);
Bytes.writeByteArray(out, startKey);
- tableDesc.write(out);
+ Bytes.writeByteArray(out, tableName);
out.writeInt(hashCode);
}
@@ -632,7 +682,7 @@
this.regionNameStr = Bytes.toStringBinary(this.regionName);
this.split = in.readBoolean();
this.startKey = Bytes.readByteArray(in);
- this.tableDesc.readFields(in);
+ this.tableName = Bytes.readByteArray(in);
this.hashCode = in.readInt();
}
@@ -646,7 +696,7 @@
}
// Are regions of same table?
- int result = Bytes.compareTo(this.tableDesc.getName(), o.tableDesc.getName());
+ int result = Bytes.compareTo(this.tableName, o.tableName);
if (result != 0) {
return result;
}
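For reference, a minimal sketch of the new serialized form: the write()/readFields() hunks above carry the table name as a length-prefixed byte array (via Bytes.writeByteArray/readByteArray) where the full HTableDescriptor used to be serialized. The class name and sample table below are illustrative, not part of the patch.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.hbase.util.Bytes;

public class TableNameRoundTrip {
  public static void main(String[] args) throws IOException {
    byte[] tableName = Bytes.toBytes("mytable");   // sample table name

    // What the new write(DataOutput) emits for the table.
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bos);
    Bytes.writeByteArray(out, tableName);

    // What the new readFields(DataInput) reads back.
    DataInputStream in =
        new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
    byte[] readBack = Bytes.readByteArray(in);
    System.out.println(Bytes.toString(readBack));  // prints "mytable"
  }
}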
Index: hbase/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
===================================================================
--- hbase/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java (revision a13cc6087b9b0d6924c32d100b612eedf01fd3c4)
+++ hbase/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java (revision )
@@ -25,6 +25,7 @@
import java.lang.Thread.UncaughtExceptionHandler;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
@@ -63,6 +64,7 @@
import org.apache.hadoop.hbase.master.handler.SplitRegionHandler;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
@@ -77,6 +79,7 @@
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NoNodeException;
import org.apache.zookeeper.data.Stat;
+import org.apache.hadoop.hbase.client.Get;
/**
* Manages and performs region assignment.
@@ -139,6 +142,10 @@
private final ExecutorService executorService;
+ private Map
- * @param tableName
- * @throws IOException e
- */
- public void addColumn(final byte [] tableName,
- final HColumnDescriptor hcd)
- throws IOException {
- List
* Scans the .META. catalog
* table on a period looking for unused regions to garbage collect.
@@ -253,8 +256,10 @@
if (split == null) return result;
FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
Path rootdir = this.services.getMasterFileSystem().getRootDir();
- Path tabledir = new Path(rootdir, split.getTableDesc().getNameAsString());
- for (HColumnDescriptor family: split.getTableDesc().getFamilies()) {
+ Path tabledir = new Path(rootdir, split.getTableNameAsString());
+ HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTableName());
+
+ for (HColumnDescriptor family: parentDescriptor.getFamilies()) {
Path p = Store.getStoreHomedir(tabledir, split.getEncodedName(),
family.getName());
// Look for reference files. Call listStatus with anonymous instance of PathFilter.
@@ -276,4 +281,10 @@
}
return result;
}
+
+ private HTableDescriptor getTableDescriptor(byte[] tableName) {
+ return this.services.getAssignmentManager().getTableDescriptor(
+ Bytes.toString(tableName));
-}
+ }
+
+}
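A compact sketch of the lookup the janitor now performs: a daughter's store directories are derived from the parent table's HTableDescriptor (fetched through the AssignmentManager), since HRegionInfo no longer carries the schema. The wrapper class and method below are illustrative; the HBase calls mirror the hunk above.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.regionserver.Store;

public class DaughterStoreDirs {
  /** rootdir is the HBase root dir; htd is the parent table's descriptor. */
  static Path[] storeDirs(Path rootdir, HRegionInfo daughter, HTableDescriptor htd) {
    // The table dir is resolved from the region's table *name* only.
    Path tabledir = new Path(rootdir, daughter.getTableNameAsString());
    HColumnDescriptor[] families = htd.getColumnFamilies();
    Path[] dirs = new Path[families.length];
    for (int i = 0; i < families.length; i++) {
      // Same call the janitor uses when looking for reference files.
      dirs[i] = Store.getStoreHomedir(tabledir, daughter.getEncodedName(),
          families[i].getName());
    }
    return dirs;
  }
}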
Index: hbase/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java
===================================================================
--- hbase/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java (revision a13cc6087b9b0d6924c32d100b612eedf01fd3c4)
+++ hbase/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java (revision )
@@ -68,10 +68,10 @@
HColumnDescriptor.DEFAULT_BLOOMFILTER);
HTableDescriptor htd = new HTableDescriptor(table);
htd.addFamily(hcd);
- HRegionInfo info = new HRegionInfo(htd, null, null, false);
+ HRegionInfo info = new HRegionInfo(Bytes.toBytes(table), null, null, false);
HRegion region =
HRegion.createHRegion(info, HBaseTestingUtility.getTestDir(), TEST_UTIL
- .getConfiguration());
+ .getConfiguration(), htd);
List
- * @param b
- */
- private static void setInfoFamilyCaching(final HRegionInfo hri, final boolean b) {
- for (HColumnDescriptor hcd: hri.getTableDesc().families.values()) {
+ private static void setInfoFamilyCachingForRoot(final boolean b) {
+ for (HColumnDescriptor hcd:
+ HTableDescriptor.ROOT_TABLEDESC.families.values()) {
- if (Bytes.equals(hcd.getName(), HConstants.CATALOG_FAMILY)) {
- hcd.setBlockCacheEnabled(b);
- hcd.setInMemory(b);
- }
+ if (Bytes.equals(hcd.getName(), HConstants.CATALOG_FAMILY)) {
+ hcd.setBlockCacheEnabled(b);
+ hcd.setInMemory(b);
+ }
}
}
+ private static void setInfoFamilyCachingForMeta(final boolean b) {
+ for (HColumnDescriptor hcd:
+ HTableDescriptor.META_TABLEDESC.families.values()) {
+ if (Bytes.equals(hcd.getName(), HConstants.CATALOG_FAMILY)) {
+ hcd.setBlockCacheEnabled(b);
+ hcd.setInMemory(b);
+ }
+ }
+ }
+
+
public void deleteRegion(HRegionInfo region) throws IOException {
fs.delete(HRegion.getRegionDir(rootdir, region), true);
}
@@ -363,16 +377,137 @@
// @see HRegion.checkRegioninfoOnFilesystem()
}
- public void deleteFamily(HRegionInfo region, byte[] familyName)
- throws IOException {
- fs.delete(Store.getStoreHomedir(
- new Path(rootdir, region.getTableDesc().getNameAsString()),
- region.getEncodedName(), familyName), true);
- }
-
public void stop() {
if (splitLogManager != null) {
this.splitLogManager.stop();
}
}
+
+ /**
+ * Get table info path for a table.
+ * @param tableName
+ * @return Table info path
+ */
+ private Path getTableInfoPath(byte[] tableName) {
+ Path tablePath = new Path(this.rootdir, Bytes.toString(tableName));
+ Path tableInfoPath = new Path(tablePath, HConstants.TABLEINFO_NAME);
+ return tableInfoPath;
-}
+ }
+
+ /**
+ * Get the table directory path for a table.
+ * @param tableName
+ * @return Table directory path
+ */
+ private Path getTablePath(byte[] tableName) {
+ return new Path(this.rootdir, Bytes.toString(tableName));
+ }
+ /**
+ * Get a HTableDescriptor of a table.
+ * @param tableName
+ * @return HTableDescriptor
+ */
+ public HTableDescriptor getTableDescriptor(byte[] tableName) {
+ try {
+
+ FSDataInputStream fsDataInputStream = fs.open(getTableInfoPath(tableName));
+ HTableDescriptor hTableDescriptor = new HTableDescriptor();
+ hTableDescriptor.readFields(fsDataInputStream);
+ fsDataInputStream.close();
+ //fs.close();
+ return hTableDescriptor;
+ } catch (IOException ioe) {
+ try {
+ //fs.close();
+ } catch (Exception e) {
+ LOG.error("file system close failed: ", e);
+ }
+ LOG.info("Exception during readTableDecriptor ", ioe);
+ }
+ return null;
+ }
+
+ /**
+ * Create new HTableDescriptor in HDFS.
+ * @param htableDescriptor
+ */
+ public void createTableDescriptor(HTableDescriptor htableDescriptor) {
+ FSUtils.createTableDescriptor(htableDescriptor, conf);
+ }
+
+ /**
+ * Update a table descriptor.
+ * @param htableDescriptor
+ * @return updated HTableDescriptor
+ * @throws IOException
+ */
+ public HTableDescriptor updateTableDescriptor(HTableDescriptor htableDescriptor)
+ throws IOException {
+ LOG.info("Update Table Descriptor. Current HTD = " + htableDescriptor);
+ FSUtils.updateHTableDescriptor(fs, conf, htableDescriptor);
+ return htableDescriptor;
+ }
+
+ /**
+ * Delete column of a table
+ * @param tableName
+ * @param familyName
+ * @return Modified HTableDescriptor with requested column deleted.
+ * @throws IOException
+ */
+ public HTableDescriptor deleteColumn(byte[] tableName, byte[] familyName)
+ throws IOException {
+ LOG.info("DeleteColumn. Table = " + Bytes.toString(tableName)
+ + " family = " + Bytes.toString(familyName));
+ HTableDescriptor htd = getTableDescriptor(tableName);
+ htd.removeFamily(familyName);
+ updateTableDescriptor(htd);
+ return htd;
+ }
+
+ /**
+ * Modify Column of a table
+ * @param tableName
+ * @param hcd HColumnDesciptor
+ * @return Modified HTableDescriptor with the column modified.
+ * @throws IOException
+ */
+ public HTableDescriptor modifyColumn(byte[] tableName, HColumnDescriptor hcd)
+ throws IOException {
+ LOG.info("AddModifyColumn. Table = " + Bytes.toString(tableName)
+ + " HCD = " + hcd.toString());
+
+ HTableDescriptor htd = getTableDescriptor(tableName);
+ byte [] familyName = hcd.getName();
+ if(!htd.hasFamily(familyName)) {
+ throw new InvalidFamilyOperationException("Family '" +
+ Bytes.toString(familyName) + "' doesn't exist so cannot be modified");
+ }
+ htd.addFamily(hcd);
+ updateTableDescriptor(htd);
+ return htd;
+ }
+
+ /**
+ * Add column to a table
+ * @param tableName
+ * @param hcd
+ * @return Modified HTableDescriptor with new column added.
+ * @throws IOException
+ */
+ public HTableDescriptor addColumn(byte[] tableName, HColumnDescriptor hcd)
+ throws IOException {
+ LOG.info("AddColumn. Table = " + Bytes.toString(tableName)
+ + " HCD = " + hcd.toString());
+
+ HTableDescriptor htd = getTableDescriptor(tableName);
+ if(htd == null) {
+ throw new InvalidFamilyOperationException("Family '" +
+ hcd.getNameAsString() + "' cannot be modified as HTD is null");
+ }
+ htd.addFamily(hcd);
+ updateTableDescriptor(htd);
+ return htd;
+ }
+
+}
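A hedged usage sketch of the schema helpers added to MasterFileSystem above; the table and family names are placeholders, and the MasterFileSystem instance is assumed to be already initialized.

import java.io.IOException;

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.util.Bytes;

public class SchemaHelperUsage {
  static void ensureFamily(MasterFileSystem mfs) throws IOException {
    byte[] tableName = Bytes.toBytes("usertable");  // placeholder table
    byte[] family = Bytes.toBytes("extra");         // placeholder family

    // Reads the descriptor back from the table's .tableinfo file.
    HTableDescriptor htd = mfs.getTableDescriptor(tableName);

    // addColumn() mutates the descriptor and rewrites .tableinfo.
    if (htd != null && !htd.hasFamily(family)) {
      mfs.addColumn(tableName, new HColumnDescriptor(family));
    }
  }
}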
Index: hbase/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
===================================================================
--- hbase/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java (revision a13cc6087b9b0d6924c32d100b612eedf01fd3c4)
+++ hbase/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java (revision )
@@ -153,8 +153,8 @@
int daughtersRowCount = 0;
for (HRegion r: daughters) {
// Open so can count its content.
- HRegion openRegion = HRegion.openHRegion(r.getRegionInfo(),
+ HRegion openRegion = HRegion.openHRegion(this.testdir, r.getRegionInfo(),
- r.getLog(), r.getConf());
+ r.getLog(), r.getConf());
try {
int count = countRows(openRegion);
assertTrue(count > 0 && count != rowcount);
@@ -208,8 +208,8 @@
int daughtersRowCount = 0;
for (HRegion r: daughters) {
// Open so can count its content.
- HRegion openRegion = HRegion.openHRegion(r.getRegionInfo(),
+ HRegion openRegion = HRegion.openHRegion(this.testdir, r.getRegionInfo(),
- r.getLog(), r.getConf());
+ r.getLog(), r.getConf());
try {
int count = countRows(openRegion);
assertTrue(count > 0 && count != rowcount);
@@ -252,7 +252,8 @@
HTableDescriptor htd = new HTableDescriptor("table");
HColumnDescriptor hcd = new HColumnDescriptor(CF);
htd.addFamily(hcd);
- HRegionInfo hri = new HRegionInfo(htd, STARTROW, ENDROW);
- return HRegion.openHRegion(hri, wal, TEST_UTIL.getConfiguration());
+ HRegionInfo hri = new HRegionInfo(htd.getName(), STARTROW, ENDROW);
+ HRegion.createHRegion(hri, testdir, TEST_UTIL.getConfiguration(), htd);
+ return HRegion.openHRegion(testdir, hri, wal, TEST_UTIL.getConfiguration());
}
}
\ No newline at end of file
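The test updates in this patch all follow the same create-then-open pattern: the descriptor is handed to createHRegion() (it no longer travels inside HRegionInfo), and openHRegion() takes the region directory explicitly. A condensed sketch, with the directory, WAL and names as placeholders:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.wal.HLog;

public class CreateThenOpen {
  static HRegion createThenOpen(Path testdir, HLog wal, Configuration conf)
      throws IOException {
    HTableDescriptor htd = new HTableDescriptor("table");
    htd.addFamily(new HColumnDescriptor("cf"));
    // The region info now carries only the table name.
    HRegionInfo hri = new HRegionInfo(htd.getName(), null, null);
    // The schema is supplied separately when the region is created on disk...
    HRegion.createHRegion(hri, testdir, conf, htd);
    // ...and the open call is rooted at the same directory.
    return HRegion.openHRegion(testdir, hri, wal, conf);
  }
}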
Index: hbase/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java
===================================================================
--- hbase/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java (revision a13cc6087b9b0d6924c32d100b612eedf01fd3c4)
+++ hbase/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java (revision )
@@ -82,8 +82,8 @@
public void testWithRegions() throws IOException {
HTableDescriptor htd = new HTableDescriptor("mytable");
List
* Check -ROOT- and .META. are assigned. If not,
* assign them.
@@ -851,46 +892,62 @@
createTable(desc, splitKeys, false);
}
- public void createTable(HTableDescriptor desc, byte [][] splitKeys,
+ public void createTable(HTableDescriptor hTableDescriptor,
+ byte [][] splitKeys,
- boolean sync)
+ boolean sync)
throws IOException {
if (!isMasterRunning()) {
throw new MasterNotRunningException();
}
if (cpHost != null) {
- cpHost.preCreateTable(desc, splitKeys);
+ cpHost.preCreateTable(hTableDescriptor, splitKeys);
}
- HRegionInfo [] newRegions = null;
+ HRegionInfo [] newRegions = getHRegionInfos(hTableDescriptor, splitKeys);
+ storeTableDescriptor(hTableDescriptor);
+ int timeout = conf.getInt("hbase.client.catalog.timeout", 10000);
+ // Need META availability to create a table
+ try {
+ if(catalogTracker.waitForMeta(timeout) == null) {
+ throw new NotAllMetaRegionsOnlineException();
+ }
+ } catch (InterruptedException e) {
+ LOG.warn("Interrupted waiting for meta availability", e);
+ throw new IOException(e);
+ }
+ createTable(hTableDescriptor, newRegions, sync);
+ }
+
+ private HRegionInfo[] getHRegionInfos(HTableDescriptor hTableDescriptor,
+ byte[][] splitKeys) {
+ HRegionInfo[] hRegionInfos = null;
- if(splitKeys == null || splitKeys.length == 0) {
+ if (splitKeys == null || splitKeys.length == 0) {
- newRegions = new HRegionInfo [] { new HRegionInfo(desc, null, null) };
+ hRegionInfos = new HRegionInfo[]{
+ new HRegionInfo(hTableDescriptor.getName(), null, null)};
- } else {
- int numRegions = splitKeys.length + 1;
+ } else {
+ int numRegions = splitKeys.length + 1;
- newRegions = new HRegionInfo[numRegions];
+ hRegionInfos = new HRegionInfo[numRegions];
- byte [] startKey = null;
+ byte[] startKey = null;
- byte [] endKey = null;
+ byte[] endKey = null;
- for(int i=0;i
* Writes region and assignment information to .META..
@@ -219,6 +223,82 @@
LOG.info("Updated region " + regionInfo.getRegionNameAsString() + " in META");
}
+ public static void updateRootWithMetaMigrationStatus(CatalogTracker catalogTracker) throws IOException {
+ updateRootWithMetaMigrationStatus(catalogTracker, true);
+ }
+
+ public static void updateRootWithMetaMigrationStatus(CatalogTracker catalogTracker,
+ boolean metaUpdated)
+ throws IOException {
+ Put put = new Put(HRegionInfo.ROOT_REGIONINFO.getRegionName());
+ addMetaUpdateStatus(put, metaUpdated);
+ catalogTracker.waitForRootServerConnectionDefault().put(
+ CatalogTracker.ROOT_REGION, put);
+ LOG.info("Updated -ROOT- row with metaMigrated status = " + metaUpdated);
+ }
+
+ public static List
* w gotten by running its
* {@link Writable#write(java.io.DataOutput)} method.
* @throws IOException e
@@ -215,4 +216,16 @@
}
return tgt;
}
+
+ /**
+ * Get HRegionInfoForMigration deserialized from bytes.
+ * @param bytes serialized bytes
+ * @return HRegionInfoForMigration
+ * @throws IOException
+ */
+ public static HRegionInfo090x getHRegionInfoForMigration(final byte [] bytes)
+ throws IOException {
+ return (HRegionInfo090x)getWritable(bytes, new HRegionInfo090x());
-}
\ No newline at end of file
+ }
+
+}
\ No newline at end of file
Index: hbase/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
===================================================================
--- hbase/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java (revision a13cc6087b9b0d6924c32d100b612eedf01fd3c4)
+++ hbase/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java (revision )
@@ -22,10 +22,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.RemoteExceptionHandler;
-import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
@@ -166,6 +163,12 @@
}
@Override
+ public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey,
+ WALEdit logEdit) {
+ //Not interested
+ }
+
+ @Override
public void logCloseRequested() {
// not interested
}
Index: hbase/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java
===================================================================
--- hbase/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java (revision a13cc6087b9b0d6924c32d100b612eedf01fd3c4)
+++ hbase/src/test/java/org/apache/hadoop/hbase/client/TestScannerTimeout.java (revision )
@@ -27,6 +27,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
@@ -89,6 +90,7 @@
*/
@Test
public void test2481() throws Exception {
+ LOG.info("START ************ test2481");
Scan scan = new Scan();
HTable table =
new HTable(new Configuration(TEST_UTIL.getConfiguration()), TABLE_NAME);
@@ -109,6 +111,7 @@
return;
}
fail("We should be timing out");
+ LOG.info("END ************ test2481");
}
/**
@@ -118,6 +121,7 @@
*/
@Test
public void test2772() throws Exception {
+ LOG.info("START************ test2772");
HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME);
Scan scan = new Scan();
// Set a very high timeout, we want to test what happens when a RS
@@ -134,6 +138,8 @@
Result[] results = r.next(NB_ROWS);
assertEquals(NB_ROWS, results.length);
r.close();
+ LOG.info("END ************ test2772");
+
}
/**
@@ -143,14 +149,24 @@
*/
@Test
public void test3686a() throws Exception {
+ LOG.info("START ************ TEST3686A---1");
HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME);
+ LOG.info("START ************ TEST3686A---1111");
+
Scan scan = new Scan();
scan.setCaching(SCANNER_CACHING);
-
+ LOG.info("************ TEST3686A");
+ MetaReader.fullScanMetaAndPrint(TEST_UTIL.getHBaseCluster().getMaster().getCatalogTracker());
HTable table = new HTable(TABLE_NAME);
+ LOG.info("START ************ TEST3686A---22");
+
ResultScanner r = table.getScanner(scan);
+ LOG.info("START ************ TEST3686A---33");
+
int count = 1;
r.next();
+ LOG.info("START ************ TEST3686A---44");
+
// Kill after one call to next(), which got 5 rows.
rs.abort("die!");
while(r.next() != null) {
@@ -158,6 +174,7 @@
}
assertEquals(NB_ROWS, count);
r.close();
+ LOG.info("************ END TEST3686A");
}
/**
@@ -168,6 +185,7 @@
*/
@Test
public void test3686b() throws Exception {
+ LOG.info("START ************ test3686b");
HRegionServer rs = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME);
Scan scan = new Scan();
scan.setCaching(SCANNER_CACHING);
@@ -189,5 +207,7 @@
}
assertEquals(NB_ROWS, count);
r.close();
+ LOG.info("END ************ END test3686b");
+
}
}
\ No newline at end of file
Index: hbase/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
===================================================================
--- hbase/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java (revision a13cc6087b9b0d6924c32d100b612eedf01fd3c4)
+++ hbase/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java (revision )
@@ -57,9 +57,15 @@
/** configuration parameter name for test directory */
public static final String TEST_DIRECTORY_KEY = "test.build.data";
+/*
protected final static byte [] fam1 = Bytes.toBytes("colfamily1");
protected final static byte [] fam2 = Bytes.toBytes("colfamily2");
protected final static byte [] fam3 = Bytes.toBytes("colfamily3");
+*/
+ protected final static byte [] fam1 = Bytes.toBytes("colfamily11");
+ protected final static byte [] fam2 = Bytes.toBytes("colfamily21");
+ protected final static byte [] fam3 = Bytes.toBytes("colfamily31");
+
protected static final byte [][] COLUMNS = {fam1, fam2, fam3};
private boolean localfs = false;
@@ -159,9 +165,8 @@
Path rootdir = filesystem.makeQualified(
new Path(conf.get(HConstants.HBASE_DIR)));
filesystem.mkdirs(rootdir);
-
- return HRegion.createHRegion(new HRegionInfo(desc, startKey, endKey),
- rootdir, conf);
+ HRegionInfo hri = new HRegionInfo(desc.getName(), startKey, endKey);
+ return HRegion.createHRegion(hri, rootdir, conf, desc);
}
protected HRegion openClosedRegion(final HRegion closedRegion)
@@ -653,9 +658,10 @@
}
protected void createRootAndMetaRegions() throws IOException {
- root = HRegion.createHRegion(HRegionInfo.ROOT_REGIONINFO, testDir, conf);
+ root = HRegion.createHRegion(HRegionInfo.ROOT_REGIONINFO, testDir,
+ conf, HTableDescriptor.ROOT_TABLEDESC);
meta = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, testDir,
- conf);
+ conf, HTableDescriptor.META_TABLEDESC);
HRegion.addRegionToMETA(root, meta);
}
Index: hbase/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
===================================================================
--- hbase/src/main/java/org/apache/hadoop/hbase/client/HConnection.java (revision a13cc6087b9b0d6924c32d100b612eedf01fd3c4)
+++ hbase/src/main/java/org/apache/hadoop/hbase/client/HConnection.java (revision )
@@ -373,4 +373,13 @@
* @deprecated This method will be changed from public to package protected.
*/
public int getCurrentNrHRS() throws IOException;
+
+ /**
+ * @param tableNames List of table names
+ * @return HTD[] table metadata
+ * @throws IOException if a remote or network exception occurs
+ */
+ public HTableDescriptor[] getHTableDescriptors(List<String> tableNames)
+ throws IOException;
+ * **NOTE**
+ *
+ * ROOT, the first META region, and regions created by an older
+ * version of HBase (0.20 or prior) will continue to use the
+ * old region name format.
+ */
+
+ /** Separator used to demarcate the encodedName in a region name
+ * in the new format. See description on new format above.
+ */
+ private static final int ENC_SEPARATOR = '.';
+ public static final int MD5_HEX_LENGTH = 32;
+
+ /**
+ * Does region name contain its encoded name?
+ * @param regionName region name
+ * @return boolean indicating if this a new format region
+ * name which contains its encoded name.
+ */
+ private static boolean hasEncodedName(final byte[] regionName) {
+ // check if region name ends in ENC_SEPARATOR
+ if ((regionName.length >= 1)
+ && (regionName[regionName.length - 1] == ENC_SEPARATOR)) {
+ // region name is new format. it contains the encoded name.
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * @param regionName
+ * @return the encodedName
+ */
+ public static String encodeRegionName(final byte [] regionName) {
+ String encodedName;
+ if (hasEncodedName(regionName)) {
+ // region is in new format:
+ //
+ * @return -ROOT- if passed 70236052 or
+ * .META. if passed 1028785192 else returns
+ * encodedRegionName
+ */
+ public static String prettyPrint(final String encodedRegionName) {
+ if (encodedRegionName.equals("70236052")) {
+ return encodedRegionName + "/-ROOT-";
+ } else if (encodedRegionName.equals("1028785192")) {
+ return encodedRegionName + "/.META.";
+ }
+ return encodedRegionName;
+ }
+
+ /** delimiter used between portions of a region name */
+ public static final int DELIMITER = ',';
+
+ /** HRegionInfo for root region */
+ public static final HRegionInfo090x ROOT_REGIONINFO =
+ new HRegionInfo090x(0L, HTableDescriptor.ROOT_TABLEDESC);
+
+ /** HRegionInfo for first meta region */
+ public static final HRegionInfo090x FIRST_META_REGIONINFO =
+ new HRegionInfo090x(1L, HTableDescriptor.META_TABLEDESC);
+
+ private byte [] endKey = HConstants.EMPTY_BYTE_ARRAY;
+ // This flag is in the parent of a split while the parent is still referenced
+ // by daughter regions. We USED to set this flag when we disabled a table
+ // but now table state is kept up in zookeeper as of 0.90.0 HBase.
+ private boolean offLine = false;
+ private long regionId = -1;
+ private transient byte [] regionName = HConstants.EMPTY_BYTE_ARRAY;
+ private String regionNameStr = "";
+ private boolean split = false;
+ private byte [] startKey = HConstants.EMPTY_BYTE_ARRAY;
+ protected HTableDescriptor tableDesc = null;
+ private int hashCode = -1;
+ //TODO: Move NO_HASH to HStoreFile which is really the only place it is used.
+ public static final String NO_HASH = null;
+ private volatile String encodedName = NO_HASH;
+ private byte [] encodedNameAsBytes = null;
+
+ private void setHashCode() {
+ int result = Arrays.hashCode(this.regionName);
+ result ^= this.regionId;
+ result ^= Arrays.hashCode(this.startKey);
+ result ^= Arrays.hashCode(this.endKey);
+ result ^= Boolean.valueOf(this.offLine).hashCode();
+ result ^= this.tableDesc.hashCode();
+ this.hashCode = result;
+ }
+
+ /**
+ * Private constructor used constructing HRegionInfo for the catalog root and
+ * first meta regions
+ */
+ private HRegionInfo090x(long regionId, HTableDescriptor tableDesc) {
+ super();
+ this.regionId = regionId;
+ this.tableDesc = tableDesc;
+
+ // Note: Root & First Meta regions names are still in old format
+ this.regionName = createRegionName(tableDesc.getName(), null,
+ regionId, false);
+ this.regionNameStr = Bytes.toStringBinary(this.regionName);
+ setHashCode();
+ }
+
+ /** Default constructor - creates empty object */
+ public HRegionInfo090x() {
+ super();
+ this.tableDesc = new HTableDescriptor();
+ }
+
+ /**
+ * Construct HRegionInfo with explicit parameters
+ *
+ * @param tableDesc the table descriptor
+ * @param startKey first key in region
+ * @param endKey end of key range
+ * @throws IllegalArgumentException
+ */
+ public HRegionInfo090x(final HTableDescriptor tableDesc, final byte[] startKey,
+ final byte[] endKey)
+ throws IllegalArgumentException {
+ this(tableDesc, startKey, endKey, false);
+ }
+
+ /**
+ * Construct HRegionInfo with explicit parameters
+ *
+ * @param tableDesc the table descriptor
+ * @param startKey first key in region
+ * @param endKey end of key range
+ * @param split true if this region has split and we have daughter regions
+ * that may or may not hold references to this region.
+ * @throws IllegalArgumentException
+ */
+ public HRegionInfo090x(HTableDescriptor tableDesc, final byte[] startKey,
+ final byte[] endKey, final boolean split)
+ throws IllegalArgumentException {
+ this(tableDesc, startKey, endKey, split, System.currentTimeMillis());
+ }
+
+ /**
+ * Construct HRegionInfo with explicit parameters
+ *
+ * @param tableDesc the table descriptor
+ * @param startKey first key in region
+ * @param endKey end of key range
+ * @param split true if this region has split and we have daughter regions
+ * that may or may not hold references to this region.
+ * @param regionid Region id to use.
+ * @throws IllegalArgumentException
+ */
+ public HRegionInfo090x(HTableDescriptor tableDesc, final byte[] startKey,
+ final byte[] endKey, final boolean split, final long regionid)
+ throws IllegalArgumentException {
+ super();
+ if (tableDesc == null) {
+ throw new IllegalArgumentException("tableDesc cannot be null");
+ }
+ this.offLine = false;
+ this.regionId = regionid;
+ this.regionName = createRegionName(tableDesc.getName(), startKey, regionId, true);
+ this.regionNameStr = Bytes.toStringBinary(this.regionName);
+ this.split = split;
+ this.endKey = endKey == null? HConstants.EMPTY_END_ROW: endKey.clone();
+ this.startKey = startKey == null?
+ HConstants.EMPTY_START_ROW: startKey.clone();
+ this.tableDesc = tableDesc;
+ setHashCode();
+ }
+
+ /**
+ * Construct a copy of another HRegionInfo
+ *
+ * @param other
+ */
+ public HRegionInfo090x(HRegionInfo090x other) {
+ super();
+ this.endKey = other.getEndKey();
+ this.offLine = other.isOffline();
+ this.regionId = other.getRegionId();
+ this.regionName = other.getRegionName();
+ this.regionNameStr = Bytes.toStringBinary(this.regionName);
+ this.split = other.isSplit();
+ this.startKey = other.getStartKey();
+ this.tableDesc = other.getTableDesc();
+ this.hashCode = other.hashCode();
+ this.encodedName = other.getEncodedName();
+ }
+
+ /**
+ * Make a region name of passed parameters.
+ * @param tableName
+ * @param startKey Can be null
+ * @param regionid Region id (Usually timestamp from when region was created).
+ * @param newFormat should we create the region name in the new format
+ * (such that it contains its encoded name?).
+ * @return Region name made of passed tableName, startKey and id
+ */
+ public static byte [] createRegionName(final byte [] tableName,
+ final byte [] startKey, final long regionid, boolean newFormat) {
+ return createRegionName(tableName, startKey, Long.toString(regionid), newFormat);
+ }
+
+ /**
+ * Make a region name of passed parameters.
+ * @param tableName
+ * @param startKey Can be null
+ * @param id Region id (Usually timestamp from when region was created).
+ * @param newFormat should we create the region name in the new format
+ * (such that it contains its encoded name?).
+ * @return Region name made of passed tableName, startKey and id
+ */
+ public static byte [] createRegionName(final byte [] tableName,
+ final byte [] startKey, final String id, boolean newFormat) {
+ return createRegionName(tableName, startKey, Bytes.toBytes(id), newFormat);
+ }
+
+ /**
+ * Make a region name of passed parameters.
+ * @param tableName
+ * @param startKey Can be null
+ * @param id Region id (Usually timestamp from when region was created).
+ * @param newFormat should we create the region name in the new format
+ * (such that it contains its encoded name?).
+ * @return Region name made of passed tableName, startKey and id
+ */
+ public static byte [] createRegionName(final byte [] tableName,
+ final byte [] startKey, final byte [] id, boolean newFormat) {
+ byte [] b = new byte [tableName.length + 2 + id.length +
+ (startKey == null? 0: startKey.length) +
+ (newFormat ? (MD5_HEX_LENGTH + 2) : 0)];
+
+ int offset = tableName.length;
+ System.arraycopy(tableName, 0, b, 0, offset);
+ b[offset++] = DELIMITER;
+ if (startKey != null && startKey.length > 0) {
+ System.arraycopy(startKey, 0, b, offset, startKey.length);
+ offset += startKey.length;
+ }
+ b[offset++] = DELIMITER;
+ System.arraycopy(id, 0, b, offset, id.length);
+ offset += id.length;
+
+ if (newFormat) {
+ //
+ // Encoded name should be built into the region name.
+ //
+ // Use the region name thus far (namely, .META. or -ROOT-
+ */
+ public boolean isMetaTable() {
+ return this.tableDesc.isMetaTable();
+ }
+
+ /** @return true if this region is a meta region */
+ public boolean isMetaRegion() {
+ return this.tableDesc.isMetaRegion();
+ }
+
+ /**
+ * @return True if has been split and has daughters.
+ */
+ public boolean isSplit() {
+ return this.split;
+ }
+
+ /**
+ * @param split set split status
+ */
+ public void setSplit(boolean split) {
+ this.split = split;
+ }
+
+ /**
+ * @return True if this region is offline.
+ */
+ public boolean isOffline() {
+ return this.offLine;
+ }
+
+ /**
+ * The parent of a region split is offline while split daughters hold
+ * references to the parent. Offlined regions are closed.
+ * @param offLine Set online/offline status.
+ */
+ public void setOffline(boolean offLine) {
+ this.offLine = offLine;
+ }
+
+
+ /**
+ * @return True if this is a split parent region.
+ */
+ public boolean isSplitParent() {
+ if (!isSplit()) return false;
+ if (!isOffline()) {
+ LOG.warn("Region is split but NOT offline: " + getRegionNameAsString());
+ }
+ return true;
+ }
+
+ /**
+ * @see java.lang.Object#toString()
+ */
+ @Override
+ public String toString() {
+ return "REGION => {" + HConstants.NAME + " => '" +
+ this.regionNameStr +
+ "', STARTKEY => '" +
+ Bytes.toStringBinary(this.startKey) + "', ENDKEY => '" +
+ Bytes.toStringBinary(this.endKey) +
+ "', ENCODED => " + getEncodedName() + "," +
+ (isOffline()? " OFFLINE => true,": "") +
+ (isSplit()? " SPLIT => true,": "") +
+ " TABLE => {" + this.tableDesc.toString() + "}";
+ }
+
+ /**
+ * @see java.lang.Object#equals(java.lang.Object)
+ */
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null) {
+ return false;
+ }
+ if (!(o instanceof HRegionInfo090x)) {
+ return false;
+ }
+ return this.compareTo((HRegionInfo090x)o) == 0;
+ }
+
+ /**
+ * @see java.lang.Object#hashCode()
+ */
+ @Override
+ public int hashCode() {
+ return this.hashCode;
+ }
+
+ /** @return the object version number */
+ @Override
+ public byte getVersion() {
+ return VERSION;
+ }
+
+ //
+ // Writable
+ //
+
+ @Override
+ public void write(DataOutput out) throws IOException {
+ super.write(out);
+ Bytes.writeByteArray(out, endKey);
+ out.writeBoolean(offLine);
+ out.writeLong(regionId);
+ Bytes.writeByteArray(out, regionName);
+ out.writeBoolean(split);
+ Bytes.writeByteArray(out, startKey);
+ tableDesc.write(out);
+ out.writeInt(hashCode);
+ }
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ super.readFields(in);
+ this.endKey = Bytes.readByteArray(in);
+ this.offLine = in.readBoolean();
+ this.regionId = in.readLong();
+ this.regionName = Bytes.readByteArray(in);
+ this.regionNameStr = Bytes.toStringBinary(this.regionName);
+ this.split = in.readBoolean();
+ this.startKey = Bytes.readByteArray(in);
+ try {
+ this.tableDesc.readFields(in);
+ } catch(EOFException eofe) {
+ throw new IOException("HTD not found in input buffer");
+ }
+ this.hashCode = in.readInt();
+ }
+
+ //
+ // Comparable
+ //
+
+ public int compareTo(HRegionInfo090x o) {
+ if (o == null) {
+ return 1;
+ }
+
+ // Are regions of same table?
+ int result = Bytes.compareTo(this.tableDesc.getName(), o.tableDesc.getName());
+ if (result != 0) {
+ return result;
+ }
+
+ // Compare start keys.
+ result = Bytes.compareTo(this.startKey, o.startKey);
+ if (result != 0) {
+ return result;
+ }
+
+ // Compare end keys.
+ return Bytes.compareTo(this.endKey, o.endKey);
+ }
+
+ /**
+ * @return Comparator to use comparing {@link org.apache.hadoop.hbase.KeyValue}s.
+ */
+ public KVComparator getComparator() {
+ return isRootRegion()? KeyValue.ROOT_COMPARATOR: isMetaRegion()?
+ KeyValue.META_COMPARATOR: KeyValue.COMPARATOR;
+ }
+}
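Finally, a sketch of how the migration pieces fit together: the old-format bytes are decoded into an HRegionInfo090x (which still embeds its HTableDescriptor), and the descriptor and the slimmed-down HRegionInfo are then carried separately. The byte source is a placeholder, and the import assumes HRegionInfo090x lives in the migration package.

import java.io.IOException;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.migration.HRegionInfo090x;  // assumed package
import org.apache.hadoop.hbase.util.Writables;

public class RegionInfoMigrationSketch {
  static HRegionInfo migrate(byte[] oldRegionInfoBytes) throws IOException {
    // Old 0.90-format value, e.g. an info:regioninfo cell read from .META.
    HRegionInfo090x oldHri =
        Writables.getHRegionInfoForMigration(oldRegionInfoBytes);

    // The schema now lives on its own (it would be persisted separately,
    // e.g. through the FSUtils.createTableDescriptor path used above).
    HTableDescriptor htd = oldHri.getTableDesc();

    // The region info is rebuilt around just the table name.
    return new HRegionInfo(htd.getName(), oldHri.getStartKey(),
        oldHri.getEndKey(), oldHri.isSplit());
  }
}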