diff --git src/main/java/org/apache/hadoop/hbase/KeyValue.java src/main/java/org/apache/hadoop/hbase/KeyValue.java
index 585c4a8..fc48f3a 100644
--- src/main/java/org/apache/hadoop/hbase/KeyValue.java
+++ src/main/java/org/apache/hadoop/hbase/KeyValue.java
@@ -967,6 +967,7 @@ public class KeyValue implements Writable, HeapSize {
* @return True if this KV is a {@link KeyValue.Type#Delete} type.
*/
public boolean isDeleteType() {
+ // TODO: Fix this method name vis-a-vis isDelete!
return getType() == Type.Delete.getCode();
}
diff --git src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java
index ac0bc38..e3bae50 100644
--- src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java
+++ src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java
@@ -1,6 +1,4 @@
/**
- * Copyright 2010 The Apache Software Foundation
- *
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -33,7 +31,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
-import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
@@ -49,7 +46,7 @@ import org.apache.hadoop.ipc.RemoteException;
/**
* Tracks the availability of the catalog tables -ROOT- and
* .META..
- *
+ *
* This class is "read-only" in that the locations of the catalog tables cannot
* be explicitly set. Instead, ZooKeeper is used to learn of the availability
* and location of -ROOT-. -ROOT- is used to learn of
@@ -61,7 +58,6 @@ import org.apache.hadoop.ipc.RemoteException;
*/
public class CatalogTracker {
private static final Log LOG = LogFactory.getLog(CatalogTracker.class);
- private final Configuration conf;
private final HConnection connection;
private final ZooKeeperWatcher zookeeper;
private final RootRegionTracker rootRegionTracker;
@@ -76,9 +72,9 @@ public class CatalogTracker {
private final int defaultTimeout;
private boolean stopped = false;
- public static final byte [] ROOT_REGION =
+ static final byte [] ROOT_REGION_NAME =
HRegionInfo.ROOT_REGIONINFO.getRegionName();
- public static final byte [] META_REGION =
+ static final byte [] META_REGION_NAME =
HRegionInfo.FIRST_META_REGIONINFO.getRegionName();
/**
@@ -130,7 +126,6 @@ public class CatalogTracker {
CatalogTracker(final ZooKeeperWatcher zk, final Configuration conf,
HConnection connection, Abortable abortable, final int defaultTimeout)
throws IOException {
- this.conf = conf;
this.connection = connection;
this.zookeeper = (zk == null) ? this.connection.getZooKeeperWatcher() : zk;
if (abortable == null) {
@@ -237,7 +232,7 @@ public class CatalogTracker {
* @throws NotAllMetaRegionsOnlineException if timed out waiting
* @throws IOException
*/
- public HRegionInterface waitForRootServerConnection(long timeout)
+ HRegionInterface waitForRootServerConnection(long timeout)
throws InterruptedException, NotAllMetaRegionsOnlineException, IOException {
return getCachedConnection(waitForRoot(timeout));
}
@@ -260,22 +255,6 @@ public class CatalogTracker {
}
/**
- * Gets a connection to the server hosting root, as reported by ZooKeeper,
- * if available. Returns null if no location is immediately available.
- * @return connection to server hosting root, null if not available
- * @throws IOException
- * @throws InterruptedException
- */
- private HRegionInterface getRootServerConnection()
- throws IOException, InterruptedException {
- ServerName sn = this.rootRegionTracker.getRootRegionLocation();
- if (sn == null) {
- return null;
- }
- return getCachedConnection(sn);
- }
-
- /**
* Gets a connection to the server currently hosting .META. or
* null if location is not currently available.
*
@@ -299,22 +278,20 @@ public class CatalogTracker {
if (!refresh) {
return current;
}
- if (verifyRegionLocation(current, this.metaLocation, META_REGION)) {
+ if (verifyRegionLocation(current, this.metaLocation, META_REGION_NAME)) {
return current;
}
resetMetaLocation();
}
- HRegionInterface rootConnection = getRootServerConnection();
- if (rootConnection == null) {
- return null;
- }
- ServerName newLocation = MetaReader.readMetaLocation(rootConnection);
+ // MetaReader.readRegionLocation will create an HTable instance and retry
+ // getting of cell.
+ ServerName newLocation = MetaReader.readRegionLocation(this, META_REGION_NAME);
if (newLocation == null) {
return null;
}
HRegionInterface newConnection = getCachedConnection(newLocation);
- if (verifyRegionLocation(newConnection, this.metaLocation, META_REGION)) {
+ if (verifyRegionLocation(newConnection, this.metaLocation, META_REGION_NAME)) {
setMetaLocation(newLocation);
return newConnection;
}
@@ -352,14 +329,16 @@ public class CatalogTracker {
throws InterruptedException, IOException, NotAllMetaRegionsOnlineException {
long stop = System.currentTimeMillis() + timeout;
synchronized (metaAvailable) {
- while(!stopped && !metaAvailable.get() &&
+ while (!stopped && !metaAvailable.get() &&
(timeout == 0 || System.currentTimeMillis() < stop)) {
- if (getMetaServerConnection(true) != null) {
+ // TODO: timeout is not used currently
+ if (verifyMetaRegionLocation(timeout)) {
return metaLocation;
}
metaAvailable.wait(timeout == 0 ? 50 : timeout);
}
- if (getMetaServerConnection(true) == null) {
+ // TODO: timeout is not used currently
+ if (!verifyMetaRegionLocation(timeout)) {
throw new NotAllMetaRegionsOnlineException(
"Timed out (" + timeout + "ms)");
}
@@ -376,7 +355,7 @@ public class CatalogTracker {
* @throws NotAllMetaRegionsOnlineException if timed out waiting
* @throws IOException
*/
- public HRegionInterface waitForMetaServerConnection(long timeout)
+ HRegionInterface waitForMetaServerConnection(long timeout)
throws InterruptedException, NotAllMetaRegionsOnlineException, IOException {
return getCachedConnection(waitForMeta(timeout));
}
@@ -511,6 +490,7 @@ public class CatalogTracker {
*/
public boolean verifyMetaRegionLocation(final long timeout)
throws InterruptedException, IOException {
+ // Timeout is not used.
return getMetaServerConnection(true) != null;
}
@@ -521,4 +501,4 @@ public class CatalogTracker {
public HConnection getConnection() {
return this.connection;
}
-}
+}
\ No newline at end of file
diff --git src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
index ac60311..ea70882 100644
--- src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
+++ src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java
@@ -28,6 +28,7 @@ import java.net.ConnectException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
@@ -40,20 +41,69 @@ import org.apache.hadoop.hbase.catalog.MetaReader.Visitor;
/**
* Writes region and assignment information to .META..
- *
- * Uses the {@link CatalogTracker} to obtain locations and connections to
- * catalogs.
*/
public class MetaEditor {
private static final Log LOG = LogFactory.getLog(MetaEditor.class);
- private static Put makePutFromRegionInfo(HRegionInfo regionInfo) throws IOException {
+ private static Put makePutFromRegionInfo(HRegionInfo regionInfo)
+ throws IOException {
Put put = new Put(regionInfo.getRegionName());
put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
Writables.getBytes(regionInfo));
return put;
}
-
+
+ /**
+ * Put the passed p to the .META. table.
+ * @param ct CatalogTracker on whose back we will ride the edit.
+ * @param p Put to add to .META.
+ * @throws IOException
+ */
+ static void putToMetaTable(final CatalogTracker ct, final Put p)
+ throws IOException {
+ put(MetaReader.getMetaHTable(ct), p);
+ }
+
+ static void put(final HTable t, final Put p) throws IOException {
+ try {
+ t.put(p);
+ } finally {
+ t.close();
+ }
+ }
+
+ /**
+ * Put the passed ps to the .META. table.
+ * @param ct CatalogTracker on whose back we will ride the edit.
+ * @param ps Put to add to .META.
+ * @throws IOException
+ */
+ static void putsToMetaTable(final CatalogTracker ct, final List<Put> ps)
+ throws IOException {
+ HTable t = MetaReader.getMetaHTable(ct);
+ try {
+ t.put(ps);
+ } finally {
+ t.close();
+ }
+ }
+
+ /**
+ * Delete the passed d from the .META. table.
+ * @param ct CatalogTracker on whose back we will ride the edit.
+ * @param d Delete to add to .META.
+ * @throws IOException
+ */
+ static void deleteMetaTable(final CatalogTracker ct, final Delete d)
+ throws IOException {
+ HTable t = MetaReader.getMetaHTable(ct);
+ try {
+ t.delete(d);
+ } finally {
+ t.close();
+ }
+ }
+
/**
* Adds a META row for the specified new region.
* @param regionInfo region information
@@ -62,8 +112,7 @@ public class MetaEditor {
public static void addRegionToMeta(CatalogTracker catalogTracker,
HRegionInfo regionInfo)
throws IOException {
- catalogTracker.waitForMetaServerConnectionDefault().put(
- CatalogTracker.META_REGION, makePutFromRegionInfo(regionInfo));
+ putToMetaTable(catalogTracker, makePutFromRegionInfo(regionInfo));
LOG.info("Added region " + regionInfo.getRegionNameAsString() + " to META");
}
@@ -79,11 +128,9 @@ public class MetaEditor {
List<Put> puts = new ArrayList<Put>();
for (HRegionInfo regionInfo : regionInfos) {
puts.add(makePutFromRegionInfo(regionInfo));
- LOG.debug("Added region " + regionInfo.getRegionNameAsString() + " to META");
}
- catalogTracker.waitForMetaServerConnectionDefault().put(
- CatalogTracker.META_REGION, puts);
- LOG.info("Added " + puts.size() + " regions to META");
+ putsToMetaTable(catalogTracker, puts);
+ LOG.info("Added " + puts.size() + " regions in META");
}
/**
@@ -108,7 +155,7 @@ public class MetaEditor {
Writables.getBytes(a));
put.add(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER,
Writables.getBytes(b));
- catalogTracker.waitForMetaServerConnectionDefault().put(CatalogTracker.META_REGION, put);
+ putToMetaTable(catalogTracker, put);
LOG.info("Offlined parent region " + parent.getRegionNameAsString() +
" in META");
}
@@ -116,14 +163,11 @@ public class MetaEditor {
public static void addDaughter(final CatalogTracker catalogTracker,
final HRegionInfo regionInfo, final ServerName sn)
throws NotAllMetaRegionsOnlineException, IOException {
- HRegionInterface server = catalogTracker.waitForMetaServerConnectionDefault();
- byte [] catalogRegionName = CatalogTracker.META_REGION;
Put put = new Put(regionInfo.getRegionName());
addRegionInfo(put, regionInfo);
if (sn != null) addLocation(put, sn);
- server.put(catalogRegionName, put);
+ putToMetaTable(catalogTracker, put);
LOG.info("Added daughter " + regionInfo.getRegionNameAsString() +
- " in region " + Bytes.toString(catalogRegionName) +
(sn == null? ", serverName=null": ", serverName=" + sn.toString()));
}
@@ -145,9 +189,7 @@ public class MetaEditor {
public static void updateMetaLocation(CatalogTracker catalogTracker,
HRegionInfo regionInfo, ServerName sn)
throws IOException, ConnectException {
- HRegionInterface server = catalogTracker.waitForRootServerConnectionDefault();
- if (server == null) throw new IOException("No server for -ROOT-");
- updateLocation(server, CatalogTracker.ROOT_REGION, regionInfo, sn);
+ updateLocation(catalogTracker, regionInfo, sn);
}
/**
@@ -165,8 +207,7 @@ public class MetaEditor {
public static void updateRegionLocation(CatalogTracker catalogTracker,
HRegionInfo regionInfo, ServerName sn)
throws IOException {
- updateLocation(catalogTracker.waitForMetaServerConnectionDefault(),
- CatalogTracker.META_REGION, regionInfo, sn);
+ updateLocation(catalogTracker, regionInfo, sn);
}
/**
@@ -175,22 +216,23 @@ public class MetaEditor {
* Connects to the specified server which should be hosting the specified
* catalog region name to perform the edit.
*
- * @param server connection to server hosting catalog region
- * @param catalogRegionName name of catalog region being updated
+ * @param catalogTracker
* @param regionInfo region to update location of
* @param sn Server name
* @throws IOException In particular could throw {@link java.net.ConnectException}
* if the server is down on other end.
*/
- private static void updateLocation(HRegionInterface server,
- byte [] catalogRegionName, HRegionInfo regionInfo, ServerName sn)
+ private static void updateLocation(final CatalogTracker catalogTracker,
+ HRegionInfo regionInfo, ServerName sn)
throws IOException {
+ final byte [] regionName = regionInfo.getRegionName();
Put put = new Put(regionInfo.getRegionName());
addLocation(put, sn);
- server.put(catalogRegionName, put);
+ HTable t = MetaReader.getCatalogHTable(catalogTracker, regionName);
+ put(t, put);
LOG.info("Updated row " + regionInfo.getRegionNameAsString() +
- " in region " + Bytes.toStringBinary(catalogRegionName) + " with " +
- "serverName=" + sn.toString());
+ " in region " + Bytes.toString(t.getTableName()) +
+ " with server=" + sn);
}
/**
@@ -203,8 +245,7 @@ public class MetaEditor {
HRegionInfo regionInfo)
throws IOException {
Delete delete = new Delete(regionInfo.getRegionName());
- catalogTracker.waitForMetaServerConnectionDefault().
- delete(CatalogTracker.META_REGION, delete);
+ deleteMetaTable(catalogTracker, delete);
LOG.info("Deleted region " + regionInfo.getRegionNameAsString() + " from META");
}
@@ -223,8 +264,7 @@ public class MetaEditor {
throws NotAllMetaRegionsOnlineException, IOException {
Delete delete = new Delete(parent.getRegionName());
delete.deleteColumns(HConstants.CATALOG_FAMILY, qualifier);
- catalogTracker.waitForMetaServerConnectionDefault().
- delete(CatalogTracker.META_REGION, delete);
+ deleteMetaTable(catalogTracker, delete);
LOG.info("Deleted daughter reference " + daughter.getRegionNameAsString() +
", qualifier=" + Bytes.toStringBinary(qualifier) + ", from parent " +
parent.getRegionNameAsString());
@@ -252,7 +292,7 @@ public class MetaEditor {
Put put = new Put(HRegionInfo.ROOT_REGIONINFO.getRegionName());
addMetaUpdateStatus(put, metaUpdated);
catalogTracker.waitForRootServerConnectionDefault().put(
- CatalogTracker.ROOT_REGION, put);
+ CatalogTracker.ROOT_REGION_NAME, put);
LOG.info("Updated -ROOT- row with metaMigrated status = " + metaUpdated);
}
@@ -277,7 +317,7 @@ public class MetaEditor {
.createTableDescriptor(hrfm.getTableDesc());
updateHRI(masterServices.getCatalogTracker()
.waitForMetaServerConnectionDefault(),
- hrfm, CatalogTracker.META_REGION);
+ hrfm, CatalogTracker.META_REGION_NAME);
return true;
}
};
@@ -319,7 +359,7 @@ public class MetaEditor {
hrfm.getTableDesc());
updateHRI(masterServices.getCatalogTracker()
.waitForRootServerConnectionDefault(),
- hrfm, CatalogTracker.ROOT_REGION);
+ hrfm, CatalogTracker.ROOT_REGION_NAME);
return true;
}
};
@@ -369,13 +409,6 @@ public class MetaEditor {
return info;
}
- private static Put addMetaUpdateStatus(final Put p) {
- p.add(HConstants.CATALOG_FAMILY, HConstants.META_MIGRATION_QUALIFIER,
- Bytes.toBytes("true"));
- return p;
- }
-
-
private static Put addMetaUpdateStatus(final Put p, final boolean metaUpdated) {
p.add(HConstants.CATALOG_FAMILY, HConstants.META_MIGRATION_QUALIFIER,
Bytes.toBytes(metaUpdated));
@@ -386,7 +419,7 @@ public class MetaEditor {
private static Put addRegionInfo(final Put p, final HRegionInfo hri)
throws IOException {
p.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
- Writables.getBytes(hri));
+ Writables.getBytes(hri));
return p;
}
@@ -397,4 +430,4 @@ public class MetaEditor {
Bytes.toBytes(sn.getStartcode()));
return p;
}
-}
+}
\ No newline at end of file
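For reference, a minimal sketch (not part of the patch) of the open/edit/close pattern the new MetaEditor helpers follow when writing to .META. through an HTable; "conf" and "hri" are assumed to be in scope in the caller.

// Sketch only: mirrors what putToMetaTable/put do once this patch is applied.
HTable meta = new HTable(conf, HConstants.META_TABLE_NAME);
try {
  Put p = new Put(hri.getRegionName());
  p.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
    Writables.getBytes(hri));
  meta.put(p);
} finally {
  // Every helper closes its HTable once the edit completes.
  meta.close();
}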
diff --git src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java
index 2afe70c..dcd03ff 100644
--- src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java
+++ src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java
@@ -1,6 +1,4 @@
/**
- * Copyright 2010 The Apache Software Foundation
- *
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -26,26 +24,24 @@ import java.util.Map;
import java.util.NavigableMap;
import java.util.Set;
import java.util.TreeMap;
-import java.util.TreeSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.migration.HRegionInfo090x;
+import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Writables;
-import org.apache.hadoop.ipc.RemoteException;
/**
* Reads region and assignment information from .META..
- *
- * Uses the {@link CatalogTracker} to obtain locations and connections to
- * catalogs.
*/
public class MetaReader {
private static final Log LOG = LogFactory.getLog(MetaReader.class);
@@ -61,48 +57,6 @@ public class MetaReader {
}
/**
- * @param ct
- * @param tableName A user tablename or a .META. table name.
- * @return Interface on to server hosting the -ROOT- or
- * .META. regions.
- * @throws NotAllMetaRegionsOnlineException
- * @throws IOException
- */
- private static HRegionInterface getCatalogRegionInterface(final CatalogTracker ct,
- final byte [] tableName)
- throws NotAllMetaRegionsOnlineException, IOException {
- return Bytes.equals(HConstants.META_TABLE_NAME, tableName)?
- ct.waitForRootServerConnectionDefault():
- ct.waitForMetaServerConnectionDefault();
- }
-
- /**
- * @param tableName
- * @return Returns region name to look in for regions for tableName;
- * e.g. if we are looking for .META. regions, we need to look
- * in the -ROOT- region, else if a user table, we need to look
- * in the .META. region.
- */
- private static byte [] getCatalogRegionNameForTable(final byte [] tableName) {
- return Bytes.equals(HConstants.META_TABLE_NAME, tableName)?
- HRegionInfo.ROOT_REGIONINFO.getRegionName():
- HRegionInfo.FIRST_META_REGIONINFO.getRegionName();
- }
-
- /**
- * @param regionName
- * @return Returns region name to look in for regionName;
- * e.g. if we are looking for .META.,,1 region, we need to look
- * in -ROOT- region, else if a user region, we need to look
- * in the .META.,,1 region.
- */
- private static byte [] getCatalogRegionNameForRegion(final byte [] regionName) {
- return isMetaRegion(regionName)?
- HRegionInfo.ROOT_REGIONINFO.getRegionName():
- HRegionInfo.FIRST_META_REGIONINFO.getRegionName();
- }
-
- /**
* @param regionName
* @return True if regionName is from .META. table.
*/
@@ -118,23 +72,6 @@ public class MetaReader {
}
/**
- * Performs a full scan of .META..
- *
- * Returns a map of every region to it's currently assigned server, according
- * to META. If the region does not have an assignment it will have a null
- * value in the map.
- *
- * @return map of regions to their currently assigned server where server is
- * a String of <host> ':' <port>
- * @throws IOException
- */
- public static Map<HRegionInfo, ServerName> fullScan(
- CatalogTracker catalogTracker)
- throws IOException {
- return fullScan(catalogTracker, new TreeSet<String>());
- }
-
- /**
* Performs a full scan of .META., skipping regions from any
* tables in the specified set of disabled tables.
*
@@ -203,20 +140,16 @@ public class MetaReader {
* @return map of regions to their currently assigned server
* @throws IOException
*/
- public static List<Result> fullScanOfResults(
- CatalogTracker catalogTracker)
+ public static List<Result> fullScan(CatalogTracker catalogTracker)
throws IOException {
- final List<Result> regions = new ArrayList<Result>();
- Visitor v = new Visitor() {
+ CollectingVisitor<Result> v = new CollectingVisitor<Result>() {
@Override
- public boolean visit(Result r) throws IOException {
- if (r == null || r.isEmpty()) return true;
- regions.add(r);
- return true;
+ void add(Result r) {
+ this.results.add(r);
}
};
fullScan(catalogTracker, v);
- return regions;
+ return v.getResults();
}
/**
@@ -244,109 +177,134 @@ public class MetaReader {
* @param catalogTracker
* @param visitor
* @param startrow Where to start the scan. Pass null if want to begin scan
- * at first row.
+ * at first row (The visitor will stop the Scan when it's done so no need to
+ * pass a stoprow).
* @throws IOException
*/
public static void fullScan(CatalogTracker catalogTracker,
final Visitor visitor, final byte [] startrow)
throws IOException {
- HRegionInterface metaServer =
- catalogTracker.waitForMetaServerConnectionDefault();
- fullScan(metaServer, visitor,
- HRegionInfo.FIRST_META_REGIONINFO.getRegionName(), startrow);
+ fullScan(catalogTracker, visitor, startrow, false);
+ }
+
+ /**
+ * Performs a full scan of .META., or of -ROOT-
+ * when scanRoot is true, invoking the passed
+ * visitor on each row found.
+ * @param catalogTracker
+ * @param visitor
+ * @param startrow Where to start the scan. Pass null if want to begin scan
+ * at first row.
+ * @param scanRoot True if we are to scan -ROOT- rather than .META.
+ * @throws IOException
+ */
+ static void fullScan(CatalogTracker catalogTracker, final Visitor visitor,
+ final byte [] startrow, final boolean scanRoot)
+ throws IOException {
+ Scan scan = new Scan();
+ if (startrow != null) scan.setStartRow(startrow);
+ scan.addFamily(HConstants.CATALOG_FAMILY);
+ HTable metaTable = scanRoot?
+ getRootHTable(catalogTracker): getMetaHTable(catalogTracker);
+ ResultScanner scanner = metaTable.getScanner(scan);
+ try {
+ Result data;
+ while((data = scanner.next()) != null) {
+ if (data.isEmpty()) continue;
+ // Break if visit returns false.
+ if (!visitor.visit(data)) break;
+ }
+ } finally {
+ scanner.close();
+ metaTable.close();
+ }
return;
}
+
+ static HTable getCatalogHTable(final CatalogTracker catalogTracker,
+ final byte [] regionName)
+ throws IOException {
+ return isMetaRegion(regionName)? getRootHTable(catalogTracker):
+ getMetaHTable(catalogTracker);
+ }
+
+ /**
+ * Callers should call close on the returned {@link HTable} instance.
+ * @param catalogTracker
+ * @param tableName
+ * @return An {@link HTable} for tableName
+ * @throws IOException
+ */
+ private static HTable getHTable(final CatalogTracker catalogTracker,
+ final byte [] tableName)
+ throws IOException {
+ // Passing the CatalogTracker's connection configuration ensures this
+ // HTable instance uses the CatalogTracker's connection.
+ return new HTable(catalogTracker.getConnection().getConfiguration(), tableName);
+ }
+
+ /**
+ * Callers should call close on the returned {@link HTable} instance.
+ * @param ct
+ * @return An {@link HTable} for .META.
+ * @throws IOException
+ */
+ static HTable getMetaHTable(final CatalogTracker ct)
+ throws IOException {
+ return getHTable(ct, HConstants.META_TABLE_NAME);
+ }
/**
- * Reads the location of META from ROOT.
- * @param metaServer connection to server hosting ROOT
- * @return location of META in ROOT where location, or null if not available
+ * Callers should call close on the returned {@link HTable} instance.
+ * @param ct
+ * @return An {@link HTable} for -ROOT-
* @throws IOException
*/
- public static ServerName readMetaLocation(HRegionInterface metaServer)
+ static HTable getRootHTable(final CatalogTracker ct)
throws IOException {
- return readLocation(metaServer, CatalogTracker.ROOT_REGION,
- CatalogTracker.META_REGION);
+ return getHTable(ct, HConstants.ROOT_TABLE_NAME);
}
/**
* Reads the location of the specified region from META.
* @param catalogTracker
- * @param regionName region to read location of
- * @return location of META in ROOT where location is, or null if not available
+ * @param regionName region whose location we are after
+ * @return Location of the region as a {@link ServerName}, or null if not found
* @throws IOException
*/
public static ServerName readRegionLocation(CatalogTracker catalogTracker,
byte [] regionName)
throws IOException {
- if (isMetaRegion(regionName)) throw new IllegalArgumentException("See readMetaLocation");
- return readLocation(catalogTracker.waitForMetaServerConnectionDefault(),
- CatalogTracker.META_REGION, regionName);
- }
-
- private static ServerName readLocation(HRegionInterface metaServer,
- byte [] catalogRegionName, byte [] regionName)
- throws IOException {
- Result r = null;
- try {
- r = metaServer.get(catalogRegionName,
- new Get(regionName).
- addColumn(HConstants.CATALOG_FAMILY,
- HConstants.SERVER_QUALIFIER).
- addColumn(HConstants.CATALOG_FAMILY,
- HConstants.STARTCODE_QUALIFIER));
- } catch (java.net.SocketTimeoutException e) {
- // Treat this exception + message as unavailable catalog table. Catch it
- // and fall through to return a null
- } catch (java.net.SocketException e) {
- // Treat this exception + message as unavailable catalog table. Catch it
- // and fall through to return a null
- } catch (RemoteException re) {
- IOException ioe = re.unwrapRemoteException();
- if (ioe instanceof NotServingRegionException) {
- // Treat this NSRE as unavailable table. Catch and fall through to
- // return null below
- } else if (ioe.getMessage().contains("Server not running")) {
- // Treat as unavailable table.
- } else {
- throw re;
- }
- } catch (IOException e) {
- if (e.getCause() != null && e.getCause() instanceof IOException &&
- e.getCause().getMessage() != null &&
- e.getCause().getMessage().contains("Connection reset by peer")) {
- // Treat this exception + message as unavailable catalog table. Catch it
- // and fall through to return a null
- } else {
- throw e;
- }
- }
- if (r == null || r.isEmpty()) {
- return null;
- }
- return getServerNameFromResult(r);
+ Pair<HRegionInfo, ServerName> pair = getRegion(catalogTracker, regionName);
+ return (pair == null || pair.getSecond() == null)? null: pair.getSecond();
}
/**
* Gets the region info and assignment for the specified region from META.
* @param catalogTracker
- * @param regionName
- * @return location of META in ROOT where location is
- * a String of <host> ':' <port>, or null if not available
+ * @param regionName Region to lookup.
+ * @return Location and HRegionInfo for regionName
* @throws IOException
*/
public static Pair<HRegionInfo, ServerName> getRegion(
- CatalogTracker catalogTracker, byte [] regionName)
+ final CatalogTracker catalogTracker, final byte [] regionName)
throws IOException {
Get get = new Get(regionName);
get.addFamily(HConstants.CATALOG_FAMILY);
- byte [] meta = getCatalogRegionNameForRegion(regionName);
- Result r = catalogTracker.waitForMetaServerConnectionDefault().get(meta, get);
+ HTable metaTable = getCatalogHTable(catalogTracker, regionName);
+ Result r = null;
+ try {
+ r = metaTable.get(get);
+ } finally {
+ metaTable.close();
+ }
return (r == null || r.isEmpty())? null: metaRowToRegionPair(r);
}
/**
- * @param data A .META. table row.
+ * @param data A .META. table row. Cannot be null.
* @return A pair of the regioninfo and the ServerName
* (or null for server address if no address set in .META.).
* @throws IOException
@@ -386,28 +344,43 @@ public class MetaReader {
* @throws IOException
*/
public static boolean tableExists(CatalogTracker catalogTracker,
- String tableName)
+ final String tableName)
throws IOException {
if (tableName.equals(HTableDescriptor.ROOT_TABLEDESC.getNameAsString()) ||
tableName.equals(HTableDescriptor.META_TABLEDESC.getNameAsString())) {
// Catalog tables always exist.
return true;
}
- HRegionInterface metaServer =
- catalogTracker.waitForMetaServerConnectionDefault();
- Scan scan = getScanForTableName(Bytes.toBytes(tableName));
- scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
- long scannerid = metaServer.openScanner(
- HRegionInfo.FIRST_META_REGIONINFO.getRegionName(), scan);
- try {
- Result data = metaServer.next(scannerid);
- if (data != null && data.size() > 0) {
+ final byte [] tableNameBytes = Bytes.toBytes(tableName);
+ // Make a version of ResultCollectingVisitor that only collects the first
+ // region in a table, if one.
+ CollectingVisitor<HRegionInfo> visitor = new CollectingVisitor<HRegionInfo>() {
+ private HRegionInfo current = null;
+
+ @Override
+ public boolean visit(Result r) throws IOException {
+ this.current = getHRegionInfo(r, HConstants.REGIONINFO_QUALIFIER);
+ if (this.current == null) {
+ LOG.warn("No serialized HRegionInfo in " + r);
return true;
+ }
+ if (!isInsideTable(this.current, tableNameBytes)) return false;
+ if (this.current.isSplitParent()) return true;
+ // Else call super and add this Result to the collection.
+ super.visit(r);
+ // Stop collecting regions from table after we get one.
+ return false;
}
- return false;
- } finally {
- metaServer.close(scannerid);
- }
+
+ @Override
+ void add(Result r) {
+ // Add the current HRI.
+ this.results.add(this.current);
+ }
+ };
+ fullScan(catalogTracker, visitor, getTableStartRowForMeta(tableNameBytes));
+ // If visitor has results >= 1 then table exists.
+ return visitor.getResults().size() >= 1;
}
/**
@@ -416,10 +389,11 @@ public class MetaReader {
* @param tableName
* @return Ordered list of {@link HRegionInfo}.
* @throws IOException
+ * @throws InterruptedException
*/
public static List<HRegionInfo> getTableRegions(CatalogTracker catalogTracker,
byte [] tableName)
- throws IOException {
+ throws IOException, InterruptedException {
return getTableRegions(catalogTracker, tableName, false);
}
@@ -431,46 +405,47 @@ public class MetaReader {
* parents in the return.
* @return Ordered list of {@link HRegionInfo}.
* @throws IOException
+ * @throws InterruptedException
*/
public static List<HRegionInfo> getTableRegions(CatalogTracker catalogTracker,
- byte [] tableName, final boolean excludeOfflinedSplitParents)
- throws IOException {
- if (Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME)) {
- // If root, do a bit of special handling.
- List<HRegionInfo> list = new ArrayList<HRegionInfo>();
- list.add(HRegionInfo.ROOT_REGIONINFO);
- return list;
- } else if (Bytes.equals(tableName, HConstants.META_TABLE_NAME)) {
- // Same for .META. table
- List<HRegionInfo> list = new ArrayList<HRegionInfo>();
- list.add(HRegionInfo.FIRST_META_REGIONINFO);
- return list;
+ final byte [] tableName, final boolean excludeOfflinedSplitParents)
+ throws IOException, InterruptedException {
+ List<Pair<HRegionInfo, ServerName>> result =
+ getTableRegionsAndLocations(catalogTracker, tableName,
+ excludeOfflinedSplitParents);
+ return getListOfHRegionInfos(result);
+ }
+
+ static List<HRegionInfo> getListOfHRegionInfos(final List<Pair<HRegionInfo, ServerName>> pairs) {
+ if (pairs == null || pairs.isEmpty()) return null;
+ List<HRegionInfo> result = new ArrayList<HRegionInfo>(pairs.size());
+ for (Pair<HRegionInfo, ServerName> pair: pairs) {
+ result.add(pair.getFirst());
}
+ return result;
+ }
- // Its a user table.
- HRegionInterface metaServer =
- getCatalogRegionInterface(catalogTracker, tableName);
- List<HRegionInfo> regions = new ArrayList<HRegionInfo>();
+ /**
+ * @param current
+ * @param tableName
+ * @return True if current tablename is equal to
+ * tableName
+ */
+ static boolean isInsideTable(final HRegionInfo current, final byte [] tableName) {
+ return Bytes.equals(tableName, current.getTableName());
+ }
- Scan scan = getScanForTableName(tableName);
- scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
- long scannerid =
- metaServer.openScanner(getCatalogRegionNameForTable(tableName), scan);
- try {
- Result data;
- while((data = metaServer.next(scannerid)) != null) {
- if (data != null && data.size() > 0) {
- HRegionInfo info = Writables.getHRegionInfo(
- data.getValue(HConstants.CATALOG_FAMILY,
- HConstants.REGIONINFO_QUALIFIER));
- if (excludeOfflinedSplitParents && info.isSplitParent()) continue;
- regions.add(info);
- }
- }
- return regions;
- } finally {
- metaServer.close(scannerid);
- }
+ /**
+ * @param tableName
+ * @return Place to start Scan in .META. when passed a
+ * tableName; returns &lt;tableName&gt; &lt;,&gt; &lt;,&gt;
+ */
+ static byte [] getTableStartRowForMeta(final byte [] tableName) {
+ byte [] startRow = new byte[tableName.length + 2];
+ System.arraycopy(tableName, 0, startRow, 0, tableName.length);
+ startRow[startRow.length - 2] = HRegionInfo.DELIMITER;
+ startRow[startRow.length - 1] = HRegionInfo.DELIMITER;
+ return startRow;
}
/**
@@ -504,8 +479,22 @@ public class MetaReader {
public static List<Pair<HRegionInfo, ServerName>>
getTableRegionsAndLocations(CatalogTracker catalogTracker, String tableName)
throws IOException, InterruptedException {
- byte [] tableNameBytes = Bytes.toBytes(tableName);
- if (Bytes.equals(tableNameBytes, HConstants.ROOT_TABLE_NAME)) {
+ return getTableRegionsAndLocations(catalogTracker, Bytes.toBytes(tableName),
+ true);
+ }
+
+ /**
+ * @param catalogTracker
+ * @param tableName
+ * @return Return list of regioninfos and server addresses.
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ public static List<Pair<HRegionInfo, ServerName>>
+ getTableRegionsAndLocations(final CatalogTracker catalogTracker,
+ final byte [] tableName, final boolean excludeOfflinedSplitParents)
+ throws IOException, InterruptedException {
+ if (Bytes.equals(tableName, HConstants.ROOT_TABLE_NAME)) {
// If root, do a bit of special handling.
ServerName serverName = catalogTracker.getRootLocation();
List<Pair<HRegionInfo, ServerName>> list =
new ArrayList<Pair<HRegionInfo, ServerName>>();
list.add(new Pair<HRegionInfo, ServerName>(HRegionInfo.ROOT_REGIONINFO,
serverName));
return list;
}
- HRegionInterface metaServer =
- getCatalogRegionInterface(catalogTracker, tableNameBytes);
- List<Pair<HRegionInfo, ServerName>> regions =
- new ArrayList<Pair<HRegionInfo, ServerName>>();
- Scan scan = getScanForTableName(tableNameBytes);
- scan.addFamily(HConstants.CATALOG_FAMILY);
- long scannerid =
- metaServer.openScanner(getCatalogRegionNameForTable(tableNameBytes), scan);
- try {
- Result data;
- while((data = metaServer.next(scannerid)) != null) {
- if (data != null && data.size() > 0) {
- Pair<HRegionInfo, ServerName> region = metaRowToRegionPair(data);
- if (region == null) continue;
- regions.add(region);
+ // Make a version of CollectingVisitor that collects HRegionInfo and ServerAddress
+ CollectingVisitor<Pair<HRegionInfo, ServerName>> visitor =
+ new CollectingVisitor<Pair<HRegionInfo, ServerName>>() {
+ private Pair<HRegionInfo, ServerName> current = null;
+
+ @Override
+ public boolean visit(Result r) throws IOException {
+ HRegionInfo hri = getHRegionInfo(r, HConstants.REGIONINFO_QUALIFIER);
+ if (hri == null) {
+ LOG.warn("No serialized HRegionInfo in " + r);
+ return true;
}
+ if (!isInsideTable(hri, tableName)) return false;
+ if (excludeOfflinedSplitParents && hri.isSplitParent()) return true;
+ ServerName sn = getServerNameFromResult(r);
+ // Populate this.current so available when we call #add
+ this.current = new Pair<HRegionInfo, ServerName>(hri, sn);
+ // Else call super and add this Result to the collection.
+ return super.visit(r);
}
- return regions;
- } finally {
- metaServer.close(scannerid);
- }
+
+ @Override
+ void add(Result r) {
+ this.results.add(this.current);
+ }
+ };
+ fullScan(catalogTracker, visitor, getTableStartRowForMeta(tableName),
+ Bytes.equals(tableName, HConstants.META_TABLE_NAME));
+ return visitor.getResults();
}
/**
@@ -575,8 +572,6 @@ public class MetaReader {
public static void fullScanMetaAndPrint(
CatalogTracker catalogTracker)
throws IOException {
- final List<Result> regions =
- new ArrayList<Result>();
Visitor v = new Visitor() {
@Override
public boolean visit(Result r) throws IOException {
@@ -640,6 +635,38 @@ public class MetaReader {
/**
+ * Interpret the content of the cell at {@link HConstants#CATALOG_FAMILY} and
+ * qualifier as an HRegionInfo and return it, or null.
+ * @param r Result instance to pull from.
+ * @param qualifier Column family qualifier -- either
+ * {@link HConstants#SPLITA_QUALIFIER}, {@link HConstants#SPLITB_QUALIFIER} or
+ * {@link HConstants#REGIONINFO_QUALIFIER}.
+ * @return An HRegionInfo instance or null.
+ * @throws IOException
+ */
+ public static HRegionInfo getHRegionInfo(final Result r, byte [] qualifier)
+ throws IOException {
+ byte [] bytes = r.getValue(HConstants.CATALOG_FAMILY, qualifier);
+ if (bytes == null || bytes.length <= 0) return null;
+ return Writables.getHRegionInfoOrNull(bytes);
+ }
+
+ /**
+ * @param r Result to pull from.
+ * @param qualifier
+ * @return An {@link HServerAddress} instance made from content of r
+ * at qualifier.
+ */
+ public static HServerAddress getHServerAddress(final Result r,
+ byte [] qualifier) {
+ if (r == null || r.isEmpty()) return null;
+ byte [] value = r.getValue(HConstants.CATALOG_FAMILY, qualifier);
+ return value == null || value.length <= 0? null:
+ new HServerAddress(Addressing.
+ createInetSocketAddressFromHostAndPortStr(Bytes.toString(value)));
+ }
+
+ /**
* Implementations 'visit' a catalog table row.
*/
public interface Visitor {
@@ -651,4 +678,27 @@ public class MetaReader {
*/
public boolean visit(final Result r) throws IOException;
}
-}
+
+ /**
+ * A {@link Visitor} that collects content out of passed {@link Result}.
+ */
+ static abstract class CollectingVisitor<T> implements Visitor {
+ final List<T> results = new ArrayList<T>();
+ @Override
+ public boolean visit(Result r) throws IOException {
+ if (r == null || r.isEmpty()) return true;
+ add(r);
+ return true;
+ }
+
+ abstract void add(Result r);
+
+ /**
+ * @return Collected results; wait till visits complete to collect all
+ * possible results
+ */
+ List<T> getResults() {
+ return this.results;
+ }
+ }
+}
\ No newline at end of file
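As an illustration (not part of the patch), a caller in the same package could use the new CollectingVisitor together with fullScan as in the sketch below; "catalogTracker" is assumed to be available.

// Sketch only: collect the row key of every .META. row via the new visitor.
MetaReader.CollectingVisitor<byte[]> v = new MetaReader.CollectingVisitor<byte[]>() {
  @Override
  void add(Result r) {
    this.results.add(r.getRow());
  }
};
MetaReader.fullScan(catalogTracker, v);
List<byte[]> rows = v.getResults();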
diff --git src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 29e267f..379f9d4 100644
--- src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -1565,19 +1565,22 @@ public class HBaseAdmin implements Abortable, Closeable {
* get the regions of a given table.
*
* @param tableName the name of the table
- * @return Ordered list of {@link HRegionInfo}. *
+ * @return Ordered list of {@link HRegionInfo}.
* @throws IOException
*/
public List<HRegionInfo> getTableRegions(final byte[] tableName)
throws IOException {
CatalogTracker ct = getCatalogTracker();
- List<HRegionInfo> Regions;
+ List<HRegionInfo> Regions = null;
try {
Regions = MetaReader.getTableRegions(ct, tableName, true);
+ } catch (InterruptedException e) {
+ // I can't change this public method's API, so convert the IE to an RE.
+ throw new RuntimeException(e);
} finally {
cleanupCatalogTracker(ct);
}
- return Regions;
+ return Regions;
}
public void close() throws IOException {
diff --git src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
index 1252473..3cc8e46 100644
--- src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
+++ src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
@@ -130,8 +130,10 @@ import org.apache.zookeeper.KeeperException;
*/
@SuppressWarnings("serial")
public class HConnectionManager {
- // A LRU Map of HConnectionKey -> HConnection (TableServer).
- private static final Map<HConnectionKey, HConnection> HBASE_INSTANCES;
+ // An LRU Map of HConnectionKey -> HConnection (TableServer). All
+ // access must be synchronized. This map is not private because tests
+ // need to be able to tinker with it.
+ static final Map<HConnectionKey, HConnection> HBASE_INSTANCES;
public static final int MAX_CACHED_HBASE_INSTANCES;
@@ -894,7 +896,7 @@ public class HConnectionManager {
deleteCachedLocation(tableName, row);
}
- // Query the root or meta region for the location of the meta region
+ // Query the root or meta region for the location of the meta region
regionInfoRow = server.getClosestRowBefore(
metaLocation.getRegionInfo().getRegionName(), metaKey,
HConstants.CATALOG_FAMILY);
@@ -1200,7 +1202,7 @@ public class HConnectionManager {
} catch (RemoteException e) {
LOG.warn("RemoteException connecting to RS", e);
// Throw what the RemoteException was carrying.
- throw RemoteExceptionHandler.decodeRemoteException(e);
+ throw e.unwrapRemoteException();
}
}
}
@@ -1232,19 +1234,22 @@ public class HConnectionManager {
public <T> T getRegionServerWithRetries(ServerCallable<T> callable)
throws IOException, RuntimeException {
- List<Throwable> exceptions = new ArrayList<Throwable>();
+ List<RetriesExhaustedException.ThrowableWithExtraContext> exceptions =
+ new ArrayList<RetriesExhaustedException.ThrowableWithExtraContext>();
for(int tries = 0; tries < numRetries; tries++) {
try {
- callable.instantiateServer(tries != 0);
callable.beforeCall();
+ callable.connect(tries != 0);
return callable.call();
} catch (Throwable t) {
callable.shouldRetry(t);
t = translateException(t);
- exceptions.add(t);
+ RetriesExhaustedException.ThrowableWithExtraContext qt =
+ new RetriesExhaustedException.ThrowableWithExtraContext(t,
+ System.currentTimeMillis(), callable.toString());
+ exceptions.add(qt);
if (tries == numRetries - 1) {
- throw new RetriesExhaustedException(callable.getServerName(),
- callable.getRegionName(), callable.getRow(), tries, exceptions);
+ throw new RetriesExhaustedException(tries, exceptions);
}
} finally {
callable.afterCall();
@@ -1253,7 +1258,7 @@ public class HConnectionManager {
Thread.sleep(getPauseTime(tries));
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
- throw new IOException("Giving up trying to get region server: thread is interrupted.");
+ throw new IOException("Giving up after tries=" + tries, e);
}
}
return null;
@@ -1262,8 +1267,8 @@ public class HConnectionManager {
public <T> T getRegionServerWithoutRetries(ServerCallable<T> callable)
throws IOException, RuntimeException {
try {
- callable.instantiateServer(false);
callable.beforeCall();
+ callable.connect(false);
return callable.call();
} catch (Throwable t) {
Throwable t2 = translateException(t);
@@ -1288,7 +1293,7 @@ public class HConnectionManager {
return server.multi(multi);
}
@Override
- public void instantiateServer(boolean reload) throws IOException {
+ public void connect(boolean reload) throws IOException {
server =
connection.getHRegionConnection(loc.getHostname(), loc.getPort());
}
diff --git src/main/java/org/apache/hadoop/hbase/client/HTable.java src/main/java/org/apache/hadoop/hbase/client/HTable.java
index b5cf639..cc8bdcf 100644
--- src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -53,7 +53,6 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.UnknownScannerException;
-import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.HConnectionManager.HConnectable;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
@@ -191,7 +190,8 @@ public class HTable implements HTableInterface, Closeable {
}
this.connection = HConnectionManager.getConnection(conf);
this.scannerTimeout =
- (int) conf.getLong(HConstants.HBASE_REGIONSERVER_LEASE_PERIOD_KEY, HConstants.DEFAULT_HBASE_REGIONSERVER_LEASE_PERIOD);
+ (int) conf.getLong(HConstants.HBASE_REGIONSERVER_LEASE_PERIOD_KEY,
+ HConstants.DEFAULT_HBASE_REGIONSERVER_LEASE_PERIOD);
this.operationTimeout = HTableDescriptor.isMetaTable(tableName) ? HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT
: conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
@@ -415,8 +415,9 @@ public class HTable implements HTableInterface, Closeable {
}
};
MetaScanner.metaScan(configuration, visitor, this.tableName);
- return new Pair<byte[][], byte[][]>(startKeyList.toArray(new byte[startKeyList.size()][]),
- endKeyList.toArray(new byte[endKeyList.size()][]));
+ return new Pair<byte[][], byte[][]>(
+ startKeyList.toArray(new byte[startKeyList.size()][]),
+ endKeyList.toArray(new byte[endKeyList.size()][]));
}
/**
@@ -850,7 +851,7 @@ public class HTable implements HTableInterface, Closeable {
@Override
public void flushCommits() throws IOException {
try {
- connection.processBatchOfPuts(writeBuffer, tableName, pool);
+ this.connection.processBatchOfPuts(writeBuffer, tableName, pool);
} finally {
if (clearBufferOnFail) {
writeBuffer.clear();
diff --git src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
index da5b80d..f56ca17 100644
--- src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
+++ src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
@@ -187,7 +187,7 @@ public class MetaScanner {
if (LOG.isDebugEnabled()) {
LOG.debug("Scanning " + Bytes.toString(metaTableName) +
" starting at row=" + Bytes.toStringBinary(startRow) + " for max=" +
- rowUpperLimit + " rows");
+ rowUpperLimit + " rows using " + connection.toString());
}
callable = new ScannerCallable(connection, metaTableName, scan);
// Open scanner
diff --git src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java
index 89d2abe..ebc8253 100644
--- src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java
+++ src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java
@@ -15,9 +15,8 @@
*/
package org.apache.hadoop.hbase.client;
-import org.apache.hadoop.hbase.util.Bytes;
-
import java.io.IOException;
+import java.util.Date;
import java.util.List;
/**
@@ -36,28 +35,53 @@ public class RetriesExhaustedException extends IOException {
}
/**
+ * Datastructure that allows adding more info around Throwable incident.
+ */
+ public static class ThrowableWithExtraContext {
+ private final Throwable t;
+ private final long when;
+ private final String extras;
+
+ public ThrowableWithExtraContext(final Throwable t, final long when,
+ final String extras) {
+ this.t = t;
+ this.when = when;
+ this.extras = extras;
+ }
+
+ @Override
+ public String toString() {
+ return new Date(this.when).toString() + ", " + extras + ", " + t.toString();
+ }
+ }
+
+ /**
* Create a new RetriesExhaustedException from the list of prior failures.
- * @param serverName name of HRegionServer
- * @param regionName name of region
- * @param row The row we were pursuing when we ran out of retries
+ * @param callableVitals Details from the {@link ServerCallable} we were using
+ * when we got this exception.
* @param numTries The number of tries we made
* @param exceptions List of exceptions that failed before giving up
*/
- public RetriesExhaustedException(String serverName, final byte [] regionName,
- final byte [] row, int numTries, List<Throwable> exceptions) {
- super(getMessage(serverName, regionName, row, numTries, exceptions));
+ public RetriesExhaustedException(final String callableVitals, int numTries,
+ List<Throwable> exceptions) {
+ super(getMessage(callableVitals, numTries, exceptions));
+ }
+
+ /**
+ * Create a new RetriesExhaustedException from the list of prior failures.
+ * @param numTries
+ * @param exceptions List of exceptions that failed before giving up
+ */
+ public RetriesExhaustedException(final int numTries,
+ final List<ThrowableWithExtraContext> exceptions) {
+ super(getMessage(numTries, exceptions));
}
- private static String getMessage(String serverName, final byte [] regionName,
- final byte [] row,
- int numTries, List<Throwable> exceptions) {
- StringBuilder buffer = new StringBuilder("Trying to contact region server ");
- buffer.append(serverName);
- buffer.append(" for region ");
- buffer.append(regionName == null? "": Bytes.toStringBinary(regionName));
- buffer.append(", row '");
- buffer.append(row == null? "": Bytes.toStringBinary(row));
- buffer.append("', but failed after ");
+ private static String getMessage(String callableVitals, int numTries,
+ List<Throwable> exceptions) {
+ StringBuilder buffer = new StringBuilder("Failed contacting ");
+ buffer.append(callableVitals);
+ buffer.append(" after ");
buffer.append(numTries + 1);
buffer.append(" attempts.\nExceptions:\n");
for (Throwable t : exceptions) {
@@ -66,4 +90,16 @@ public class RetriesExhaustedException extends IOException {
}
return buffer.toString();
}
+
+ private static String getMessage(final int numTries,
+ final List<ThrowableWithExtraContext> exceptions) {
+ StringBuilder buffer = new StringBuilder("Failed after attempts=");
+ buffer.append(numTries + 1);
+ buffer.append(", exceptions:\n");
+ for (ThrowableWithExtraContext t : exceptions) {
+ buffer.append(t.toString());
+ buffer.append("\n");
+ }
+ return buffer.toString();
+ }
}
\ No newline at end of file
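A minimal usage sketch (not part of the patch) of the new constructor and ThrowableWithExtraContext, following the retry loop rewritten in HConnectionManager above; "callable" and "numRetries" are assumed.

// Sketch only: record context per failed attempt, then give up with the new constructor.
List<RetriesExhaustedException.ThrowableWithExtraContext> exceptions =
  new ArrayList<RetriesExhaustedException.ThrowableWithExtraContext>();
for (int tries = 0; tries < numRetries; tries++) {
  try {
    callable.beforeCall();
    callable.connect(tries != 0);
    return callable.call();
  } catch (Throwable t) {
    exceptions.add(new RetriesExhaustedException.ThrowableWithExtraContext(
      t, System.currentTimeMillis(), callable.toString()));
    if (tries == numRetries - 1) {
      throw new RetriesExhaustedException(tries, exceptions);
    }
  } finally {
    callable.afterCall();
  }
}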
diff --git src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java
index 4997143..cfe4701 100644
--- src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java
+++ src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedWithDetailsException.java
@@ -152,4 +152,4 @@ extends RetriesExhaustedException {
return s;
}
-}
+}
\ No newline at end of file
diff --git src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
index 5ea38b4..78c00c5 100644
--- src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
+++ src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
@@ -58,9 +58,9 @@ public class ScannerCallable extends ServerCallable<Result[]> {
* @throws IOException
*/
@Override
- public void instantiateServer(boolean reload) throws IOException {
+ public void connect(boolean reload) throws IOException {
if (!instantiated || reload) {
- super.instantiateServer(reload);
+ super.connect(reload);
instantiated = true;
}
}
diff --git src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java
index 816f8b7..395ab97 100644
--- src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java
+++ src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java
@@ -31,7 +31,14 @@ import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.util.Bytes;
/**
- * Abstract class that implements Callable, used by retryable actions.
+ * Abstract class that implements {@link Callable}. Implementation stipulates
+ * return type and method we actually invoke on remote Server. Usually
+ * used inside a try/catch that fields usual connection failures all wrapped
+ * up in a retry loop.
+ * Call {@link #connect(boolean)} to connect to server hosting region
+ * that contains the passed row in the passed table before invoking
+ * {@link #call()}.
+ * @see HConnection#getRegionServerWithoutRetries(ServerCallable)
* @param <T> the class that the ServerCallable handles
*/
public abstract class ServerCallable<T> implements Callable<T> {
@@ -44,9 +51,9 @@ public abstract class ServerCallable<T> implements Callable<T> {
protected long startTime, endTime;
/**
- * @param connection connection callable is on
- * @param tableName table name callable is on
- * @param row row we are querying
+ * @param connection Connection to use.
+ * @param tableName Table name to which row belongs.
+ * @param row The row we want in tableName.
*/
public ServerCallable(HConnection connection, byte [] tableName, byte [] row) {
this(connection, tableName, row, HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
@@ -58,34 +65,37 @@ public abstract class ServerCallable<T> implements Callable<T> {
this.row = row;
this.callTimeout = callTimeout;
}
+
/**
- *
- * @param reload set this to true if connection should re-find the region
+ * Connect to the server hosting region with row from tablename.
+ * @param reload Set this to true if connection should re-find the region
* @throws IOException e
*/
- public void instantiateServer(boolean reload) throws IOException {
+ public void connect(final boolean reload) throws IOException {
this.location = connection.getRegionLocation(tableName, row, reload);
this.server = connection.getHRegionConnection(location.getHostname(),
location.getPort());
}
- /** @return the server name */
+ /** @return the server name
+ * @deprecated Just use {@link #toString()} instead.
+ */
public String getServerName() {
- if (location == null) {
- return null;
- }
+ if (location == null) return null;
return location.getHostnamePort();
}
- /** @return the region name */
+ /** @return the region name
+ * @deprecated Just use {@link #toString()} instead.
+ */
public byte[] getRegionName() {
- if (location == null) {
- return null;
- }
+ if (location == null) return null;
return location.getRegionInfo().getRegionName();
}
- /** @return the row */
+ /** @return the row
+ * @deprecated Just use {@link #toString()} instead.
+ */
public byte [] getRow() {
return row;
}
@@ -113,4 +123,4 @@ public abstract class ServerCallable<T> implements Callable<T> {
this.callTimeout = ((int) (this.endTime - this.startTime));
}
}
-}
\ No newline at end of file
+}
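To make the renamed lifecycle concrete, a sketch (not part of the patch) of driving a ServerCallable by hand, per the class javadoc above; "connection", "tableName" and "row" are assumed.

// Sketch only: connect to the region hosting 'row', then invoke the remote call.
// Normally HConnectionManager#getRegionServerWithRetries does this for you.
ServerCallable<Result> callable = new ServerCallable<Result>(connection, tableName, row) {
  public Result call() throws IOException {
    return server.get(location.getRegionInfo().getRegionName(), new Get(row));
  }
};
try {
  callable.beforeCall();
  callable.connect(false);  // was instantiateServer(false) before this patch
  Result r = callable.call();
} catch (Exception e) {
  throw new IOException(e);
} finally {
  callable.afterCall();
}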
diff --git src/main/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java src/main/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java
index 2a19fa1..0d1e642 100644
--- src/main/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java
+++ src/main/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java
@@ -83,7 +83,7 @@ public interface HMasterInterface extends VersionedProtocol {
* @throws IOException
*/
public Pair<Integer, Integer> getAlterStatus(byte[] tableName)
- throws IOException;
+ throws IOException;
/**
* Adds a column to the specified table
diff --git src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index c0170b4..cbb3dad 100644
--- src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -266,11 +266,12 @@ public class AssignmentManager extends ZooKeeperListener {
* @param tableName
* @return Pair indicating the status of the alter command
* @throws IOException
+ * @throws InterruptedException
*/
public Pair<Integer, Integer> getReopenStatus(byte[] tableName)
- throws IOException {
- List<HRegionInfo> hris = MetaReader.getTableRegions(
- this.master.getCatalogTracker(), tableName);
+ throws IOException, InterruptedException {
+ List<HRegionInfo> hris =
+ MetaReader.getTableRegions(this.master.getCatalogTracker(), tableName);
Integer pending = 0;
for(HRegionInfo hri : hris) {
if(regionsToReopen.get(hri.getEncodedName()) != null) {
@@ -709,7 +710,7 @@ public class AssignmentManager extends ZooKeeperListener {
case RS_ZK_REGION_OPENING:
// Should see OPENING after we have asked it to OPEN or additional
// times after already being in state of OPENING
- if(regionState == null ||
+ if (regionState == null ||
(!regionState.isPendingOpen() && !regionState.isOpening())) {
LOG.warn("Received OPENING for region " +
prettyPrintedRegionName +
@@ -1729,7 +1730,6 @@ public class AssignmentManager extends ZooKeeperListener {
// Presume that master has stale data. Presume remote side just split.
// Presume that the split message when it comes in will fix up the master's
// in memory cluster state.
- return;
} catch (Throwable t) {
if (t instanceof RemoteException) {
t = ((RemoteException)t).unwrapRemoteException();
@@ -2044,7 +2044,7 @@ public class AssignmentManager extends ZooKeeperListener {
Map<ServerName, List<Pair<HRegionInfo, Result>>> rebuildUserRegions()
throws IOException, KeeperException {
// Region assignment from META
- List<Result> results = MetaReader.fullScanOfResults(this.catalogTracker);
+ List<Result> results = MetaReader.fullScan(this.catalogTracker);
// Map of offline servers and their regions to be returned
Map<ServerName, List<Pair<HRegionInfo, Result>>> offlineServers =
new TreeMap<ServerName, List<Pair<HRegionInfo, Result>>>();
diff --git src/main/java/org/apache/hadoop/hbase/master/HMaster.java src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index cde36e1..abfb076 100644
--- src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -25,7 +25,6 @@ import java.lang.reflect.InvocationTargetException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;
@@ -41,11 +40,9 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerLoad;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
-import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableDescriptors;
-import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.UnknownRegionException;
@@ -54,8 +51,8 @@ import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.MetaScanner;
-import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
+import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.executor.ExecutorService.ExecutorType;
import org.apache.hadoop.hbase.ipc.HBaseRPC;
@@ -63,7 +60,7 @@ import org.apache.hadoop.hbase.ipc.HBaseServer;
import org.apache.hadoop.hbase.ipc.HMasterInterface;
import org.apache.hadoop.hbase.ipc.HMasterRegionInterface;
import org.apache.hadoop.hbase.ipc.RpcServer;
-import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
import org.apache.hadoop.hbase.master.handler.DeleteTableHandler;
import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
@@ -71,18 +68,14 @@ import org.apache.hadoop.hbase.master.handler.ModifyTableHandler;
import org.apache.hadoop.hbase.master.handler.TableAddFamilyHandler;
import org.apache.hadoop.hbase.master.handler.TableDeleteFamilyHandler;
import org.apache.hadoop.hbase.master.handler.TableModifyFamilyHandler;
-import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
import org.apache.hadoop.hbase.master.metrics.MasterMetrics;
import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.replication.regionserver.Replication;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
-import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.InfoServer;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Sleeper;
@@ -508,8 +501,7 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
Result r =
catalogTracker.waitForRootServerConnectionDefault().get(
HRegionInfo.ROOT_REGIONINFO.getRegionName(), get);
- if (r != null && r.getBytes() != null)
- {
+ if (r != null && r.getBytes() != null) {
byte[] metaMigrated = r.getValue(HConstants.CATALOG_FAMILY,
HConstants.META_MIGRATION_QUALIFIER);
String migrated = Bytes.toString(metaMigrated);
@@ -994,16 +986,13 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
}
}
- /**
- * Get the number of regions of the table that have been updated by the alter.
- *
- * @return Pair indicating the number of regions updated Pair.getFirst is the
- * regions that are yet to be updated Pair.getSecond is the total number
- * of regions of the table
- */
public Pair<Integer, Integer> getAlterStatus(byte[] tableName)
throws IOException {
- return this.assignmentManager.getReopenStatus(tableName);
+ try {
+ return this.assignmentManager.getReopenStatus(tableName);
+ } catch (InterruptedException e) {
+ throw new IOException("Interrupted", e);
+ }
}
public void addColumn(byte [] tableName, HColumnDescriptor column)
diff --git src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
index b6dc1c0..1885297 100644
--- src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
+++ src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
@@ -112,8 +112,15 @@ public class EnableTableHandler extends EventHandler {
while (true) {
// Get the regions of this table. We're done when all listed
// tables are onlined.
- List<HRegionInfo> regionsInMeta =
- MetaReader.getTableRegions(this.ct, tableName, true);
+ List<HRegionInfo> regionsInMeta;
+ try {
+ regionsInMeta = MetaReader.getTableRegions(this.ct, tableName, true);
+ } catch (InterruptedException e1) {
+ LOG.warn("getTableRegions was interrupted");
+ // Preserve the interrupt.
+ Thread.currentThread().interrupt();
+ break;
+ }
int countOfRegionsInTable = regionsInMeta.size();
List<HRegionInfo> regions = regionsToAssign(regionsInMeta);
if (regions.size() == 0) {
diff --git src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
index 742aea4..34c3871 100644
--- src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
+++ src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.hbase.master.DeadServer;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Writables;
import org.apache.zookeeper.KeeperException;
/**
@@ -226,14 +225,16 @@ public class ServerShutdownHandler extends EventHandler {
if (!rit.isClosing() && !rit.isPendingClose()) {
LOG.debug("Removed " + rit.getRegion().getRegionNameAsString() +
" from list of regions to assign because in RIT");
- hris.remove(rit.getRegion());
+ if (hris != null) hris.remove(rit.getRegion());
}
}
- LOG.info("Reassigning " + (hris == null? 0: hris.size()) +
- " region(s) that " + serverName +
- " was carrying (skipping " + regionsInTransition.size() +
- " regions(s) that are already in transition)");
+ assert regionsInTransition != null;
+ LOG.info("Reassigning " + ((hris == null)? 0: hris.size()) +
+ " region(s) that " + (serverName == null? "null": serverName) +
+ " was carrying (skipping " +
+ regionsInTransition.size() +
+ " regions(s) that are already in transition)");
// Iterate regions that were on this server and assign them
if (hris != null) {
@@ -303,7 +304,7 @@ public class ServerShutdownHandler extends EventHandler {
final AssignmentManager assignmentManager,
final CatalogTracker catalogTracker)
throws IOException {
- HRegionInfo daughter = getHRegionInfo(result, qualifier);
+ HRegionInfo daughter = MetaReader.getHRegionInfo(result, qualifier);
if (daughter == null) return;
if (isDaughterMissing(catalogTracker, daughter)) {
LOG.info("Fixup; missing daughter " + daughter.getRegionNameAsString());
@@ -316,21 +317,6 @@ public class ServerShutdownHandler extends EventHandler {
}
/**
- * Interpret the content of the cell at {@link HConstants#CATALOG_FAMILY} and
- * qualifier as an HRegionInfo and return it, or null.
- * @param r Result instance to pull from.
- * @param qualifier Column family qualifier
- * @return An HRegionInfo instance or null.
- * @throws IOException
- */
- private static HRegionInfo getHRegionInfo(final Result r, byte [] qualifier)
- throws IOException {
- byte [] bytes = r.getValue(HConstants.CATALOG_FAMILY, qualifier);
- if (bytes == null || bytes.length <= 0) return null;
- return Writables.getHRegionInfoOrNull(bytes);
- }
-
- /**
* Look for presence of the daughter OR of a split of the daughter in .META.
* Daughter could have been split over on regionserver before a run of the
* catalogJanitor had chance to clear reference from parent.
@@ -372,7 +358,7 @@ public class ServerShutdownHandler extends EventHandler {
@Override
public boolean visit(Result r) throws IOException {
- HRegionInfo hri = getHRegionInfo(r, HConstants.REGIONINFO_QUALIFIER);
+ HRegionInfo hri = MetaReader.getHRegionInfo(r, HConstants.REGIONINFO_QUALIFIER);
if (hri == null) {
LOG.warn("No serialized HRegionInfo in " + r);
return true;
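The private getHRegionInfo helper deleted above now lives on MetaReader and is called as MetaReader.getHRegionInfo(result, qualifier). Judging from the removed body, the relocated call amounts to the following sketch (assuming the method kept the same semantics):

    // Pull the serialized HRegionInfo out of the catalog family for the given
    // qualifier; return null when the cell is missing or empty.
    byte [] bytes = r.getValue(HConstants.CATALOG_FAMILY, qualifier);
    return (bytes == null || bytes.length <= 0)
        ? null
        : Writables.getHRegionInfoOrNull(bytes);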
diff --git src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
index c374d6f..3113743 100644
--- src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
+++ src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
@@ -94,6 +94,9 @@ public abstract class TableEventHandler extends EventHandler {
LOG.warn("Error on reopening the regions");
}
}
+ } catch (InterruptedException e) {
+ LOG.error("Interrupted manipulation of table " + Bytes.toString(tableName), e);
+ Thread.currentThread().interrupt();
} catch (IOException e) {
LOG.error("Error manipulating table " + Bytes.toString(tableName), e);
} catch (KeeperException e) {
diff --git src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index cd809ba..f95ee45 100644
--- src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -100,7 +100,6 @@ import org.apache.hadoop.hbase.filter.WritableByteArrayComparable;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheColumnFamilySummary;
import org.apache.hadoop.hbase.io.hfile.CacheStats;
-import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
import org.apache.hadoop.hbase.ipc.CoprocessorProtocol;
import org.apache.hadoop.hbase.ipc.HBaseRPC;
import org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler;
diff --git src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java
index 1cf46fc..bdcc538 100644
--- src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java
+++ src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java
@@ -62,9 +62,6 @@ public class MemStore implements HeapSize {
"hbase.hregion.memstore.mslab.enabled";
private static final boolean USEMSLAB_DEFAULT = false;
- static final String RESEEKMAX_KEY =
- "hbase.hregion.memstore.reseek.maxkeys";
- private static final int RESEEKMAX_DEFAULT = 32;
private Configuration conf;
@@ -96,11 +93,6 @@ public class MemStore implements HeapSize {
MemStoreLAB allocator;
- // if a reseek has to scan over more than these number of keys, then
- // it morphs into a seek. A seek does a tree map-search while
- // reseek does a linear scan.
- int reseekNumKeys;
-
/**
* Default constructor. Used for tests.
*/
@@ -129,7 +121,6 @@ public class MemStore implements HeapSize {
} else {
this.allocator = null;
}
- this.reseekNumKeys = conf.getInt(RESEEKMAX_KEY, RESEEKMAX_DEFAULT);
}
void dump() {
@@ -649,9 +640,6 @@ public class MemStore implements HeapSize {
Iterator<KeyValue> kvsetIt;
Iterator<KeyValue> snapshotIt;
- // number of iterations in this reseek operation
- int numIterReseek;
-
/*
Some notes...
@@ -687,10 +675,6 @@ public class MemStore implements HeapSize {
// keep it.
ret = v;
}
- numIterReseek--;
- if (numIterReseek == 0) {
- break;
- }
}
return ret;
}
@@ -700,7 +684,6 @@ public class MemStore implements HeapSize {
close();
return false;
}
- numIterReseek = 0;
// kvset and snapshot will never be empty.
// if tailSet cant find anything, SS is empty (not null).
@@ -729,27 +712,14 @@ public class MemStore implements HeapSize {
@Override
public boolean reseek(KeyValue key) {
- numIterReseek = reseekNumKeys;
while (kvsetNextRow != null &&
comparator.compare(kvsetNextRow, key) < 0) {
kvsetNextRow = getNext(kvsetIt);
- // if we scanned enough entries but still not able to find the
- // kv we are looking for, better cut our costs and do a tree
- // scan using seek.
- if (kvsetNextRow == null && numIterReseek == 0) {
- return seek(key);
- }
}
while (snapshotNextRow != null &&
comparator.compare(snapshotNextRow, key) < 0) {
snapshotNextRow = getNext(snapshotIt);
- // if we scanned enough entries but still not able to find the
- // kv we are looking for, better cut our costs and do a tree
- // scan using seek.
- if (snapshotNextRow == null && numIterReseek == 0) {
- return seek(key);
- }
}
return (kvsetNextRow != null || snapshotNextRow != null);
}
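Context for the MemStore removal above: reseek(KeyValue) advances the scanner's existing kvset/snapshot iterators linearly until they reach the requested key, whereas seek(KeyValue) re-derives the iterators from a tailSet lookup on the backing sorted set (a tree search). The dropped heuristic capped the linear walk at reseekNumKeys entries and then morphed the reseek into a seek. A standalone illustration of the two access patterns on a NavigableSet (this is not MemStore code, just a sketch of the idea):

    import java.util.NavigableSet;
    import java.util.TreeSet;

    public class ReseekVsSeek {
      public static void main(String[] args) {
        NavigableSet<Integer> kvset = new TreeSet<Integer>();
        for (int i = 0; i < 100; i += 2) kvset.add(i); // even "keys" only

        // "seek": jump straight to the first entry >= the key via a tree search.
        NavigableSet<Integer> tail = kvset.tailSet(41, true);
        Integer viaSeek = tail.isEmpty() ? null : tail.first();

        // "reseek": walk forward from the current position until >= the key.
        Integer viaReseek = null;
        for (Integer kv : kvset) {
          if (kv >= 41) { viaReseek = kv; break; }
        }
        System.out.println(viaSeek + " and " + viaReseek); // both print 42
      }
    }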
diff --git src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
index 9023af8..d5f7361 100644
--- src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
+++ src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
@@ -20,22 +20,17 @@
package org.apache.hadoop.hbase;
import java.io.IOException;
-import java.util.List;
import java.util.ArrayList;
-import java.util.Map;
+import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaReader;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
-
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
@@ -106,10 +101,7 @@ public class TestRegionRebalancing extends HBaseClusterTestCase {
public void testRebalancing() throws IOException, InterruptedException {
CatalogTracker ct = new CatalogTracker(conf);
ct.start();
- Map<HRegionInfo, ServerName> regions = MetaReader.fullScan(ct);
- for (Map.Entry<HRegionInfo, ServerName> e: regions.entrySet()) {
- LOG.info(e);
- }
+ MetaReader.fullScanMetaAndPrint(ct);
table = new HTable(conf, "test");
assertEquals("Test table should have 20 regions",
20, table.getStartKeys().length);
diff --git src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java
index 632f147..edb22e7 100644
--- src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java
+++ src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java
@@ -35,16 +35,20 @@ import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HServerAddress;
+import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HConnection;
+import org.apache.hadoop.hbase.client.HConnectionManager;
+import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ServerCallable;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.util.Progressable;
@@ -155,27 +159,42 @@ public class TestCatalogTracker {
t.join();
}
- @Test public void testGetMetaServerConnectionFails()
+ @Test
+ public void testGetMetaServerConnectionFails()
throws IOException, InterruptedException, KeeperException {
- HConnection connection = Mockito.mock(HConnection.class);
- ConnectException connectException =
- new ConnectException("Connection refused");
- final HRegionInterface implementation =
- Mockito.mock(HRegionInterface.class);
- Mockito.when(implementation.get((byte [])Mockito.any(), (Get)Mockito.any())).
- thenThrow(connectException);
- Mockito.when(connection.getHRegionConnection((HServerAddress)Matchers.anyObject(), Matchers.anyBoolean())).
- thenReturn(implementation);
- Assert.assertNotNull(connection.getHRegionConnection(new HServerAddress(), false));
- final CatalogTracker ct = constructAndStartCatalogTracker(connection);
+ HConnection connection =
+ HConnectionTestingUtility.getMockedConnection(UTIL.getConfiguration());
try {
- RootLocationEditor.setRootLocation(this.watcher,
- new ServerName("example.com", 1234, System.currentTimeMillis()));
- Assert.assertFalse(ct.verifyMetaRegionLocation(100));
+ // Mock an HRegionInterface.
+ final HRegionInterface implementation = Mockito.mock(HRegionInterface.class);
+ // If a 'get' is called on mocked interface, throw connection refused.
+ Mockito.when(implementation.get((byte[]) Mockito.any(), (Get) Mockito.any())).
+ thenThrow(new ConnectException("Connection refused"));
+ // Make it so our implementation is returned when we do a connection.
+ // Need to fake out the location lookup stuff first.
+ ServerName sn = new ServerName("example.com", 1234, System.currentTimeMillis());
+ final HRegionLocation anyLocation =
+ new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, sn.getHostname(),
+ sn.getPort());
+ Mockito.when(connection.getRegionLocation((byte[]) Mockito.any(),
+ (byte[]) Mockito.any(), Mockito.anyBoolean())).
+ thenReturn(anyLocation);
+ Mockito.when(connection.getHRegionConnection(Mockito.anyString(),
+ Mockito.anyInt(), Matchers.anyBoolean())).
+ thenReturn(implementation);
+ // Now start up the catalogtracker with our doctored Connection.
+ final CatalogTracker ct = constructAndStartCatalogTracker(connection);
+ try {
+ RootLocationEditor.setRootLocation(this.watcher, sn);
+ Assert.assertFalse(ct.verifyMetaRegionLocation(100));
+ } finally {
+ // Clean out the root location or later tests will be confused... they
+ // presume a fresh start in zk.
+ RootLocationEditor.deleteRootLocation(this.watcher);
+ }
} finally {
- // Clean out root location or later tests will be confused... they presume
- // start fresh in zk.
- RootLocationEditor.deleteRootLocation(this.watcher);
+ // Clear out our doctored connection or it could mess up subsequent tests.
+ HConnectionManager.deleteConnection(UTIL.getConfiguration(), true);
}
}
@@ -195,9 +214,9 @@ public class TestCatalogTracker {
Mockito.mock(HRegionInterface.class);
Mockito.when(implementation.getRegionInfo((byte [])Mockito.any())).
thenThrow(connectException);
- Mockito.when(connection.getHRegionConnection((HServerAddress)Matchers.anyObject(), Matchers.anyBoolean())).
+ Mockito.when(connection.getHRegionConnection(Mockito.anyString(),
+ Mockito.anyInt(), Mockito.anyBoolean())).
thenReturn(implementation);
- Assert.assertNotNull(connection.getHRegionConnection(new HServerAddress(), false));
final CatalogTracker ct = constructAndStartCatalogTracker(connection);
try {
RootLocationEditor.setRootLocation(this.watcher,
@@ -220,8 +239,14 @@ public class TestCatalogTracker {
@Test (expected = NotAllMetaRegionsOnlineException.class)
public void testTimeoutWaitForMeta()
throws IOException, InterruptedException {
- final CatalogTracker ct = constructAndStartCatalogTracker();
- ct.waitForMeta(100);
+ HConnection connection =
+ HConnectionTestingUtility.getMockedConnection(UTIL.getConfiguration());
+ try {
+ final CatalogTracker ct = constructAndStartCatalogTracker(connection);
+ ct.waitForMeta(100);
+ } finally {
+ HConnectionManager.deleteConnection(UTIL.getConfiguration(), true);
+ }
}
/**
@@ -254,62 +279,81 @@ public class TestCatalogTracker {
/**
* Test waiting on meta w/ no timeout specified.
- * @throws IOException
- * @throws InterruptedException
- * @throws KeeperException
+ * @throws Exception
*/
@Test public void testNoTimeoutWaitForMeta()
- throws IOException, InterruptedException, KeeperException {
+ throws Exception {
// Mock an HConnection and a HRegionInterface implementation. Have the
// HConnection return the HRI. Have the HRI return a few mocked up responses
// to make our test work.
- HConnection connection = Mockito.mock(HConnection.class);
- HRegionInterface mockHRI = Mockito.mock(HRegionInterface.class);
- // Make the HRI return an answer no matter how Get is called. Same for
- // getHRegionInfo. Thats enough for this test.
- Mockito.when(connection.getHRegionConnection((String)Mockito.any(),
- Matchers.anyInt())).thenReturn(mockHRI);
+ HConnection connection =
+ HConnectionTestingUtility.getMockedConnection(UTIL.getConfiguration());
+ try {
+ // Mock an HRegionInterface.
+ final HRegionInterface implementation = Mockito.mock(HRegionInterface.class);
+ // Make it so our implementation is returned when we do a connection.
+ // Need to fake out the location lookup stuff first.
+ ServerName sn = new ServerName("example.com", 1234, System.currentTimeMillis());
+ final HRegionLocation anyLocation =
+ new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, sn.getHostname(),
+ sn.getPort());
+ Mockito.when(connection.getRegionLocation((byte[]) Mockito.any(),
+ (byte[]) Mockito.any(), Mockito.anyBoolean())).
+ thenReturn(anyLocation);
+ // Have implementation returned whichever way getHRegionConnection is called.
+ Mockito.when(connection.getHRegionConnection(Mockito.anyString(),
+ Mockito.anyInt(), Matchers.anyBoolean())).
+ thenReturn(implementation);
+ Mockito.when(connection.getHRegionConnection(Mockito.anyString(),
+ Mockito.anyInt())).
+ thenReturn(implementation);
- final CatalogTracker ct = constructAndStartCatalogTracker(connection);
- ServerName hsa = ct.getMetaLocation();
- Assert.assertNull(hsa);
+ final CatalogTracker ct = constructAndStartCatalogTracker(connection);
+ ServerName hsa = ct.getMetaLocation();
+ Assert.assertNull(hsa);
- // Now test waiting on meta location getting set.
- Thread t = new WaitOnMetaThread(ct) {
- @Override
- void doWaiting() throws InterruptedException {
- this.ct.waitForMeta();
- }
- };
- startWaitAliveThenWaitItLives(t, 1000);
+ // Now test waiting on meta location getting set.
+ Thread t = new WaitOnMetaThread(ct) {
+ @Override
+ void doWaiting() throws InterruptedException {
+ this.ct.waitForMeta();
+ }
+ };
+ startWaitAliveThenWaitItLives(t, 1000);
- // Now the ct is up... set into the mocks some answers that make it look
- // like things have been getting assigned. Make it so we'll return a
- // location (no matter what the Get is). Same for getHRegionInfo -- always
- // just return the meta region.
- List<KeyValue> kvs = new ArrayList<KeyValue>();
- kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY,
- HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
- Bytes.toBytes(SN.getHostAndPort())));
- kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY,
- HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER,
- Bytes.toBytes(SN.getStartcode())));
- final Result result = new Result(kvs);
- Mockito.when(mockHRI.get((byte [])Mockito.any(), (Get)Mockito.any())).
- thenReturn(result);
- Mockito.when(mockHRI.getRegionInfo((byte [])Mockito.any())).
- thenReturn(HRegionInfo.FIRST_META_REGIONINFO);
- // This should trigger wake up of meta wait (Its the removal of the meta
- // region unassigned node that triggers catalogtrackers that a meta has
- // been assigned.
- String node = ct.getMetaNodeTracker().getNode();
- ZKUtil.createAndFailSilent(this.watcher, node);
- MetaEditor.updateMetaLocation(ct, HRegionInfo.FIRST_META_REGIONINFO, SN);
- ZKUtil.deleteNode(this.watcher, node);
- // Join the thread... should exit shortly.
- t.join();
- // Now meta is available.
- Assert.assertTrue(ct.getMetaLocation().equals(SN));
+ // Now the ct is up... set into the mocks some answers that make it look
+ // like things have been getting assigned. Make it so we'll return a
+ // location (no matter what the Get is). Same for getHRegionInfo -- always
+ // just return the meta region.
+ List<KeyValue> kvs = new ArrayList<KeyValue>();
+ kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY,
+ HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
+ Writables.getBytes(HRegionInfo.FIRST_META_REGIONINFO)));
+ kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY,
+ HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
+ Bytes.toBytes(SN.getHostAndPort())));
+ kvs.add(new KeyValue(HConstants.EMPTY_BYTE_ARRAY,
+ HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER,
+ Bytes.toBytes(SN.getStartcode())));
+ final Result result = new Result(kvs);
+ Mockito.when(connection.getRegionServerWithRetries((ServerCallable)Mockito.any())).
+ thenReturn(result);
+ Mockito.when(implementation.getRegionInfo((byte[]) Mockito.any())).
+ thenReturn(HRegionInfo.FIRST_META_REGIONINFO);
+ // This should trigger the wake-up of the meta wait (it's the removal of the
+ // meta region's unassigned node that signals catalog trackers that meta has
+ // been assigned).
+ String node = ct.getMetaNodeTracker().getNode();
+ ZKUtil.createAndFailSilent(this.watcher, node);
+ MetaEditor.updateMetaLocation(ct, HRegionInfo.FIRST_META_REGIONINFO, SN);
+ ZKUtil.deleteNode(this.watcher, node);
+ // Join the thread... should exit shortly.
+ t.join();
+ // Now meta is available.
+ Assert.assertTrue(ct.getMetaLocation().equals(SN));
+ } finally {
+ HConnectionManager.deleteConnection(UTIL.getConfiguration(), true);
+ }
}
private void startWaitAliveThenWaitItLives(final Thread t, final int ms) {
diff --git src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java
index 960e781..1b75462 100644
--- src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java
+++ src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java
@@ -19,7 +19,10 @@
*/
package org.apache.hadoop.hbase.catalog;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.List;
@@ -34,8 +37,6 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
@@ -64,11 +65,15 @@ public class TestMetaReaderEditor {
};
@BeforeClass public static void beforeClass() throws Exception {
- UTIL.startMiniCluster();
+ UTIL.startMiniCluster(3);
}
@Before public void setup() throws IOException, InterruptedException {
Configuration c = new Configuration(UTIL.getConfiguration());
+ // The tests otherwise run with 4 retries every 5 seconds. Make the client
+ // retry every second so it is more responsive (a one second pause is the
+ // default, as is ten retries).
+ c.setLong("hbase.client.pause", 1000);
+ c.setInt("hbase.client.retries.number", 10);
zkw = new ZooKeeperWatcher(c, "TestMetaReaderEditor", ABORTABLE);
ct = new CatalogTracker(zkw, c, ABORTABLE);
ct.start();
@@ -78,6 +83,109 @@ public class TestMetaReaderEditor {
UTIL.shutdownMiniCluster();
}
+ /**
+ * Does {@link MetaReader#getRegion(CatalogTracker, byte[])} and a write
+ * against .META. while its hosted server is restarted to prove our retrying
+ * works.
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ @Test (timeout = 180000) public void testRetrying()
+ throws IOException, InterruptedException {
+ final String name = "testRetrying";
+ LOG.info("Started " + name);
+ final byte [] nameBytes = Bytes.toBytes(name);
+ HTable t = UTIL.createTable(nameBytes, HConstants.CATALOG_FAMILY);
+ int regionCount = UTIL.createMultiRegions(t, HConstants.CATALOG_FAMILY);
+ // Test it works getting a region from just made user table.
+ final List<HRegionInfo> regions =
+ testGettingTableRegions(this.ct, nameBytes, regionCount);
+ MetaTask reader = new MetaTask(this.ct, "reader") {
+ @Override
+ void metaTask() throws Throwable {
+ testGetRegion(this.ct, regions.get(0));
+ }
+ };
+ MetaTask writer = new MetaTask(this.ct, "writer") {
+ @Override
+ void metaTask() throws Throwable {
+ MetaEditor.addRegionToMeta(this.ct, regions.get(0));
+ }
+ };
+ reader.start();
+ writer.start();
+ // Make sure reader and writer are working.
+ assertTrue(reader.isProgressing());
+ assertTrue(writer.isProgressing());
+ // Kill server hosting meta. See if our reader/writer ride over the
+ // meta moves. They'll need to retry.
+ for (int i = 0; i < 1; i++) {
+ LOG.info("Restart=" + i);
+ UTIL.ensureSomeRegionServersAvailable(2);
+ int index = -1;
+ do {
+ index = UTIL.getMiniHBaseCluster().getServerWithMeta();
+ } while (index == -1);
+ UTIL.getMiniHBaseCluster().abortRegionServer(index);
+ UTIL.getMiniHBaseCluster().waitOnRegionServer(index);
+ }
+ assertTrue(reader.toString(), reader.isProgressing());
+ assertTrue(writer.toString(), writer.isProgressing());
+ reader.stop = true;
+ writer.stop = true;
+ reader.join();
+ writer.join();
+ }
+
+ /**
+ * Thread that runs a MetaReader/MetaEditor task until asked stop.
+ */
+ abstract static class MetaTask extends Thread {
+ boolean stop = false;
+ int count = 0;
+ Throwable t = null;
+ final CatalogTracker ct;
+
+ MetaTask(final CatalogTracker ct, final String name) {
+ super(name);
+ this.ct = ct;
+ }
+
+ @Override
+ public void run() {
+ try {
+ while(!this.stop) {
+ LOG.info("Before " + this.getName()+ ", count=" + this.count);
+ metaTask();
+ this.count += 1;
+ LOG.info("After " + this.getName() + ", count=" + this.count);
+ Thread.sleep(100);
+ }
+ } catch (Throwable t) {
+ LOG.info(this.getName() + " failed", t);
+ this.t = t;
+ }
+ }
+
+ boolean isProgressing() throws InterruptedException {
+ int currentCount = this.count;
+ while(currentCount == this.count) {
+ if (!isAlive()) return false;
+ if (this.t != null) return false;
+ Thread.sleep(10);
+ }
+ return true;
+ }
+
+ @Override
+ public String toString() {
+ return "count=" + this.count + ", t=" +
+ (this.t == null? "null": this.t.toString());
+ }
+
+ abstract void metaTask() throws Throwable;
+ }
+
@Test public void testGetRegionsCatalogTables()
throws IOException, InterruptedException {
List<HRegionInfo> regions =
@@ -108,19 +216,9 @@ public class TestMetaReaderEditor {
@Test public void testGetRegion() throws IOException, InterruptedException {
final String name = "testGetRegion";
LOG.info("Started " + name);
- final byte [] nameBytes = Bytes.toBytes(name);
- HTable t = UTIL.createTable(nameBytes, HConstants.CATALOG_FAMILY);
- int regionCount = UTIL.createMultiRegions(t, HConstants.CATALOG_FAMILY);
-
- // Test it works getting a region from user table.
- List<HRegionInfo> regions = MetaReader.getTableRegions(ct, nameBytes);
- assertEquals(regionCount, regions.size());
- Pair<HRegionInfo, ServerName> pair =
- MetaReader.getRegion(ct, regions.get(0).getRegionName());
- assertEquals(regions.get(0).getEncodedName(),
- pair.getFirst().getEncodedName());
// Test get on non-existent region.
- pair = MetaReader.getRegion(ct, Bytes.toBytes("nonexistent-region"));
+ Pair<HRegionInfo, ServerName> pair =
+ MetaReader.getRegion(ct, Bytes.toBytes("nonexistent-region"));
assertNull(pair);
// Test it works getting a region from meta/root.
pair =
@@ -131,7 +229,8 @@ public class TestMetaReaderEditor {
}
// Test for the optimization made in HBASE-3650
- @Test public void testScanMetaForTable() throws IOException {
+ @Test public void testScanMetaForTable()
+ throws IOException, InterruptedException {
final String name = "testScanMetaForTable";
LOG.info("Started " + name);
@@ -159,4 +258,25 @@ public class TestMetaReaderEditor {
}
assertEquals(1, MetaReader.getTableRegions(ct, greaterName).size());
}
-}
\ No newline at end of file
+
+ private static List<HRegionInfo> testGettingTableRegions(final CatalogTracker ct,
+ final byte [] nameBytes, final int regionCount)
+ throws IOException, InterruptedException {
+ List<HRegionInfo> regions = MetaReader.getTableRegions(ct, nameBytes);
+ assertEquals(regionCount, regions.size());
+ Pair<HRegionInfo, ServerName> pair =
+ MetaReader.getRegion(ct, regions.get(0).getRegionName());
+ assertEquals(regions.get(0).getEncodedName(),
+ pair.getFirst().getEncodedName());
+ return regions;
+ }
+
+ private static void testGetRegion(final CatalogTracker ct,
+ final HRegionInfo region)
+ throws IOException, InterruptedException {
+ Pair<HRegionInfo, ServerName> pair =
+ MetaReader.getRegion(ct, region.getRegionName());
+ assertEquals(region.getEncodedName(),
+ pair.getFirst().getEncodedName());
+ }
+}
diff --git src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
new file mode 100644
index 0000000..f08ff3d
--- /dev/null
+++ src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.client.HConnectionManager.HConnectionImplementation;
+import org.apache.hadoop.hbase.client.HConnectionManager.HConnectionKey;
+import org.mockito.Mockito;
+
+/**
+ * {@link HConnection} testing utility.
+ */
+public class HConnectionTestingUtility {
+ /*
+ * Not part of {@link HBaseTestingUtility} because this class is not
+ * in the same package as {@link HConnection}. Would have to reveal ugly
+ * {@link HConnectionManager} innards to HBaseTestingUtility to give it access.
+ */
+ /**
+ * Get a Mocked {@link HConnection} that goes with the passed conf
+ * configuration instance. Minimally the mock will return
+ * conf when {@link HConnection#getConfiguration()} is invoked.
+ * Be sure to shutdown the connection when done by calling
+ * {@link HConnectionManager#deleteConnection(Configuration, boolean)} else it
+ * will stick around; this is probably not what you want.
+ * @param conf configuration
+ * @return HConnection object for conf
+ * @throws ZooKeeperConnectionException
+ */
+ public static HConnection getMockedConnection(final Configuration conf)
+ throws ZooKeeperConnectionException {
+ HConnectionKey connectionKey = new HConnectionKey(conf);
+ synchronized (HConnectionManager.HBASE_INSTANCES) {
+ HConnectionImplementation connection =
+ HConnectionManager.HBASE_INSTANCES.get(connectionKey);
+ if (connection == null) {
+ connection = Mockito.mock(HConnectionImplementation.class);
+ Mockito.when(connection.getConfiguration()).thenReturn(conf);
+ HConnectionManager.HBASE_INSTANCES.put(connectionKey, connection);
+ }
+ return connection;
+ }
+ }
+}
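Typical use of the new utility, condensed from the TestCatalogTracker changes above (the exact stubbing differs per test): get the mocked connection, stub whatever the test needs on it, and always delete the connection afterwards so the mock does not linger in HConnectionManager's cache and confuse later tests:

    Configuration conf = UTIL.getConfiguration();
    HConnection connection = HConnectionTestingUtility.getMockedConnection(conf);
    try {
      // Stub an HRegionInterface and have the connection hand it back.
      HRegionInterface implementation = Mockito.mock(HRegionInterface.class);
      Mockito.when(connection.getHRegionConnection(Mockito.anyString(),
          Mockito.anyInt(), Mockito.anyBoolean())).thenReturn(implementation);
      // ... build a CatalogTracker on this connection and run assertions ...
    } finally {
      HConnectionManager.deleteConnection(conf, true);
    }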
diff --git src/test/java/org/apache/hadoop/hbase/client/TestHCM.java src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
index f09944e..f98baef 100644
--- src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
+++ src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
@@ -19,6 +19,10 @@
*/
package org.apache.hadoop.hbase.client;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.HashSet;
@@ -27,6 +31,8 @@ import java.util.Map;
import java.util.Random;
import java.util.Set;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -38,12 +44,6 @@ import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
/**
* This class is for testing HCM features
@@ -225,4 +225,4 @@ public class TestHCM {
Thread.sleep(50);
}
}
-}
+}
\ No newline at end of file