diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
index 160cba7..6c78b23 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
@@ -193,7 +193,7 @@ public class ClientScanner extends AbstractClientScanner {
done) {
close();
if (LOG.isDebugEnabled()) {
- LOG.debug("Finished with scanning at " + this.currentRegion);
+ LOG.debug("Finished scanning region " + this.currentRegion);
}
return false;
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
index 578959d..0686800 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaScanner.java
@@ -37,9 +37,8 @@ import java.util.NavigableMap;
import java.util.TreeMap;
/**
- * Scanner class that contains the .META. table scanning logic
- * and uses a Retryable scanner. Provided visitors will be called
- * for each row.
+ * Scanner class that contains the .META. table scanning logic.
+ * Provided visitors will be called for each row.
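+ *
+ * A minimal usage sketch, assuming the three-argument metaScan overload and the
+ * MetaScannerVisitorBase helper are present in this version (the table name below is
+ * illustrative):
+ * <pre>
+ *   MetaScanner.metaScan(conf, new MetaScanner.MetaScannerVisitorBase() {
+ *     public boolean processRow(Result row) throws IOException {
+ *       HRegionInfo info = MetaScanner.getHRegionInfo(row);
+ *       // Returning false stops the scan early.
+ *       return info != null;
+ *     }
+ *   }, Bytes.toBytes("exampleTable"));
+ * </pre>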
*
* Although public visibility, this is not a public-facing API and may evolve in
* minor releases.
@@ -123,114 +122,55 @@ public class MetaScanner {
public static void metaScan(Configuration configuration,
final MetaScannerVisitor visitor, final byte[] tableName,
final byte[] row, final int rowLimit, final byte[] metaTableName)
- throws IOException {
- try {
- HConnectionManager.execute(new HConnectable(configuration) {
- @Override
- public Void connect(HConnection connection) throws IOException {
- metaScan(conf, connection, visitor, tableName, row, rowLimit,
- metaTableName);
- return null;
- }
- });
- } finally {
- visitor.close();
- }
- }
-
- private static void metaScan(Configuration configuration, HConnection connection,
- MetaScannerVisitor visitor, byte [] tableName, byte[] row,
- int rowLimit, final byte [] metaTableName)
throws IOException {
int rowUpperLimit = rowLimit > 0 ? rowLimit: Integer.MAX_VALUE;
-
- // if row is not null, we want to use the startKey of the row's region as
- // the startRow for the meta scan.
+ HTable metaTable = new HTable(configuration, HConstants.META_TABLE_NAME);
+ // Calculate startrow for scan.
byte[] startRow;
- if (row != null) {
- // Scan starting at a particular row in a particular table
- assert tableName != null;
- byte[] searchRow =
- HRegionInfo.createRegionName(tableName, row, HConstants.NINES,
- false);
- HTable metaTable = null;
- try {
- metaTable = new HTable(configuration, HConstants.META_TABLE_NAME);
- Result startRowResult = metaTable.getRowOrBefore(searchRow,
- HConstants.CATALOG_FAMILY);
+ try {
+ if (row != null) {
+ // Scan starting at a particular row in a particular table
+ byte[] searchRow = HRegionInfo.createRegionName(tableName, row, HConstants.NINES, false);
+ Result startRowResult = metaTable.getRowOrBefore(searchRow, HConstants.CATALOG_FAMILY);
if (startRowResult == null) {
- throw new TableNotFoundException("Cannot find row in .META. for table: "
- + Bytes.toString(tableName) + ", row=" + Bytes.toStringBinary(searchRow));
+ throw new TableNotFoundException("Cannot find row in .META. for table: " +
+ Bytes.toString(tableName) + ", row=" + Bytes.toStringBinary(searchRow));
}
HRegionInfo regionInfo = getHRegionInfo(startRowResult);
if (regionInfo == null) {
throw new IOException("HRegionInfo was null or empty in Meta for " +
Bytes.toString(tableName) + ", row=" + Bytes.toStringBinary(searchRow));
}
-
byte[] rowBefore = regionInfo.getStartKey();
- startRow = HRegionInfo.createRegionName(tableName, rowBefore,
- HConstants.ZEROES, false);
- } finally {
- if (metaTable != null) {
- metaTable.close();
- }
+ startRow = HRegionInfo.createRegionName(tableName, rowBefore, HConstants.ZEROES, false);
+ } else if (tableName == null || tableName.length == 0) {
+ // Full META scan
+ startRow = HConstants.EMPTY_START_ROW;
+ } else {
+ // Scan META for an entire table
+ startRow = HRegionInfo.createRegionName(tableName, HConstants.EMPTY_START_ROW, HConstants.ZEROES, false);
}
- } else if (tableName == null || tableName.length == 0) {
- // Full META scan
- startRow = HConstants.EMPTY_START_ROW;
- } else {
- // Scan META for an entire table
- startRow = HRegionInfo.createRegionName(
- tableName, HConstants.EMPTY_START_ROW, HConstants.ZEROES, false);
- }
-
- // Scan over each meta region
- ScannerCallable callable;
- int rows = Math.min(rowLimit, configuration.getInt(
- HConstants.HBASE_META_SCANNER_CACHING,
- HConstants.DEFAULT_HBASE_META_SCANNER_CACHING));
- do {
final Scan scan = new Scan(startRow).addFamily(HConstants.CATALOG_FAMILY);
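+ // Bound scanner caching by both the configured META caching and the caller-supplied row
+ // limit, so a small rowLimit does not trigger an oversized first fetch.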
+ int rows = Math.min(rowLimit, configuration.getInt(HConstants.HBASE_META_SCANNER_CACHING,
+ HConstants.DEFAULT_HBASE_META_SCANNER_CACHING));
+ scan.setCaching(rows);
if (LOG.isDebugEnabled()) {
- LOG.debug("Scanning " + Bytes.toString(metaTableName) +
- " starting at row=" + Bytes.toStringBinary(startRow) + " for max=" +
- rowUpperLimit + " rows using " + connection.toString());
+ LOG.debug("Scanning " + Bytes.toString(metaTableName) + " starting at row=" +
+ Bytes.toStringBinary(startRow) + " for max=" + rowUpperLimit + " with caching=" + rows);
}
- callable = new ScannerCallable(connection, metaTableName, scan, null);
- // Open scanner
- callable.withRetries();
-
+ // Run the scan
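+ // The HTable-backed ResultScanner retries and crosses region boundaries internally,
+ // replacing the per-region ScannerCallable loop that used to live here.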
+ ResultScanner scanner = metaTable.getScanner(scan);
+ Result result = null;
int processedRows = 0;
- try {
- callable.setCaching(rows);
- done: do {
- if (processedRows >= rowUpperLimit) {
- break;
- }
- //we have all the rows here
- Result [] rrs = callable.withRetries();
- if (rrs == null || rrs.length == 0 || rrs[0].size() == 0) {
- break; //exit completely
- }
- for (Result rr : rrs) {
- if (processedRows >= rowUpperLimit) {
- break done;
- }
- if (!visitor.processRow(rr))
- break done; //exit completely
- processedRows++;
- }
- //here, we didn't break anywhere. Check if we have more rows
- } while(true);
- // Advance the startRow to the end key of the current region
- startRow = callable.getHRegionInfo().getEndKey();
- } finally {
- // Close scanner
- callable.setClose();
- callable.withRetries();
+ try {
+ while ((result = scanner.next()) != null) {
+ if (visitor != null && !visitor.processRow(result)) break;
+ processedRows++;
+ if (processedRows >= rowUpperLimit) break;
+ }
+ } finally {
+ scanner.close();
}
- } while (Bytes.compareTo(startRow, HConstants.LAST_ROW) != 0);
+ } finally {
+ if (visitor != null) visitor.close();
+ if (metaTable != null) metaTable.close();
+ }
}
/**
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
index c1d40fb..bde1588 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
@@ -201,12 +201,7 @@ public class ScannerCallable extends ServerCallable {
if (this.scanMetrics != null) {
this.scanMetrics.countOfNSRE.incrementAndGet();
}
- throw new DoNotRetryIOException("Reset scanner", ioe);
- } else if (ioe instanceof RegionServerStoppedException) {
- // Throw a DNRE so that we break out of cycle of calling RSSE
- // when what we need is to open scanner against new location.
- // Attach RSSE to signal client that it needs to resetup scanner.
- throw new DoNotRetryIOException("Reset scanner", ioe);
+ throw new DoNotRetryIOException("Resetting the scanner -- see exception cause", ioe);
} else {
// The outer layers will retry
throw ioe;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java
index 1b2e54a..6d16362 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ServerCallable.java
@@ -169,8 +169,7 @@ public abstract class ServerCallable implements Callable {
prepare(tries != 0); // if called with false, check table status on ZK
return call();
} catch (Throwable t) {
- LOG.warn("Received exception, tries=" + tries + ", numRetries=" + numRetries + ":" +
- t.getMessage());
+ LOG.warn("Call exception, tries=" + tries + ", numRetries=" + numRetries + ":" + t);
t = translateException(t);
// translateException throws an exception when we should not retry, i.e. when it's the
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionServerStoppedException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionServerStoppedException.java
index 5ab1aed..4560a31 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionServerStoppedException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionServerStoppedException.java
@@ -20,16 +20,14 @@ package org.apache.hadoop.hbase.exceptions;
import org.apache.hadoop.classification.InterfaceAudience;
-import java.io.IOException;
-
/**
* Thrown by the region server when it is in shutting down state.
*/
@SuppressWarnings("serial")
@InterfaceAudience.Private
-public class RegionServerStoppedException extends IOException {
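+// Extending DoNotRetryIOException keeps clients from retrying against a stopping server;
+// they should instead relocate the region and reopen the scanner (see ScannerCallable).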
+public class RegionServerStoppedException extends DoNotRetryIOException {
public RegionServerStoppedException(String s) {
super(s);
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 22d539a..605bc6f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -786,7 +786,8 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa
} // for
} catch (Throwable t) {
if (!checkOOME(t)) {
- abort("Unhandled exception: " + t.getMessage(), t);
+ String prefix = t instanceof YouAreDeadException ? "" : "Unhandled: ";
+ abort(prefix + t.getMessage(), t);
}
}
// Run shutdown.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java
index 4163a53..26336da 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java
@@ -201,7 +201,7 @@ public class TestZooKeeper {
LOG.info("Starting testRegionServerSessionExpired");
int metaIndex = TEST_UTIL.getMiniHBaseCluster().getServerWithMeta();
TEST_UTIL.expireRegionServerSession(metaIndex);
- testSanity();
+ testSanity("testRegionServerSessionExpired");
}
// @Test Disabled because seems to make no sense expiring master session
@@ -210,7 +210,7 @@ public class TestZooKeeper {
public void testMasterSessionExpired() throws Exception {
LOG.info("Starting testMasterSessionExpired");
TEST_UTIL.expireMasterSession();
- testSanity();
+ testSanity("testMasterSessionExpired");
}
/**
@@ -220,27 +220,31 @@ public class TestZooKeeper {
*/
@Test(timeout = 60000)
public void testMasterZKSessionRecoveryFailure() throws Exception {
+ LOG.info("Starting testMasterZKSessionRecoveryFailure");
MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
HMaster m = cluster.getMaster();
m.abort("Test recovery from zk session expired",
new KeeperException.SessionExpiredException());
assertFalse(m.isStopped());
- testSanity();
+ testSanity("testMasterZKSessionRecoveryFailure");
}
/**
* Make sure we can use the cluster
* @throws Exception
*/
- private void testSanity() throws Exception{
- HBaseAdmin admin =
- new HBaseAdmin(TEST_UTIL.getConfiguration());
- String tableName = "test"+System.currentTimeMillis();
+ private void testSanity(final String testName) throws Exception {
+ String tableName = testName + System.currentTimeMillis();
HTableDescriptor desc = new HTableDescriptor(tableName);
HColumnDescriptor family = new HColumnDescriptor("fam");
desc.addFamily(family);
LOG.info("Creating table " + tableName);
- admin.createTable(desc);
+ HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
+ try {
+ admin.createTable(desc);
+ } finally {
+ admin.close();
+ }
HTable table =
new HTable(new Configuration(TEST_UTIL.getConfiguration()), tableName);