commit dd5f589a94f1e4a197aa16bb13c62edd2c3f843a
Author: Todd Lipcon
Date:   Tue Jun 22 13:44:52 2010 -0700

    HBASE-2767. setMaxRecoveryErrorCount reflection fails after HDFS-1209

diff --git src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index faace00..32e86a4 100644
--- src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -73,7 +73,7 @@ import com.google.common.base.Preconditions;
  * logging levels nor make changes to configuration parameters.
  */
 public class HBaseTestingUtility {
-  private final Log LOG = LogFactory.getLog(getClass());
+  private final static Log LOG = LogFactory.getLog(HBaseTestingUtility.class);
   private final Configuration conf;
   private MiniZooKeeperCluster zkCluster = null;
   private MiniDFSCluster dfsCluster = null;
@@ -887,7 +887,7 @@ public class HBaseTestingUtility {
   }
 
   /**
-   * Set maxRecoveryErrorCount in DFSClient. Currently its hard-coded to 5 and
+   * Set maxRecoveryErrorCount in DFSClient. In 0.20 pre-append, it's hard-coded to 5 and
    * makes tests linger. Here is the exception you'll see:
    * <pre>
    * 2010-06-15 11:52:28,511 WARN  [DataStreamer for file /hbase/.logs/hlog.1276627923013 block blk_928005470262850423_1021] hdfs.DFSClient$DFSOutputStream(2657): Error Recovery for block blk_928005470262850423_1021 failed  because recovery from primary datanode 127.0.0.1:53683 failed 4 times.  Pipeline was 127.0.0.1:53687, 127.0.0.1:53683. Will retry...
@@ -900,20 +900,23 @@ public class HBaseTestingUtility {
    * @throws IllegalArgumentException 
    */
   public static void setMaxRecoveryErrorCount(final OutputStream stream,
-      final int max)
-  throws SecurityException, NoSuchFieldException, IllegalArgumentException, IllegalAccessException {
-    Class [] clazzes = DFSClient.class.getDeclaredClasses();
-    for (Class clazz: clazzes) {
-      String className = clazz.getSimpleName();
-      if (className.equals("DFSOutputStream")) {
-        if (clazz.isInstance(stream)) {
-          Field maxRecoveryErrorCountField =
-            stream.getClass().getDeclaredField("maxRecoveryErrorCount");
-          maxRecoveryErrorCountField.setAccessible(true);
-          maxRecoveryErrorCountField.setInt(stream, max);
-          break;
+      final int max) {
+    try {
+      Class [] clazzes = DFSClient.class.getDeclaredClasses();
+      for (Class clazz: clazzes) {
+        String className = clazz.getSimpleName();
+        if (className.equals("DFSOutputStream")) {
+          if (clazz.isInstance(stream)) {
+            Field maxRecoveryErrorCountField =
+              stream.getClass().getDeclaredField("maxRecoveryErrorCount");
+            maxRecoveryErrorCountField.setAccessible(true);
+            maxRecoveryErrorCountField.setInt(stream, max);
+            break;
+          }
         }
       }
+    } catch (Exception e) {
+      LOG.info("Could not set max recovery field", e);
     }
   }
 }
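
The patched helper scans DFSClient's declared inner classes for DFSOutputStream and overrides the private maxRecoveryErrorCount field via reflection. Folding the checked exceptions into a logged catch means the utility degrades gracefully on HDFS builds where the field is absent or has changed (as after HDFS-1209): the test merely lingers at the default retry count instead of throwing. Below is a minimal, self-contained sketch of the same best-effort pattern; Widget and retryLimit are made-up stand-ins for the DFSClient internals, not names from the patch:

    import java.lang.reflect.Field;

    public class ReflectionOverrideSketch {
      // Stand-in for DFSClient.DFSOutputStream; retryLimit stands in for
      // the private maxRecoveryErrorCount field. Both names are invented.
      static class Widget {
        private int retryLimit = 5;
      }

      // Best-effort override in the style of the patched helper: reflection
      // failures are logged and swallowed rather than propagated to callers.
      static void setRetryLimit(Object target, int value) {
        try {
          Field f = target.getClass().getDeclaredField("retryLimit");
          f.setAccessible(true); // the field is private, so force access
          f.setInt(target, value);
        } catch (Exception e) {
          System.out.println("Could not set retryLimit: " + e);
        }
      }

      public static void main(String[] args) throws Exception {
        Widget w = new Widget();
        setRetryLimit(w, 1);
        Field f = Widget.class.getDeclaredField("retryLimit");
        f.setAccessible(true);
        System.out.println("retryLimit=" + f.getInt(w)); // prints retryLimit=1
      }
    }
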
diff --git src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
index 17a6505..9053d39 100644
--- src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
@@ -26,6 +26,8 @@ import java.io.IOException;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -50,12 +52,12 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.mortbay.log.Log;
 
 /**
  * Test replay of edits out of a WAL split.
  */
 public class TestWALReplay {
+  public static final Log LOG = LogFactory.getLog(TestWALReplay.class);
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   private final EnvironmentEdge ee = EnvironmentEdgeManager.getDelegate();
   private Path hbaseRootDir = null;
@@ -68,14 +70,14 @@ public class TestWALReplay {
   public static void setUpBeforeClass() throws Exception {
     Configuration conf = TEST_UTIL.getConfiguration();
     conf.setBoolean("dfs.support.append", true);
-    // The below config not supported until 
+    // The below config is supported by 0.20-append and CDH3b2
     conf.setInt("dfs.client.block.recovery.retries", 2);
     conf.setInt("hbase.regionserver.flushlogentries", 1);
     TEST_UTIL.startMiniDFSCluster(3);
     TEST_UTIL.setNameNodeNameSystemLeasePeriod(100, 10000);
     Path hbaseRootDir =
       TEST_UTIL.getDFSCluster().getFileSystem().makeQualified(new Path("/hbase"));
-    Log.info("hbase.rootdir=" + hbaseRootDir);
+    LOG.info("hbase.rootdir=" + hbaseRootDir);
     conf.set(HConstants.HBASE_DIR, hbaseRootDir.toString());
   }
 
@@ -411,7 +413,7 @@ public class TestWALReplay {
     assertEquals(1, splits.size());
     // Make sure the file exists
     assertTrue(fs.exists(splits.get(0)));
-    Log.info("Split file=" + splits.get(0));
+    LOG.info("Split file=" + splits.get(0));
     return splits.get(0);
   }
 
@@ -424,13 +426,7 @@ public class TestWALReplay {
     HLog wal = new HLog(FileSystem.get(c), logDir, oldLogDir, c, null);
    // Set down maximum recovery so the dfsclient doesn't linger retrying something
     // long gone.
-    try {
-      HBaseTestingUtility.setMaxRecoveryErrorCount(wal.getOutputStream(), 1);
-    } catch (Exception e) {
-      // These exceptions should never happen... make RuntimeException of them
-      // if they do.
-      throw new RuntimeException(e);
-    }
+    HBaseTestingUtility.setMaxRecoveryErrorCount(wal.getOutputStream(), 1);
     return wal;
   }
 }
\ No newline at end of file
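
With the try/catch folded into HBaseTestingUtility, the createWAL caller above shrinks to a single unguarded line; on an incompatible HDFS the worst case is a logged "Could not set max recovery field" message and slower recovery retries, not a test failure. The simplified call site, as it appears in the hunk above:

    HLog wal = new HLog(FileSystem.get(c), logDir, oldLogDir, c, null);
    HBaseTestingUtility.setMaxRecoveryErrorCount(wal.getOutputStream(), 1);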