Index: src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java (revision 1232543) +++ src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java (working copy) @@ -1275,6 +1275,8 @@ LOG.info("Mini mapreduce cluster started"); conf.set("mapred.job.tracker", mrCluster.createJobConf().get("mapred.job.tracker")); + /* this is for mrv2 support */ + conf.set("mapreduce.framework.name", "yarn"); } /** Index: src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java =================================================================== --- src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java (revision 1232543) +++ src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java (working copy) @@ -325,13 +325,15 @@ // We start 3 servers and then stop 2 to avoid a directory naming conflict // when we stop/start a namenode later, as mentioned in HBASE-5163 List existingNodes = dfsCluster.getDataNodes(); - dfsCluster - .startDataNodes(TEST_UTIL.getConfiguration(), 3, true, null, null); - for (DataNode dn: existingNodes){ - dfsCluster.stopDataNode( dn.dnRegistration.getName() ); + int numDataNodes = 3; + dfsCluster.startDataNodes(TEST_UTIL.getConfiguration(), numDataNodes, true, + null, null); + for (int i = numDataNodes-1; i >= 0; i--){ + dfsCluster.stopDataNode( i ); } - assertTrue( + assertTrue("DataNodes " + dfsCluster.getDataNodes().size() + + " default replication " + fs.getDefaultReplication(), dfsCluster.getDataNodes().size() >= fs.getDefaultReplication() + 1); writeData(table, 2); Index: src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java =================================================================== --- src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java (revision 1232543) +++ src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java (working 
copy) @@ -456,7 +456,7 @@ if (os != null) { try { m = os.getWrappedStream().getClass(). - getMethod("getNumCurrentReplicas", new Class []{}); + getDeclaredMethod("getNumCurrentReplicas", new Class []{}); m.setAccessible(true); } catch (NoSuchMethodException e) { // Thrown if getNumCurrentReplicas() function isn't available @@ -471,7 +471,7 @@ LOG.info("Using getNumCurrentReplicas--HDFS-826"); } else { LOG.info("getNumCurrentReplicas--HDFS-826 not available; hdfs_out=" + - os + ", exception=" + exception.getMessage()); + os, exception); } return m; } Index: pom.xml =================================================================== --- pom.xml (revision 1232543) +++ pom.xml (working copy) @@ -1836,6 +1836,13 @@ hadoop-mapreduce-client-jobclient ${hadoop.version} true + test + + + org.apache.hadoop + hadoop-mapreduce-client-jobclient + ${hadoop.version} + true test-jar test