diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java
index a2b2935..c1f9eb6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSHDFSUtils.java
@@ -27,19 +27,19 @@
 import java.net.URI;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
-import java.util.Collection;
 
-import com.google.common.collect.Sets;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
 
+import com.google.common.collect.Sets;
+
 /**
  * Implementation for hdfs
@@ -63,32 +63,24 @@ public class FSHDFSUtils extends FSUtils {
     String serviceName = fs.getCanonicalServiceName();
 
     if (serviceName.startsWith("ha-hdfs")) {
       try {
+        String nameServiceId = serviceName.split(":")[1];
         if (dfsUtilClazz == null) {
           dfsUtilClazz = Class.forName("org.apache.hadoop.hdfs.DFSUtil");
         }
         if (getNNAddressesMethod == null) {
-          try {
-            // getNNServiceRpcAddressesForCluster is available only in version
-            // equal to or later than Hadoop 2.6
-            getNNAddressesMethod =
-                dfsUtilClazz.getMethod("getNNServiceRpcAddressesForCluster", Configuration.class);
-          } catch (NoSuchMethodException e) {
-            // If hadoop version is older than hadoop 2.6
-            getNNAddressesMethod =
-                dfsUtilClazz.getMethod("getNNServiceRpcAddresses", Configuration.class);
-          }
-
+          // getRpcAddressesForNameserviceId is available only in version
+          // equal to or later than Hadoop 2.6
+          getNNAddressesMethod =
+              dfsUtilClazz.getMethod("getRpcAddressesForNameserviceId", Configuration.class,
+                String.class, String.class);
         }
-        Map<String, Map<String, InetSocketAddress>> addressMap =
-            (Map<String, Map<String, InetSocketAddress>>) getNNAddressesMethod
-                .invoke(null, conf);
-        for (Map.Entry<String, Map<String, InetSocketAddress>> entry : addressMap.entrySet()) {
-          Map<String, InetSocketAddress> nnMap = entry.getValue();
-          for (Map.Entry<String, InetSocketAddress> e2 : nnMap.entrySet()) {
-            InetSocketAddress addr = e2.getValue();
-            addresses.add(addr);
-          }
+        Map<String, InetSocketAddress> nnMap =
+            (Map<String, InetSocketAddress>) getNNAddressesMethod.invoke(null, conf, nameServiceId,
+              null);
+        for (Map.Entry<String, InetSocketAddress> e2 : nnMap.entrySet()) {
+          InetSocketAddress addr = e2.getValue();
+          addresses.add(addr);
         }
       } catch (Exception e) {
         LOG.warn("DFSUtil.getNNServiceRpcAddresses failed. serviceName=" + serviceName, e);
@@ -125,17 +117,6 @@ public class FSHDFSUtils extends FSUtils {
     if (srcServiceName.equals(desServiceName)) {
       return true;
     }
-    if (srcServiceName.startsWith("ha-hdfs") && desServiceName.startsWith("ha-hdfs")) {
-      Collection<String> internalNameServices =
-          conf.getTrimmedStringCollection("dfs.internal.nameservices");
-      if (!internalNameServices.isEmpty()) {
-        if (internalNameServices.contains(srcServiceName.split(":")[1])) {
-          return true;
-        } else {
-          return false;
-        }
-      }
-    }
     if (srcFs instanceof DistributedFileSystem && desFs instanceof DistributedFileSystem) {
       //If one serviceName is an HA format while the other is a non-HA format,
       // maybe they refer to the same FileSystem.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSHDFSUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSHDFSUtils.java
index ea19ea7..d5369bc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSHDFSUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSHDFSUtils.java
@@ -143,6 +143,23 @@ public class TestFSHDFSUtils {
     desPath = new Path("/");
     desFs = desPath.getFileSystem(conf);
     assertTrue(!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));
+
+    conf.set("dfs.nameservices", "haosong-hadoop,ns-2");
+    conf.set("dfs.ha.namenodes.ns-2", "nn1,nn2");
+    conf.set("dfs.client.failover.proxy.provider.ns-2",
+        "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
+    conf.set("dfs.namenode.rpc-address.ns-2.nn1", "127.0.0.10:8020");
+    conf.set("dfs.namenode.rpc-address.ns-2.nn2", "127.10.2.10:8000");
+
+    srcPath = new Path("hdfs://haosong-hadoop/src");
+    srcFs = srcPath.getFileSystem(conf);
+    desPath = new Path("hdfs://ns-2/");
+    desFs = desPath.getFileSystem(conf);
+    assertTrue(!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));
+
+    desPath = new Path("hdfs://haosong-hadoop/other");
+    desFs = desPath.getFileSystem(conf);
+    assertTrue(FSHDFSUtils.isSameHdfs(conf, srcFs, desFs));
   }
 
   /**