Index: hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java =================================================================== --- hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java (revision 1356172) +++ hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java (working copy) @@ -1515,7 +1515,8 @@ // mangle the conf so that the fs parameter points to the minidfs we // just started up FileSystem fs = dfsCluster.getFileSystem(); - conf.set("fs.default.name", fs.getUri().toString()); + conf.set(HBaseConfiguration.handleHadoopDeprecation( + "fs.default.name", "fs.defaultFS"), fs.getUri().toString()); conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, Integer.toString(zooKeeperPort)); Path parentdir = fs.getHomeDirectory(); conf.set(HConstants.HBASE_DIR, parentdir.toString()); Index: hbase-server/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java =================================================================== --- hbase-server/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java (revision 1356172) +++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/ProcessBasedLocalHBaseCluster.java (working copy) @@ -42,6 +42,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.LargeTests; @@ -402,8 +403,8 @@ if (dfsCluster != null) { String fsURL = "hdfs://" + HConstants.LOCALHOST + ":" + dfsCluster.getNameNodePort(); - confMap.put("fs.default.name", fsURL); - confMap.put("fs.defaultFS", fsURL); + confMap.put(HBaseConfiguration.handleHadoopDeprecation( + "fs.default.name", "fs.defaultFS"), fsURL); confMap.put("hbase.rootdir", fsURL + "/hbase_test"); } Index: 
hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java =================================================================== --- hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java (revision 1356172) +++ hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java (working copy) @@ -428,9 +428,8 @@ // Set this just-started cluster as our filesystem. FileSystem fs = this.dfsCluster.getFileSystem(); - this.conf.set("fs.defaultFS", fs.getUri().toString()); - // Do old style too just to be safe. - this.conf.set("fs.default.name", fs.getUri().toString()); + conf.set(HBaseConfiguration.handleHadoopDeprecation( + "fs.default.name", "fs.defaultFS"), fs.getUri().toString()); // Wait for the cluster to be totally up this.dfsCluster.waitClusterUp(); Index: hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java =================================================================== --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java (revision 1356172) +++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java (working copy) @@ -3042,8 +3042,8 @@ Configuration conf = HBaseConfiguration.create(); Path hbasedir = new Path(conf.get(HConstants.HBASE_DIR)); URI defaultFs = hbasedir.getFileSystem(conf).getUri(); - conf.set("fs.defaultFS", defaultFs.toString()); // for hadoop 0.21+ - conf.set("fs.default.name", defaultFs.toString()); // for hadoop 0.20 + conf.set(HBaseConfiguration.handleHadoopDeprecation( + "fs.default.name", "fs.defaultFS"), defaultFs.toString()); HBaseFsck fsck = new HBaseFsck(conf); long sleepBeforeRerun = DEFAULT_SLEEP_BEFORE_RERUN; Index: hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRepair.java =================================================================== --- hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRepair.java (revision 1356172) +++ 
hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRepair.java (working copy) @@ -70,8 +70,8 @@ Configuration conf = HBaseConfiguration.create(); // Cover both bases, the old way of setting default fs and the new. // We're supposed to run on 0.20 and 0.21 anyways. - conf.set("fs.defaultFS", conf.get(HConstants.HBASE_DIR)); - conf.set("fs.default.name", conf.get(HConstants.HBASE_DIR)); + conf.set(HBaseConfiguration.handleHadoopDeprecation( + "fs.default.name", "fs.defaultFS"), conf.get(HConstants.HBASE_DIR)); HBaseFsck fsck = new HBaseFsck(conf); boolean fixHoles = false; @@ -89,8 +89,8 @@ i++; String path = args[i]; conf.set(HConstants.HBASE_DIR, path); - conf.set("fs.defaultFS", conf.get(HConstants.HBASE_DIR)); - conf.set("fs.default.name", conf.get(HConstants.HBASE_DIR)); + conf.set(HBaseConfiguration.handleHadoopDeprecation( + "fs.default.name", "fs.defaultFS"), conf.get(HConstants.HBASE_DIR)); } else if (cmd.equals("-sidelineDir")) { if (i == args.length - 1) { System.err.println("OfflineMetaRepair: -sidelineDir needs an HDFS path."); Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogPrettyPrinter.java =================================================================== --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogPrettyPrinter.java (revision 1356172) +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogPrettyPrinter.java (working copy) @@ -360,9 +360,8 @@ } // get configuration, file system, and process the given files Configuration conf = HBaseConfiguration.create(); - conf.set("fs.defaultFS", - conf.get(org.apache.hadoop.hbase.HConstants.HBASE_DIR)); - conf.set("fs.default.name", + conf.set(HBaseConfiguration.handleHadoopDeprecation( + "fs.default.name", "fs.defaultFS"), conf.get(org.apache.hadoop.hbase.HConstants.HBASE_DIR)); // begin output printer.beginPersistentOutput(); Index: 
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java =================================================================== --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java (revision 1356172) +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java (working copy) @@ -1904,8 +1904,8 @@ Configuration conf = HBaseConfiguration.create(); for (int i = 1; i < args.length; i++) { try { - conf.set("fs.default.name", args[i]); - conf.set("fs.defaultFS", args[i]); + conf.set(HBaseConfiguration.handleHadoopDeprecation( + "fs.default.name", "fs.defaultFS"), args[i]); Path logPath = new Path(args[i]); split(conf, logPath); } catch (Throwable t) { Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java =================================================================== --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (revision 1356172) +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (working copy) @@ -1076,7 +1076,8 @@ // to match the filesystem on hbase.rootdir else underlying hadoop hdfs // accessors will be going against wrong filesystem (unless all is set // to defaults). 
- this.conf.set("fs.defaultFS", this.conf.get("hbase.rootdir")); + this.conf.set(HBaseConfiguration.handleHadoopDeprecation( + "fs.default.name", "fs.defaultFS"), this.conf.get(HConstants.HBASE_DIR)); // Get fs instance used by this RS this.fs = new HFileSystem(this.conf, this.useHBaseChecksum); this.rootDir = new Path(this.conf.get(HConstants.HBASE_DIR)); Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java =================================================================== --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java (revision 1356172) +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java (working copy) @@ -36,6 +36,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ClusterId; import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -100,8 +101,8 @@ // We're supposed to run on 0.20 and 0.21 anyways. 
this.fs = this.rootdir.getFileSystem(conf); String fsUri = this.fs.getUri().toString(); - conf.set("fs.default.name", fsUri); - conf.set("fs.defaultFS", fsUri); + conf.set(HBaseConfiguration.handleHadoopDeprecation( + "fs.default.name", "fs.defaultFS"), fsUri); this.distributedLogSplitting = conf.getBoolean(HConstants.DISTRIBUTED_LOG_SPLITTING_KEY, true); if (this.distributedLogSplitting) { Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java =================================================================== --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java (revision 1356172) +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java (working copy) @@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.Chore; import org.apache.hadoop.hbase.ClusterStatus; import org.apache.hadoop.hbase.DeserializationException; +import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -1091,7 +1092,8 @@ protected RegionServerStartupResponse.Builder createConfigurationSubset() { RegionServerStartupResponse.Builder resp = addConfig( RegionServerStartupResponse.newBuilder(), HConstants.HBASE_DIR); - return addConfig(resp, "fs.default.name"); + return addConfig(resp, HBaseConfiguration.handleHadoopDeprecation( + "fs.default.name", "fs.defaultFS")); } private RegionServerStartupResponse.Builder addConfig( Index: hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java =================================================================== --- hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java (revision 1356172) +++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java (working copy) @@ -171,9 +171,8 @@ */ public int run(String[] args) { conf = HBaseConfiguration.create(); - conf.set("fs.defaultFS", - 
conf.get(org.apache.hadoop.hbase.HConstants.HBASE_DIR)); - conf.set("fs.default.name", + conf.set(HBaseConfiguration.handleHadoopDeprecation( + "fs.default.name", "fs.defaultFS"), conf.get(org.apache.hadoop.hbase.HConstants.HBASE_DIR)); SchemaMetrics.configureGlobally(conf); try { Index: hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Compression.java =================================================================== --- hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Compression.java (revision 1356172) +++ hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/Compression.java (working copy) @@ -28,6 +28,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.compress.CodecPool; import org.apache.hadoop.io.compress.CompressionCodec; @@ -210,7 +211,8 @@ Algorithm(String name) { this.conf = new Configuration(); - this.conf.setBoolean("hadoop.native.lib", true); + this.conf.setBoolean(HBaseConfiguration.handleHadoopDeprecation( + "hadoop.native.lib", "io.native.lib.available"), true); this.compressName = name; } Index: bin/region_status.rb =================================================================== --- bin/region_status.rb (revision 1356172) +++ bin/region_status.rb (working copy) @@ -61,7 +61,8 @@ org.apache.log4j.Logger.getLogger("org.apache.hadoop.hbase").setLevel(log_level) config = HBaseConfiguration.create -config.set 'fs.default.name', config.get(HConstants::HBASE_DIR) +config.set HBaseConfiguration.handleHadoopDeprecation( + "fs.default.name", "fs.defaultFS"), config.get(HConstants::HBASE_DIR) # wait until the master is running admin = nil Index: hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java =================================================================== --- 
hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java (revision 1356172) +++ hbase-common/src/main/java/org/apache/hadoop/hbase/HBaseConfiguration.java (working copy) @@ -147,4 +147,23 @@ } return isShowConf; } + + public static String handleHadoopDeprecation(String oldKey, String newKey) { + return !isDeprecationSupported() ? oldKey : newKey; + } + + private static int deprecationSupport = -1; + public static boolean isDeprecationSupported() { + if (deprecationSupport == -1) { + try { + Configuration.class.getMethod("handleDeprecation", new Class []{}); + deprecationSupport = 1; + } catch (SecurityException e) { + deprecationSupport = 0; + } catch (NoSuchMethodException e) { + deprecationSupport = 0; + } + } + return deprecationSupport == 1; + } }