From 79aa200707891a1d739b84e3b4c253d536db6f55 Mon Sep 17 00:00:00 2001
From: Guanghao Zhang
Date: Wed, 2 Nov 2016 10:13:37 +0800
Subject: [PATCH] HBASE-16985 TestClusterId failed due to wrong hbase rootdir

---
 .../org/apache/hadoop/hbase/master/HMaster.java    |  2 +-
 .../hadoop/hbase/regionserver/HRegionServer.java   | 39 ++++++++++++++++------
 2 files changed, 29 insertions(+), 12 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 204577d..0136ff5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -406,7 +406,7 @@ public class HMaster extends HRegionServer implements MasterServices {
     this.rsFatals = new MemoryBoundedLogMessageBuffer(
       conf.getLong("hbase.master.buffer.for.rs.fatals", 1*1024*1024));
 
-    LOG.info("hbase.rootdir=" + FSUtils.getRootDir(this.conf) +
+    LOG.info("hbase.rootdir=" + getRootDir() +
       ", hbase.cluster.distributed=" + this.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, false));
 
     // Disable usage of meta replicas in the master
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 89f7a05..312e8c1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -583,16 +583,7 @@ public class HRegionServer extends HasThread implements
       }
     };
 
-    // Set 'fs.defaultFS' to match the filesystem on hbase.rootdir else
-    // underlying hadoop hdfs accessors will be going against wrong filesystem
-    // (unless all is set to defaults).
-    FSUtils.setFsDefault(this.conf, FSUtils.getRootDir(this.conf));
-    // Get fs instance used by this RS. Do we use checksum verification in the hbase? If hbase
-    // checksum verification enabled, then automatically switch off hdfs checksum verification.
-    boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
-    this.fs = new HFileSystem(this.conf, useHBaseChecksum);
-    this.rootDir = FSUtils.getRootDir(this.conf);
-    this.tableDescriptors = getFsTableDescriptors();
+    initializeFileSystem();
 
     service = new ExecutorService(getServerName().toShortString());
     spanReceiverHost = SpanReceiverHost.getInstance(getConfiguration());
@@ -648,6 +639,19 @@ public class HRegionServer extends HasThread implements
     choreService.scheduleChore(compactedFileDischarger);
   }
 
+  private void initializeFileSystem() throws IOException {
+    // Set 'fs.defaultFS' to match the filesystem on hbase.rootdir else
+    // underlying hadoop hdfs accessors will be going against wrong filesystem
+    // (unless all is set to defaults).
+    FSUtils.setFsDefault(this.conf, FSUtils.getRootDir(this.conf));
+    // Get fs instance used by this RS. Do we use checksum verification in the hbase? If hbase
+    // checksum verification enabled, then automatically switch off hdfs checksum verification.
+    boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
+    this.fs = new HFileSystem(this.conf, useHBaseChecksum);
+    this.rootDir = FSUtils.getRootDir(this.conf);
+    this.tableDescriptors = getFsTableDescriptors();
+  }
+
   protected TableDescriptors getFsTableDescriptors() throws IOException {
     return new FSTableDescriptors(this.conf,
       this.fs, this.rootDir, !canUpdateTableDescriptor(), false);
@@ -1386,6 +1390,7 @@ public class HRegionServer extends HasThread implements
   protected void handleReportForDutyResponse(final RegionServerStartupResponse c)
   throws IOException {
     try {
+      boolean updateRootDir = false;
      for (NameStringPair e : c.getMapEntriesList()) {
         String key = e.getName();
         // The hostname the master sees us as.
@@ -1408,13 +1413,25 @@ public class HRegionServer extends HasThread implements
           }
           continue;
         }
+
         String value = e.getValue();
+        if (key.equals(HConstants.HBASE_DIR)) {
+          if (value != null && !value.equals(conf.get(HConstants.HBASE_DIR))) {
+            updateRootDir = true;
+          }
+        }
+
         if (LOG.isDebugEnabled()) {
-          LOG.info("Config from master: " + key + "=" + value);
+          LOG.debug("Config from master: " + key + "=" + value);
         }
         this.conf.set(key, value);
       }
+      if (updateRootDir) {
+        // initialize file system by the config fs.defaultFS and hbase.rootdir from master
+        initializeFileSystem();
+      }
+
       // hack! Maps DFSClient => RegionServer for logs.  HDFS made this
       // config param for task trackers, but we can piggyback off of it.
       if (this.conf.get("mapreduce.task.attempt.id") == null) {
-- 
1.9.1
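For context, the core of the change is that handleReportForDutyResponse() now notices when the master hands back a different hbase.rootdir than the one in the local configuration and re-runs filesystem initialization before using it. The following standalone sketch illustrates that control flow only; the Config map, class name and initializeFileSystem() stand-in below are hypothetical simplifications, not the real HBase classes or the exact patched code.

import java.util.LinkedHashMap;
import java.util.Map;

// Simplified stand-in for the region server startup handshake: if the master
// reports a different hbase.rootdir, filesystem state is initialized again.
// Names here are illustrative, not the HBase API.
public class RootDirUpdateSketch {

  private static final String HBASE_DIR = "hbase.rootdir";

  private final Map<String, String> conf = new LinkedHashMap<>();

  public RootDirUpdateSketch(String localRootDir) {
    conf.put(HBASE_DIR, localRootDir);
    initializeFileSystem();
  }

  // Mirrors the patched handleReportForDutyResponse(): record whether the
  // master's copy of hbase.rootdir differs before overwriting local config.
  public void applyConfigFromMaster(Map<String, String> fromMaster) {
    boolean updateRootDir = false;
    for (Map.Entry<String, String> e : fromMaster.entrySet()) {
      String key = e.getKey();
      String value = e.getValue();
      if (HBASE_DIR.equals(key) && value != null && !value.equals(conf.get(HBASE_DIR))) {
        updateRootDir = true;
      }
      System.out.println("Config from master: " + key + "=" + value);
      conf.put(key, value);
    }
    if (updateRootDir) {
      // Re-run the same initialization path the constructor used, now that
      // conf carries the rootdir the master actually expects.
      initializeFileSystem();
    }
  }

  private void initializeFileSystem() {
    // In the real patch this sets fs.defaultFS from hbase.rootdir and rebuilds
    // the HFileSystem, rootDir and table descriptors; here we just log it.
    System.out.println("initializeFileSystem() with " + HBASE_DIR + "=" + conf.get(HBASE_DIR));
  }

  public static void main(String[] args) {
    RootDirUpdateSketch rs = new RootDirUpdateSketch("hdfs://localhost:8020/hbase-local");
    rs.applyConfigFromMaster(Map.of(HBASE_DIR, "hdfs://namenode:8020/hbase"));
  }
}

Under this reading, TestClusterId passes because the region server no longer keeps working against the rootdir it started with when the master's configuration points elsewhere.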