From af7389364a3806bab3fb724ac33d279000fd1e57 Mon Sep 17 00:00:00 2001
From: Ashish Singhi
Date: Thu, 5 Oct 2017 12:51:39 +0530
Subject: [PATCH] HBASE-18938 Backport HBASE-16985 to branch-1.3

---
 .../hadoop/hbase/regionserver/HRegionServer.java  | 28 +++++++++++++---------
 1 file changed, 17 insertions(+), 11 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 54934f3505..59598711d3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -578,17 +578,7 @@ public class HRegionServer extends HasThread implements
 
     useZKForAssignment = ConfigUtil.useZKForAssignment(conf);
 
-    // Set 'fs.defaultFS' to match the filesystem on hbase.rootdir else
-    // underlying hadoop hdfs accessors will be going against wrong filesystem
-    // (unless all is set to defaults).
-    FSUtils.setFsDefault(this.conf, FSUtils.getRootDir(this.conf));
-    // Get fs instance used by this RS. Do we use checksum verification in the hbase? If hbase
-    // checksum verification enabled, then automatically switch off hdfs checksum verification.
-    boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
-    this.fs = new HFileSystem(this.conf, useHBaseChecksum);
-    this.rootDir = FSUtils.getRootDir(this.conf);
-    this.tableDescriptors = new FSTableDescriptors(
-      this.conf, this.fs, this.rootDir, !canUpdateTableDescriptor(), false);
+    initializeFileSystem();
 
     service = new ExecutorService(getServerName().toShortString());
     spanReceiverHost = SpanReceiverHost.getInstance(getConfiguration());
@@ -641,6 +631,20 @@ public class HRegionServer extends HasThread implements
     choreService.scheduleChore(compactedFileDischarger);
   }
 
+  private void initializeFileSystem() throws IOException {
+    // Set 'fs.defaultFS' to match the filesystem on hbase.rootdir else
+    // underlying hadoop hdfs accessors will be going against wrong filesystem
+    // (unless all is set to defaults).
+    FSUtils.setFsDefault(this.conf, FSUtils.getRootDir(this.conf));
+    // Get fs instance used by this RS. Do we use checksum verification in the hbase? If hbase
+    // checksum verification enabled, then automatically switch off hdfs checksum verification.
+    boolean useHBaseChecksum = this.conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
+    this.fs = new HFileSystem(this.conf, useHBaseChecksum);
+    this.rootDir = FSUtils.getRootDir(this.conf);
+    this.tableDescriptors = new FSTableDescriptors(
+      this.conf, this.fs, this.rootDir, !canUpdateTableDescriptor(), false);
+  }
+
   protected void setInitLatch(CountDownLatch latch) {
     this.initLatch = latch;
   }
@@ -1402,6 +1406,8 @@ public class HRegionServer extends HasThread implements
       }
       this.conf.set(key, value);
     }
+    // initialize file system again by the config fs.defaultFS and hbase.rootdir from master
+    initializeFileSystem();
 
     // hack! Maps DFSClient => RegionServer for logs. HDFS made this
     // config param for task trackers, but we can piggyback off of it.
-- 
2.15.0