This is an automated email from the ASF dual-hosted git repository. ramkrishna pushed a commit to branch branch-2 in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/branch-2 by this push: new 988be93 HBASE-25050 - We initialize Filesystems more than once. (#2419) 988be93 is described below commit 988be9321d4f2f93d1cf779d649468596c7f41f9 Author: ramkrish86 <ram_krish...@hotmail.com> AuthorDate: Tue Nov 24 15:31:04 2020 +0530 HBASE-25050 - We initialize Filesystems more than once. (#2419) * HBASE-25050 - We initialize Filesystems more than once. * Ensuring that calling the FS#get() will only ensure FS init. * Fix for test failures. We should pass the entire path and not the scheme alone * Cases where we don't have a scheme for the URI * Address review comments * Add some comments on why FS#get(URI, conf) is getting used * Adding the comment as per Sean's review Signed-off-by: Sean Busbey <bus...@apache.org> Signed-off-by: Michael Stack <st...@apache.org> --- .../org/apache/hadoop/hbase/util/CommonFSUtils.java | 18 ++++++++++++++++++ .../apache/hadoop/hbase/util/TestCommonFSUtils.java | 17 +++++++++++++++++ .../java/org/apache/hadoop/hbase/fs/HFileSystem.java | 10 +++++++--- .../hadoop/hbase/regionserver/HRegionServer.java | 17 ++++++++++++++--- 4 files changed, 56 insertions(+), 6 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java index e694d96..63c9f32 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java @@ -321,6 +321,10 @@ public final class CommonFSUtils { c.set("fs.defaultFS", root.toString()); // for hadoop 0.21+ } + public static void setFsDefault(final Configuration c, final String uri) { + c.set("fs.defaultFS", uri); // for hadoop 0.21+ + } + public static FileSystem getRootDirFileSystem(final Configuration c) throws IOException { Path p = getRootDir(c); return p.getFileSystem(c); @@ -342,6 +346,20 @@ public final class CommonFSUtils { 
return p.makeQualified(fs.getUri(), fs.getWorkingDirectory()); } + /** + * Returns the URI in string format + * @param c configuration + * @param p path + * @return the URI in string format + * @throws IOException + */ + public static String getDirUri(final Configuration c, Path p) throws IOException { + if (p.toUri().getScheme() != null) { + return p.toUri().toString(); + } + return null; + } + @VisibleForTesting public static void setWALRootDir(final Configuration c, final Path root) { c.set(HBASE_WAL_DIR, root.toString()); diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestCommonFSUtils.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestCommonFSUtils.java index e6a427e..7d433c0 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestCommonFSUtils.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestCommonFSUtils.java @@ -28,6 +28,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseCommonTestingUtility; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.Before; @@ -105,6 +106,22 @@ public class TestCommonFSUtils { assertEquals(walRoot, CommonFSUtils.getWALRootDir(conf)); } + @Test + public void testGetWALRootDirUsingUri() throws IOException { + Path root = new Path("file:///hbase/root"); + conf.set(HConstants.HBASE_DIR, root.toString()); + Path walRoot = new Path("file:///hbase/logroot"); + conf.set(CommonFSUtils.HBASE_WAL_DIR, walRoot.toString()); + String walDirUri = CommonFSUtils.getDirUri(conf, walRoot); + String rootDirUri = CommonFSUtils.getDirUri(conf, root); + CommonFSUtils.setFsDefault(this.conf, rootDirUri); + CommonFSUtils.setRootDir(conf, root); + assertEquals(root, CommonFSUtils.getRootDir(conf)); + 
CommonFSUtils.setFsDefault(this.conf, walDirUri); + CommonFSUtils.setWALRootDir(conf, walRoot); + assertEquals(walRoot, CommonFSUtils.getWALRootDir(conf)); + } + @Test(expected=IllegalStateException.class) public void testGetWALRootDirIllegalWALDir() throws IOException { Path root = new Path("file:///hbase/root"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java index 2be48b4..17b04ab 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java @@ -82,11 +82,15 @@ public class HFileSystem extends FilterFileSystem { // Create the default filesystem with checksum verification switched on. // By default, any operation to this FilterFileSystem occurs on // the underlying filesystem that has checksums switched on. - this.fs = FileSystem.get(conf); + // This FS#get(URI, conf) clearly indicates in the javadoc that if the FS is + // not created it will initialize the FS and return that created FS. If it is + // already created it will just return the FS that was already created. + // We take pains to funnel all of our FileSystem instantiation through this call to ensure + // we never need to call FS.initialize ourself so that we do not have to track any state to + // avoid calling initialize more than once. 
+ this.fs = FileSystem.get(getDefaultUri(conf), conf); this.useHBaseChecksum = useHBaseChecksum; - fs.initialize(getDefaultUri(conf), conf); - // disable checksum verification for local fileSystem, see HBASE-11218 if (fs instanceof LocalFileSystem) { fs.setWriteChecksum(false); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 2797240..be98f77 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -753,17 +753,28 @@ public class HRegionServer extends Thread implements // Get fs instance used by this RS. Do we use checksum verification in the hbase? If hbase // checksum verification enabled, then automatically switch off hdfs checksum verification. boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true); - CommonFSUtils.setFsDefault(this.conf, CommonFSUtils.getWALRootDir(this.conf)); + String walDirUri = CommonFSUtils.getDirUri(this.conf, + new Path(conf.get(CommonFSUtils.HBASE_WAL_DIR, conf.get(HConstants.HBASE_DIR)))); + // set WAL's uri + if (walDirUri != null) { + CommonFSUtils.setFsDefault(this.conf, walDirUri); + } + // init the WALFs this.walFs = new HFileSystem(this.conf, useHBaseChecksum); this.walRootDir = CommonFSUtils.getWALRootDir(this.conf); // Set 'fs.defaultFS' to match the filesystem on hbase.rootdir else // underlying hadoop hdfs accessors will be going against wrong filesystem // (unless all is set to defaults). 
- CommonFSUtils.setFsDefault(this.conf, CommonFSUtils.getRootDir(this.conf)); + String rootDirUri = + CommonFSUtils.getDirUri(this.conf, new Path(conf.get(HConstants.HBASE_DIR))); + if (rootDirUri != null) { + CommonFSUtils.setFsDefault(this.conf, rootDirUri); + } + // init the filesystem this.dataFs = new HFileSystem(this.conf, useHBaseChecksum); this.dataRootDir = CommonFSUtils.getRootDir(this.conf); this.tableDescriptors = new FSTableDescriptors(this.dataFs, this.dataRootDir, - !canUpdateTableDescriptor(), cacheTableDescriptor()); + !canUpdateTableDescriptor(), cacheTableDescriptor()); } protected void login(UserProvider user, String host) throws IOException {