Repository: hbase
Updated Branches:
  refs/heads/branch-1 a25cf7bbd -> 50f7894cc


HBASE-20691 Change the default WAL storage policy back to "NONE"

This partially reverts HBASE-19858 and adds more documentation about why we chose "NONE" as the default.
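
For context, here is a minimal sketch of how an operator could override the deferred default and pin new WAL files to a real HDFS policy. The "ONE_SSD" value and the programmatic override are illustrative only, not part of this commit; in practice the property would normally be set in hbase-site.xml.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class WalPolicyExample {
  public static void main(String[] args) {
    // Picks up hbase-site.xml from the classpath; "hbase.wal.storage.policy"
    // now defaults to "NONE", i.e. defer to whatever HDFS has configured.
    Configuration conf = HBaseConfiguration.create();
    // Illustrative override: keep one WAL replica on SSD.
    conf.set(HConstants.WAL_STORAGE_POLICY, "ONE_SSD");
    System.out.println(conf.get(HConstants.WAL_STORAGE_POLICY,
        HConstants.DEFAULT_WAL_STORAGE_POLICY));
  }
}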


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/50f7894c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/50f7894c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/50f7894c

Branch: refs/heads/branch-1
Commit: 50f7894cc488bd5d5f40284a9a95d7a79bff93b2
Parents: a25cf7b
Author: Yu Li <l...@apache.org>
Authored: Wed Jul 4 14:21:33 2018 +0800
Committer: Yu Li <l...@apache.org>
Committed: Thu Jul 5 15:26:56 2018 +0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/HConstants.java     |  8 ++-
 .../org/apache/hadoop/hbase/master/HMaster.java |  5 +-
 .../hadoop/hbase/regionserver/wal/FSHLog.java   |  5 +-
 .../org/apache/hadoop/hbase/util/FSUtils.java   | 63 +++++++++++--------
 .../apache/hadoop/hbase/util/TestFSUtils.java   | 64 ++++++++++++++++++--
 5 files changed, 110 insertions(+), 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/50f7894c/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 88a352a..3d1115b 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1056,7 +1056,13 @@ public final class HConstants {
    * Valid values are: HOT, COLD, WARM, ALL_SSD, ONE_SSD, LAZY_PERSIST
   * See http://hadoop.apache.org/docs/r2.7.3/hadoop-project-dist/hadoop-hdfs/ArchivalStorage.html*/
   public static final String WAL_STORAGE_POLICY = "hbase.wal.storage.policy";
-  public static final String DEFAULT_WAL_STORAGE_POLICY = "HOT";
+  /**
+   * "NONE" is not a valid storage policy and means we defer the policy to 
HDFS. @see
+   * <a 
href="https://issues.apache.org/jira/browse/HBASE-20691";>HBASE-20691</a>
+   */
+  public static final String DEFER_TO_HDFS_STORAGE_POLICY = "NONE";
+  /** By default we defer the WAL storage policy to HDFS */
+  public static final String DEFAULT_WAL_STORAGE_POLICY = 
DEFER_TO_HDFS_STORAGE_POLICY;
 
   /** Region in Transition metrics threshold time */
   public static final String METRICS_RIT_STUCK_WARNING_THRESHOLD =
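
To make the new constants' semantics concrete, below is a hedged sketch of the decision callers effectively get from FSUtils after this change. The class and variable names are illustrative, and the direct FileSystem#setStoragePolicy call assumes a Hadoop version that exposes it (HDFS-6584); on older versions FSUtils goes through reflection instead, as shown further down.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;

public final class DeferToHdfsExample {
  /**
   * Resolves the configured WAL policy and skips the HDFS call when it is "NONE",
   * so the directory simply inherits the cluster-wide HDFS default policy.
   */
  static void applyWalPolicy(Configuration conf, FileSystem fs, Path walDir) throws IOException {
    String policy = conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY);
    if (HConstants.DEFER_TO_HDFS_STORAGE_POLICY.equals(policy)) {
      return; // defer to HDFS; nothing to set
    }
    fs.setStoragePolicy(walDir, policy); // e.g. "HOT", "ONE_SSD", "ALL_SSD"
  }
}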

http://git-wip-us.apache.org/repos/asf/hbase/blob/50f7894c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index d228b57..cfa6aa2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -1324,8 +1324,9 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
       }
     }
     // Now that it exists, set the log policy
-    FSUtils.setStoragePolicy(walFs, conf, walDir, HConstants.WAL_STORAGE_POLICY,
-      HConstants.DEFAULT_WAL_STORAGE_POLICY);
+    String storagePolicy =
+        conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY);
+    FSUtils.setStoragePolicy(walFs, walDir, storagePolicy);
 
     procedureStore = new WALProcedureStore(conf, walFs, walDir,
         new MasterProcedureEnv.WALStoreLeaseRecovery(this));

http://git-wip-us.apache.org/repos/asf/hbase/blob/50f7894c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
index 9cef728..141eafb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
@@ -469,8 +469,9 @@ public class FSHLog implements WAL {
     }
     // Now that it exists, set the storage policy for the entire directory of wal files related to
     // this FSHLog instance
-    FSUtils.setStoragePolicy(fs, conf, this.fullPathLogDir, HConstants.WAL_STORAGE_POLICY,
-      HConstants.DEFAULT_WAL_STORAGE_POLICY);
+    String storagePolicy =
+        conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY);
+    FSUtils.setStoragePolicy(fs, this.fullPathLogDir, storagePolicy);
     this.logFileSuffix = (suffix == null) ? "" : URLEncoder.encode(suffix, "UTF8");
     this.prefixPathStr = new Path(fullPathLogDir,
         logFilePrefix + WAL_FILE_NAME_DELIMITER).toString();

http://git-wip-us.apache.org/repos/asf/hbase/blob/50f7894c/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index f3e2c63..a6fccfe 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -122,28 +122,6 @@ public abstract class FSUtils {
     super();
   }
 
-  /**
-   * Sets storage policy for given path according to config setting.
-   * If the passed path is a directory, we'll set the storage policy for all files
-   * created in the future in said directory. Note that this change in storage
-   * policy takes place at the HDFS level; it will persist beyond this RS's lifecycle.
-   * If we're running on a version of HDFS that doesn't support the given storage policy
-   * (or storage policies at all), then we'll issue a log message and continue.
-   *
-   * See http://hadoop.apache.org/docs/r2.6.0/hadoop-project-dist/hadoop-hdfs/ArchivalStorage.html
-   *
-   * @param fs We only do anything if an instance of DistributedFileSystem
-   * @param conf used to look up storage policy with given key; not modified.
-   * @param path the Path whose storage policy is to be set
-   * @param policyKey e.g. HConstants.WAL_STORAGE_POLICY
-   * @param defaultPolicy usually should be the policy NONE to delegate to HDFS
-   */
-  public static void setStoragePolicy(final FileSystem fs, final Configuration conf,
-      final Path path, final String policyKey, final String defaultPolicy) {
-    String storagePolicy = conf.get(policyKey, defaultPolicy).toUpperCase(Locale.ROOT);
-    setStoragePolicy(fs, path, storagePolicy);
-  }
-
   private static final Map<FileSystem, Boolean> warningMap =
       new ConcurrentHashMap<FileSystem, Boolean>();
 
@@ -164,18 +142,37 @@ public abstract class FSUtils {
    */
   public static void setStoragePolicy(final FileSystem fs, final Path path,
       final String storagePolicy) {
+    try {
+      setStoragePolicy(fs, path, storagePolicy, false);
+    } catch (IOException e) {
+      // should never arrive here
+      LOG.warn("We have chosen not to throw exception but some unexpectedly 
thrown out", e);
+    }
+  }
+
+  static void setStoragePolicy(final FileSystem fs, final Path path, final String storagePolicy,
+      boolean throwException) throws IOException {
     if (storagePolicy == null) {
       if (LOG.isTraceEnabled()) {
         LOG.trace("We were passed a null storagePolicy, exiting early.");
       }
       return;
     }
-    final String trimmedStoragePolicy = storagePolicy.trim();
+    String trimmedStoragePolicy = storagePolicy.trim();
     if (trimmedStoragePolicy.isEmpty()) {
       if (LOG.isTraceEnabled()) {
         LOG.trace("We were passed an empty storagePolicy, exiting early.");
       }
       return;
+    } else {
+      trimmedStoragePolicy = trimmedStoragePolicy.toUpperCase(Locale.ROOT);
+    }
+    if (trimmedStoragePolicy.equals(HConstants.DEFER_TO_HDFS_STORAGE_POLICY)) {
+      if (LOG.isTraceEnabled()) {
+        LOG.trace(
+          "We were passed the defer-to-hdfs policy " + trimmedStoragePolicy + 
", exiting early.");
+      }
+      return;
     }
     boolean distributed = false;
     try {
@@ -194,7 +191,16 @@ public abstract class FSUtils {
       return;
     }
     if (distributed) {
-      invokeSetStoragePolicy(fs, path, trimmedStoragePolicy);
+      try {
+        invokeSetStoragePolicy(fs, path, trimmedStoragePolicy);
+      } catch (IOException e) {
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("Failed to invoke set storage policy API on FS", e);
+        }
+        if (throwException) {
+          throw e;
+        }
+      }
     }
   }
 
@@ -202,13 +208,15 @@ public abstract class FSUtils {
   * All args have been checked and are good. Run the setStoragePolicy invocation.
   */
  private static void invokeSetStoragePolicy(final FileSystem fs, final Path path,
-      final String storagePolicy) {
+      final String storagePolicy) throws IOException {
     Method m = null;
+    Exception toThrow = null;
     try {
       m = fs.getClass().getDeclaredMethod("setStoragePolicy",
         new Class<?>[] { Path.class, String.class });
       m.setAccessible(true);
     } catch (NoSuchMethodException e) {
+      toThrow = e;
       final String msg = "FileSystem doesn't support setStoragePolicy; 
HDFS-6584 not available";
       if (!warningMap.containsKey(fs)) {
         warningMap.put(fs, true);
@@ -218,6 +226,7 @@ public abstract class FSUtils {
       }
       m = null;
     } catch (SecurityException e) {
+      toThrow = e;
       final String msg = "No access to setStoragePolicy on FileSystem; 
HDFS-6584 not available";
       if (!warningMap.containsKey(fs)) {
         warningMap.put(fs, true);
@@ -234,6 +243,7 @@ public abstract class FSUtils {
           LOG.debug("Set storagePolicy=" + storagePolicy + " for path=" + 
path);
         }
       } catch (Exception e) {
+        toThrow = e;
        // This swallows FNFE, should we be throwing it? seems more likely to indicate dev
         // misuse than a runtime problem with HDFS.
         if (!warningMap.containsKey(fs)) {
@@ -258,6 +268,9 @@ public abstract class FSUtils {
         }
       }
     }
+    if (toThrow != null) {
+      throw new IOException(toThrow);
+    }
   }
 
   /**
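
As an aside, here is a self-contained sketch of the reflection pattern the hunks above rely on, which keeps the code loadable on Hadoop releases that predate HDFS-6584. The class and method names are illustrative, not part of the commit:

import java.io.IOException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class ReflectiveStoragePolicy {
  private ReflectiveStoragePolicy() {
  }

  /**
   * Looks up setStoragePolicy(Path, String) at runtime so this compiles against
   * Hadoop versions where the method does not exist, and rethrows any failure as
   * an IOException, mirroring the patched invokeSetStoragePolicy above.
   */
  public static void trySetStoragePolicy(FileSystem fs, Path path, String policy)
      throws IOException {
    try {
      Method m = fs.getClass().getDeclaredMethod("setStoragePolicy", Path.class, String.class);
      m.setAccessible(true);
      m.invoke(fs, path, policy);
    } catch (Exception e) {
      // NoSuchMethodException means HDFS-6584 is absent; anything else is a real failure.
      throw new IOException("setStoragePolicy unavailable or failed for " + path, e);
    }
  }
}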

http://git-wip-us.apache.org/repos/asf/hbase/blob/50f7894c/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
index 5fa9633..38a78ee 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java
@@ -29,6 +29,8 @@ import java.io.File;
 import java.io.IOException;
 import java.util.UUID;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -42,7 +44,9 @@ import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.Assert;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -51,6 +55,8 @@ import org.junit.experimental.categories.Category;
  */
 @Category(MediumTests.class)
 public class TestFSUtils {
+  private static final Log LOG = LogFactory.getLog(TestFSUtils.class);
+
   /**
    * Test path compare and prefix checking.
    * @throws IOException
@@ -364,14 +370,28 @@ public class TestFSUtils {
       Path testDir = htu.getDataTestDirOnTestFS("testArchiveFile");
       fs.mkdirs(testDir);
 
-      FSUtils.setStoragePolicy(fs, conf, testDir, HConstants.WAL_STORAGE_POLICY,
-          HConstants.DEFAULT_WAL_STORAGE_POLICY);
+      String storagePolicy =
+          conf.get(HConstants.WAL_STORAGE_POLICY, HConstants.DEFAULT_WAL_STORAGE_POLICY);
+      FSUtils.setStoragePolicy(fs, testDir, storagePolicy);
 
       String file = UUID.randomUUID().toString();
       Path p = new Path(testDir, file);
       WriteDataToHDFS(fs, p, 4096);
-      // will assert existance before deleting.
-      cleanupFile(fs, testDir);
+      try (HFileSystem hfs = new HFileSystem(fs)) {
+        String policySet = hfs.getStoragePolicyName(p);
+        LOG.debug("The storage policy of path " + p + " is " + policySet);
+        if (policy.equals(HConstants.DEFER_TO_HDFS_STORAGE_POLICY)
+            || policy.equals(INVALID_STORAGE_POLICY)) {
+          String hdfsDefaultPolicy = hfs.getStoragePolicyName(hfs.getHomeDirectory());
+          LOG.debug("The default hdfs storage policy (indicated by home path: "
+              + hfs.getHomeDirectory() + ") is " + hdfsDefaultPolicy);
+          Assert.assertEquals(hdfsDefaultPolicy, policySet);
+        } else {
+          Assert.assertEquals(policy, policySet);
+        }
+        // will assert existence before deleting.
+        cleanupFile(fs, testDir);
+      }
     } finally {
       cluster.shutdown();
     }
@@ -379,19 +399,53 @@ public class TestFSUtils {
 
   @Test
   public void testSetStoragePolicyDefault() throws Exception {
+    verifyNoHDFSApiInvocationForDefaultPolicy();
     verifyFileInDirWithStoragePolicy(HConstants.DEFAULT_WAL_STORAGE_POLICY);
   }
 
+  /**
+   * Note: the default policy currently defers to HDFS, and this case verifies that logic;
+   * remove this check if the default policy is ever changed.
+   */
+  private void verifyNoHDFSApiInvocationForDefaultPolicy() {
+    FileSystem testFs = new AlwaysFailSetStoragePolicyFileSystem();
+    // No exception should be thrown when setting the default storage policy, which indicates
+    // the HDFS API hasn't been called
+    try {
+      FSUtils.setStoragePolicy(testFs, new Path("non-exist"), HConstants.DEFAULT_WAL_STORAGE_POLICY,
+        true);
+    } catch (IOException e) {
+      Assert.fail("Should have bypassed the FS API when setting the default storage policy");
+    }
+    // An exception should be thrown when given a non-default storage policy, which indicates
+    // the HDFS API has been called
+    try {
+      FSUtils.setStoragePolicy(testFs, new Path("non-exist"), "HOT", true);
+      Assert.fail("Should have invoked the FS API but did not");
+    } catch (IOException e) {
+      // expected given an invalid path
+    }
+  }
+
+  class AlwaysFailSetStoragePolicyFileSystem extends DistributedFileSystem {
+    @Override
+    public void setStoragePolicy(final Path src, final String policyName) throws IOException {
+      throw new IOException("The setStoragePolicy method is invoked");
+    }
+  }
+
   /* might log a warning, but still work. (always warning on Hadoop < 2.6.0) */
   @Test
   public void testSetStoragePolicyValidButMaybeNotPresent() throws Exception {
     verifyFileInDirWithStoragePolicy("ALL_SSD");
   }
 
+  final String INVALID_STORAGE_POLICY = "1772";
+
  /* should log a warning, but still work. (different warning on Hadoop < 2.6.0) */
   @Test
   public void testSetStoragePolicyInvalid() throws Exception {
-    verifyFileInDirWithStoragePolicy("1772");
+    verifyFileInDirWithStoragePolicy(INVALID_STORAGE_POLICY);
   }
 
   @Test
