busbey commented on a change in pull request #1036: HBASE-23686 Revert binary incompatible change in ByteRangeUtils and removed reflections in CommonFSUtils
URL: https://github.com/apache/hbase/pull/1036#discussion_r366471989
 
 

 ##########
 File path: hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
 ##########
 @@ -568,83 +521,54 @@ static void setStoragePolicy(final FileSystem fs, final Path path, final String
    */
   private static void invokeSetStoragePolicy(final FileSystem fs, final Path path,
       final String storagePolicy) throws IOException {
-    Method m = null;
     Exception toThrow = null;
+
     try {
-      m = fs.getClass().getDeclaredMethod("setStoragePolicy", Path.class, String.class);
-      m.setAccessible(true);
-    } catch (NoSuchMethodException e) {
-      toThrow = e;
-      final String msg = "FileSystem doesn't support setStoragePolicy; 
HDFS-6584, HDFS-9345 " +
-          "not available. This is normal and expected on earlier Hadoop 
versions.";
-      if (!warningMap.containsKey(fs)) {
-        warningMap.put(fs, true);
-        LOG.warn(msg, e);
-      } else if (LOG.isDebugEnabled()) {
-        LOG.debug(msg, e);
+      fs.setStoragePolicy(path, storagePolicy);
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Set storagePolicy={} for path={}", storagePolicy, path);
       }
-      m = null;
-    } catch (SecurityException e) {
+    } catch (Exception e) {
       toThrow = e;
-      final String msg = "No access to setStoragePolicy on FileSystem from the 
SecurityManager; " +
-          "HDFS-6584, HDFS-9345 not available. This is unusual and probably 
warrants an email " +
-          "to the user@hbase mailing list. Please be sure to include a link to 
your configs, and " +
-          "logs that include this message and period of time before it. Logs 
around service " +
-          "start up will probably be useful as well.";
+      // This swallows FNFE, should we be throwing it? seems more likely to indicate dev
+      // misuse than a runtime problem with HDFS.
       if (!warningMap.containsKey(fs)) {
         warningMap.put(fs, true);
-        LOG.warn(msg, e);
+        LOG.warn("Unable to set storagePolicy=" + storagePolicy + " for path=" 
+ path + ". " +
+            "DEBUG log level might have more details.", e);
       } else if (LOG.isDebugEnabled()) {
-        LOG.debug(msg, e);
+        LOG.debug("Unable to set storagePolicy=" + storagePolicy + " for 
path=" + path, e);
       }
-      m = null; // could happen on setAccessible() or getDeclaredMethod()
-    }
-    if (m != null) {
-      try {
-        m.invoke(fs, path, storagePolicy);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Set storagePolicy={} for path={}", storagePolicy, path);
-        }
-      } catch (Exception e) {
-        toThrow = e;
-        // This swallows FNFE, should we be throwing it? seems more likely to indicate dev
-        // misuse than a runtime problem with HDFS.
-        if (!warningMap.containsKey(fs)) {
-          warningMap.put(fs, true);
-          LOG.warn("Unable to set storagePolicy=" + storagePolicy + " for 
path=" + path + ". " +
-              "DEBUG log level might have more details.", e);
-        } else if (LOG.isDebugEnabled()) {
-          LOG.debug("Unable to set storagePolicy=" + storagePolicy + " for 
path=" + path, e);
-        }
-        // check for lack of HDFS-7228
-        if (e instanceof InvocationTargetException) {
-          final Throwable exception = e.getCause();
-          if (exception instanceof RemoteException &&
-              HadoopIllegalArgumentException.class.getName().equals(
-                ((RemoteException)exception).getClassName())) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Given storage policy, '" +storagePolicy +"', was 
rejected and probably " +
-                "isn't a valid policy for the version of Hadoop you're 
running. I.e. if you're " +
-                "trying to use SSD related policies then you're likely missing 
HDFS-7228. For " +
-                "more information see the 'ArchivalStorage' docs for your 
Hadoop release.");
-            }
-          // Hadoop 2.8+, 3.0-a1+ added FileSystem.setStoragePolicy with a default implementation
-          // that throws UnsupportedOperationException
-          } else if (exception instanceof UnsupportedOperationException) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("The underlying FileSystem implementation doesn't 
support " +
-                  "setStoragePolicy. This is probably intentional on their 
part, since HDFS-9345 " +
-                  "appears to be present in your version of Hadoop. For more 
information check " +
-                  "the Hadoop documentation on 'ArchivalStorage', the Hadoop 
FileSystem " +
-                  "specification docs from HADOOP-11981, and/or related 
documentation from the " +
-                  "provider of the underlying FileSystem (its name should 
appear in the " +
-                  "stacktrace that accompanies this message). Note in 
particular that Hadoop's " +
-                  "local filesystem implementation doesn't support storage 
policies.", exception);
-            }
+      // check for lack of HDFS-7228
+      if (e instanceof InvocationTargetException) {
 
 Review comment:
   This should be two direct checks for `RemoteException` and `UnsupportedOperationException` respectively. Now that we're not using reflection, it won't be wrapped in `InvocationTargetException`.
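
   For illustration, a rough sketch of the shape that suggestion could take. This is not the actual patch: the class name and the `LOG`/`warningMap` fields merely stand in for the ones already defined in `CommonFSUtils`, the `toThrow` handling outside this hunk is omitted, and it assumes Hadoop 2.8+ (so `FileSystem#setStoragePolicy` is available) with slf4j on the classpath.

   ```java
   // Sketch only; mirrors the diff above but checks the caught exception's type
   // directly instead of unwrapping an InvocationTargetException.
   import java.util.Map;
   import java.util.concurrent.ConcurrentHashMap;

   import org.apache.hadoop.HadoopIllegalArgumentException;
   import org.apache.hadoop.fs.FileSystem;
   import org.apache.hadoop.fs.Path;
   import org.apache.hadoop.ipc.RemoteException;
   import org.slf4j.Logger;
   import org.slf4j.LoggerFactory;

   final class StoragePolicySketch {
     private static final Logger LOG = LoggerFactory.getLogger(StoragePolicySketch.class);
     private static final Map<FileSystem, Boolean> warningMap = new ConcurrentHashMap<>();

     static void invokeSetStoragePolicy(final FileSystem fs, final Path path,
         final String storagePolicy) {
       try {
         // Direct call; with reflection gone, nothing is wrapped in InvocationTargetException.
         fs.setStoragePolicy(path, storagePolicy);
         LOG.debug("Set storagePolicy={} for path={}", storagePolicy, path);
       } catch (Exception e) {
         if (!warningMap.containsKey(fs)) {
           warningMap.put(fs, true);
           LOG.warn("Unable to set storagePolicy=" + storagePolicy + " for path=" + path + ". " +
               "DEBUG log level might have more details.", e);
         } else if (LOG.isDebugEnabled()) {
           LOG.debug("Unable to set storagePolicy=" + storagePolicy + " for path=" + path, e);
         }
         // Check for lack of HDFS-7228 directly on the caught exception.
         if (e instanceof RemoteException &&
             HadoopIllegalArgumentException.class.getName().equals(
                 ((RemoteException) e).getClassName())) {
           LOG.debug("Given storage policy, '{}', was rejected and probably isn't a valid policy " +
               "for the version of Hadoop you're running.", storagePolicy);
         // FileSystem's default setStoragePolicy (Hadoop 2.8+, 3.0.0-alpha1+) throws
         // UnsupportedOperationException when the implementation doesn't support policies.
         } else if (e instanceof UnsupportedOperationException) {
           LOG.debug("The underlying FileSystem implementation doesn't support setStoragePolicy.", e);
         }
       }
     }
   }
   ```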

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services
